Example #1
    def __init__(self, context, name, parent, nIn, nOut):
        """
    Constructor, initializes the component.

    Parameters
    ----------

    self: GainMatrix
        Mandatory object handle for Python methods.
    context: visr.SignalFlowContext
        Object providing the sampling frequency and the period (buffer size) for the processing.
    name: string
        Name of the component, must be unique within the containing component.
    parent: visr.CompositeComponent or None
        The containing component, or None if this is a top-level component.
    nIn: int
        Number of input channels.
    nOut:int
        Number of output channels.
    """
        super().__init__(context, name, parent)
        self.audioIn = visr.AudioInputFloat("in", self, nIn)
        self.audioOut = visr.AudioOutputFloat("out", self, nOut)
        self.mtxIn = visr.ParameterInput("gainInput", self,
                                         pml.MatrixParameterFloat.staticType,
                                         pml.SharedDataProtocol.staticType,
                                         pml.MatrixParameterConfig(nOut, nIn))
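A minimal usage sketch for this constructor (assumptions: the VISR Python bindings are importable, the class shown above is named GainMatrix as its docstring suggests, and visr.SignalFlowContext accepts the keyword arguments period and samplingFrequency):

import visr

ctxt = visr.SignalFlowContext(period=64, samplingFrequency=48000)
# Top-level component (parent=None) with 2 inputs and 4 outputs.
gainMtx = GainMatrix(ctxt, "gainMatrix", None, nIn=2, nOut=4)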
Example #2
    def __init__(
            self,
            context,
            name,
            parent,
            numberOfChannels,
            numberOfInputs,
            initialInput=0,
            controlDataType=pml.UnsignedInteger  # Parameter type of the control messages
    ):
        super(AudioSignalSwitch, self).__init__(context, name, parent)
        self.inputs = []
        for inIdx in range(0, numberOfInputs):
            portName = "in_%d" % inIdx
            self.inputs.append(
                visr.AudioInputFloat(portName, self, numberOfChannels))

        self.output = visr.AudioOutputFloat("out", self, numberOfChannels)
        self.controlInput = visr.ParameterInput(
            "controlIn",
            self,
            protocolType=pml.MessageQueueProtocol.staticType,
            parameterType=controlDataType.staticType,
            parameterConfig=pml.EmptyParameterConfig())
        self.activeInput = initialInput
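A sketch of the matching process() method (assumptions, following the conventions of the VISR Python examples: parameter ports expose their protocol endpoint via the .protocol property, message queue inputs provide empty()/front()/pop(), and audio ports provide data()/set()):

    def process(self):
        # Drain the control message queue; the last received index wins.
        protocolIn = self.controlInput.protocol
        while not protocolIn.empty():
            newInput = int(protocolIn.front().value)
            protocolIn.pop()
            if newInput >= len(self.inputs):
                raise IndexError("Control input exceeds the number of inputs.")
            self.activeInput = newInput
        # Copy the currently selected input signal block to the output.
        self.output.set(self.inputs[self.activeInput].data())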
Example #3
    def __init__(self,
                 context,
                 name,
                 parent,
                 numberOfObjects,
                 port,
                 baud,
                 maxHoaOrder,
                 sofaFile,
                 interpolationSteps,
                 headTracking,
                 udpReceivePort=4242,
                 headTrackingCalibrationPort=None):
        super(HoaBinauralRendererSAW, self).__init__(context, name, parent)
        self.hoaBinauralRenderer = HoaBinauralRendererSerial(
            context,
            "HoaBinauralRendererSerial",
            self,
            numberOfObjects,
            port,
            baud,
            maxHoaOrder,
            sofaFile,
            interpolationSteps,
            headTracking,
            headTrackingCalibrationPort=headTrackingCalibrationPort)
        self.sceneReceiver = rcl.UdpReceiver(
            context,
            "SceneReceiver",
            self,
            port=udpReceivePort,
            mode=rcl.UdpReceiver.Mode.Asynchronous)
        self.sceneDecoder = rcl.SceneDecoder(context, "SceneDecoder", self)
        self.parameterConnection(
            self.sceneReceiver.parameterPort("messageOutput"),
            self.sceneDecoder.parameterPort("datagramInput"))
        self.parameterConnection(
            self.sceneDecoder.parameterPort("objectVectorOutput"),
            self.hoaBinauralRenderer.parameterPort("objectVector"))

        self.objectSignalInput = visr.AudioInputFloat("audioIn", self,
                                                      numberOfObjects)
        self.binauralOutput = visr.AudioOutputFloat("audioOut", self, 2)

        self.audioConnection(self.objectSignalInput,
                             self.hoaBinauralRenderer.audioPort("audioIn"))
        self.audioConnection(self.hoaBinauralRenderer.audioPort("audioOut"),
                             self.binauralOutput)

        if headTrackingCalibrationPort is not None:
            self.calibrationTriggerReceiver = rcl.UdpReceiver(
                context,
                "CalibrationTriggerReceiver",
                self,
                port=headTrackingCalibrationPort)
            self.parameterConnection(
                self.calibrationTriggerReceiver.parameterPort("messageOutput"),
                self.hoaBinauralRenderer.parameterPort(
                    "headTrackingCalibration"))
Example #4
    def __init__(self, context, name, parent, numInputs, width):
        super(PythonAdder, self).__init__(context, name, parent)
        self.output = visr.AudioOutputFloat("out", self, width)
        self.inputs = []
        for inputIdx in range(0, numInputs):
            portName = "in%d" % inputIdx
            inPort = visr.AudioInputFloat(portName, self, width)
            self.inputs.append(inPort)
Example #5
    def __init__(self,
                 context,
                 name,
                 parent,
                 numLoudspeakers,
                 port,
                 baud,
                 sofaFile,
                 enableSerial=True,
                 dynITD=True,
                 hrirInterp=True,
                 irTruncationLength=None,
                 headTrackingCalibrationPort=None,
                 switchUdpPort=12345):
        super(Comparison, self).__init__(context, name, parent)
        self.input = visr.AudioInputFloat("in", self, 2)
        self.output = visr.AudioOutputFloat("out", self, 2)

        self.renderer = VirtualLoudspeakerRendererSerial(
            context,
            "renderer",
            self,
            numLoudspeakers,
            port,
            baud,
            sofaFile,
            enableSerial=enableSerial,
            dynITD=dynITD,
            hrirInterp=hrirInterp,
            irTruncationLength=irTruncationLength,
            headTrackingCalibrationPort=headTrackingCalibrationPort)

        self.controlReceiver = rcl.UdpReceiver(
            context,
            "ControlReceiver",
            self,
            port=switchUdpPort,
            mode=rcl.UdpReceiver.Mode.Asynchronous)
        self.switch = AudioSignalSwitch(context,
                                        "OutputSwitch",
                                        self,
                                        numberOfChannels=2,
                                        numberOfInputs=2,
                                        controlDataType=pml.Float)
        self.controlDecoder = rcl.ScalarOscDecoder(context, "ControlDecoder",
                                                   self)
        self.controlDecoder.setup(dataType='float')
        self.parameterConnection(
            self.controlReceiver.parameterPort("messageOutput"),
            self.controlDecoder.parameterPort("datagramInput"))
        self.parameterConnection(self.controlDecoder.parameterPort("dataOut"),
                                 self.switch.parameterPort("controlIn"))

        self.audioConnection(self.input, self.renderer.audioPort("audioIn"))
        self.audioConnection(self.renderer.audioPort("audioOut"),
                             self.switch.audioPort("in_0"))
        self.audioConnection(self.input, self.switch.audioPort("in_1"))
        self.audioConnection(self.switch.audioPort("out"), self.output)
    def __init__(self,
                 context,
                 name,
                 parent,
                 numberOfChannels,
                 measurePeriod=0.4,
                 channelWeights=None,
                 audioOut=False):
        # Call the base class constructor
        super(LoudnessMeter, self).__init__(context, name, parent)
        # Define an audio input port with name "audioIn" and width (number of signal waveforms) numberOfChannels
        self.audioInput = visr.AudioInputFloat("audioIn", self,
                                               numberOfChannels)

        # If the option is set, add an audio output to put out the K-weighted input signals.
        # Some audio interfaces don't like configs with no outputs.
        if audioOut:
            self.audioOutput = visr.AudioOutputFloat("audioOut", self,
                                                     numberOfChannels)
        else:
            self.audioOutput = None

        # Define a parameter output port with type "Float" and communication protocol "MessageQueue".
        # MessageQueue means that all computed data are held in a first-in-first-out queue,
        # which decouples the parameter update rate from the buffer size.
        self.loudnessOut = visr.ParameterOutput(
            "loudnessOut", self, pml.Float.staticType,
            pml.MessageQueueProtocol.staticType, pml.EmptyParameterConfig())

        # %% Setup data used in the process() function.

        # Round the measurement period up to the next integer multiple of the buffer period.
        numMeanBlocks = int(
            np.ceil(
                (measurePeriod * context.samplingFrequency) / context.period))
        self.pastPower = np.zeros(numMeanBlocks, dtype=np.float32)

        # IIR filter state to be saved between process() calls.
        self.filterState = np.zeros((2, numberOfChannels, 2), dtype=np.float32)

        # IIR coefficients for K-weighting, taken from ITU-R BS.1770-4
        # https://www.itu.int/dms_pubrec/itu-r/rec/bs/R-REC-BS.1770-4-201510-I!!PDF-E.pdf
        self.Kweighting = np.asarray([[
            1.53512485958697, -2.69169618940638, 1.19839281085285, 1.0,
            -1.69065929318241, 0.73248077421585
        ], [1.0, -2.0, 1.0, 1.0, -1.99004745483398, 0.99007225036621]],
                                     dtype=np.float32)

        # Initialise weightings for the channels.
        # Use unit weighting if none are given
        if channelWeights is not None:
            self.channelWeights = np.asarray(channelWeights, dtype=np.float32)
            if self.channelWeights.shape[0] != numberOfChannels:
                raise ValueError(
                    "The channelWeights argument does not match the number of channels"
                )
        else:
            self.channelWeights = np.ones(numberOfChannels, dtype=np.float32)
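As a worked example of the rounding above (values illustrative): with the default measurePeriod of 0.4 s, a sampling frequency of 48000 Hz, and a block size of 1024 samples, the loudness is averaged over ceil(0.4 * 48000 / 1024) = ceil(18.75) = 19 blocks, slightly more than the requested period.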
Example #7
    def __init__( self, context, name, parent,
                 loudspeakerConfigFiles,
                 numberOfInputs,
                 numberOfOutputs,
                 interpolationPeriod,
                 diffusionFilterFile,
                 trajectoryPositions,
                 trajectoryUpdateSamples = None,
                 sourceLevel=1.0,
                 sourceObjectId = 0,
                 controlReceivePort=8889,
                 trackingConfiguration='',
                 ):
        super(RealTimeMultiRendererTrajectory,self).__init__( context, name, parent)
        self.input = visr.AudioInputFloat( "in", self, numberOfInputs )
        self.output = visr.AudioOutputFloat( "out", self, numberOfOutputs )

        rendererConfigs = []
        for cfgFile in loudspeakerConfigFiles:
            rendererConfigs.append( panning.LoudspeakerArray(cfgFile) )

        diffFilters = np.array(pml.MatrixParameterFloat.fromAudioFile( diffusionFilterFile ))

        if trajectoryUpdateSamples is None:
            trajectoryUpdateSamples = self.period()

        self.multiRenderer = MultiRenderer(context, name, self,
                                           loudspeakerConfigs=rendererConfigs,
                                           numberOfInputs=numberOfInputs,
                                           numberOfOutputs=numberOfOutputs,
                                           interpolationPeriod=trajectoryUpdateSamples,
                                           diffusionFilters=diffFilters,
                                           trackingConfiguration='',
                                           controlDataType=pml.Float)
        self.audioConnection( self.input, self.multiRenderer.audioPort("in" ) )
        self.audioConnection( self.multiRenderer.audioPort("out" ), self.output )

        self.sceneGenerator = PointSourceTrajectoryGenerator( context, "SceneDecoder", self,
                 positions=trajectoryPositions,
                 updateRateSamples = trajectoryUpdateSamples,
                 objectId = sourceObjectId,
                 groupId = 0,
                 priority = 0,
                 objectLevel = sourceLevel )

        self.parameterConnection( self.sceneGenerator.parameterPort( "objectVectorOutput"),
                                 self.multiRenderer.parameterPort( "objectIn" ) )

        self.controlReceiver = rcl.UdpReceiver( context, "ControlReceiver", self,
                                               port=controlReceivePort,
                                               mode=rcl.UdpReceiver.Mode.Asynchronous)
        self.controlDecoder = rcl.ScalarOscDecoder( context, "ControlDecoder", self )
        self.controlDecoder.setup(dataType='float')
        self.parameterConnection( self.controlReceiver.parameterPort("messageOutput"),
                                 self.controlDecoder.parameterPort("datagramInput") )
        self.parameterConnection( self.controlDecoder.parameterPort( "dataOut"),
                                 self.multiRenderer.parameterPort( "controlIn" ) )
Example #8
    def __init__(self,
                 context,
                 name,
                 parent,
                 loudspeakerConfig,
                 numberOfInputs,
                 rendererOutputs,
                 interpolationPeriod,
                 diffusionFilters,
                 trackingConfiguration,
                 brirRouting,
                 brirFilters,
                 scenePort=4242,
                 reverbConfiguration=''):
        super(ReverbToBinaural, self).__init__(context, name, parent)
        self.coreRenderer = signalflows.BaselineRenderer(
            context,
            'renderer',
            self,
            loudspeakerConfig=loudspeakerConfig,
            numberOfInputs=numberOfInputs,
            numberOfOutputs=rendererOutputs,
            interpolationPeriod=interpolationPeriod,
            diffusionFilters=diffusionFilters,
            reverbConfig=reverbConfiguration,
            sceneReceiverPort=scenePort,
            trackingConfiguration=trackingConfiguration)
        numFilters = brirFilters.numberOfRows
        firLength = brirFilters.numberOfColumns
        numRoutings = brirRouting.size
        self.convolver = rcl.FirFilterMatrix(
            context,
            'convolver',
            self,
            numberOfInputs=rendererOutputs,
            numberOfOutputs=2,
            maxFilters=numFilters,
            filterLength=firLength,
            maxRoutings=numRoutings,
            filters=brirFilters,
            routings=brirRouting,
            controlInputs=rcl.FirFilterMatrix.ControlPortConfig.NoInputs)
        self.audioIn = visr.AudioInputFloat("audioIn", self, numberOfInputs)
        self.audioOut = visr.AudioOutputFloat("audioOut", self, 2)
        self.audioConnection(self.audioIn,
                             self.coreRenderer.audioPort("input"))
        self.audioConnection(self.coreRenderer.audioPort("output"),
                             self.convolver.audioPort("in"))
        self.audioConnection(self.convolver.audioPort("out"), self.audioOut)
        if len(trackingConfiguration) > 0:
            self.posIn = visr.ParameterInput(
                "posIn", self, pml.ListenerPosition.staticType,
                pml.DoubleBufferingProtocol.staticType,
                pml.EmptyParameterConfig())
            self.parameterConnection(
                self.posIn,
                self.coreRenderer.parameterPort("trackingPositionInput"))
    def __init__(self,
                 context,
                 name,
                 parent,
                 numberOfChannels,
                 dftLength,
                 hopSize,
                 window=None,
                 windowLength=None,
                 fftImplementation="default"):
        super(TFProcessing, self).__init__(context, name, parent)
        if window is not None:
            self.ForwardTransform = rcl.TimeFrequencyTransform(
                context,
                "FwdTransform",
                self,
                numberOfChannels=numberOfChannels,
                dftLength=dftLength,
                window=window,
                hopSize=hopSize,
                fftImplementation=fftImplementation)
        elif windowLength is not None:
            self.ForwardTransform = rcl.TimeFrequencyTransform(
                context,
                "FwdTransform",
                self,
                numberOfChannels=numberOfChannels,
                dftLength=dftLength,
                windowLength=windowLength,
                hopSize=hopSize,
                fftImplementation=fftImplementation)
        else:
            raise ValueError(
                "One of the arguments 'window' or 'windowLength' must be given."
            )

        self.InverseTransform = rcl.TimeFrequencyInverseTransform(
            context,
            "InverseTransform",
            self,
            numberOfChannels=numberOfChannels,
            dftLength=dftLength,
            hopSize=hopSize,
            fftImplementation=fftImplementation)
        self.audioIn = visr.AudioInputFloat("in", self, numberOfChannels)
        self.audioOut = visr.AudioOutputFloat("out", self, numberOfChannels)
        self.audioConnection(self.audioIn,
                             self.ForwardTransform.audioPort("in"))
        self.audioConnection(self.InverseTransform.audioPort("out"),
                             self.audioOut)
        self.parameterConnection(self.ForwardTransform.parameterPort("out"),
                                 self.InverseTransform.parameterPort("in"))
Example #10
    def __init__(self, context, name, parent, numberOfObjects, lspConfig):
        """
        Constructor, instantiates the component, all contained sub-components,
        and their connections.

        Parameters
        ----------

        self: VbapRenderer
            self argument, mandatory for Python methods.
        context: visr.SignalFlowContext
            A context object containing the sampling frequency and the block size.
            That is a mandatory parameter for VISR components.
        name: string
            Name of the component to be identified within a containing component.
        parent: visr.CompositeComponent
            A containing component, or None if this is the top-level component.
        numberOfObjects: int
            The maximum number of objects to be rendered.
        lspConfig: panning.LoudspeakerArray
            Object containing the loudspeaker positions.
        """
        numLsp = lspConfig.numberOfRegularLoudspeakers
        super().__init__(context, name, parent)
        self.audioIn = visr.AudioInputFloat("in", self, numberOfObjects)
        self.audioOut = visr.AudioOutputFloat("out", self, numLsp)
        self.objectIn = visr.ParameterInput(
            "objects", self, pml.ObjectVector.staticType,
            pml.DoubleBufferingProtocol.staticType, pml.EmptyParameterConfig())
        self.calculator = rcl.PanningCalculator(context, "VbapGainCalculator",
                                                self, numberOfObjects,
                                                lspConfig)
        self.matrix = rcl.GainMatrix(context,
                                     "GainMatrix",
                                     self,
                                     numberOfObjects,
                                     numLsp,
                                     interpolationSteps=context.period,
                                     initialGains=0.0)
        # Uncomment this and comment the lines above to use the simple, Python-based
        # GainMatrix class instead.
        #        self.matrix = GainMatrix( context, "GainMatrix", self, numberOfObjects,
        #                                  numLsp )
        self.audioConnection(self.audioIn, self.matrix.audioPort("in"))
        self.audioConnection(self.matrix.audioPort("out"), self.audioOut)
        self.parameterConnection(
            self.objectIn, self.calculator.parameterPort("objectVectorInput"))
        self.parameterConnection(self.calculator.parameterPort("vbapGains"),
                                 self.matrix.parameterPort("gainInput"))
Example #11
    def __init__(self, context, name, parent, numberOfObjects, lspConfig,
                 nwPort):
        """
        Constructor, instantiates the component, all contained sub-components,
        and their connections.

        Parameters
        ----------

        self: VbapRenderer
            self argument, mandatory for Python methods.
        context: visr.SignalFlowContext
            A context object containing the sampling frequency and the block size.
            That is a mandatory parameter for VISR components.
        name: string
            Name of the component to be identified within a containing component.
        parent: visr.CompositeComponent
            A containing component, or None if this is the top-level component.
        numberOfObjects: int
            The maximum number of objects to be rendered.
        lspConfig: panning.LoudspeakerArray
            Object containing the loudspeaker positions.
        nwPort: int
            Port number of a UDP connection to receive object metadata messages.
        """
        super().__init__(context, name, parent)
        if not isinstance(lspConfig, panning.LoudspeakerArray):
            lspConfig = panning.LoudspeakerArray(lspConfig)
        self.audioIn = visr.AudioInputFloat("in", self, numberOfObjects)
        self.audioOut = visr.AudioOutputFloat(
            "out", self, lspConfig.numberOfRegularLoudspeakers)
        self.receiver = rcl.UdpReceiver(context,
                                        "NetworkReceiver",
                                        self,
                                        port=nwPort)
        self.decoder = rcl.SceneDecoder(context, "SceneDecoder", self)
        self.panner = VbapRenderer(context, "VbapPanner", self,
                                   numberOfObjects, lspConfig)
        self.audioConnection(self.audioIn, self.panner.audioPort("in"))
        self.audioConnection(self.panner.audioPort("out"), self.audioOut)
        self.parameterConnection(self.receiver.parameterPort("messageOutput"),
                                 self.decoder.parameterPort("datagramInput"))
        self.parameterConnection(
            self.decoder.parameterPort("objectVectorOutput"),
            self.panner.parameterPort("objects"))
    def __init__(self, context, name, parent, numberOfInputs, numberOfOutputs,
                 arrayConfig, interpolationSteps):
        super(StandardVbap, self).__init__(context, name, parent)
        self.input = visr.AudioInputFloat("in", self, numberOfInputs)
        self.output = visr.AudioOutputFloat("out", self, numberOfOutputs)

        self.objectInput = visr.ParameterInput(
            "objectVectorInput",
            self,
            parameterType=pml.ObjectVector.staticType,
            protocolType=pml.DoubleBufferingProtocol.staticType,
            parameterConfig=pml.EmptyParameterConfig())
        self.panningCalculator = rcl.PanningCalculator(
            context,
            "GainCalculator",
            self,
            arrayConfig=arrayConfig,
            numberOfObjects=numberOfInputs,
            separateLowpassPanning=False)

        self.panningMatrix = rcl.GainMatrix(
            context,
            "PanningMatrix",
            self,
            numberOfInputs=numberOfInputs,
            numberOfOutputs=numberOfOutputs,
            interpolationSteps=interpolationSteps,
            initialGains=0.0,
            controlInput=True)

        self.audioConnection(self.input, self.panningMatrix.audioPort("in"))
        self.audioConnection(self.panningMatrix.audioPort("out"), self.output)

        self.parameterConnection(
            self.objectInput,
            self.panningCalculator.parameterPort("objectVectorInput"))
        self.parameterConnection(
            self.panningCalculator.parameterPort("gainOutput"),
            self.panningMatrix.parameterPort("gainInput"))
Example #13
    def __init__(self, context, name, parent, numberOfObjects, lspArray,
                 nwPort):
        super().__init__(context, name, parent)
        if not isinstance(lspArray, panning.LoudspeakerArray):
            lspArray = panning.LoudspeakerArray(lspArray)
        self.audioIn = visr.AudioInputFloat("in", self, numberOfObjects)
        self.audioOut = visr.AudioOutputFloat(
            "out", self, lspArray.numberOfRegularLoudspeakers)
        self.receiver = rcl.UdpReceiver(context,
                                        "NetworkReceiver",
                                        self,
                                        port=nwPort)
        self.decoder = rcl.SceneDecoder(context, "SceneDecoder", self)
        self.panner = VbapL2Renderer(context, "VbapPanner", self,
                                     numberOfObjects, lspArray)
        self.audioConnection(self.audioIn, self.panner.audioPort("in"))
        self.audioConnection(self.panner.audioPort("out"), self.audioOut)
        self.parameterConnection(self.receiver.parameterPort("messageOutput"),
                                 self.decoder.parameterPort("datagramInput"))
        self.parameterConnection(
            self.decoder.parameterPort("objectVectorOutput"),
            self.panner.parameterPort("objects"))
Example #14
    def __init__(self, context, name, parent, numberOfObjects, lspArray):
        numLsp = lspArray.numberOfRegularLoudspeakers
        super().__init__(context, name, parent)
        self.audioIn = visr.AudioInputFloat("in", self, numberOfObjects)
        self.audioOut = visr.AudioOutputFloat("out", self, numLsp)
        self.objectIn = visr.ParameterInput(
            "objects", self, pml.ObjectVector.staticType,
            pml.DoubleBufferingProtocol.staticType, pml.EmptyParameterConfig())
        self.calculator = VbapL2Panner(context, "VbapGainCalculator", self,
                                       numberOfObjects, lspArray)
        self.matrix = rcl.GainMatrix(context,
                                     "GainMatrix",
                                     self,
                                     numberOfObjects,
                                     numLsp,
                                     interpolationSteps=context.period,
                                     initialGains=0.0)
        self.audioConnection(self.audioIn, self.matrix.audioPort("in"))
        self.audioConnection(self.matrix.audioPort("out"), self.audioOut)
        self.parameterConnection(self.objectIn,
                                 self.calculator.parameterPort("objects"))
        self.parameterConnection(self.calculator.parameterPort("gains"),
                                 self.matrix.parameterPort("gainInput"))
    def __init__(self,
                 context,
                 name,
                 parent,
                 loudspeakerConfig,
                 numberOfInputs,
                 numberOfOutputs,
                 interpolationSteps=None,
                 controlDataType=pml.Float):
        super(PanningComparison, self).__init__(context, name, parent)

        if interpolationSteps is None:
            interpolationSteps = context.period

        numberOfRenderers = 2

        self.input = visr.AudioInputFloat("in", self, numberOfInputs)
        self.output = visr.AudioOutputFloat("out", self, numberOfOutputs)
        self.objectInput = visr.ParameterInput(
            "objectIn",
            self,
            protocolType=pml.DoubleBufferingProtocol.staticType,
            parameterType=pml.ObjectVector.staticType,
            parameterConfig=pml.EmptyParameterConfig())
        self.controlInput = visr.ParameterInput(
            "controlIn",
            self,
            protocolType=pml.MessageQueueProtocol.staticType,
            parameterType=controlDataType.staticType,
            parameterConfig=pml.EmptyParameterConfig())

        self.signalSwitch = AudioSignalSwitch(context,
                                              "OutputSwitch",
                                              self,
                                              numberOfChannels=numberOfOutputs,
                                              numberOfInputs=numberOfRenderers,
                                              controlDataType=controlDataType)
        self.parameterConnection(self.controlInput,
                                 self.signalSwitch.parameterPort('controlIn'))
        self.audioConnection(self.signalSwitch.audioPort('out'), self.output)

        self.standardRenderer = StandardVbap(
            context,
            "StandardVbap",
            self,
            arrayConfig=loudspeakerConfig,
            numberOfInputs=numberOfInputs,
            numberOfOutputs=numberOfOutputs,
            interpolationSteps=interpolationSteps)

        self.lfHfRenderer = LfHfVbap(context,
                                     "LfHfVbap",
                                     self,
                                     arrayConfig=loudspeakerConfig,
                                     numberOfInputs=numberOfInputs,
                                     numberOfOutputs=numberOfOutputs,
                                     interpolationSteps=interpolationSteps)
        # Use default filters for the moment
        self.audioConnection(self.input, self.standardRenderer.audioPort('in'))
        self.audioConnection(self.standardRenderer.audioPort('out'),
                             self.signalSwitch.audioPort('in_0'))
        self.parameterConnection(
            self.objectInput,
            self.standardRenderer.parameterPort('objectVectorInput'))

        self.audioConnection(self.input, self.lfHfRenderer.audioPort('in'))
        self.audioConnection(self.lfHfRenderer.audioPort('out'),
                             self.signalSwitch.audioPort('in_1'))
        self.parameterConnection(
            self.objectInput,
            self.lfHfRenderer.parameterPort('objectVectorInput'))
Example #16
    def __init__(
            self,
            context,
            name,
            parent,
            *,  # Only keyword arguments after this point
            numberOfObjects,
            maxHoaOrder,
            sofaFile=None,
            decodingFilters=None,
            interpolationSteps=None,
            headTracking=True,
            headOrientation=None,
            objectChannelAllocation=False,
            fftImplementation="default",
            headTrackingReceiver=None,
            headTrackingPositionalArguments=None,
            headTrackingKeywordArguments=None,
            sceneReceiveUdpPort=None):
        """
        Constructor.

        Parameters
        ----------
        context : visr.SignalFlowContext
            Standard visr.Component construction argument; holds the block size and the sampling frequency.
        name : string
            Name of the component, a standard visr.Component construction argument.
        parent : visr.CompositeComponent
            Containing component if there is one, None if this is a top-level component of the signal flow.
        numberOfObjects : int
            The number of audio objects to be rendered.
        maxHoaOrder: int
            HOA order used for encoding the point source and plane wave objects.
        sofaFile: string, optional
            A SOFA file containing the HOA decoding filters. These are expected as a
            2 x (maxHoaOrder+1)^2 array in the field Data.IR.
        decodingFilters : numpy.ndarray, optional
            Alternative way to provide the HOA decoding filters. Expects a
            2 x (maxHoaOrder+1)^2 matrix containing FIR coefficients.
        interpolationSteps: int, optional
           Number of samples to transition to new object positions after an update.
        headOrientation : array-like
            Head orientation in spherical coordinates (2- or 3-element vector or list). Either a static orientation (when no tracking is used),
            or the initial view direction.
        headTracking: bool
            Whether dynamic head tracking is active.
        objectChannelAllocation: bool
            Whether the processing resources are allocated from a pool of resources
            (True), or whether fixed processing resources statically tied to the audio signal channels are used.
            Not implemented at the moment, so leave the default value (False).
        fftImplementation: string, optional
            The FFT implementation to use. Default value enables VISR's default
            FFT library for the platform.
        headTrackingReceiver: class type, optional
            Class of the head tracking receiver; None (default value) disables dynamic head tracking.
        headTrackingPositionalArguments: tuple, optional
            Positional arguments passed to the constructor of the head tracking receiver object.
            Must be a tuple. If there is only a single argument, a trailing comma must be added.
        headTrackingKeywordArguments: dict, optional
            Keyword arguments passed to the constructor of the head tracking receiver. Must be a dictionary (dict).
        sceneReceiveUdpPort: int, optional
            A UDP port number where scene object metadata (in the S3A JSON format) is to be received.
            If not given (default), no network receiver is instantiated, and the component exposes a
            top-level parameter input port "objectVector" instead.
        """
        super(RealtimeHoaObjectToBinauralRenderer,
              self).__init__(context, name, parent)
        self.objectSignalInput = visr.AudioInputFloat("audioIn", self,
                                                      numberOfObjects)
        self.binauralOutput = visr.AudioOutputFloat("audioOut", self, 2)

        enableTracking = (headTrackingReceiver is not None)

        self.hoaBinauralRenderer = HoaObjectToBinauralRenderer(
            context,
            "HoaBinauralRenderer",
            self,
            numberOfObjects=numberOfObjects,
            maxHoaOrder=maxHoaOrder,
            sofaFile=sofaFile,
            decodingFilters=decodingFilters,
            interpolationSteps=interpolationSteps,
            headTracking=headTracking,
            headOrientation=headOrientation,
            fftImplementation=fftImplementation)

        if sceneReceiveUdpPort is None:
            self.objectVectorInput = visr.ParameterInput(
                "objectVector", self, pml.ObjectVector.staticType,
                pml.DoubleBufferingProtocol.staticType,
                pml.EmptyParameterConfig())
            self.parameterConnection(
                self.objectVectorInput,
                self.hoaBinauralRenderer.parameterPort("objects"))
        else:
            self.sceneReceiver = rcl.UdpReceiver(context,
                                                 "SceneReceiver",
                                                 self,
                                                 port=int(sceneReceiveUdpPort))
            self.sceneDecoder = rcl.SceneDecoder(context, "SceneDecoder", self)
            self.parameterConnection(
                self.sceneReceiver.parameterPort("messageOutput"),
                self.sceneDecoder.parameterPort("datagramInput"))
            self.parameterConnection(
                self.sceneDecoder.parameterPort("objectVectorOutput"),
                self.hoaBinauralRenderer.parameterPort("objects"))

        if enableTracking:
            if headTrackingPositionalArguments is None:
                headTrackingPositionalArguments = ()
            if headTrackingKeywordArguments is None:
                headTrackingKeywordArguments = {}
            self.trackingDevice = headTrackingReceiver(
                context, "HeadTrackingReceiver", self,
                *headTrackingPositionalArguments,
                **headTrackingKeywordArguments)
            self.parameterConnection(
                self.trackingDevice.parameterPort("orientation"),
                self.hoaBinauralRenderer.parameterPort("tracking"))

        self.audioConnection(self.objectSignalInput,
                             self.hoaBinauralRenderer.audioPort("audioIn"))
        self.audioConnection(self.hoaBinauralRenderer.audioPort("audioOut"),
                             self.binauralOutput)
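A hypothetical instantiation sketch illustrating the head tracking arguments; SerialHeadTracker, the device path, and the keyword arguments are placeholders. Note the trailing comma that makes the positional-argument container a tuple, as required by the docstring above:

renderer = RealtimeHoaObjectToBinauralRenderer(
    ctxt, "binauralRenderer", None,
    numberOfObjects=4,
    maxHoaOrder=3,
    sofaFile="hoaDecodingFilters.sofa",
    headTrackingReceiver=SerialHeadTracker,
    headTrackingPositionalArguments=("/dev/ttyUSB0",),  # single-element tuple
    headTrackingKeywordArguments={"baudRate": 115200})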
Example #17
    def __init__(
            self,
            context,
            name,
            parent,
            *,  # No positional arguments past this point.
            sofaFile=None,
            hrirPositions=None,
            hrirData=None,
            hrirDelays=None,
            headOrientation=None,
            dynamicITD=True,
            hrirInterpolation=True,
            irTruncationLength=None,
            filterCrossfading=False,
            interpolatingConvolver=False,
            staticLateSofaFile=None,
            staticLateFilters=None,
            staticLateDelays=None,
            headTrackingReceiver=None,
            headTrackingPositionalArguments=None,
            headTrackingKeywordArguments=None,
            fftImplementation='default'):
        """
        Constructor.

        Parameters
        ----------
        context : visr.SignalFlowContext
            Standard visr.Component construction argument, a structure holding the block size and the sampling frequency.
        name : string
            Name of the component, a standard visr.Component construction argument.
        parent : visr.CompositeComponent
            Containing component if there is one, None if this is a top-level component of the signal flow.
        sofaFile: string
            BRIR database provided as a SOFA file. This is an alternative to the hrirPositions, hrirData
            (and optionally hrirDelays) arguments. Default None means that hrirData and hrirPositions must be provided.
        hrirPositions : numpy.ndarray
            Optional way to provide the measurement grid for the BRIR listener view directions. If a
            SOFA file is provided, this is optional and overrides the listener view data in the file.
            Otherwise this argument is mandatory. Dimension #grid directions x (dimension of position argument)
        hrirData: numpy.ndarray
            Optional way to provide the BRIR data. Dimension: #grid directions x #ears (2) x #loudspeakers x #ir length
        hrirDelays: numpy.ndarray
            Optional BRIR delays. If a SOFA file is given, this argument overrides a potential delay setting from the file. Otherwise, no extra delays
            are applied unless this option is provided. Dimension: #grid directions x #ears (2) x #loudspeakers
        headOrientation : array-like
            Head orientation in spherical coordinates (2- or 3-element vector or list). Either a static orientation (when no tracking is used),
            or the initial view direction.
        dynamicITD: bool
            Whether the delay part of the BRIRs is applied separately from the (delay-free) BRIRs.
        hrirInterpolation: bool
            Whether BRIRs are interpolated for the current head orientation. If False, the nearest neighbour is used.
        irTruncationLength: int
            Maximum number of samples of the BRIR impulse responses. Functional only if the BRIR is provided in a SOFA file.
        filterCrossfading: bool
            Whether dynamic BRIR changes are crossfaded (True) or switched immediately (False)
        interpolatingConvolver: bool
            Whether the interpolating convolver option is used. If True, the convolver stores all BRIR filters, and the controller sends only
            interpolation coefficient messages to select the BRIR filters and their interpolation ratios.
        staticLateSofaFile: string, optional
            Name of a file containing a static (i.e., head orientation-independent) late part of the BRIRs.
            Optional argument, might be used as an alternative to the staticLateFilters argument, but these options are mutually exclusive.
            If neither is given, no static late part is used. The fields 'Data.IR' and the 'Data.Delay' are used.
        staticLateFilters: numpy.ndarray, optional
            Matrix containing a static, head position-independent part of the BRIRs. This option is mutually exclusive to
            staticLateSofaFile. If neither is given, no separate static late part is rendered.
            Dimension: 2 x #numberOfLoudspeakers x firLength
        staticLateDelays: numpy.ndarray, optional
            Time delay of the late static BRIRs per loudspeaker. Optional attribute,
            only used if late static BRIR coefficients are provided.
            Dimension: 2 x #loudspeakers
        fftImplementation: string
            The FFT implementation to be used in the convolver. The default value selects the system default.
        """

        super(RealtimeVirtualLoudspeakerRenderer,
              self).__init__(context, name, parent)

        # Handle loading of HRIR data from either a SOFA file or the matrix arguments.
        if (hrirData is not None) == (sofaFile is not None):
            raise ValueError(
                "Exactly one of the arguments sofaFile and hrirData must be present."
            )
        if sofaFile is not None:
            [sofaHrirPositions, hrirData, sofaHrirDelays
             ] = readSofaFile(sofaFile,
                              truncationLength=irTruncationLength,
                              truncationWindowLength=16)
            # If hrirDelays is not provided as an argument, use the one retrieved from the SOFA file
            if hrirDelays is None:
                hrirDelays = sofaHrirDelays
            # Use the positions obtained from the SOFA file only if the argument is not set
            if hrirPositions is None:
                hrirPositions = sofaHrirPositions

        # Crude check for 'horizontal-only' listener view directions.
        if np.max(np.abs(hrirPositions[:, 1])) < deg2rad(1):
            # Keep azimuth and radius only, i.e., transform to polar coordinates.
            hrirPositions = hrirPositions[:, [0, 2]]

        numberOfLoudspeakers = hrirData.shape[2]

        self.objectSignalInput = visr.AudioInputFloat("audioIn", self,
                                                      numberOfLoudspeakers)
        self.binauralOutput = visr.AudioOutputFloat("audioOut", self, 2)

        enableTracking = (headTrackingReceiver is not None)

        self.virtualLoudspeakerRenderer = VirtualLoudspeakerRenderer(
            context,
            "VirtualLoudspeakerRenderer",
            self,
            hrirPositions=hrirPositions,
            hrirData=hrirData,
            hrirDelays=hrirDelays,
            headOrientation=None,
            headTracking=enableTracking,
            dynamicITD=dynamicITD,
            hrirInterpolation=hrirInterpolation,
            irTruncationLength=irTruncationLength,
            filterCrossfading=filterCrossfading,
            interpolatingConvolver=interpolatingConvolver,
            staticLateSofaFile=staticLateSofaFile,
            staticLateFilters=staticLateFilters,
            staticLateDelays=staticLateDelays,
            fftImplementation=fftImplementation)
        if enableTracking:
            if headTrackingPositionalArguments is None:
                headTrackingPositionalArguments = ()
            if headTrackingKeywordArguments is None:
                headTrackingKeywordArguments = {}
            self.trackingDevice = headTrackingReceiver(
                context, "HeadTrackingReceiver", self,
                *headTrackingPositionalArguments,
                **headTrackingKeywordArguments)
            self.parameterConnection(
                self.trackingDevice.parameterPort("orientation"),
                self.virtualLoudspeakerRenderer.parameterPort("tracking"))

        self.audioConnection(
            self.objectSignalInput,
            self.virtualLoudspeakerRenderer.audioPort("audioIn"))
        self.audioConnection(
            self.virtualLoudspeakerRenderer.audioPort("audioOut"),
            self.binauralOutput)
Example #18
    def __init__(self,
                 context,
                 name,
                 parent,
                 numberOfObjects,
                 port,
                 baud,
                 sofaFile,
                 enableSerial=True,
                 dynamicITD=True,
                 dynamicILD=True,
                 hrirInterpolation=False,
                 udpReceivePort=4242,
                 headTrackingCalibrationPort=None,
                 filterCrossfading=False):
        super(DynamicBinauralRendererSAW, self).__init__(context, name, parent)
        self.dynamicBinauralRenderer = DynamicBinauralRendererSerial(
            context,
            "DynamicBinauralRenderer",
            self,
            numberOfObjects,
            port,
            baud,
            sofaFile,
            enableSerial=enableSerial,
            dynITD=dynamicITD,
            dynILD=dynamicILD,
            hrirInterp=hrirInterpolation,
            headTrackingCalibrationPort=headTrackingCalibrationPort,
            filterCrossfading=filterCrossfading)

        self.sceneReceiver = rcl.UdpReceiver(
            context,
            "SceneReceiver",
            self,
            port=udpReceivePort,
            mode=rcl.UdpReceiver.Mode.Asynchronous)
        self.sceneDecoder = rcl.SceneDecoder(context, "SceneDecoder", self)
        self.parameterConnection(
            self.sceneReceiver.parameterPort("messageOutput"),
            self.sceneDecoder.parameterPort("datagramInput"))
        self.parameterConnection(
            self.sceneDecoder.parameterPort("objectVectorOutput"),
            self.dynamicBinauralRenderer.parameterPort("objectVector"))

        self.objectSignalInput = visr.AudioInputFloat("audioIn", self,
                                                      numberOfObjects)
        self.binauralOutput = visr.AudioOutputFloat("audioOut", self, 2)

        self.audioConnection(self.objectSignalInput,
                             self.dynamicBinauralRenderer.audioPort("audioIn"))
        self.audioConnection(
            self.dynamicBinauralRenderer.audioPort("audioOut"),
            self.binauralOutput)

        if headTrackingCalibrationPort is not None:
            self.calibrationTriggerReceiver = rcl.UdpReceiver(
                context,
                "CalibrationTriggerReceiver",
                self,
                port=headTrackingCalibrationPort)
            self.parameterConnection(
                self.calibrationTriggerReceiver.parameterPort("messageOutput"),
                self.dynamicBinauralRenderer.parameterPort(
                    "headTrackingCalibration"))
    def __init__(self,
                 context,
                 name,
                 parent,
                 numberOfInputs,
                 numberOfOutputs,
                 arrayConfig,
                 interpolationSteps,
                 lfFilter=rbbl.BiquadCoefficientFloat(0.001921697757295,
                                                      0.003843395514590,
                                                      0.001921697757295,
                                                      -1.824651307057289,
                                                      0.832338098086468),
                 hfFilter=rbbl.BiquadCoefficientFloat(0.914247351285939,
                                                      1.828494702571878,
                                                      -0.914247351285939,
                                                      -1.824651307057289,
                                                      0.832338098086468)):
        super(LfHfVbap, self).__init__(context, name, parent)
        self.input = visr.AudioInputFloat("in", self, numberOfInputs)
        self.output = visr.AudioOutputFloat("out", self, numberOfOutputs)

        self.objectInput = visr.ParameterInput(
            "objectVectorInput",
            self,
            parameterType=pml.ObjectVector.staticType,
            protocolType=pml.DoubleBufferingProtocol.staticType,
            parameterConfig=pml.EmptyParameterConfig())

        self.panningCalculator = rcl.PanningCalculator(
            context,
            "GainCalculator",
            self,
            arrayConfig=arrayConfig,
            numberOfObjects=numberOfInputs,
            separateLowpassPanning=True)

        filterMtx = rbbl.BiquadCoefficientMatrixFloat(2 * numberOfInputs, 1)
        for idx in range(0, numberOfInputs):
            filterMtx[idx, 0] = lfFilter
            filterMtx[idx + numberOfInputs, 0] = hfFilter

        self.filterBank = rcl.BiquadIirFilter(
            context,
            "filterBank",
            self,
            numberOfChannels=2 * numberOfInputs,
            numberOfBiquads=1,  # TODO: allow variable number of sections.
            initialBiquads=filterMtx,
            controlInput=False)
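        # The connection below fans out each input channel i to two filter bank
        # channels: i (low-pass section) and i + numberOfInputs (high-pass section).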
        self.audioConnection(
            self.input,
            [i % numberOfInputs for i in range(0, 2 * numberOfInputs)],
            self.filterBank.audioPort("in"), range(0, 2 * numberOfInputs))

        self.lfMatrix = rcl.GainMatrix(context,
                                       "LfPanningMatrix",
                                       self,
                                       numberOfInputs=numberOfInputs,
                                       numberOfOutputs=numberOfOutputs,
                                       interpolationSteps=interpolationSteps,
                                       initialGains=0.0,
                                       controlInput=True)
        self.audioConnection(self.filterBank.audioPort("out"),
                             range(0, numberOfInputs),
                             self.lfMatrix.audioPort("in"),
                             range(0, numberOfInputs))

        self.hfMatrix = rcl.GainMatrix(context,
                                       "HfPanningMatrix",
                                       self,
                                       numberOfInputs=numberOfInputs,
                                       numberOfOutputs=numberOfOutputs,
                                       interpolationSteps=interpolationSteps,
                                       initialGains=0.0,
                                       controlInput=True)
        self.audioConnection(self.filterBank.audioPort("out"),
                             range(numberOfInputs, 2 * numberOfInputs),
                             self.hfMatrix.audioPort("in"),
                             range(0, numberOfInputs))

        self.signalMix = rcl.Add(context,
                                 "SignalMix",
                                 self,
                                 numInputs=2,
                                 width=numberOfOutputs)
        self.audioConnection(self.signalMix.audioPort("out"), self.output)

        self.audioConnection(self.lfMatrix.audioPort("out"),
                             self.signalMix.audioPort("in0"))
        self.audioConnection(self.hfMatrix.audioPort("out"),
                             self.signalMix.audioPort("in1"))

        self.parameterConnection(
            self.objectInput,
            self.panningCalculator.parameterPort("objectVectorInput"))
        self.parameterConnection(
            self.panningCalculator.parameterPort("lowFrequencyGainOutput"),
            self.lfMatrix.parameterPort("gainInput"))
        self.parameterConnection(
            self.panningCalculator.parameterPort("gainOutput"),
            self.hfMatrix.parameterPort("gainInput"))
    def __init__( self,
                 context, name, parent,
                 *,
                 numberOfObjects,
                 sofaFile = None,
                 hrirPositions = None,
                 hrirData = None,
                 hrirDelays = None,
                 headOrientation = None,
                 dynamicITD = False,
                 dynamicILD = False,
                 hrirInterpolation = False,
                 filterCrossfading = False,
                 fftImplementation = "default",
                 headTrackingReceiver = None,
                 headTrackingPositionalArguments = None,
                 headTrackingKeywordArguments = None,
                 sceneReceiveUdpPort = None
                 ):
        """
        Constructor.

        Parameters
        ----------
        context : visr.SignalFlowContext
            Standard visr.Component construction argument; holds the block size and the sampling frequency.
        name : string
            Name of the component, a standard visr.Component construction argument.
        parent : visr.CompositeComponent
            Containing component if there is one, None if this is a top-level component of the signal flow.
        numberOfObjects: int
            Maximum number of audio objects
        sofaFile: str, optional
            Optional SOFA file for loading the HRIR and associated data (HRIR measurement positions and delays).
            If not provided, the information must be provided by the hrirPositions and hrirData arguments.
        hrirPositions: numpy.ndarray, optional
            Optional way to provide the measurement grid for the BRIR listener view directions.
            If a SOFA file is provided, this is optional and overrides the listener view data
            in the file. Otherwise this argument is mandatory.
            Dimension #grid directions x (dimension of position argument)
        hrirData: numpy.ndarray, optional
            Optional way to provide the BRIR data.
            Dimension: #grid directions x #ears (2) x #loudspeakers x #ir length
        hrirDelays: numpy.ndarray, optional
            Optional BRIR delays. If a SOFA file is given, this argument overrides
            a potential delay setting from the file. Otherwise, no extra delays
            are applied unless this option is provided.
            Dimension: #grid directions x #ears (2) x #loudspeakers
        headOrientation: array-like, optional
            Head orientation in spherical coordinates (2- or 3-element vector or list).
            Either a static orientation (when no tracking is used), or the
            initial view direction.
        dynamicITD: bool, optional
            Whether the ITD is applied separately. That requires preprocessed HRIR data.
        dynamicILD: bool, optional
            Whether the ILD is computed and applied separately. At the moment this feature is not used (apart from applying the object gains).
        hrirInterpolation: bool, optional
            Whether the controller supports interpolation between neighbouring HRTF grid
            points. False means nearest neighbour (no interpolation), True
            enables barycentric interpolation.
        filterCrossfading: bool, optional
            Use a crossfading FIR filter matrix to avoid switching artifacts.
        fftImplementation: string, optional
            The FFT implementation to use. Default value enables VISR's default
            FFT library for the platform.
        headTrackingReceiver: class type, optional
            Class of the head tracking receiver; None (default value) disables dynamic head tracking.
        headTrackingPositionalArguments: tuple, optional
            Positional arguments passed to the constructor of the head tracking receiver object.
            Must be a tuple. If there is only a single argument, a trailing comma must be added.
        headTrackingKeywordArguments: dict, optional
            Keyword arguments passed to the constructor of the head tracking receiver. Must be a dictionary (dict).
        sceneReceiveUdpPort: int, optional
            A UDP port number where scene object metadata (in the S3A JSON format) is to be received.
            If not given (default), no network receiver is instantiated, and the component exposes a
            top-level parameter input port "objectVector" instead.
        """
        super( RealtimeDynamicHrirRenderer, self ).__init__( context, name, parent )
        self.objectSignalInput = visr.AudioInputFloat( "audioIn", self, numberOfObjects )
        self.binauralOutput = visr.AudioOutputFloat( "audioOut", self, 2 )

        enableTracking = (headTrackingReceiver is not None)

        # Handle loading of HRIR data from either a SOFA file or the matrix arguments.
        if (hrirData is not None) == (sofaFile is not None):
            raise ValueError( "Exactly one of the arguments sofaFile and hrirData must be present." )
        if sofaFile is not None:
            [ sofaHrirPositions, hrirData, sofaHrirDelays ] = readSofaFile( sofaFile )
            # If hrirDelays is not provided as an argument, use the one retrieved from the SOFA file
            if hrirDelays is None:
                hrirDelays = sofaHrirDelays
            # Use the positions obtained from the SOFA file only if the argument is not set
            if hrirPositions is None:
                hrirPositions = sofaHrirPositions

        self.dynamicHrirRenderer = DynamicHrirRenderer( context, "DynamicBinauralRenderer", self,
                                                       numberOfObjects = numberOfObjects,
                                                       hrirPositions = hrirPositions,
                                                       hrirData = hrirData,
                                                       hrirDelays = hrirDelays,
                                                       headOrientation = headOrientation,
                                                       headTracking = enableTracking,
                                                       dynamicITD = dynamicITD,
                                                       dynamicILD = dynamicILD,
                                                       hrirInterpolation = hrirInterpolation,
                                                       filterCrossfading = filterCrossfading,
                                                       fftImplementation = fftImplementation
                                                       )

        if sceneReceiveUdpPort is None:
            self.objectVectorInput = visr.ParameterInput( "objectVector", self, pml.ObjectVector.staticType,
                                                         pml.DoubleBufferingProtocol.staticType,
                                                         pml.EmptyParameterConfig() )
            self.parameterConnection( self.objectVectorInput,
                                     self.dynamicHrirRenderer.parameterPort("objectVector"))

        else:
            self.sceneReceiver = rcl.UdpReceiver( context, "SceneReceiver", self,
                                                 port = int(sceneReceiveUdpPort) )
            self.sceneDecoder = rcl.SceneDecoder( context, "SceneDecoder", self )
            self.parameterConnection( self.sceneReceiver.parameterPort("messageOutput"),
                                 self.sceneDecoder.parameterPort("datagramInput") )
            self.parameterConnection( self.sceneDecoder.parameterPort( "objectVectorOutput"),
                                 self.dynamicHrirRenderer.parameterPort("objectVector"))
        if enableTracking:
            if headTrackingPositionalArguments is None:
                headTrackingPositionalArguments = ()
            if headTrackingKeywordArguments is None:
                headTrackingKeywordArguments = {}
            self.trackingDevice = headTrackingReceiver(context, "HeadTrackingReceiver", self,
                                                *headTrackingPositionalArguments,
                                                **headTrackingKeywordArguments )
            self.parameterConnection( self.trackingDevice.parameterPort("orientation"), self.dynamicHrirRenderer.parameterPort("tracking"))

        self.audioConnection(  self.objectSignalInput, self.dynamicHrirRenderer.audioPort("audioIn"))
        self.audioConnection( self.dynamicHrirRenderer.audioPort("audioOut"), self.binauralOutput)
    def __init__(
        self,
        context,
        name,
        parent,
        loudspeakerConfig,
        numberOfInputs,
        numberOfOutputs,
        trajectoryPositions,
        trajectoryUpdateSamples=None,
        sourceLevel=1.0,
        sourceObjectId=0,
        controlDataType=pml.Float,
        controlReceivePort=8889,
    ):

        if trajectoryUpdateSamples is None:
            trajectoryUpdateSamples = context.period

        super(RealTimePanningComparisonTrajectory,
              self).__init__(context, name, parent)
        self.input = visr.AudioInputFloat("in", self, numberOfInputs)
        self.output = visr.AudioOutputFloat("out", self, numberOfOutputs)

        self.multiRenderer = PanningComparison(
            context,
            "MultiRenderer",
            self,
            loudspeakerConfig=loudspeakerConfig,
            numberOfInputs=numberOfInputs,
            numberOfOutputs=numberOfOutputs,
            interpolationSteps=trajectoryUpdateSamples,
            controlDataType=pml.Float)
        self.audioConnection(self.input, self.multiRenderer.audioPort("in"))
        self.audioConnection(self.multiRenderer.audioPort("out"), self.output)

        self.sceneGenerator = PointSourceTrajectoryGenerator(
            context,
            "SceneDecoder",
            self,
            positions=trajectoryPositions,
            updateRateSamples=trajectoryUpdateSamples,
            objectId=sourceObjectId,
            groupId=0,
            priority=0,
            objectLevel=sourceLevel)

        self.parameterConnection(
            self.sceneGenerator.parameterPort("objectVectorOutput"),
            self.multiRenderer.parameterPort("objectIn"))

        self.controlReceiver = rcl.UdpReceiver(
            context,
            "ControlReceiver",
            self,
            port=controlReceivePort,
            mode=rcl.UdpReceiver.Mode.Asynchronous)
        self.controlDecoder = rcl.ScalarOscDecoder(context, "ControlDecoder",
                                                   self)
        self.controlDecoder.setup(dataType='float')
        self.parameterConnection(
            self.controlReceiver.parameterPort("messageOutput"),
            self.controlDecoder.parameterPort("datagramInput"))
        self.parameterConnection(self.controlDecoder.parameterPort("dataOut"),
                                 self.multiRenderer.parameterPort("controlIn"))
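
A minimal usage sketch for the RealtimeDynamicHrirRenderer above, assuming VISR's standard runtime API (visr.SignalFlowContext, rrl.AudioSignalFlow) and a hypothetical head tracking receiver class MyTrackingReceiver that takes a serial port name and a baud rate. Note the trailing comma that turns the positional arguments into a one-element tuple, as required by the docstring.

import visr
import rrl

context = visr.SignalFlowContext(period=1024, samplingFrequency=48000)
renderer = RealtimeDynamicHrirRenderer(
    context, "DynamicHrirRenderer", None,
    numberOfObjects=8,
    sofaFile="hrirs.sofa",                      # hypothetical HRIR database
    headTrackingReceiver=MyTrackingReceiver,    # hypothetical receiver class
    headTrackingPositionalArguments=("COM3",),  # one-element tuple: note the comma
    headTrackingKeywordArguments={"baud": 57600})
flow = rrl.AudioSignalFlow(renderer)  # instantiate the runnable signal flow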
Exemple #22
0
    def __init__(
            self,
            context,
            name,
            parent,
            *,  # This ensures that the remaining arguments are given as keyword arguments.
            sofaFile=None,
            hrirPositions=None,
            hrirData=None,
            hrirDelays=None,
            headOrientation=None,
            headTracking=True,
            dynamicITD=False,
            hrirInterpolation=False,
            irTruncationLength=None,
            filterCrossfading=False,
            interpolatingConvolver=False,
            staticLateSofaFile=None,
            staticLateFilters=None,
            staticLateDelays=None,
            fftImplementation='default'):
        """
        Constructor.

        Parameters
        ----------
        context : visr.SignalFlowContext
            Standard visr.Component construction argument, a structure holding the block size and the sampling frequency
        name : string
            Name of the component, Standard visr.Component construction argument
        parent : visr.CompositeComponent
            Containing component if there is one, None if this is a top-level component of the signal flow.
        sofaFile: string
            BRIR database provided as a SOFA file. This is an alternative to the hrirPositions, hrirData
            (and optionally hrirDelays) arguments. Default None means that hrirData and hrirPositions must be provided.
        hrirPositions : numpy.ndarray
            Optional way to provide the measurement grid for the BRIR listener view directions. If a
            SOFA file is provided, this is optional and overrides the listener view data in the file.
            Otherwise this argument is mandatory. Dimension #grid directions x (dimension of position argument)
        hrirData: numpy.ndarray
            Optional way to provide the BRIR data. Dimension: #grid directions x #ears (2) x #loudspeakers x #ir length
        hrirDelays: numpy.ndarray
            Optional BRIR delays. If a SOFA file is given, this argument overrides a potential delay setting from the file. Otherwise, no extra delays
            are applied unless this option is provided. Dimension: #grid directions x #ears (2) x #loudspeakers
        headOrientation : array-like
            Head orientation in spherical coordinates (2- or 3-element vector or list). Either a static orientation (when no tracking is used),
            or the initial view direction
        headTracking: bool
            Whether dynamic head tracking is active. If True, a control input "tracking" is created.
        dynamicITD: bool
            Whether the delay part of the BRIRs is applied separately to the (delay-free) BRIRs.
        hrirInterpolation: bool
            Whether BRIRs are interpolated for the current head orientation. If False, nearest-neighbour selection (no interpolation) is used.
        irTruncationLength: int
            Maximum number of samples of the BRIR impulse responses. Functional only if the BRIR is provided in a SOFA file.
        filterCrossfading: bool
            Whether dynamic BRIR changes are crossfaded (True) or switched immediately (False)
        interpolatingConvolver: bool
            Whether the interpolating convolver option is used. If True, the convolver stores all BRIR filters, and the controller sends only
            interpolation coefficient messages to select the BRIR filters and their interpolation ratios.
        staticLateSofaFile: string, optional
            Name of a file containing a static (i.e., head orientation-independent) late part of the BRIRs.
            Optional argument, might be used as an alternative to the staticLateFilters argument, but these options are mutually exclusive.
            If neither is given, no static late part is used. The fields 'Data.IR' and the 'Data.Delay' are used.
        staticLateFilters: numpy.ndarray, optional
            Matrix containing a static, head position-independent part of the BRIRs. This option is mutually exclusive with
            staticLateSofaFile. If neither is given, no separate static late part is rendered.
            Dimension: 2 x #loudspeakers x firLength
        staticLateDelays: numpy.ndarray, optional
            Time delay of the late static BRIRs per loudspeaker. Optional attribute,
            only used if late static BRIR coefficients are provided.
            Dimension: 2 x #loudspeakers
        fftImplementation: string
            The FFT implementation to be used in the convolver. The default value selects the system default.
        """
        if (hrirData is not None) == (sofaFile is not None):
            raise ValueError(
                "Exactly one of the arguments sofaFile and hrirData must be present."
            )
        if sofaFile is not None:
            [sofaHrirPositions, hrirData, sofaHrirDelays] = readSofaFile(
                sofaFile,
                truncationLength=irTruncationLength,
                truncationWindowLength=16)
            # If hrirDelays is not provided as an argument, use the one retrieved from the SOFA file
            if hrirDelays is None:
                hrirDelays = sofaHrirDelays
            # Use the positions obtained from the SOFA file only if the argument is not set
            if hrirPositions is None:
                hrirPositions = sofaHrirPositions

        # Crude check for 'horizontal-only' listener view directions
        if np.max(np.abs(hrirPositions[:, 1])) < deg2rad(1):
            # Drop the elevation column, keeping 2D polar coordinates (azimuth, radius).
            hrirPositions = hrirPositions[:, [0, 2]]

        numberOfLoudspeakers = hrirData.shape[2]

        super(VirtualLoudspeakerRenderer, self).__init__(context, name, parent)
        self.loudspeakerSignalInput = visr.AudioInputFloat(
            "audioIn", self, numberOfLoudspeakers)
        self.binauralOutput = visr.AudioOutputFloat("audioOut", self, 2)
        if headTracking:
            self.trackingInput = visr.ParameterInput(
                "tracking", self, pml.ListenerPosition.staticType,
                pml.DoubleBufferingProtocol.staticType,
                pml.EmptyParameterConfig())

        # Check consistency between HRIR positions and HRIR data
        if (hrirPositions.shape[0] != hrirData.shape[0]):
            raise ValueError(
                "The number of HRIR positions is inconsistent with the dimension of the HRIR data."
            )

        # Additional safety check (is tested in the controller anyway)
        if dynamicITD:
            if (hrirDelays is None) or (hrirDelays.ndim != hrirData.ndim - 1) or (
                    hrirDelays.shape != hrirData.shape[0:-1]):
                raise ValueError(
                    'If the "dynamicITD" option is given, the parameter "hrirDelays" must match the leading dimensions of the HRIR data matrix.'
                )

        self.virtualLoudspeakerController = VirtualLoudspeakerController(
            context,
            "VirtualLoudspeakerController",
            self,
            hrirPositions=hrirPositions,
            hrirData=hrirData,
            headTracking=headTracking,
            dynamicITD=dynamicITD,
            hrirInterpolation=hrirInterpolation,
            hrirDelays=hrirDelays,
            interpolatingConvolver=interpolatingConvolver)

        if headTracking:
            self.parameterConnection(
                self.trackingInput,
                self.virtualLoudspeakerController.parameterPort(
                    "headTracking"))

        # Define the routing for the binaural convolver such that it matches the organisation of the
        # flat BRIR matrix.
        filterRouting = rbbl.FilterRoutingList()

        firLength = hrirData.shape[-1]

        if dynamicITD:
            self.delayVector = rcl.DelayVector(
                context,
                "delayVector",
                self,
                numberOfLoudspeakers * 2,
                interpolationType="lagrangeOrder3",
                initialDelay=0,
                controlInputs=rcl.DelayVector.ControlPortConfig.Delay,
                methodDelayPolicy=rcl.DelayMatrix.MethodDelayPolicy.Add,
                initialGain=1.0,
                interpolationSteps=context.period)

            self.audioConnection(
                self.loudspeakerSignalInput,
                [i % numberOfLoudspeakers for i in range(numberOfLoudspeakers * 2)],
                self.delayVector.audioPort("in"),
                range(0, 2 * numberOfLoudspeakers))

            for idx in range(0, numberOfLoudspeakers):
                filterRouting.addRouting(idx, 0, idx, 1.0)
                filterRouting.addRouting(idx + numberOfLoudspeakers, 1,
                                         idx + numberOfLoudspeakers, 1.0)

            if interpolatingConvolver:
                if filterCrossfading:
                    interpolationSteps = context.period
                else:
                    interpolationSteps = 0

                numFilters = np.prod(hrirData.shape[:-1])
                filterReshaped = np.reshape(hrirData, (numFilters, firLength))
                self.convolver = rcl.InterpolatingFirFilterMatrix(
                    context,
                    'convolutionEngine',
                    self,
                    numberOfInputs=2 * numberOfLoudspeakers,
                    numberOfOutputs=2,
                    maxFilters=numFilters,
                    filterLength=firLength,
                    maxRoutings=2 * numberOfLoudspeakers,
                    numberOfInterpolants=2,  # TODO: Find out from
                    transitionSamples=interpolationSteps,
                    filters=filterReshaped,
                    routings=filterRouting,
                    controlInputs=rcl.InterpolatingFirFilterMatrix.ControlPortConfig.Filters,
                    fftImplementation=fftImplementation)

            elif filterCrossfading:
                self.convolver = rcl.CrossfadingFirFilterMatrix(
                    context,
                    'convolutionEngine',
                    self,
                    numberOfInputs=2 * numberOfLoudspeakers,
                    numberOfOutputs=2,
                    maxFilters=2 * numberOfLoudspeakers,
                    filterLength=firLength,
                    maxRoutings=2 * numberOfLoudspeakers,
                    transitionSamples=context.period,
                    routings=filterRouting,
                    controlInputs=rcl.CrossfadingFirFilterMatrix.ControlPortConfig.Filters,
                    fftImplementation=fftImplementation)
            else:
                self.convolver = rcl.FirFilterMatrix(
                    context,
                    'convolutionEngine',
                    self,
                    numberOfInputs=2 * numberOfLoudspeakers,
                    numberOfOutputs=2,
                    maxFilters=2 * numberOfLoudspeakers,
                    filterLength=firLength,
                    maxRoutings=2 * numberOfLoudspeakers,
                    routings=filterRouting,
                    controlInputs=rcl.FirFilterMatrix.ControlPortConfig.Filters,
                    fftImplementation=fftImplementation)

            self.audioConnection(
                self.delayVector.audioPort("out"),
                self.convolver.audioPort("in"),
            )
            self.parameterConnection(
                self.virtualLoudspeakerController.parameterPort("delayOutput"),
                self.delayVector.parameterPort("delayInput"))

        else:  # no dynamic ITD
            for idx in range(0, numberOfLoudspeakers):
                filterRouting.addRouting(idx, 0, idx, 1.0)
                filterRouting.addRouting(idx, 1, idx + numberOfLoudspeakers,
                                         1.0)
            if interpolatingConvolver:
                if filterCrossfading:
                    interpolationSteps = context.period
                else:
                    interpolationSteps = 0

                numFilters = np.prod(np.array(hrirData.shape[0:-1]))
                filterReshaped = np.reshape(hrirData, (numFilters, firLength))
                self.convolver = rcl.InterpolatingFirFilterMatrix(
                    context,
                    'convolutionEngine',
                    self,
                    numberOfInputs=numberOfLoudspeakers,
                    numberOfOutputs=2,
                    maxFilters=numFilters,
                    filterLength=firLength,
                    maxRoutings=2 * numberOfLoudspeakers,
                    numberOfInterpolants=2,  # TODO: Find out from
                    transitionSamples=interpolationSteps,
                    filters=filterReshaped,
                    routings=filterRouting,
                    controlInputs=rcl.InterpolatingFirFilterMatrix.ControlPortConfig.Interpolants,
                    fftImplementation=fftImplementation)
            elif filterCrossfading:
                self.convolver = rcl.CrossfadingFirFilterMatrix(
                    context,
                    'convolutionEngine',
                    self,
                    numberOfInputs=numberOfLoudspeakers,
                    numberOfOutputs=2,
                    maxFilters=2 * numberOfLoudspeakers,
                    filterLength=firLength,
                    maxRoutings=2 * numberOfLoudspeakers,
                    transitionSamples=context.period,
                    routings=filterRouting,
                    controlInputs=rcl.CrossfadingFirFilterMatrix.ControlPortConfig.Filters,
                    fftImplementation=fftImplementation)
            else:
                self.convolver = rcl.FirFilterMatrix(
                    context,
                    'convolutionEngine',
                    self,
                    numberOfInputs=numberOfLoudspeakers,
                    numberOfOutputs=2,
                    maxFilters=2 * numberOfLoudspeakers,
                    filterLength=firLength,
                    maxRoutings=2 * numberOfLoudspeakers,
                    routings=filterRouting,
                    controlInputs=rcl.FirFilterMatrix.ControlPortConfig.Filters,
                    fftImplementation=fftImplementation)
            self.audioConnection(self.loudspeakerSignalInput,
                                 self.convolver.audioPort("in"))

        if interpolatingConvolver:
            self.parameterConnection(
                self.virtualLoudspeakerController.parameterPort(
                    "interpolatorOutput"),
                self.convolver.parameterPort("interpolantInput"))
        else:
            self.parameterConnection(
                self.virtualLoudspeakerController.parameterPort(
                    "filterOutput"),
                self.convolver.parameterPort("filterInput"))

        # Optionally use static filters for the late part.
        if (staticLateSofaFile is not None) and (staticLateFilters
                                                 is not None):
            raise ValueError(
                "The arguments 'staticLateSofaFile' and 'staticLateFilters' cannot both be given."
            )
        if (staticLateSofaFile is not None):
            latePos, lateFilters, lateDelay = readSofaFile(staticLateSofaFile)
            staticLateDelays = np.squeeze(lateDelay)
            staticLateFilters = np.squeeze(lateFilters)

        if (staticLateFilters is not None):
            flatDelays = staticLateDelays.flatten(order='C')
            self.staticLateDelays = rcl.DelayVector(
                context,
                'staticLateDelays',
                self,
                2 * numberOfLoudspeakers,
                interpolationSteps=context.period,
                interpolationType='nearestSample',
                initialGain=np.ones(2 * numberOfLoudspeakers, dtype=np.float32),
                initialDelay=flatDelays)
            lateFilterRouting = rbbl.FilterRoutingList([
                rbbl.FilterRouting(i, i // numberOfLoudspeakers, i, 1.0)
                for i in range(2 * numberOfLoudspeakers)
            ])

            flatLateFilters = np.reshape(staticLateFilters,
                                         (2 * numberOfLoudspeakers, -1),
                                         order='C')
            self.staticLateFilters = rcl.FirFilterMatrix(
                context,
                "staticlateFilters",
                self,
                numberOfInputs=2 * numberOfLoudspeakers,
                numberOfOutputs=2,
                filterLength=staticLateFilters.shape[-1],
                maxFilters=2 * numberOfLoudspeakers,
                maxRoutings=2 * numberOfLoudspeakers,
                routings=lateFilterRouting,
                filters=flatLateFilters,
                fftImplementation=fftImplementation)
            self.audioConnection(
                sendPort=self.loudspeakerSignalInput,
                sendIndices=list(range(numberOfLoudspeakers)) +
                list(range(numberOfLoudspeakers)),
                receivePort=self.staticLateDelays.audioPort("in"))
            self.audioConnection(
                sendPort=self.staticLateDelays.audioPort("out"),
                receivePort=self.staticLateFilters.audioPort("in"))
            self.earlyLateSum = rcl.Add(context,
                                        "earlyLateSum",
                                        self,
                                        numInputs=2,
                                        width=2)
            self.audioConnection(self.convolver.audioPort("out"),
                                 self.earlyLateSum.audioPort("in0"))
            self.audioConnection(self.staticLateFilters.audioPort("out"),
                                 self.earlyLateSum.audioPort("in1"))
            self.audioConnection(self.earlyLateSum.audioPort("out"),
                                 self.binauralOutput)
        else:
            self.audioConnection(self.convolver.audioPort("out"),
                                 self.binauralOutput)
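
The matrix-based construction path of the VirtualLoudspeakerRenderer above can be exercised without a SOFA file. A minimal sketch with synthetic placeholder BRIRs in the documented dimensions (the context object is assumed to exist, as in the sketch further above):

import numpy as np

numDirections = 72
numLoudspeakers = 5
irLength = 2048
# Listener view grid: one (azimuth, elevation, radius) triple per direction.
hrirPositions = np.stack((np.linspace(0.0, 2.0 * np.pi, numDirections, endpoint=False),
                          np.zeros(numDirections),
                          np.ones(numDirections)), axis=1)
# #grid directions x #ears (2) x #loudspeakers x #ir length; unit impulses as dummy BRIRs.
hrirData = np.zeros((numDirections, 2, numLoudspeakers, irLength), dtype=np.float32)
hrirData[..., 0] = 1.0

virtLspRenderer = VirtualLoudspeakerRenderer(context, "VirtLspRenderer", None,
                                             hrirPositions=hrirPositions,
                                             hrirData=hrirData,
                                             headTracking=False,
                                             filterCrossfading=True)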
Exemple #23
0
    def __init__(self,
                 context,
                 name,
                 parent,
                 loudspeakerConfigs,
                 numberOfInputs,
                 numberOfOutputs,
                 interpolationPeriod,
                 diffusionFilters,
                 trackingConfiguration='',
                 controlDataType=pml.UnsignedInteger):
        super(MultiRenderer, self).__init__(context, name, parent)
        self.input = visr.AudioInputFloat("in", self, numberOfInputs)
        self.output = visr.AudioOutputFloat("out", self, numberOfOutputs)
        self.objectInput = visr.ParameterInput(
            "objectIn",
            self,
            protocolType=pml.DoubleBufferingProtocol.staticType,
            parameterType=pml.ObjectVector.staticType,
            parameterConfig=pml.EmptyParameterConfig())
        self.controlInput = visr.ParameterInput(
            "controlIn",
            self,
            protocolType=pml.MessageQueueProtocol.staticType,
            parameterType=controlDataType.staticType,
            parameterConfig=pml.EmptyParameterConfig())

        numRenderers = len(loudspeakerConfigs)
        self.outputSwitch = AudioSignalSwitch(context,
                                              "OutputSwitch",
                                              self,
                                              numberOfChannels=numberOfOutputs,
                                              numberOfInputs=numRenderers,
                                              controlDataType=controlDataType)
        self.parameterConnection(self.controlInput,
                                 self.outputSwitch.parameterPort('controlIn'))
        self.audioConnection(self.outputSwitch.audioPort('out'), self.output)

        self.renderers = []
        for rendererIdx in range(0, numRenderers):
            rendererName = "renderer%d" % rendererIdx
            config = loudspeakerConfigs[rendererIdx]

            decorrFilters = pml.MatrixParameterFloat(
                diffusionFilters[0:config.numberOfRegularLoudspeakers, :])

            renderer = signalflows.CoreRenderer(
                context,
                rendererName,
                self,
                loudspeakerConfiguration=config,
                numberOfInputs=numberOfInputs,
                numberOfOutputs=numberOfOutputs,
                interpolationPeriod=interpolationPeriod,
                diffusionFilters=decorrFilters,
                trackingConfiguration=trackingConfiguration)
            self.audioConnection(self.input, renderer.audioPort('audioIn'))
            self.audioConnection(
                renderer.audioPort('audioOut'),
                self.outputSwitch.audioPort('in_%d' % rendererIdx))
            self.parameterConnection(self.objectInput,
                                     renderer.parameterPort('objectDataInput'))
            self.renderers.append(renderer)
    def __init__(self,
                 context,
                 name,
                 parent,
                 loudspeakerConfigFiles,
                 numberOfInputs,
                 numberOfOutputs,
                 interpolationPeriod,
                 diffusionFilterFile,
                 udpReceivePort=8888,
                 controlReceivePort=8889,
                 trackingConfiguration=''):
        super(RealTimeMultiRenderer, self).__init__(context, name, parent)
        self.input = visr.AudioInputFloat("in", self, numberOfInputs)
        self.output = visr.AudioOutputFloat("out", self, numberOfOutputs)

        rendererConfigs = []
        for cfgFile in loudspeakerConfigFiles:
            rendererConfigs.append(panning.LoudspeakerArray(cfgFile))

        diffFilters = np.array(
            pml.MatrixParameterFloat.fromAudioFile(diffusionFilterFile))

        self.multiRenderer = MultiRenderer(
            context,
            "MultiRenderer",
            self,
            loudspeakerConfigs=rendererConfigs,
            numberOfInputs=numberOfInputs,
            numberOfOutputs=numberOfOutputs,
            interpolationPeriod=interpolationPeriod,
            diffusionFilters=diffFilters,
            trackingConfiguration=trackingConfiguration,
            controlDataType=pml.Float)
        self.audioConnection(self.input, self.multiRenderer.audioPort("in"))
        self.audioConnection(self.multiRenderer.audioPort("out"), self.output)

        self.sceneReceiver = rcl.UdpReceiver(
            context,
            "SceneReceiver",
            self,
            port=udpReceivePort,
            mode=rcl.UdpReceiver.Mode.Asynchronous)
        self.sceneDecoder = rcl.SceneDecoder(context, "SceneDecoder", self)
        self.parameterConnection(
            self.sceneReceiver.parameterPort("messageOutput"),
            self.sceneDecoder.parameterPort("datagramInput"))
        self.parameterConnection(
            self.sceneDecoder.parameterPort("objectVectorOutput"),
            self.multiRenderer.parameterPort("objectIn"))

        self.controlReceiver = rcl.UdpReceiver(
            context,
            "ControlReceiver",
            self,
            port=controlReceivePort,
            mode=rcl.UdpReceiver.Mode.Asynchronous)
        self.controlDecoder = rcl.ScalarOscDecoder(context, "ControlDecoder",
                                                   self)
        self.controlDecoder.setup(dataType='float')
        self.parameterConnection(
            self.controlReceiver.parameterPort("messageOutput"),
            self.controlDecoder.parameterPort("datagramInput"))
        self.parameterConnection(self.controlDecoder.parameterPort("dataOut"),
                                 self.multiRenderer.parameterPort("controlIn"))
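
Renderer switching at runtime is driven by scalar OSC messages arriving on the control port. A sketch using the third-party python-osc package; the OSC address is an arbitrary placeholder, since ScalarOscDecoder only decodes the scalar payload:

from pythonosc.udp_client import SimpleUDPClient

client = SimpleUDPClient("127.0.0.1", 8889)   # default controlReceivePort
client.send_message("/renderer/select", 1.0)  # select renderer index 1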
Exemple #25
0
    def __init__(self,
                 context,
                 name,
                 parent,
                 hoaOrder=None,
                 sofaFile=None,
                 decodingFilters=None,
                 interpolationSteps=None,
                 headOrientation=None,
                 headTracking=True,
                 fftImplementation='default'):
        """
        Constructor.

        Parameters
        ----------
        context : visr.SignalFlowContext
            Standard visr.Component construction argument, holds the block size and the sampling frequency
        name : string
            Name of the component, Standard visr.Component construction argument
        parent : visr.CompositeComponent
            Containing component if there is one, None if this is a top-level component of the signal flow.
        hoaOrder: int or None
            The maximum HOA order that can be reproduced. If None, the HOA order is deduced
            from the first dimension of the HOA filters (possibly contained in a SOFA file).
        sofaFile: string or NoneType
            A file in SOFA format containing the decoding filters. This expects the filters in the
            field 'Data.IR', dimensions (hoaOrder+1)**2 x 2 x irLength. If None, then the filters
            must be provided in 'decodingFilters' parameter.
        decodingFilters : numpy.ndarray or NoneType
            Alternative way to provide the HOA decoding filters.
        interpolationSteps: int, optional
           Number of samples to transition to new object positions after an update.
        headOrientation : array-like
            Head orientation in spherical coordinates (2- or 3-element vector or list). Either a static orientation (when no tracking is used),
            or the initial view direction
        headTracking: bool
            Whether dynamic head tracking is active.
        fftImplementation: string, optional
            The FFT library to be used in the filtering. The default uses VISR's
            default implementation for the present platform.
        """
        if (decodingFilters is None) == (sofaFile is None):
            raise ValueError(
                "HoaObjectToBinauralRenderer: Either 'decodingFilters' or 'sofaFile' must be provided."
            )
        if sofaFile is None:
            filters = decodingFilters
        else:
            # pos and delays are not used here.
            [pos, filters, delays] = readSofaFile(sofaFile)

        if hoaOrder is None:
            numHoaCoeffs = filters.shape[0]
            orderP1 = int(np.floor(np.sqrt(numHoaCoeffs)))
            if orderP1**2 != numHoaCoeffs:
                raise ValueError(
                    "If hoaOrder is not given, the number of HOA filters must be a square number"
                )
            hoaOrder = orderP1 - 1
        else:
            numHoaCoeffs = (hoaOrder + 1)**2

        if filters.ndim != 3 or filters.shape[1] != 2 or filters.shape[0] < numHoaCoeffs:
            raise ValueError(
                "HoaObjectToBinauralRenderer: the filter data must be a 3D matrix where the second dimension is 2 and the first dimension is equal to or larger than (hoaOrder+1)^2."
            )

        # Default number of samples for transitioning to updated parameters.
        if interpolationSteps is None:
            interpolationSteps = context.period

        super(HoaBinauralRenderer, self).__init__(context, name, parent)
        self.hoaSignalInput = visr.AudioInputFloat("audioIn", self,
                                                   numHoaCoeffs)
        self.binauralOutput = visr.AudioOutputFloat("audioOut", self, 2)

        filterMtx = np.concatenate(
            (filters[0:numHoaCoeffs, 0, :], filters[0:numHoaCoeffs, 1, :]))
        routings = rbbl.FilterRoutingList()
        for idx in range(0, numHoaCoeffs):
            routings.addRouting(idx, 0, idx, 1.0)
            routings.addRouting(idx, 1, idx + numHoaCoeffs, 1.0)

        self.binauralFilterBank = rcl.FirFilterMatrix(
            context,
            'binauralFilterBank',
            self,
            numberOfInputs=numHoaCoeffs,
            numberOfOutputs=2,
            filterLength=filters.shape[-1],
            maxFilters=2 * numHoaCoeffs,
            maxRoutings=2 * numHoaCoeffs,
            filters=filterMtx,
            routings=routings,
            controlInputs=rcl.FirFilterMatrix.ControlPortConfig.NoInputs,
            fftImplementation=fftImplementation)

        if headTracking or (headOrientation is not None):

            numMatrixCoeffs = ((hoaOrder + 1) * (2 * hoaOrder + 1) *
                               (2 * hoaOrder + 3)) // 3

            self.rotationCalculator = HoaRotationMatrixCalculator(
                context,
                "RotationCalculator",
                self,
                hoaOrder,
                dynamicOrientation=headTracking,
                initialOrientation=headOrientation)

            rotationMatrixRoutings = rbbl.SparseGainRoutingList()
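            # Enumerate the entries of the block-diagonal HOA rotation matrix:
            # one dense (2*o+1) x (2*o+1) block per order o. Gains start at zero
            # and are updated at runtime by the rotation calculator.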
            for oIdx in range(hoaOrder + 1):
                entryStart = (oIdx * (2 * oIdx - 1) * (2 * oIdx + 1)) // 3
                diagStart = oIdx**2
                for rowIdx in range(2 * oIdx + 1):
                    row = diagStart + rowIdx
                    colsPerRow = 2 * oIdx + 1
                    for colIdx in range(2 * oIdx + 1):
                        col = diagStart + colIdx
                        entryIdx = entryStart + rowIdx * colsPerRow + colIdx
                        rotationMatrixRoutings.addRouting(
                            entryIdx, row, col, 0.0)

            self.rotationMatrix = rcl.SparseGainMatrix(
                context,
                "rotationMatrix",
                self,
                numberOfInputs=numHoaCoeffs,
                numberOfOutputs=numHoaCoeffs,
                interpolationSteps=interpolationSteps,
                maxRoutingPoints=numMatrixCoeffs,
                initialRoutings=rotationMatrixRoutings,
                controlInputs=rcl.SparseGainMatrix.ControlPortConfig.Gain)
            self.audioConnection(self.hoaSignalInput,
                                 self.rotationMatrix.audioPort("in"))
            self.audioConnection(self.rotationMatrix.audioPort("out"),
                                 self.binauralFilterBank.audioPort("in"))
            self.parameterConnection(
                self.rotationCalculator.parameterPort("coefficients"),
                self.rotationMatrix.parameterPort("gainInput"))

            if headTracking:
                self.trackingInput = visr.ParameterInput(
                    "tracking", self, pml.ListenerPosition.staticType,
                    pml.DoubleBufferingProtocol.staticType,
                    pml.EmptyParameterConfig())
                self.parameterConnection(
                    self.trackingInput,
                    self.rotationCalculator.parameterPort("orientation"))
        else:
            self.audioConnection(self.hoaSignalInput,
                                 self.binauralFilterBank.audioPort("in"))

        self.audioConnection(self.binauralFilterBank.audioPort("out"),
                             self.binauralOutput)
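
The closed-form expression for numMatrixCoeffs above counts the entries of all dense per-order blocks of the rotation matrix; a quick sketch verifying it against the direct sum:

for order in range(8):
    blockSum = sum((2 * o + 1) ** 2 for o in range(order + 1))
    closedForm = ((order + 1) * (2 * order + 1) * (2 * order + 3)) // 3
    assert blockSum == closedForm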
    def __init__(
            self,
            context,
            name,
            parent,
            *,  # This ensures that the remaining arguments are given as keyword arguments.
            numberOfObjects,
            sofaFile=None,
            hrirPositions=None,
            hrirData=None,
            hrirDelays=None,
            headOrientation=None,
            headTracking=True,
            dynamicITD=False,
            hrirInterpolation=False,
            irTruncationLength=None,
            filterCrossfading=False,
            interpolatingConvolver=False,
            staticLateSofaFile=None,
            staticLateFilters=None,
            staticLateDelays=None,
            fftImplementation="default",
            loudspeakerConfiguration=None,
            loudspeakerRouting=None,
            objectRendererOptions=None):
        """
        Constructor.

        Parameters
        ----------
        context : visr.SignalFlowContext
            Standard visr.Component construction argument, a structure holding the block size and the sampling frequency
        name : string
            Name of the component, Standard visr.Component construction argument
        parent : visr.CompositeComponent
            Containing component if there is one, None if this is a top-level component of the signal flow.
        sofaFile: string
            BRIR database provided as a SOFA file. This is an alternative to the hrirPositions, hrirData
            (and optionally hrirDelays) arguments. Default None means that hrirData and hrirPositions must be provided.
        hrirPositions : numpy.ndarray
            Optional way to provide the measurement grid for the BRIR listener view directions. If a
            SOFA file is provided, this is optional and overrides the listener view data in the file.
            Otherwise this argument is mandatory. Dimension #grid directions x (dimension of position argument)
        hrirData: numpy.ndarray
            Optional way to provide the BRIR data. Dimension: #grid directions x #ears (2) x #loudspeakers x #ir length
        hrirDelays: numpy.ndarray
            Optional BRIR delays. If a SOFA file is given, this argument overrides a potential delay setting from the file. Otherwise, no extra delays
            are applied unless this option is provided. Dimension: #grid directions x #ears (2) x #loudspeakers
        headOrientation : array-like
            Head orientation in spherical coordinates (2- or 3-element vector or list). Either a static orientation (when no tracking is used),
            or the initial view direction
        headTracking: bool
            Whether dynamic head tracking is active. If True, a control input "tracking" is created.
        dynamicITD: bool
            Whether the delay part of the BRIRs is applied separately to the (delay-free) BRIRs.
        hrirInterpolation: bool
            Whether BRIRs are interpolated for the current head orientation. If False, nearest-neighbour selection (no interpolation) is used.
        irTruncationLength: int
            Maximum number of samples of the BRIR impulse responses. Functional only if the BRIR is provided in a SOFA file.
        filterCrossfading: bool
            Whether dynamic BRIR changes are crossfaded (True) or switched immediately (False)
        interpolatingConvolver: bool
            Whether the interpolating convolver option is used. If True, the convolver stores all BRIR filters, and the controller sends only
            interpolation coefficient messages to select the BRIR filters and their interpolation ratios.
        staticLateSofaFile: string, optional
            Name of a file containing a static (i.e., head orientation-independent) late part of the BRIRs.
            Optional argument, might be used as an alternative to the staticLateFilters argument, but these options are mutually exclusive.
            If neither is given, no static late part is used. The fields 'Data.IR' and the 'Data.Delay' are used.
        staticLateFilters: numpy.ndarray, optional
            Matrix containing a static, head position-independent part of the BRIRs. This option is mutually exclusive with
            staticLateSofaFile. If neither is given, no separate static late part is rendered.
            Dimension: 2 x #loudspeakers x firLength
        staticLateDelays: numpy.ndarray, optional
            Time delay of the late static BRIRs per loudspeaker. Optional attribute,
            only used if late static BRIR coefficients are provided.
            Dimension: 2 x #loudspeakers
        fftImplementation: string
            The FFT implementation to be used in the convolver. The default value selects the system default.
        loudspeakerConfiguration: panning.LoudspeakerArray
            Loudspeaker configuration object used in the object renderer. Must not be None.
        loudspeakerRouting: array-like list of integers or None
            Routing indices from the outputs of the object renderer to the inputs of the binaural virtual loudspeaker renderer.
            If empty, the outputs of the object renderer are connected to the first inputs of the virtual loudspeaker renderer.
        objectRendererOptions: dict, optional
            Keyword arguments passed to the object renderer (rcl.CoreRenderer). This may involve all optional
            arguments for this class apart from loudspeakerConfiguration, numberOfInputs, and numberOfOutputs.
            If provided, these parameters are overwritten by the values determined from the binaural renderer's configuration.

        """

        # Use a fresh dict to avoid mutating a shared default argument.
        if objectRendererOptions is None:
            objectRendererOptions = {}

        # Parameter checking
        if not isinstance(loudspeakerConfiguration, panning.LoudspeakerArray):
            # Try to convert automatically
            loudspeakerConfiguration = panning.LoudspeakerArray(
                loudspeakerConfiguration)
            # raise ValueError( "'loudspeakerConfiguration' is not a 'panning.LoudspeakerArray' object." )
        numArraySpeakers = loudspeakerConfiguration.numberOfRegularLoudspeakers

        outRoutings = list(range(numArraySpeakers))  # Plain[0,1,...] routing

        super(ObjectToVirtualLoudspeakerRenderer,
              self).__init__(context, name, parent)

        self.objectInput = visr.AudioInputFloat("audioIn", self,
                                                numberOfObjects)
        self.binauralOutput = visr.AudioOutputFloat("audioOut", self, 2)
        self.objectVectorInput = visr.ParameterInput(
            "objectVector", self, pml.ObjectVector.staticType,
            pml.DoubleBufferingProtocol.staticType, pml.EmptyParameterConfig())
        if headTracking:
            self.trackingInput = visr.ParameterInput(
                "tracking", self, pml.ListenerPosition.staticType,
                pml.DoubleBufferingProtocol.staticType,
                pml.EmptyParameterConfig())

        objectRendererOptions["loudspeakerConfiguration"] = loudspeakerConfiguration
        objectRendererOptions["numberOfInputs"] = numberOfObjects
        objectRendererOptions["numberOfOutputs"] = numArraySpeakers

        if "interpolationPeriod" not in objectRendererOptions:
            objectRendererOptions["interpolationPeriod"] = context.period

        if "diffusionFilters" not in objectRendererOptions:
            diffLen = 512
            fftLen = int(np.ceil(0.5 * (diffLen + 1)))
            H = np.exp(-1j * (np.random.rand(numArraySpeakers, fftLen)))
            h = np.fft.irfft(H, axis=1)
            diffFilters = efl.BasicMatrixFloat(h)
            objectRendererOptions["diffusionFilters"] = diffFilters

        self.objectRenderer = CoreRenderer(context, "ObjectRenderer", self,
                                           **objectRendererOptions)

        self.virtualLoudspeakerRenderer = VirtualLoudspeakerRenderer(
            context,
            "VirtualLoudspeakerRenderer",
            self,
            sofaFile=sofaFile,
            hrirPositions=hrirPositions,
            hrirData=hrirData,
            hrirDelays=hrirDelays,
            headOrientation=headOrientation,
            headTracking=headTracking,
            dynamicITD=dynamicITD,
            hrirInterpolation=hrirInterpolation,
            irTruncationLength=irTruncationLength,
            filterCrossfading=filterCrossfading,
            interpolatingConvolver=interpolatingConvolver,
            staticLateSofaFile=staticLateSofaFile,
            staticLateFilters=staticLateFilters,
            staticLateDelays=staticLateDelays,
            fftImplementation=fftImplementation)

        self.audioConnection(self.objectInput,
                             self.objectRenderer.audioPort("audioIn"))
        numVirtualSpeakers = self.virtualLoudspeakerRenderer.audioPort("audioIn").width

        if loudspeakerRouting is None:
            if numVirtualSpeakers != numArraySpeakers:
                raise ValueError(
                    "If no 'loudspeakerRouting' parameter is provided, the numbers of loudspeakers of the object renderer and the binaural virt. loudspeaker renderer must match."
                )
            loudspeakerRouting = list(
                range(numArraySpeakers))  # Plain[0,1,...] routing

        if numVirtualSpeakers > numArraySpeakers:
            unconnectedSpeakers = list(
                set(range(numVirtualSpeakers)) - set(loudspeakerRouting))
            self.nullSource = NullSource(context, "NullSource", self, width=1)
            self.audioConnection(
                self.nullSource.audioPort("out"),
                [0] * len(unconnectedSpeakers),
                self.virtualLoudspeakerRenderer.audioPort("audioIn"),
                unconnectedSpeakers)
        self.audioConnection(
            self.objectRenderer.audioPort("audioOut"), outRoutings,
            self.virtualLoudspeakerRenderer.audioPort("audioIn"),
            loudspeakerRouting)
        self.audioConnection(
            self.virtualLoudspeakerRenderer.audioPort("audioOut"),
            self.binauralOutput)
        self.parameterConnection(
            self.objectVectorInput,
            self.objectRenderer.parameterPort("objectDataInput"))
        if headTracking:
            self.parameterConnection(
                self.trackingInput,
                self.virtualLoudspeakerRenderer.parameterPort("tracking"))
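
A minimal instantiation sketch for the combined object-to-virtual-loudspeaker renderer above, assuming a hypothetical loudspeaker layout file and BRIR database (the context object as in the earlier sketches):

import panning

layout = panning.LoudspeakerArray("layout_5_1.xml")  # hypothetical layout file
objRenderer = ObjectToVirtualLoudspeakerRenderer(
    context, "ObjToVirtLsp", None,
    numberOfObjects=4,
    sofaFile="brirs.sofa",  # hypothetical BRIR database
    loudspeakerConfiguration=layout,
    loudspeakerRouting=list(range(layout.numberOfRegularLoudspeakers)))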
Exemple #27
0
    def __init__(self, context, name, parent, width):
        super(PythonAdder3, self).__init__(context, name, parent)
        self.input0 = visr.AudioInputFloat("in0", self, width)
        self.input1 = visr.AudioInputFloat("in1", self, width)
        self.input2 = visr.AudioInputFloat("in2", self, width)
        self.output = visr.AudioOutputFloat("out", self, width)
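
The example above shows only the constructor. A sketch of the matching process() override, assuming the data()/set() accessors of VISR's Python audio port bindings:

    def process(self):
        # Sum the three input blocks sample-wise and write them to the output.
        self.output.set(self.input0.data() + self.input1.data()
                        + self.input2.data())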
Exemple #28
0
    def __init__(
            self,
            context,
            name,
            parent,
            *,  # Only keyword arguments after this point
            hoaOrder=None,
            sofaFile=None,
            decodingFilters=None,
            interpolationSteps=None,
            headTracking=True,
            headOrientation=None,
            fftImplementation="default",
            headTrackingReceiver=None,
            headTrackingPositionalArguments=None,
            headTrackingKeywordArguments=None):
        """
        Constructor.

        Parameters
        ----------
        context : visr.SignalFlowContext
            Standard visr.Component construction argument, holds the block size and the sampling frequency
        name : string
            Name of the component, Standard visr.Component construction argument
        parent : visr.CompositeComponent
            Containing component if there is one, None if this is a top-level component of the signal flow.
        hoaOrder: optional, int or None
            HOA order used for encoding the point source and plane wave objects.
            If not provided, the order is determined from the number of decoding filters (either passed as a matrix or in
            a SOFA file)
        sofaFile: string, optional
            A SOFA file containing the HOA decoding filters. These are expected as a
            2 x (maxHoaOrder+1)^2 array in the field Data.IR.
        decodingFilters : numpy.ndarray, optional
            Alternative way to provide the HOA decoding filters. Expects a
            2 x (maxHoaOrder+1)^2 matrix containing FIR coefficients.
        interpolationSteps: int, optional
           Number of samples to transition to new object positions after an update.
        headOrientation : array-like
            Head orientation in spherical coordinates (2- or 3-element vector or list). Either a static orientation (when no tracking is used),
            or the initial view direction
        headTracking: bool
            Whether dynamic head tracking is active.
        fftImplementation: string, optional
            The FFT implementation to use. Default value enables VISR's default
            FFT library for the platform.
        headTrackingReceiver: class type, optional
            Class of the head tracking receiver; None (default value) disables dynamic head tracking.
        headTrackingPositionalArguments: tuple, optional
            Positional arguments passed to the constructor of the head tracking receiver object.
            Must be a tuple. If there is only a single argument, a trailing comma must be added.
        headTrackingKeywordArguments: dict, optional
            Keyword arguments passed to the constructor of the head tracking receiver. Must be a dictionary (dict).
        """
        super(RealtimeHoaBinauralRenderer,
              self).__init__(context, name, parent)
        enableTracking = (headTrackingReceiver is not None)

        self.hoaBinauralRenderer = HoaBinauralRenderer(
            context,
            "HoaBinauralRenderer",
            self,
            hoaOrder=hoaOrder,
            sofaFile=sofaFile,
            decodingFilters=decodingFilters,
            interpolationSteps=interpolationSteps,
            headTracking=headTracking,
            headOrientation=headOrientation,
            fftImplementation=fftImplementation)

        if enableTracking:
            if headTrackingPositionalArguments is None:
                headTrackingPositionalArguments = ()
            if headTrackingKeywordArguments is None:
                headTrackingKeywordArguments = {}
            self.trackingDevice = headTrackingReceiver(
                context, "HeadTrackingReceiver", self,
                *headTrackingPositionalArguments,
                **headTrackingKeywordArguments)
            self.parameterConnection(
                self.trackingDevice.parameterPort("orientation"),
                self.hoaBinauralRenderer.parameterPort("tracking"))

        self.hoaSignalInput = visr.AudioInputFloat(
            "audioIn", self,
            self.hoaBinauralRenderer.audioPort("audioIn").width)
        self.binauralOutput = visr.AudioOutputFloat("audioOut", self, 2)

        self.audioConnection(self.hoaSignalInput,
                             self.hoaBinauralRenderer.audioPort("audioIn"))
        self.audioConnection(self.hoaBinauralRenderer.audioPort("audioOut"),
                             self.binauralOutput)
    def __init__(
            self,
            context,
            name,
            parent,  # Standard arguments for a VISR component
            numberOfObjects,  # Number of audio objects to be rendered.
            *,  # No positional arguments beyond this point; see the docstring below.
            sofaFile=None,
            hrirPositions=None,
            hrirData=None,
            hrirDelays=None,
            headOrientation=None,
            headTracking=True,
            dynamicITD=True,
            dynamicILD=True,
            hrirInterpolation=True,
            filterCrossfading=False,
            interpolatingConvolver=False,
            fftImplementation="default"):
        """
        Constructor.

        Parameters
        ----------
        context : visr.SignalFlowContext
            Standard visr.Component construction argument, holds the block size and the sampling frequency
        name : string
            Name of the component, Standard visr.Component construction argument
        parent : visr.CompositeComponent
            Containing component if there is one, None if this is a top-level component of the signal flow.
        numberOfObjects: int
            Maximum number of audio objects
        sofaFile: str, optional
            Optional SOFA file for loading the HRIR and associated data (HRIR measurement positions and delays).
            If not provided, the information must be provided by the hrirPositions and hrirData arguments.
        hrirPositions: numpy.ndarray, optional
            Optional way to provide the measurement grid for the BRIR listener view directions.
            If a SOFA file is provided, this is optional and overrides the listener view data
            in the file. Otherwise this argument is mandatory.
            Dimension #grid directions x (dimension of position argument)
        hrirData: numpy.ndarray, optional
            Optional way to provide the BRIR data.
            Dimension: #grid directions x #ears (2) x #loudspeakers x #ir length
        hrirDelays: numpy.ndarray, optional
            Optional BRIR delays. If a SOFA file is given, this argument overrides
            a potential delay setting from the file. Otherwise, no extra delays
            are applied unless this option is provided.
            Dimension: #grid directions x #ears (2) x #loudspeakers
        headOrientation: array-like, optional
            Head orientation in spherical coordinates (2- or 3-element vector or list).
            Either a static orientation (when no tracking is used), or the
            initial view direction.
        headTracking: bool
            Whether dynamic head tracking is supported. If True, a parameter input with type
            pml.ListenerPosition and protocol pml.DoubleBuffering is created.
        dynamicITD: bool, optional
            Whether the ITD is applied separately. This requires preprocessed HRIR data.
        dynamicILD: bool, optional
            Whether the ILD is computed and applied separately. At the moment this
            feature is not used (apart from applying the object gains).
        hrirInterpolation: bool, optional
            Whether the controller supports interpolation between neighbouring HRTF grid
            points. False means nearest neighbour (no interpolation), True
            enables barycentric interpolation.
        filterCrossfading: bool, optional
            Use a crossfading FIR filter matrix to avoid switching artifacts.
        interpolatingConvolver: bool, optional
            Use an interpolating FIR filter matrix that blends between HRIR filters
            according to interpolation weights sent by the controller, instead of
            exchanging complete filters.
        fftImplementation: string, optional
            The FFT implementation to use. The default value selects VISR's default
            FFT library for the platform.
        """
        super(DynamicHrirRenderer, self).__init__(context, name, parent)
        self.objectSignalInput = visr.AudioInputFloat("audioIn", self,
                                                      numberOfObjects)
        self.binauralOutput = visr.AudioOutputFloat("audioOut", self, 2)
        self.objectVectorInput = visr.ParameterInput(
            "objectVector", self, pml.ObjectVector.staticType,
            pml.DoubleBufferingProtocol.staticType, pml.EmptyParameterConfig())
        if headTracking:
            self.trackingInput = visr.ParameterInput(
                "tracking", self, pml.ListenerPosition.staticType,
                pml.DoubleBufferingProtocol.staticType,
                pml.EmptyParameterConfig())

        if (hrirData is None) == (sofaFile is None):
            raise ValueError(
                "Exactly one of the arguments sofaFile and hrirData must be present."
            )
        if sofaFile is not None:
            # We don't support HRIR truncation here because they are usually quite short.
            [sofaHrirPositions, hrirData,
             sofaHrirDelays] = readSofaFile(sofaFile)
            # If hrirDelays is not provided as an argument, use the one retrieved from the SOFA file
            if hrirDelays is None:
                hrirDelays = sofaHrirDelays
            # Use the positions obtained from the SOFA file only if the argument is not set
            if hrirPositions is None:
                hrirPositions = sofaHrirPositions

        if dynamicITD:
            if (hrirDelays is None) or (hrirDelays.ndim != 2) \
                    or (hrirDelays.shape != (hrirData.shape[0], 2)):
                raise ValueError(
                    'If the "dynamicITD" option is given, the parameter "hrirDelays" must be a #hrirs x 2 matrix.'
                )

        self.dynamicHrirController = DynamicHrirController(
            context,
            "DynamicHrirController",
            self,
            numberOfObjects,
            hrirPositions,
            hrirData,
            useHeadTracking=headTracking,
            dynamicITD=dynamicITD,
            dynamicILD=dynamicILD,
            hrirInterpolation=hrirInterpolation,
            interpolatingConvolver=interpolatingConvolver,
            hrirDelays=hrirDelays)

        self.parameterConnection(
            self.objectVectorInput,
            self.dynamicHrirController.parameterPort("objectVector"))
        if headTracking:
            self.parameterConnection(
                self.trackingInput,
                self.dynamicHrirController.parameterPort("headTracking"))

        firLength = hrirData.shape[-1]

        # Used only if the interpolating convolver is selected.
        numberOfInterpolants = 3 if hrirInterpolation else 1
        interpolationSteps = context.period if filterCrossfading else 0

        if dynamicITD or dynamicILD:
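            # Assemble the control-port configuration for the delay vector:
            # dynamic ITD requires a delay input, dynamic ILD a gain input.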
            if dynamicITD:
                delayControls = rcl.DelayVector.ControlPortConfig.Delay
            else:
                delayControls = rcl.DelayVector.ControlPortConfig.No
            if dynamicILD:
                delayControls = delayControls | rcl.DelayVector.ControlPortConfig.Gain
                initialGain = 0.0  # If the ILD is applied in the DelayVector, start from zero.
            else:
                initialGain = 1.0  # Fixed setting as the gain of the delay vector is not used

            self.delayVector = rcl.DelayVector(
                context,
                "delayVector",
                self,
                numberOfObjects * 2,
                interpolationType="lagrangeOrder3",
                initialDelay=0,
                controlInputs=delayControls,
                methodDelayPolicy=rcl.DelayMatrix.MethodDelayPolicy.Add,
                initialGain=initialGain,
                interpolationSteps=context.period)

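            # Fan each object signal out to both ears: delay vector inputs
            # 0..numberOfObjects-1 carry the left-ear copies, the remaining
            # numberOfObjects channels the right-ear copies.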
            inConnections = [
                i % numberOfObjects for i in range(numberOfObjects * 2)
            ]
            self.audioConnection(self.objectSignalInput, inConnections,
                                 self.delayVector.audioPort("in"),
                                 range(2 * numberOfObjects))

            # Define the routing for the binaural convolver such that it matches
            # the layout of the flat HRIR filter matrix.
            filterRouting = rbbl.FilterRoutingList()
            for idx in range(0, numberOfObjects):
                filterRouting.addRouting(idx, 0, idx, 1.0)
                filterRouting.addRouting(idx + numberOfObjects, 1,
                                         idx + numberOfObjects, 1.0)
            numMatrixInputs = 2 * numberOfObjects
        else:
            # Each object signal feeds both ears: filter indices 0..N-1 hold the
            # left-ear HRIRs, N..2N-1 the right-ear HRIRs.
            filterRouting = rbbl.FilterRoutingList()
            for idx in range(0, numberOfObjects):
                filterRouting.addRouting(idx, 0, idx, 1.0)
                filterRouting.addRouting(idx, 1, idx + numberOfObjects, 1.0)
            numMatrixInputs = numberOfObjects

        if interpolatingConvolver:
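            # Flatten all leading dimensions of hrirData (grid points, ears)
            # into a single filter index, as expected by the interpolating
            # convolver.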
            numFilters = np.prod(np.array(hrirData.shape[0:-1]))
            filterReshaped = np.reshape(hrirData, (numFilters, firLength), 'C')
            self.convolver = rcl.InterpolatingFirFilterMatrix(
                context,
                'convolutionEngine',
                self,
                numberOfInputs=numMatrixInputs,
                numberOfOutputs=2,
                maxFilters=numFilters,
                filterLength=firLength,
                maxRoutings=2 * numberOfObjects,
                numberOfInterpolants=numberOfInterpolants,
                transitionSamples=interpolationSteps,
                filters=filterReshaped,
                routings=filterRouting,
                controlInputs=rcl.InterpolatingFirFilterMatrix.
                ControlPortConfig.Interpolants,
                fftImplementation=fftImplementation)
        elif filterCrossfading:
            self.convolver = rcl.CrossfadingFirFilterMatrix(
                context,
                'convolutionEngine',
                self,
                numberOfInputs=numMatrixInputs,
                numberOfOutputs=2,
                maxFilters=2 * numberOfObjects,
                filterLength=firLength,
                maxRoutings=2 * numberOfObjects,
                routings=filterRouting,
                transitionSamples=context.period,
                controlInputs=rcl.CrossfadingFirFilterMatrix.ControlPortConfig.
                Filters,
                fftImplementation=fftImplementation)
        else:
            self.convolver = rcl.FirFilterMatrix(
                context,
                'convolutionEngine',
                self,
                numberOfInputs=numMatrixInputs,
                numberOfOutputs=2,
                maxFilters=2 * numberOfObjects,
                filterLength=firLength,
                maxRoutings=2 * numberOfObjects,
                routings=filterRouting,
                controlInputs=rcl.FirFilterMatrix.ControlPortConfig.Filters,
                fftImplementation=fftImplementation)
        if dynamicITD or dynamicILD:
            self.audioConnection(self.delayVector.audioPort("out"),
                                 self.convolver.audioPort("in"))
            if dynamicITD:
                self.parameterConnection(
                    self.dynamicHrirController.parameterPort("delayOutput"),
                    self.delayVector.parameterPort("delayInput"))
            if dynamicILD:
                self.parameterConnection(
                    self.dynamicHrirController.parameterPort("gainOutput"),
                    self.delayVector.parameterPort("gainInput"))
        else:
            self.audioConnection(self.objectSignalInput,
                                 self.convolver.audioPort("in"))

        self.audioConnection(self.convolver.audioPort("out"),
                             self.binauralOutput)
        if interpolatingConvolver:
            self.parameterConnection(
                self.dynamicHrirController.parameterPort("interpolatorOutput"),
                self.convolver.parameterPort("interpolantInput"))
        else:
            self.parameterConnection(
                self.dynamicHrirController.parameterPort("filterOutput"),
                self.convolver.parameterPort("filterInput"))
    def __init__(self,
                 context,
                 name,
                 parent,
                 numberOfObjects,
                 maxHoaOrder=None,
                 sofaFile=None,
                 decodingFilters=None,
                 interpolationSteps=None,
                 headOrientation=None,
                 headTracking=True,
                 objectChannelAllocation=False,
                 fftImplementation='default'):
        """
        Constructor.

        Parameters
        ----------
        context : visr.SignalFlowContext
            Standard visr.Component construction argument; holds the block size and the sampling frequency.
        name : string
            Name of the component; standard visr.Component construction argument.
        parent : visr.CompositeComponent
            Containing component if there is one, None if this is a top-level component of the signal flow.
        numberOfObjects : int
            The number of audio objects to be rendered.
        maxHoaOrder: int or None
            The maximum HOA order that can be reproduced. If None, the HOA order is deduced
            from the first dimension of the HOA filters (possibly contained in a SOFA file).
        sofaFile: string or NoneType
            Name of a SOFA file containing the HOA decoding filters. Exactly one of
            sofaFile and decodingFilters must be provided.
        decodingFilters : numpy.ndarray or NoneType
            Alternative way to provide the HOA decoding filters.
        interpolationSteps: int, optional
            Number of samples over which gain changes in the encoder matrix are
            interpolated. If None (default), one processing block (context.period) is used.
        headOrientation : array-like
            Head orientation in spherical coordinates (2- or 3-element vector or list).
            Either a static orientation (when no tracking is used), or the initial
            view direction.
        headTracking: bool
            Whether dynamic head tracking is active.
        objectChannelAllocation: bool
            Whether the processing resources are allocated from a pool of resources
            (True), or whether fixed processing resources statically tied to the audio signal channels are used.
            Not implemented at the moment, so leave the default value (False).
        fftImplementation: string, optional
            The FFT library to be used in the filtering. The default uses VISR's
            default implementation for the present platform.
        """
        if (decodingFilters is None) == (sofaFile is None):
            raise ValueError(
                "HoaObjectToBinauralRenderer: Either 'decodingFilters' or 'sofaFile' must be provided."
            )
        if sofaFile is None:
            filters = decodingFilters
        else:
            # pos and delays are not used here.
            [pos, filters, delays] = readSofaFile(sofaFile)

        if maxHoaOrder is None:
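            # An order-N HOA representation comprises (N+1)^2 coefficients, so
            # the order can be recovered when the filter count is a perfect square.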
            numHoaCoeffs = filters.shape[0]
            orderP1 = int(np.floor(np.sqrt(numHoaCoeffs)))
            if orderP1**2 != numHoaCoeffs:
                raise ValueError(
                    "If maxHoaOrder is not given, the number of HOA filters must be a square number"
                )
            maxHoaOrder = orderP1 - 1
        else:
            numHoaCoeffs = (maxHoaOrder + 1)**2

        if filters.ndim != 3 or filters.shape[1] != 2 \
                or filters.shape[0] < numHoaCoeffs:
            raise ValueError(
                "HoaObjectToBinauralRenderer: the filter data must be a 3D matrix where the second dimension is 2 and the first dimension is equal to or larger than (maxHoaOrder+1)^2."
            )

        super(HoaObjectToBinauralRenderer,
              self).__init__(context, name, parent)
        self.objectSignalInput = visr.AudioInputFloat("audioIn", self,
                                                      numberOfObjects)
        self.binauralOutput = visr.AudioOutputFloat("audioOut", self, 2)
        self.objectVectorInput = visr.ParameterInput(
            "objectVector", self, pml.ObjectVector.staticType,
            pml.DoubleBufferingProtocol.staticType, pml.EmptyParameterConfig())

        if interpolationSteps is None:
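            # Default: ramp encoder gain changes over one processing block.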
            interpolationSteps = context.period

        self.objectEncoder = HoaObjectEncoder(
            context,
            'HoaEncoder',
            self,
            numberOfObjects=numberOfObjects,
            hoaOrder=maxHoaOrder,
            channelAllocation=objectChannelAllocation)

        self.parameterConnection(
            self.objectVectorInput,
            self.objectEncoder.parameterPort("objectVector"))

        self.encoderMatrix = rcl.GainMatrix(
            context,
            "encoderMatrix",
            self,
            numberOfInputs=numberOfObjects,
            numberOfOutputs=(maxHoaOrder + 1)**2,
            interpolationSteps=interpolationSteps,
            initialGains=0.0,
            controlInput=True)
        self.audioConnection(self.objectSignalInput,
                             self.encoderMatrix.audioPort("in"))

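        # Stack the filters of both ears into one matrix: filter indices
        # 0..numHoaCoeffs-1 address the first ear (output 0), the following
        # numHoaCoeffs indices the second ear (output 1).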
        filterMtx = np.concatenate(
            (filters[0:numHoaCoeffs, 0, :], filters[0:numHoaCoeffs, 1, :]))

        routings = rbbl.FilterRoutingList()
        for idx in range(0, numHoaCoeffs):
            routings.addRouting(idx, 0, idx, 1.0)
            routings.addRouting(idx, 1, idx + numHoaCoeffs, 1.0)

        self.binauralFilterBank = rcl.FirFilterMatrix(
            context,
            'binauralFilterBank',
            self,
            numberOfInputs=numHoaCoeffs,
            numberOfOutputs=2,
            filterLength=filters.shape[-1],
            maxFilters=2 * numHoaCoeffs,
            maxRoutings=2 * numHoaCoeffs,
            filters=filterMtx,
            routings=routings,
            controlInputs=rcl.FirFilterMatrix.ControlPortConfig.NoInputs,
            fftImplementation=fftImplementation)

        self.audioConnection(self.encoderMatrix.audioPort("out"),
                             self.binauralFilterBank.audioPort("in"))
        self.audioConnection(self.binauralFilterBank.audioPort("out"),
                             self.binauralOutput)

        if headTracking:
            self.trackingInput = visr.ParameterInput(
                "tracking", self, pml.ListenerPosition.staticType,
                pml.DoubleBufferingProtocol.staticType,
                pml.EmptyParameterConfig())
            self.coefficientRotator = HoaCoefficientRotation(
                context,
                'coefficientRotator',
                self,
                numberOfObjects=numberOfObjects,
                hoaOrder=maxHoaOrder)
            self.parameterConnection(
                self.trackingInput,
                self.coefficientRotator.parameterPort("tracking"))
            self.parameterConnection(
                self.objectEncoder.parameterPort("coefficientOutput"),
                self.coefficientRotator.parameterPort("coefficientInput"))
            self.parameterConnection(
                self.coefficientRotator.parameterPort("coefficientOutput"),
                self.encoderMatrix.parameterPort("gainInput"))
        else:
            self.parameterConnection(
                self.objectEncoder.parameterPort("coefficientOutput"),
                self.encoderMatrix.parameterPort("gainInput"))
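
    # A minimal usage sketch (an assumption, not part of the original code:
    # "hoa_decoding_filters.sofa" is a placeholder SOFA file holding
    # (maxHoaOrder+1)^2 x 2 x irLength binaural decoding filters):
    #
    #   ctx = visr.SignalFlowContext(period=1024, samplingFrequency=48000)
    #   renderer = HoaObjectToBinauralRenderer(ctx, "hoaRenderer", None,
    #                                          numberOfObjects=8,
    #                                          sofaFile="hoa_decoding_filters.sofa",
    #                                          headTracking=True)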