def __init__( self ):
        # Set up the UI
        super( LoudnessGui, self ).__init__( None )
        self.audioRunning = False
        self.setGeometry( 0, 0, 800, 600 )

        self.layout = QtGui.QGridLayout( self )

        self.startButton = QtGui.QPushButton( "Start Audio" )
        self.stopButton = QtGui.QPushButton( "Stop Audio" )
        self.layout.addWidget( self.startButton, 0, 0 )
        self.layout.addWidget( self.stopButton, 0, 1 )

        self.loudnessLabel = QtGui.QLabel( '--- dB' )
        self.layout.addWidget( self.loudnessLabel, 0, 2 )

        self.startButton.clicked.connect( self.startAudio )
        self.stopButton.clicked.connect( self.stopAudio )

        self.plotWidget = pg.PlotWidget()
        self.plotWidget.setRange( xRange = (0,numPlotPoints-1), yRange = (-130.0,6.0))
        self.layout.addWidget( self.plotWidget, 1, 0, 1, 4 )
        self.loudnessPlot = pg.PlotCurveItem()
        self.plotWidget.addItem( self.loudnessPlot )

        self.loudnessHist = -120.0 * np.ones( numPlotPoints, dtype=np.float32 )
        self.loudnessPlot.setData( self.loudnessHist )

        # %% Setup the DSP part

        self.context = visr.SignalFlowContext( period = blockSize,
                                               samplingFrequency = samplingFrequency )
        self.meter = LoudnessMeter( self.context, 'meter', None, numChannels,
                                   measurePeriod = 0.4, audioOut = True )

        audioOptions = ai.AudioInterface.Configuration( numChannels,
                                                       numChannels,
                                                       samplingFrequency,
                                                       blockSize )
        self.audioInterface = ai.AudioInterfaceFactory.create( audioInterfaceName,
                                                              audioOptions,
                                                              audioBackendOptions )
        self.flow = rrl.AudioSignalFlow( self.meter )

        self.loudnessPort = self.flow.parameterSendPort( 'loudnessOut' )

        self.readTimer = QtCore.QTimer( self )
        self.readTimer.timeout.connect( self.getMeterValues )
        self.readTimer.setSingleShot( False )
        self.readTimer.setInterval( 100 ) # ms
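
# The slot methods wired up above are not part of this excerpt. A minimal
# sketch, assuming the VISR audiointerfaces registerCallback/start/stop API
# and a message-queue protocol (empty()/front()/pop(), scalar .value) on the
# loudness port:
def startAudio( self ):
        if not self.audioRunning:
            self.audioInterface.registerCallback( self.flow )
            self.audioInterface.start()
            self.readTimer.start()
            self.audioRunning = True

def stopAudio( self ):
        if self.audioRunning:
            self.audioInterface.stop()
            self.audioInterface.unregisterCallback()
            self.readTimer.stop()
            self.audioRunning = False

def getMeterValues( self ):
        # Drain all loudness values queued since the last timer tick and
        # update the scrolling history plot and the numeric label.
        while not self.loudnessPort.empty():
            Lk = self.loudnessPort.front().value
            self.loudnessPort.pop()
            self.loudnessHist = np.roll( self.loudnessHist, -1 )
            self.loudnessHist[-1] = Lk
        self.loudnessLabel.setText( '%.1f dB' % self.loudnessHist[-1] )
        self.loudnessPlot.setData( self.loudnessHist )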
updateRate = 4

ctxt = visr.SignalFlowContext(blockSize, fs)

# Instantiate the atomic flow
dl = rcl.DelayMatrix(ctxt, "DL")
dl.setup(numberOfInputs=numberOfInputs,
         numberOfOutputs=numberOfOutputs,
         interpolationType="lagrangeOrder0",
         initialDelay=0.0001,
         initialGain=0.0,
         interpolationSteps=updateRate * blockSize,
         methodDelayPolicy=rcl.DelayMatrix.MethodDelayPolicy.Limit,
         controlInputs=rcl.DelayMatrix.ControlPortConfig.All)

flow = rrl.AudioSignalFlow(dl)

gainInputProtocol = flow.parameterReceivePort("gainInput")
delayInputProtocol = flow.parameterReceivePort("delayInput")

t = np.arange(0, numSamples, dtype=np.float32) / fs

inputSignal = np.zeros([numberOfInputs, numSamples], dtype=np.float32)
inputSignal[0, :] = 0.5 * np.sin(2.0 * 440 * np.pi * t)
inputSignal[1, :] = 0.25 * np.cos(2.0 * 880 * np.pi * t)
inputSignal[2, :] = 0.25 * np.cos(2.0 * 1760 * np.pi * t)

outputSignal = np.zeros([numberOfOutputs, numSamples], dtype=np.float32)

gainInputProtocol.data()[0, 0] = 1.0
gainInputProtocol.data()[1, 2] = 0.7
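
# The excerpt stops here. A sketch of the remaining steps, following the
# block-wise pattern of the other examples: commit the new gain matrix via
# the double-buffering protocol, then run the flow (numSamples is defined
# earlier in the full script).
gainInputProtocol.swapBuffers()

numBlocks = numSamples // blockSize
for blockIdx in range(numBlocks):
    inputBlock = inputSignal[:, blockIdx * blockSize:(blockIdx + 1) * blockSize]
    outputSignal[:, blockIdx * blockSize:(blockIdx + 1) * blockSize] = \
        flow.process(inputBlock)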
    numLoudspeakers,
    port,
    baud,
    sofaFile,
    enableSerial=useTracking,
    dynITD=useDynamicITD,
    hrirInterp=useHRIRinterpolation,
    irTruncationLength=BRIRtruncationLength,
    headTrackingCalibrationPort=headTrackingCalibrationPort,
    switchUdpPort=12345)

result, messages = rrl.checkConnectionIntegrity(controller)
if not result:
    print(messages)

flow = rrl.AudioSignalFlow(controller)

if not useSerialPort and useTracking:
    trackingInput = flow.parameterReceivePort("tracking")

aiConfig = ai.AudioInterface.Configuration(flow.numberOfCaptureChannels,
                                           flow.numberOfPlaybackChannels, fs,
                                           blockSize)
# If the client name is too long, JACK reports an error.
jackCfg = """{ "clientname": "VirtualLoudspeakerRenderer",
  "autoconnect" : "false",
  "portconfig":
  {
    "capture":  [{ "basename":"in", "externalport" : {} }],
    "playback": [{ "basename":"out", "externalport" : {} }]
  }
Example #4
numSamples = blockSize * numIterations

numChannels = 32

comp = visr_drc.Compressor(context,
                           "Compressor",
                           None,
                           numberOfChannels=numChannels,
                           averagingTimeSeconds=0.01,
                           attackTimeSeconds=0.001,
                           releaseTimeSeconds=0.005,
                           compressorThresholdDB=-6.02,
                           compressorSlope=1)

flow = rrl.AudioSignalFlow(comp)

fSig = 80

fMod = 2

t = 1.0 / fs * np.arange(numSamples)

modSig = 0.5 * (1.0 + np.cos(fMod * 2 * np.pi * t))
toneSig = np.cos(fSig * 2 * np.pi * t)

inputSignal = np.zeros((numChannels, numSamples), dtype=np.float32)
inputSignal[0, :] = toneSig * modSig
# inputSignal[1,:] = 2.5*toneSig * modSig

outputSignal = np.zeros((numChannels, numSamples), dtype=np.float32)
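
# Sketch of the processing loop omitted by the excerpt, following the
# block-wise pattern used throughout these examples (numIterations is
# defined above via numSamples = blockSize * numIterations).
for blockIdx in range(numIterations):
    inputBlock = inputSignal[:, blockIdx * blockSize:(blockIdx + 1) * blockSize]
    outputSignal[:, blockIdx * blockSize:(blockIdx + 1) * blockSize] = \
        flow.process(inputBlock)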
                       numberOfInputs=numberOfObjects,
                       numberOfOutputs=numberOfOutputs,
                       interpolationPeriod=1024,
                       diffusionFilters=diffFilters,
                       controlDataType=pml.UnsignedInteger)
else:
    mr = RealTimeMultiRenderer(ctxt,
                               "MultiRenderer",
                               None,
                               loudspeakerConfigFiles=configFiles,
                               numberOfInputs=numberOfObjects,
                               numberOfOutputs=numberOfOutputs,
                               interpolationPeriod=1024,
                               diffusionFilterFile=diffFilterFile)

flow = rrl.AudioSignalFlow(mr)

objectInput = flow.parameterReceivePort('objectIn')

controlInput = flow.parameterReceivePort('controlIn')

inputSignal = np.zeros((2, signalLength), dtype=np.float32)
inputSignal[0, :] = 0.75 * np.sin(2.0 * np.pi * 440 * t)

outputSignal = np.zeros((numberOfOutputs, signalLength), dtype=np.float32)

for blockIdx in range(0, numBlocks):
    if blockIdx % (parameterUpdatePeriod // blockSize) == 0:
        az = 0.025 * blockIdx
        el = 0.1 * np.sin(0.025 * blockIdx)
        r = 1
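        # Sketch of the truncated loop remainder, mirroring Example #12 below;
        # the ObjectVector set()/swapBuffers() calls are assumed API.
        x, y, z = sph2cart(az, el, r)
        ps1 = objectmodel.PointSource(0)
        ps1.position = [x, y, z]
        ps1.level = 1
        ps1.channels = [0]
        objectInput.data().set([ps1])
        objectInput.swapBuffers()
    inputBlock = inputSignal[:, blockIdx * blockSize:(blockIdx + 1) * blockSize]
    outputSignal[:, blockIdx * blockSize:(blockIdx + 1) * blockSize] = \
        flow.process(inputBlock)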
Example #6
# Determine signal size and the number of blocks
numBlocks = inputSignal.shape[1] // blockSize # Note: this truncates the signal to a whole number of blocks
signalLength = blockSize * numBlocks

# Determine the number of channels.
numberOfChannels = inputSignal.shape[0]

# Data structure defining the basic parameters of the processing: block size and sampling frequency.
context = visr.SignalFlowContext( blockSize, samplingFrequency )

# Instantiate the signal flow (a component)
meter = LoudnessMeter( context, "meter", None, numberOfChannels = numberOfChannels )

# Instantiate an AudioSignalFlow object to execute the component.
flow = rrl.AudioSignalFlow( meter )

# Retrieve the component's external output port.
# This is used to retrieve the metered values generated by the LoudnessMeter.
loudnessOut = flow.parameterSendPort( 'loudnessOut' )

# Define a list to collect the computed loudness values.
loudness = []

# Execute the audio signal flow sequentially by processing the audio block-wise.
for blockIdx in range(0,numBlocks):
    # Extract the next audio block
    inputBlock = inputSignal[:, blockIdx*blockSize:(blockIdx+1)*blockSize]
    # Execute the signal flow (one iteration)
    flow.process( inputBlock )
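    # Collect any new loudness values from the parameter output.
    # A sketch, assuming a message-queue protocol (empty()/front()/pop())
    # and a scalar parameter with a .value attribute.
    while not loudnessOut.empty():
        loudness.append( loudnessOut.front().value )
        loudnessOut.pop()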
# A more configurable Python adder: 3 input ports, each of width 2.
# (The code below references `pa`, so this constructor must be active;
# `c` is the SignalFlowContext created in the full script.)
pa = PythonAdder( c, "pa0", None, 3, 2 )

numBlocks = 16
numSamples = numBlocks * blockSize

t = np.arange(0, numSamples, dtype=np.float32) / fs

inputSignal = np.zeros([6, numSamples], dtype=np.float32)
inputSignal[0, :] = np.sin(2 * np.pi * 440 * t)
inputSignal[1, :] = 0.5 * np.sin(2 * np.pi * 880 * t)
inputSignal[2, :] = 0.15 * np.sin(2 * np.pi * 1340 * t)

referenceOutput = inputSignal[0:2, :] + inputSignal[2:4, :] + inputSignal[4:6, :]

outputSignal = np.zeros((2, numSamples), dtype=np.float32)

flow = rrl.AudioSignalFlow(pa)

for blockIdx in range(0, numBlocks):
    inputBlock = inputSignal[:,
                             blockIdx * blockSize:(blockIdx + 1) * blockSize]
    outputBlock = flow.process(inputBlock)
    outputSignal[:,
                 blockIdx * blockSize:(blockIdx + 1) * blockSize] = outputBlock

plt.figure(1)
plt.plot(t, referenceOutput[0, :], 'bo-', t, outputSignal[0, :], 'rx-')
plt.show()
Example #8
numBlocks = 16
numSamples = numBlocks * blockSize

t = np.arange(0, numSamples, dtype=np.float32) / fs

inputSignal = np.zeros([4, numSamples], dtype=np.float32)
inputSignal[0, :] = np.sin(2 * np.pi * 440 * t)
inputSignal[1, :] = 0.5 * np.sin(2 * np.pi * 880 * t)
inputSignal[2, :] = 0.15 * np.sin(2 * np.pi * 1340 * t)

referenceOutput = inputSignal[0:2, :] + inputSignal[2:, :]

outputSignal = np.zeros((2, numSamples), dtype=np.float32)

c = visr.SignalFlowContext(blockSize, fs)

adder = rcl.Add(c, 'add', numInputs=2, width=2)

flow = rrl.AudioSignalFlow(adder)

for blockIdx in range(0, numBlocks):
    inputBlock = inputSignal[:,
                             blockIdx * blockSize:(blockIdx + 1) * blockSize]
    outputBlock = flow.process(inputBlock)
    outputSignal[:,
                 blockIdx * blockSize:(blockIdx + 1) * blockSize] = outputBlock

plt.figure(1)
plt.plot(t, referenceOutput[0, :], 'bo-', t, outputSignal[0, :], 'rx-')
plt.show()
renderer1 = signalflows.BaselineRenderer(context=ctxt,
                                         name='renderer1',
                                         parent=None,
                                         loudspeakerConfig=lc,
                                         numberOfInputs=2,
                                         numberOfOutputs=41,
                                         interpolationPeriod=4096,
                                         diffusionFilters=diffFilters,
                                         trackingConfiguration='',
                                         sceneReceiverPort=4242,
                                         reverbConfig='',
                                         frequencyDependentPanning=False)

print('Created renderer.')

flow1 = rrl.AudioSignalFlow(renderer1)

cc1 = visr.CompositeComponent(ctxt, 'composite1', None)

numComponents = cc1.numberOfComponents

print('Composite has %d components' % (numComponents))

add1 = rcl.Add(ctxt, 'adder1', cc1)
add1.setup(width=8, numInputs=2)

add2 = rcl.Add(ctxt, 'adder2', cc1)
add2.setup(width=8, numInputs=2)

# In C++ this is a protected method, i.e., it can be called only from within
# the component itself.
Example #10
numChannels = 3

inputSignal = np.ones((numChannels, numSamples), dtype=np.float32)
outputSignal = np.inf * np.ones((numChannels, numSamples), dtype=np.float32)

interpolationPeriod = 64

ctxt = visr.SignalFlowContext(blockSize, samplingFrequency)

gvec = rcl.GainVector(ctxt, 'calc', None)
gvec.setup(numberOfChannels=numChannels,
           interpolationSteps=88,
           controlInputs=True)

flow = rrl.AudioSignalFlow(gvec)

paramInput = flow.parameterReceivePort('gainInput')
gv = paramInput.data()
gv.set(np.asarray([1.0, 1.0, 1.0]))
paramInput.swapBuffers()

# Dummy input required for the process() function

for blockIdx in range(0, numBlocks):

    if blockIdx == 3:
        gv = paramInput.data()
        gv[0] = 0.55
        gv[1] = 1.0
        gv[2] = 1.0
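        # Commit the updated gain vector (double-buffering protocol); the
        # original excerpt is truncated here.
        paramInput.swapBuffers()

    outputSignal[:, blockIdx * blockSize:(blockIdx + 1) * blockSize] = \
        flow.process(inputSignal[:, blockIdx * blockSize:(blockIdx + 1) * blockSize])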
#lc = pml.LoudspeakerArray( '/home/andi/dev/visr/config/isvr/audiolab_39speakers_1subwoofer.xml' )
lc = pml.LoudspeakerArray(
    '/home/andi/dev/visr/config/generic/bs2051-9+10+3_linear.xml')
# lc = pml.LoudspeakerArray( 'c:/local/visr/config/isvr/audiolab_39speakers_1subwoofer.xml' )

reverbConfigString = '{ \"numReverbObjects\": 5, \"discreteReflectionsPerObject\": 20, \"lateReverbFilterLength\": 0.5, \"lateReverbDecorrelationFilters\": \"/home/andi/dev/visr/config/filters/random_phase_allpass_64ch_1024taps.wav\" }'

rsaoRenderer = rsao.ReverbObjectRenderer(ctxt,
                                         'rsaoRenderer',
                                         None,
                                         loudspeakerConfiguration=lc,
                                         numberOfObjectSignals=2,
                                         reverbConfig=reverbConfigString)

flow = rrl.AudioSignalFlow(rsaoRenderer)

paramInput = flow.parameterReceivePort('objectIn')

inputSignal = np.zeros((2, signalLength), dtype=np.float32)
inputSignal[0, :] = 0.75 * np.sin(2.0 * np.pi * 440 * t)

outputSignal = np.zeros((numOutputChannels, signalLength), dtype=np.float32)

for blockIdx in range(0, numBlocks):
    if blockIdx % (parameterUpdatePeriod // blockSize) == 0:
        az = 0.025 * blockIdx
        el = 0.1 * np.sin(0.025 * blockIdx)
        r = 1
        x, y, z = sph2cart(az, el, r)
        ps1 = objectmodel.PointSource(0)
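        # Sketch of the truncated remainder: finish the point source and hand
        # it to the renderer (ObjectVector set()/swapBuffers() assumed API).
        ps1.position = [x, y, z]
        ps1.level = 1
        ps1.channels = [0]
        paramInput.data().set([ps1])
        paramInput.swapBuffers()
    inputBlock = inputSignal[:, blockIdx * blockSize:(blockIdx + 1) * blockSize]
    outputSignal[:, blockIdx * blockSize:(blockIdx + 1) * blockSize] = \
        flow.process(inputBlock)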
Example #12
numObjects = 1

signalLength = bs * numBlocks
t = 1.0 / samplingFrequency * np.arange(0, signalLength)

ctxt = visr.SignalFlowContext(bs, samplingFrequency)

lc = panning.LoudspeakerArray('../data/bs2051-4+5+0.xml')

numOutputChannels = lc.numberOfRegularLoudspeakers

rendererVbap = VbapRenderer(ctxt, 'renderer', None, numObjects, lspConfig=lc)
rendererL2 = VbapL2Renderer(ctxt, 'renderer', None, numObjects, lspArray=lc)

flowVbap = rrl.AudioSignalFlow(rendererVbap)
flow = rrl.AudioSignalFlow(rendererL2)

paramInput = flow.parameterReceivePort('objects')

paramInputVbap = flowVbap.parameterReceivePort('objects')

az = np.linspace(0, 2.0 * np.pi, numBlocks)
el = 10.0 * np.pi / 180.0
r = 1.0

inSig = np.zeros((numObjects, signalLength), dtype=np.float32)
inSig[0, :] = 0.75 * np.sin(2.0 * np.pi * 88 * t)

outSigL2 = np.zeros((numOutputChannels, signalLength), dtype=np.float32)
outSigVbap = np.zeros((numOutputChannels, signalLength), dtype=np.float32)
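
# Sketch of a block-wise comparison loop (the excerpt stops here). Assumes
# sph2cart and objectmodel from the earlier examples and the ObjectVector
# set()/swapBuffers() API.
for blockIdx in range(numBlocks):
    x, y, z = sph2cart(az[blockIdx], el, r)
    ps = objectmodel.PointSource(0)
    ps.position = [x, y, z]
    ps.level = 1
    ps.channels = [0]
    for pIn in (paramInput, paramInputVbap):
        pIn.data().set([ps])
        pIn.swapBuffers()
    inBlock = inSig[:, blockIdx * bs:(blockIdx + 1) * bs]
    outSigL2[:, blockIdx * bs:(blockIdx + 1) * bs] = flow.process(inBlock)
    outSigVbap[:, blockIdx * bs:(blockIdx + 1) * bs] = flowVbap.process(inBlock)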
                            interpolationSteps = blockSize,
                            headTracking = False
                            )
graphDyn = DynamicBinauralRenderer( context, "DynamicBinauralRenderer", None, numBinauralObjects,
                                  headTracking = False,
                                  dynITD = False,
                                  dynILD = False,
                                  hrirInterp = True )

#graphHoa = DynamicBinauralRenderer( context, "DynamicBinauralRenderer", None, numBinauralObjects,
#                                  headTracking = False,
#                                  dynITD = False,
#                                  dynILD = False,
#                                  hrirInterp = False )

flowHoa = rrl.AudioSignalFlow( graphHoa )
flowDyn = rrl.AudioSignalFlow( graphDyn )

paramInputHoa = flowHoa.parameterReceivePort('objectVector')
paramInputDyn = flowDyn.parameterReceivePort('objectVector')

for testIdx in range(0,numTestSignals):

    outputSignalHoa = np.zeros( (numOutputChannels, signalLength ), dtype=np.float32 )
    outputSignalDyn = np.zeros( (numOutputChannels, signalLength ), dtype=np.float32 )

    x,y,z = sph2cart( azimuths[testIdx], elevations[testIdx], 1.0 )
    ps1 = objectmodel.PointSource(0)
    ps1.position = [x,y,z]
    ps1.level = 1
    ps1.groupId = 5
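    # Sketch of the truncated remainder of the test loop: feed the source to
    # both renderers and process block-wise. inputSignal, blockSize and the
    # ObjectVector set()/swapBuffers() API are assumed from the full script.
    ps1.channels = [0]
    for pIn in (paramInputHoa, paramInputDyn):
        pIn.data().set([ps1])
        pIn.swapBuffers()
    for blockIdx in range(signalLength // blockSize):
        inputBlock = inputSignal[:, blockIdx * blockSize:(blockIdx + 1) * blockSize]
        outputSignalHoa[:, blockIdx * blockSize:(blockIdx + 1) * blockSize] = \
            flowHoa.process(inputBlock)
        outputSignalDyn[:, blockIdx * blockSize:(blockIdx + 1) * blockSize] = \
            flowDyn.process(inputBlock)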
Example #14
# Load the loudspeaker configuration
lc = panning.LoudspeakerArray('../data/bs2051-4+5+0.xml')

numLsp = lc.numberOfRegularLoudspeakers

# Create components for the two panning algorithms
pannerVbap = rcl.PanningCalculator(ctxt,
                                   'vbapbCalc',
                                   None,
                                   numObjects,
                                   arrayConfig=lc)
pannerL2 = VbapL2Panner(ctxt, 'constSpread', None, numObjects, lspArray=lc)

# %% Create signal flow objects for the two algorithms
flowVbap = rrl.AudioSignalFlow(pannerVbap)
flowL2 = rrl.AudioSignalFlow(pannerL2)

# %% Retrieve parameter inputs and outputs for the two audio objects.
paramInputVbap = flowVbap.parameterReceivePort('objectVectorInput')
paramOutputVbap = flowVbap.parameterSendPort('vbapGains')

paramInputL2 = flowL2.parameterReceivePort('objects')
paramOutputL2 = flowL2.parameterSendPort('gains')

# %% Define a number of object positions in spherical coordinates.
# Here we define a set of positions in the horizontal plane at 1-degree spacing.
az = np.linspace(0.0, 2 * np.pi, numBlocks)
el = 10.0 * np.pi / 180.0

# %% Preallocate a matrix of output gains (#numLsp x #directions)
if False:
    calc = PythonPanner(ctxt,
                        'calc',
                        None,
                        numberOfObjects=numObjectChannels,
                        arrayConfig=lc)
else:
    calc = rcl.PanningCalculator(ctxt,
                                 'calc',
                                 None,
                                 numberOfObjects=numObjectChannels,
                                 arrayConfig=lc,
                                 separateLowpassPanning=True)

flow = rrl.AudioSignalFlow(calc)

paramInput = flow.parameterReceivePort('objectVectorInput')

# Dummy input required for the process() function
inputBlock = np.zeros((0, blockSize), dtype=np.float32)

lfGainOutput = flow.parameterSendPort("vbapGains")
hfGainOutput = flow.parameterSendPort("vbipGains")

hfGains = np.zeros((gridSize, numSpeakers))
lfGains = np.zeros((gridSize, numSpeakers))

for blockIdx in range(0, gridSize):
    az = 50.0 * np.pi / 180.0  # azGrid[blockIdx]
    el = 10.0 * np.pi / 180.0
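    # Sketch of the truncated loop body: place a source, run one (empty)
    # audio block, and read back the LF/HF gain vectors. PointSource/sph2cart
    # and reading the gain ports via data() are assumptions based on the
    # other examples.
    x, y, z = sph2cart(az, el, 1.0)
    ps = objectmodel.PointSource(0)
    ps.position = [x, y, z]
    ps.level = 1
    ps.channels = [0]
    paramInput.data().set([ps])
    paramInput.swapBuffers()
    flow.process(inputBlock)
    lfGains[blockIdx, :] = np.asarray(lfGainOutput.data())
    hfGains[blockIdx, :] = np.asarray(hfGainOutput.data())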
context = visr.SignalFlowContext(period=blockSize, samplingFrequency=fs)

# The parameter hoaOrder is inferred from the data dimensions in the SOFA file.
graph = HoaBinauralRenderer(context,
                            "HoaBinauralRenderer",
                            None,
                            sofaFile=sofaFile,
                            headTracking=useTracking,
                            headOrientation=[np.pi / 4, 0.0],
                            fftImplementation=fftImplementation)

result, messages = rrl.checkConnectionIntegrity(graph)
if not result:
    raise RuntimeError("Error in signal flow: %s" % messages)

flow = rrl.AudioSignalFlow(graph)

if useTracking:
    trackingInput = flow.parameterReceivePort('tracking')

# TODO: Provide a HOA input signal (ACN convention).
# Here we use a trivial, not very meaningful one: a sine wave on the y component.
inputSignal = np.zeros((inputWidth, signalLength), dtype=np.float32)
inputSignal[1, :] = 0.75 * np.sin(2.0 * np.pi * 440 * t)

# Preallocate the binaural output signal
outputSignal = np.zeros((numOutputChannels, signalLength), dtype=np.float32)

numPos = 360 // 5
azSequence = (2.0 * np.pi) / numPos * np.arange(0, numPos)
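
# Sketch of a rendering loop with a rotating head orientation (the excerpt
# stops here). The tracking update assumes a ListenerPosition-style parameter
# with an orientation attribute; adapt to the actual tracking protocol.
numBlocks = signalLength // blockSize
for blockIdx in range(numBlocks):
    if useTracking:
        yaw = azSequence[blockIdx % numPos]
        trackingInput.data().orientation = [yaw, 0.0, 0.0]  # assumed API
        trackingInput.swapBuffers()
    inputBlock = inputSignal[:, blockIdx * blockSize:(blockIdx + 1) * blockSize]
    outputSignal[:, blockIdx * blockSize:(blockIdx + 1) * blockSize] = \
        flow.process(inputBlock)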
    "DynamicHrirRenderer",
    None,
    numberOfObjects=numBinauralObjects,
    sofaFile=sofaFile,
    headTracking=useTracking,
    dynamicITD=useDynamicITD,
    dynamicILD=useDynamicILD,
    hrirInterpolation=useHRIRinterpolation,
    filterCrossfading=useCrossfading,
    interpolatingConvolver=useInterpolatingConvolver)

result, messages = rrl.checkConnectionIntegrity(renderer)
if not result:
    print(messages)

flow = rrl.AudioSignalFlow(renderer)
paramInput = flow.parameterReceivePort('objectVector')

if useTracking:
    trackingInput = flow.parameterReceivePort("tracking")

inputSignal = np.zeros((numBinauralObjects, signalLength), dtype=np.float32)
inputSignal[0, :] = 0.75 * np.sin(2.0 * np.pi * 440 * t)
outputSignal = np.zeros((numOutputChannels, signalLength), dtype=np.float32)

numPos = 360 // 5
azSequence = (2.0 * np.pi) / numPos * np.arange(0, numPos)

az = 0
el = 0
r = 1
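
# Sketch of the truncated processing loop: sweep the source through the
# azimuth sequence and render block-wise (sph2cart, objectmodel and the
# ObjectVector set()/swapBuffers() API assumed, as in the other examples).
numBlocks = signalLength // blockSize
for blockIdx in range(numBlocks):
    az = azSequence[blockIdx % numPos]
    x, y, z = sph2cart(az, el, r)
    ps = objectmodel.PointSource(0)
    ps.position = [x, y, z]
    ps.level = 1
    ps.channels = [0]
    paramInput.data().set([ps])
    paramInput.swapBuffers()
    inputBlock = inputSignal[:, blockIdx * blockSize:(blockIdx + 1) * blockSize]
    outputSignal[:, blockIdx * blockSize:(blockIdx + 1) * blockSize] = \
        flow.process(inputBlock)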
#cc = TFProcessing( context, "top", None, numberOfChannels=numberOfChannels,
#                  dftLength=dftSize, window=window, hopSize=hopSize )
cc = TFProcessing(context,
                  "top",
                  None,
                  numberOfChannels=numberOfChannels,
                  dftLength=dftSize,
                  windowLength=windowSize,
                  hopSize=hopSize)

res, msg = rrl.checkConnectionIntegrity(cc)
if not res:
    # Abort on an inconsistent flow, as in the HOA example above.
    raise RuntimeError("The top-level composite is inconsistent: %s" % msg)

flow = rrl.AudioSignalFlow(cc)

numBlocks = 16
numSamples = numBlocks * blockSize

t = np.arange(0, numSamples, dtype=np.float32) / samplingFrequency

inputSignal = np.zeros([numberOfChannels, numSamples], dtype=np.float32)
inputSignal[0, :] = np.sin(2 * np.pi * 440 * t)
inputSignal[1, :] = 0.5 * np.sin(2 * np.pi * 880 * t)

outputSignal = np.zeros([numberOfChannels, numSamples], dtype=np.float32)

for blockIdx in range(0, numBlocks):
    inputBlock = inputSignal[:,
                             blockIdx * blockSize:(blockIdx + 1) * blockSize]
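    # The excerpt ends mid-loop; the remaining lines follow the pattern used
    # in the examples above.
    outputBlock = flow.process(inputBlock)
    outputSignal[:, blockIdx * blockSize:(blockIdx + 1) * blockSize] = outputBlock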