Code example #1
context = visr.SignalFlowContext(period=blockSize, samplingFrequency=fs)

renderer = DynamicHrirRenderer(
    context,
    "DynamicHrirRenderer",
    None,
    numberOfObjects=numBinauralObjects,
    sofaFile=sofaFile,
    headTracking=useTracking,
    dynamicITD=useDynamicITD,
    dynamicILD=useDynamicILD,
    hrirInterpolation=useHRIRinterpolation,
    filterCrossfading=useCrossfading,
    interpolatingConvolver=useInterpolatingConvolver)

result, messages = rrl.checkConnectionIntegrity(renderer)
if not result:
    print(messages)

flow = rrl.AudioSignalFlow(renderer)
paramInput = flow.parameterReceivePort('objectVector')

if useTracking:
    trackingInput = flow.parameterReceivePort("tracking")

inputSignal = np.zeros((numBinauralObjects, signalLength), dtype=np.float32)
inputSignal[0, :] = 0.75 * np.sin(2.0 * np.pi * 440 * t)
outputSignal = np.zeros((numOutputChannels, signalLength), dtype=np.float32)

numPos = 360 // 5  # one azimuth position every 5 degrees
azSequence = (2.0 * np.pi) / numPos * np.arange(0, numPos)
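
# A minimal sketch of the block-processing loop that drives this renderer,
# assuming VISR's objectmodel bindings (import objectmodel) and the usual
# parameter-port protocol (data() / swapBuffers()). Treat the exact attribute
# and method names as assumptions rather than verbatim API.
numBlocks = signalLength // blockSize
for blockIdx in range(numBlocks):
    az = azSequence[blockIdx % numPos]
    ps = objectmodel.PointSource(0)
    ps.position = [np.cos(az), np.sin(az), 0.0]  # source circling in the horizontal plane
    ps.level = 0.5
    ps.channels = [ps.objectId]
    ov = paramInput.data()
    ov.clear()
    ov.insert(ps)
    paramInput.swapBuffers()  # publish the updated object vector to the flow
    inputBlock = inputSignal[:, blockIdx * blockSize:(blockIdx + 1) * blockSize]
    outputSignal[:, blockIdx * blockSize:(blockIdx + 1) * blockSize] = \
        flow.process(inputBlock)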
Code example #2
        trajectoryPositions=positions,
        trajectoryUpdateSamples=updatePeriod,
        controlReceivePort=4245)
else:
    renderer = RealTimeMultiRenderer(context,
                                     "MultiRenderer",
                                     None,
                                     loudspeakerConfigFiles=configPaths,
                                     numberOfInputs=4,
                                     numberOfOutputs=34,
                                     interpolationPeriod=blockSize,
                                     diffusionFilterFile=diffusionFilter,
                                     udpReceivePort=4242,
                                     controlReceivePort=4245)

flowCheck, messages = rrl.checkConnectionIntegrity(renderer)
if not flowCheck:
    print("Integrity check for multirenderer failed: %s" % messages)
    # sys.exit()

flow = rrl.AudioSignalFlow(renderer)

aiConfig = ai.AudioInterface.Configuration(flow.numberOfCaptureChannels,
                                           flow.numberOfPlaybackChannels, fs,
                                           blockSize)

jackCfg = """{ "clientname": "MultiRenderer",
  "autoconnect" : "false",
  "portconfig":
  {
    "capture":  [{ "basename":"inObj_", "externalport" : {} }],
Code example #3
signalLength = blockSize * numBlocks
t = 1.0 / fs * np.arange(0, signalLength)

context = visr.SignalFlowContext(period=blockSize, samplingFrequency=fs)

# The parameter hoaOrder is inferred from the data dimensions in the SOFA file.
graph = HoaBinauralRenderer(context,
                            "HoaBinauralRenderer",
                            None,
                            sofaFile=sofaFile,
                            headTracking=useTracking,
                            headOrientation=[np.pi / 4, 0.0],
                            fftImplementation=fftImplementation)

result, messages = rrl.checkConnectionIntegrity(graph)
if not result:
    raise RuntimeError("Error in signal flow: %s" % messages)

flow = rrl.AudioSignalFlow(graph)

if useTracking:
    trackingInput = flow.parameterReceivePort('tracking')

# TODO: Provide a HOA input signal (ACN convention).
# Here we use a trivial, not very realistic one: a sine wave in the
# first-order y component (ACN index 1).
inputSignal = np.zeros((inputWidth, signalLength), dtype=np.float32)
inputSignal[1, :] = 0.75 * np.sin(2.0 * np.pi * 440 * t)

# Preallocate the binaural output signal
outputSignal = np.zeros((numOutputChannels, signalLength), dtype=np.float32)
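
# A minimal sketch of the rendering loop. The orientation update assumes the
# 'tracking' parameter exposes an orientation attribute taking yaw/pitch/roll
# in radians; treat the attribute name and the rotation trajectory as assumptions.
for blockIdx in range(numBlocks):
    if useTracking:
        yaw = 2.0 * np.pi * blockIdx / numBlocks  # one full head rotation over the signal
        trackingInput.data().orientation = [yaw, 0.0, 0.0]
        trackingInput.swapBuffers()
    inputBlock = inputSignal[:, blockIdx * blockSize:(blockIdx + 1) * blockSize]
    outputSignal[:, blockIdx * blockSize:(blockIdx + 1) * blockSize] = \
        flow.process(inputBlock)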
Code example #4
controller = Comparison(
    context,
    "VirtualLoudspeakerRenderer",
    None,
    numLoudspeakers,
    port,
    baud,
    sofaFile,
    enableSerial=useTracking,
    dynITD=useDynamicITD,
    hrirInterp=useHRIRinterpolation,
    irTruncationLength=BRIRtruncationLength,
    headTrackingCalibrationPort=headTrackingCalibrationPort,
    switchUdpPort=12345)

result, messages = rrl.checkConnectionIntegrity(controller)
if not result:
    print(messages)

flow = rrl.AudioSignalFlow(controller)

if not useSerialPort and useTracking:
    trackingInput = flow.parameterReceivePort("tracking")

aiConfig = ai.AudioInterface.Configuration(flow.numberOfCaptureChannels,
                                           flow.numberOfPlaybackChannels, fs,
                                           blockSize)
# If the client name is too long, JACK reports an error.
jackCfg = """{ "clientname": "VirtualLoudspeakerRenderer",
  "autoconnect" : "false",
  "portconfig":
Code example #5
# Design a Hann window that satisfies the COLA (constant overlap-add) property.
tUnity = np.array(np.arange(0, dftSize))
window = np.sin(np.pi / float(dftSize) * tUnity)**2
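
# Illustrative sanity check (assumes hopSize divides dftSize): shifted copies
# of the sin^2 window must sum to a constant at every sample offset -- the
# constant overlap-add (COLA) condition. The constant is dftSize / (2 * hopSize).
overlapSum = np.zeros(hopSize)
for shift in range(0, dftSize, hopSize):
    overlapSum += window[shift:shift + hopSize]
assert np.allclose(overlapSum, overlapSum[0])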

# Alternative: construct TFProcessing with an explicit window instead of a
# window length:
# cc = TFProcessing(context, "top", None, numberOfChannels=numberOfChannels,
#                   dftLength=dftSize, window=window, hopSize=hopSize)
cc = TFProcessing(context,
                  "top",
                  None,
                  numberOfChannels=numberOfChannels,
                  dftLength=dftSize,
                  windowLength=windowSize,
                  hopSize=hopSize)

res, msg = rrl.checkConnectionIntegrity(cc)
if not res:
    raise RuntimeError("The top-level composite is inconsistent: %s" % msg)

flow = rrl.AudioSignalFlow(cc)

numBlocks = 16
numSamples = numBlocks * blockSize

t = np.arange(0, numSamples, dtype=np.float32) / samplingFrequency

inputSignal = np.zeros([numberOfChannels, numSamples], dtype=np.float32)
inputSignal[0, :] = np.sin(2 * np.pi * 440 * t)
inputSignal[1, :] = 0.5 * np.sin(2 * np.pi * 880 * t)
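
# A minimal sketch of the processing loop; with a COLA-compliant window and
# matching analysis/synthesis parameters, the output should closely reproduce
# the input up to the expected algorithmic delay.
outputSignal = np.zeros([numberOfChannels, numSamples], dtype=np.float32)
for blockIdx in range(numBlocks):
    inputBlock = inputSignal[:, blockIdx * blockSize:(blockIdx + 1) * blockSize]
    outputSignal[:, blockIdx * blockSize:(blockIdx + 1) * blockSize] = \
        flow.process(inputBlock)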