def __init__(self,
             context, name, parent,
             positions,              # Cartesian object positions; indexed as positions[:, k] below,
                                     # i.e. treated as a 3 x #positions matrix.
                                     # NOTE(review): the original comment said "#points x 3 or 1x3",
                                     # which contradicts the indexing — confirm the expected layout.
             updateRateSamples=None,
             objectId=0,
             groupId=0,
             priority=0,
             objectLevel=1.0,
             objectChannel=None,
             diffuseness=None):
    """
    Construct a trajectory generator that emits a point source object
    whose position is advanced through the columns of `positions`.

    Parameters
    ----------
    context, name, parent
        Standard VISR component constructor arguments, forwarded to the base class.
    positions
        Matrix of Cartesian positions; column k is the k-th trajectory position.
    updateRateSamples
        Interval between position updates, in samples. Must be a multiple of the
        component's period.
    objectId
        Object id of the emitted point source.
    groupId
        Group id (stored on the component itself — see note below).
    priority
        Rendering priority of the emitted object.
    objectLevel
        Linear level of the emitted object.
    objectChannel
        Audio channel index of the object; defaults to objectId.
    diffuseness
        Not supported; any non-None value raises ValueError.

    Raises
    ------
    ValueError
        If updateRateSamples is not a multiple of the period, or if a
        diffuseness value is given.
    """
    super(PointSourceTrajectoryGenerator, self).__init__(context, name, parent)
    if updateRateSamples % self.period() != 0:
        raise ValueError(
            "TrajectoryGenerator: The update rate must be a multiple of the period.")
    # Number of processing cycles between successive position updates.
    self.updateCycles = updateRateSamples // self.period()
    self.positions = positions
    # Columns index the trajectory positions (shape[1] == #positions).
    self.numPositions = self.positions.shape[1]
    self.objectOutput = visr.ParameterOutput(
        "objectVectorOutput", self,
        parameterType=pml.ObjectVector.staticType,
        protocolType=pml.DoubleBufferingProtocol.staticType,
        parameterConfig=pml.EmptyParameterConfig())
    self.cycleCounter = 0
    self.positionCounter = 0
    if diffuseness is None:
        self.object = om.PointSource(objectId)
    else:
        # BUG FIX: the original constructed the ValueError but never raised it,
        # silently ignoring the argument and leaving self.object unassigned.
        raise ValueError("Diffuse point sources not currently supported.")
    # Start at the first trajectory position.
    self.object.position = positions[:, 0]
    self.object.level = objectLevel
    # NOTE(review): stored on the component, not on self.object.groupId —
    # confirm whether the object's group id was intended here.
    self.groupId = groupId
    self.object.priority = priority
    if objectChannel is None:
        objectChannel = objectId
    self.object.channels = [objectChannel]
# Set up the parameter inputs of the instantiated signal flow.
paramInput = flow.parameterReceivePort('objectVector')
if useTracking:
    trackingInput = flow.parameterReceivePort("tracking")

# Test input: a 440 Hz sine on the first object channel, all others silent.
inputSignal = np.zeros((numBinauralObjects, signalLength), dtype=np.float32)
inputSignal[0, :] = 0.75 * np.sin(2.0 * np.pi * 440 * t)
outputSignal = np.zeros((numOutputChannels, signalLength), dtype=np.float32)

# Azimuth trajectory: a full circle in 5-degree steps.
# FIX: use integer division — '360 / 5' yields a float count under Python 3.
numPos = 360 // 5
azSequence = (2.0 * np.pi) / numPos * np.arange(0, numPos)

# A point source and a plane wave, both straight ahead at unit distance.
az = 0
el = 0
r = 1
ps1 = objectmodel.PointSource(0)
ps1.position = sph2cart(np.array([az, el, r]))
ps1.level = 0.005
ps1.channels = [ps1.objectId]

pw1 = objectmodel.PlaneWave(1)
pw1.azimuth = az
pw1.elevation = el
pw1.referenceDistance = r
pw1.level = 0.005
pw1.groupId = 5
pw1.priority = 5
pw1.channels = [pw1.objectId]

# Fill the double-buffered object vector with both test objects.
ov = paramInput.data()
ov.set([ps1, pw1])
# Drive the flow with a slowly moving point source and alternate the
# selected renderer every parameter update period.
objectInput = flow.parameterReceivePort('objectIn')
controlInput = flow.parameterReceivePort('controlIn')

# Test input: a 440 Hz sine on channel 0, channel 1 silent.
inputSignal = np.zeros((2, signalLength), dtype=np.float32)
inputSignal[0, :] = 0.75 * np.sin(2.0 * np.pi * 440 * t)
outputSignal = np.zeros((numberOfOutputs, signalLength), dtype=np.float32)

# FIX: hoist the update interval and use integer division — the original
# computed 'parameterUpdatePeriod / blockSize' (a float in Python 3) inside
# the loop and took a modulo against it.
blocksPerUpdate = parameterUpdatePeriod // blockSize
for blockIdx in range(0, numBlocks):
    if blockIdx % blocksPerUpdate == 0:
        # New source position: slow azimuth sweep with a gentle elevation wobble.
        az = 0.025 * blockIdx
        el = 0.1 * np.sin(0.025 * blockIdx)
        r = 1
        x, y, z = sph2cart(az, el, r)
        ps1 = om.PointSource(0)
        ps1.x = x
        ps1.y = y
        ps1.z = z
        ps1.level = 0.5
        ps1.groupId = 5
        ps1.priority = 5
        ps1.resetNumberOfChannels(1)
        ps1.setChannelIndex(0, ps1.objectId)

        ov = objectInput.data()
        # ov.clear()
        ov.insert(ps1)
        objectInput.swapBuffers()
    # Alternate between the two renderers with every update period.
    renderSelect = (blockIdx // blocksPerUpdate) % 2
# Probe directions: a full circle of numPos equally spaced azimuths.
azSequence = (2.0 * np.pi) / numPos * np.arange(0, numPos)

# Preallocate per-direction binaural filters and the optional ITD/ILD data.
filters = np.zeros((numPos, 2, filterLength))
if dynamicITD:
    delays = np.zeros((2, numPos))
if dynamicILD:
    gains = np.zeros((2, numPos))

for blockIdx in range(0, numPos):
    # Two antipodal sources on the unit circle in the horizontal plane.
    az = azSequence[blockIdx]
    x = np.cos(az)
    y = np.sin(az)
    z = 0

    ps1 = om.PointSource(0)
    ps1.position = [x, y, z]
    ps1.level = 0.25
    ps1.channels = [0]

    ps2 = om.PointSource(1)
    ps2.position = [-x, -y, z]
    ps2.level = 0.25
    ps2.channels = [1]

    objectInput.data().set([ps1, ps2])
    objectInput.swapBuffers()

    if headTrackEnabled:
        # Rotate the head by 90 degrees about the z axis, i.e. the rotation
        # happens within the xy plane.
        headrotation = np.pi / 2
        trackingInput.data().orientation = [0, 0, -headrotation]
trackingInput = flow.parameterReceivePort("tracking")

# Test input: a 440 Hz sine on the first object channel, all others silent.
inputSignal = np.zeros((numObjects, signalLength), dtype=np.float32)
inputSignal[0, :] = 0.75 * np.sin(2.0 * np.pi * 440 * t)
outputSignal = np.zeros((numOutputChannels, signalLength), dtype=np.float32)

# Azimuth trajectory: a full circle in 5-degree steps.
# FIX: use integer division — '360 / 5' yields a float count under Python 3.
numPos = 360 // 5
azSequence = (2.0 * np.pi) / numPos * np.arange(0, numPos)

# Initial object vector: every source straight ahead at unit distance.
ov = paramInput.data()
for idx in range(numObjects):
    az = 0
    el = 0
    r = 1
    ps = om.PointSource(idx)
    ps.position = sph2cart(np.asarray([az, el, r]))
    ps.level = 0.5
    ps.groupId = 5
    ps.priority = 5
    ps.channels = [idx]
    ov.insert(ps)
paramInput.swapBuffers()

start = time()
for blockIdx in range(0, numBlocks):
    if useSourceMovement:
        # Advance the first source around the circle, one step per block.
        az = azSequence[int(blockIdx % numPos)]
        el = 0
oscControlPort=oscMessages, jsonControlPort=jsonMessages) flow = rrl.AudioSignalFlow(mda) objInput = flow.parameterReceivePort("objectIn") if oscMessages: oscInput = flow.parameterReceivePort("oscControlIn") if jsonMessages: jsonInput = flow.parameterReceivePort("jsonControlIn") objOutput = flow.parameterSendPort("objectOut") ps0 = om.PointSource(0) ps0.level = 1.0 ps0.position = [-2, -3, 0.5] ps1 = om.PointSource(1) ps1.level = 1.0 ps1.position = [-4, 1, -0.5] newVolume = 6.02 if oscMessages: msg = OSC.OSCMessage("/groupVolume/narrator") msg.append(float(-6.0)) binMsg = msg.getBinary() msgParam = pml.StringParameter(binMsg) oscInput.enqueue(msgParam)
trackingInput = flow.parameterReceivePort('tracking')

# Test input: a 440 Hz sine on the first object channel, all others silent.
inputSignal = np.zeros((numBinauralObjects, signalLength), dtype=np.float32)
inputSignal[0, :] = 0.75 * np.sin(2.0 * np.pi * 440 * t)
outputSignal = np.zeros((numOutputChannels, signalLength), dtype=np.float32)

# Azimuth trajectory: a full circle in 5-degree steps.
# FIX: use integer division — '360 / 5' yields a float count under Python 3.
numPos = 360 // 5
azSequence = (2.0 * np.pi) / numPos * np.arange(0, numPos)

# Initial object vector: every source straight ahead at unit distance.
ov = paramInput.data()
for idx in range(numBinauralObjects):
    az = 0
    el = 0
    r = 1
    ps = objectmodel.PointSource(idx)
    ps.position = sph2cart(np.asarray([az, el, r]))
    ps.level = 0.5
    ps.groupId = 5
    ps.priority = 5
    ps.channels = [idx]
    ov.insert(ps)
paramInput.swapBuffers()

start = time.time()
for blockIdx in range(0, numBlocks):
    if useSourceMovement:
        # Advance the first source around the circle, one step per block.
        az = azSequence[int(blockIdx % numPos)]
        el = 0
None, 'c:/local/dev/metadapter/config/radio_drama_adaptation_demo.xml', objectVectorInput=True, objectVectorOutput=objectVectorOutput, oscControlPort=oscMessages) flow = rrl.AudioSignalFlow(mda) objInput = flow.parameterReceivePort("objectIn") if oscMessages: oscInput = flow.parameterReceivePort("oscControlIn") objOutput = flow.parameterSendPort("objectOut") ps = om.PointSource(0) ps.level = 0.375 ps.position = [-2, -3, 0.5] if oscMessages: msg = OSC.OSCMessage("/groupVolume/narrator") msg.append(float(-6.0)) binMsg = msg.getBinary() msgParam = pml.StringParameter(binMsg) oscInput.enqueue(msgParam) objInput.data().set([ps]) objInput.swapBuffers() flow.process()
paramOutputL2 = flowL2.parameterSendPort('gains')

# %% Define a number of object positions in spherical coordinates.
# Here we define a set of positions in the horizontal plane with 1 degree distance.
az = np.linspace(0.0, 2 * np.pi, numBlocks)
el = 10.0 * np.pi / 180.0

# %% Preallocate a matrix of output gains (#numLsp x #directions)
gainsVbap = np.zeros((numBlocks, numLsp))
gainsL2 = np.zeros((numBlocks, numLsp))

# %% Run the simulation as a number of iterations, where a new source position
# is set in each iteration.
for bi in range(0, numBlocks):
    # Create a point source with a given azimuth and elevation.
    ps1 = om.PointSource(0)  # 0 is the source id.
    ps1.position = sph2cart(az[bi], el, 1.0)
    # Set other object properties.
    ps1.channels = [0]
    ps1.level = 1.0
    # Provide the point source as input to the VBAP panning gain calculator.
    ovVbap = paramInputVbap.data()
    ovVbap.set([ps1])
    # Trigger sending of a new parameter value.
    paramInputVbap.swapBuffers()
    # Run the signal flow graph for one iteration.
    flowVbap.process()
    # Retrieve the computed gains from the flow's parameter output port.
    gainsVbap[bi, :] = np.array(paramOutputVbap.data())[:, 0]