Example #1
def run():

    from os import path
    from acoular import __file__ as bpath, MicGeom, WNoiseGenerator, PointSource,\
     Mixer, WriteH5, TimeSamples, PowerSpectra, RectGrid, SteeringVector,\
     BeamformerBase, L_p
    from pylab import figure, plot, axis, imshow, colorbar, show

    # set up the parameters
    sfreq = 51200
    duration = 1
    nsamples = duration * sfreq
    micgeofile = path.join(path.split(bpath)[0], 'xml', 'array_64.xml')
    h5savefile = 'three_sources.h5'

    # generate test data, in real life this would come from an array measurement
    mg = MicGeom(from_file=micgeofile)
    n1 = WNoiseGenerator(sample_freq=sfreq, numsamples=nsamples, seed=1)
    n2 = WNoiseGenerator(sample_freq=sfreq,
                         numsamples=nsamples,
                         seed=2,
                         rms=0.7)
    n3 = WNoiseGenerator(sample_freq=sfreq,
                         numsamples=nsamples,
                         seed=3,
                         rms=0.5)
    p1 = PointSource(signal=n1, mics=mg, loc=(-0.1, -0.1, 0.3))
    p2 = PointSource(signal=n2, mics=mg, loc=(0.15, 0, 0.3))
    p3 = PointSource(signal=n3, mics=mg, loc=(0, 0.1, 0.3))
    pa = Mixer(source=p1, sources=[p2, p3])
    wh5 = WriteH5(source=pa, name=h5savefile)
    wh5.save()

    # analyze the data and generate map

    ts = TimeSamples(name=h5savefile)
    ps = PowerSpectra(time_data=ts, block_size=128, window='Hanning')

    rg = RectGrid( x_min=-0.2, x_max=0.2, y_min=-0.2, y_max=0.2, z=0.3, \
    increment=0.01 )
    st = SteeringVector(grid=rg, mics=mg)

    bb = BeamformerBase(freq_data=ps, steer=st)
    pm = bb.synthetic(8000, 3)
    Lm = L_p(pm)

    # show map
    imshow( Lm.T, origin='lower', vmin=Lm.max()-10, extent=rg.extend(), \
    interpolation='bicubic')
    colorbar()

    # plot microphone geometry
    figure(2)
    plot(mg.mpos[0], mg.mpos[1], 'o')
    axis('equal')

    show()
Example #2
    def get_acoular_essentials(self):

        #Set the mic array geometry
        mg = MicGeom(from_file=self.array_arrngmnt)

        #Set rectangular plane and grid parameters for Acoular
        self.set_grid()
        rg = RectGrid(x_min=self.x_min_grid, x_max=self.x_max_grid, y_min=self.y_min_grid, y_max=self.y_max_grid, z=self.distance, \
            increment=self.grid_increment)

        st = SteeringVector(grid=rg, mics=mg)

        return mg, rg, st
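
The method above only prepares geometry, grid and steering vector. A possible downstream use, sketched here as an additional method of the same (not shown) class and modeled on the pipeline of Example #1, would feed them into a beamformer; the HDF5 file name, the 4 kHz frequency and the third-octave band width are placeholder assumptions:

    def get_beamformer_map(self, h5file='measurement.h5'):
        # sketch only, not part of the original class
        from acoular import TimeSamples, PowerSpectra, BeamformerBase, L_p

        mg, rg, st = self.get_acoular_essentials()
        ts = TimeSamples(name=h5file)                     # recorded array data
        ps = PowerSpectra(time_data=ts, block_size=128, window='Hanning')
        bb = BeamformerBase(freq_data=ps, steer=st)
        return L_p(bb.synthetic(4000, 3))                 # SPL map in dB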
Example #3
    def test_timeconvolve(self):
        """compare results of timeconvolve with numpy convolve"""
        # Parameters
        NSAMPLES = 25
        N1 = WNoiseGenerator(sample_freq=1000, numsamples=NSAMPLES, seed=1)
        MGEOM = MicGeom(mpos_tot=[[1], [1], [1]])
        P1 = PointSource(signal=N1, mics=MGEOM)
        KERNEL = np.random.rand(20)
        CONV = TimeConvolve(kernel=KERNEL, source=P1)

        SIG = tools.return_result(P1, num=NSAMPLES)
        RES = tools.return_result(CONV, num=100)

        for i in range(P1.numchannels):
            REF = np.convolve(np.squeeze(KERNEL), np.squeeze(SIG[:, i]))
            np.testing.assert_allclose(np.squeeze(RES[:, i]),
                                       REF,
                                       rtol=1e-5,
                                       atol=1e-8)
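
Besides collecting the complete output with tools.return_result, TimeConvolve can also be consumed block-wise through its result() generator, like other Acoular processing objects; a minimal sketch (the block size of 16 is arbitrary):

# sketch: stream the convolved output block by block
for block in CONV.result(num=16):    # each block has shape (num, numchannels)
    pass                             # process the block, e.g. accumulate power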
Example #4
def fbeampreparation():
    # Theta is given in degrees in the interval [0, 180] instead of [90, -90] as in DCASE
    M1 = spherical2cart(deg2rad(45),deg2rad(55),0.042)
    M2 = spherical2cart(deg2rad(315),deg2rad(125),0.042)
    M3 = spherical2cart(deg2rad(135),deg2rad(125),0.042)
    M4 = spherical2cart(deg2rad(225),deg2rad(55),0.042)
    mg = MicGeom()
    mg.mpos_tot = array([M1,M2,M3,M4]).T # add microphone positions to MicGeom object
    
    # define evaluation grid
    rg = SphericalGrid_Equiangular(NPOINTS_AZI, NPOINTS_ELE)
    st = SteeringVector(grid=rg, mics=mg)
    
    if DEBUG:
        firstframe = STARTFRAME
        lastframe = ENDFRAME
    else:
        firstframe = 0
        lastframe = 600
    
    return mg, rg, st, firstframe, lastframe
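
The snippet relies on a spherical2cart helper that is defined elsewhere; a minimal sketch that is consistent with the calls above and with cart2spherical_dcase in Example #8 (azimuth and colatitude in radians, radius in metres) could be:

from numpy import array, sin, cos

def spherical2cart(azi, col, r):
    # sketch only; the original helper may differ
    return array([r * sin(col) * cos(azi),
                  r * sin(col) * sin(azi),
                  r * cos(col)])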
Example #5
t1.stop = 16000  # last valid sample = 15999
invalid = [1, 7]  # list of invalid channels (unwanted microphones etc.)
t1.invalid_channels = invalid

#===============================================================================
# calibration is usually needed and can be set directly at the TimeSamples
# object (preferred) or for frequency domain processing at the PowerSpectra
# object (for backwards compatibility)
#===============================================================================
t1.calib = Calib(from_file=calibfile)

#===============================================================================
# the microphone geometry must have the same number of valid channels as the
# TimeSamples object has
#===============================================================================
m = MicGeom(from_file=micgeofile)
m.invalid_channels = invalid

#===============================================================================
# the grid for the beamforming map; a RectGrid3D class is also available
# (the example grid is very coarse)
#===============================================================================
g = RectGrid(x_min=-0.6,
             x_max=-0.0,
             y_min=-0.3,
             y_max=0.3,
             z=0.68,
             increment=0.05)

#===============================================================================
# for frequency domain methods, this provides the cross spectral matrix and its
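
The example breaks off in the middle of this comment. A typical continuation, assumed here and modeled on Example #1 rather than taken from the original, would build the cross spectral matrix from the calibrated time data and compute a basic beamformer map (the 4 kHz frequency and the third-octave band width are placeholders):

from acoular import PowerSpectra, SteeringVector, BeamformerBase, L_p

# assumed continuation of the truncated example
f = PowerSpectra(time_data=t1, window='Hanning', overlap='50%', block_size=128)
st = SteeringVector(grid=g, mics=m)
b = BeamformerBase(freq_data=f, steer=st)
Lm = L_p(b.synthetic(4000, 3))   # beamforming map in dB for the 4 kHz third-octave band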
Example #6
MaskedTimeSamples, FiltFiltOctave, Trajectory, BeamformerTimeSq, TimeAverage, \
BeamformerTimeSqTraj, BeamformerCapon, BeamformerMusic, BeamformerDamas, BeamformerCMF, BeamformerClean, BeamformerFunctional,\
TimeCache, FiltOctave, BeamformerTime, TimePower, IntegratorSectorTime, \
PointSource, MovingPointSource, SineGenerator, WNoiseGenerator, Mixer, WriteWAV

from pylab import subplot, imshow, show, colorbar, plot, transpose, figure, \
psd, axis, xlim, ylim, title, tight_layout, text

freq = 48000
sfreq = freq / 2  #sampling frequency
duration = 1
nsamples = freq * duration
datafile = 'cry_n0000001.wav'
micgeofile = path.join(path.split(bpath)[0], 'xml', 'array_64_8mic.xml')
h5savefile = 'cry_n0000001.h5'  # write the simulated data to an HDF5 file, not over the wav input
m = MicGeom(from_file=micgeofile)
n = WNoiseGenerator(sample_freq=sfreq, numsamples=nsamples, seed=1)  #1pascal
p = PointSource(signal=n, mics=m, loc=(-0.1, -0.1, 0.3))
p1 = Mixer(source=p)
wh5 = WriteH5(source=p, name=h5savefile)
wh5.save()

# define the different source signals
r = 52.5
nsamples = int(sfreq * 0.3)  # Python 3: int replaces the former long
n1 = WNoiseGenerator(sample_freq=sfreq, numsamples=nsamples)
s1 = SineGenerator(sample_freq=sfreq, numsamples=nsamples, freq=freq)
s2 = SineGenerator(sample_freq=sfreq, numsamples=nsamples, freq=freq, \
    phase=pi)

#define a circular array of 8 microphones
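
The array definition itself is cut off here; a sketch following the construction of the 28-microphone ring in Example #11 (the radius of 0.3 m is an assumption) could be:

from numpy import array, sin, cos, pi, linspace

# assumed completion of the truncated comment above: 8 microphones on a ring
m8 = MicGeom()
m8.mpos_tot = array([(0.3 * sin(2 * pi * i), 0.3 * cos(2 * pi * i), 0)
                     for i in linspace(0.0, 1.0, 8, False)]).T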
Example #7
Copyright (c) 2006-2021 Acoular Development Team.
All rights reserved.
"""

from acoular import WNoiseGenerator, PointSource, PowerSpectra, MicGeom, L_p
from acoular.tools import barspectrum
from numpy import array
from pylab import figure,plot,show,xlim,ylim,xscale,xticks,xlabel,ylabel,\
    grid,real, title, legend

# constants
sfreq = 12800  # sample frequency
band = 3  # octave: 1 ;   1/3-octave: 3 (for plotting)

# set up microphone at (0,0,0)
m = MicGeom()
m.mpos_tot = array([[0], [0], [0]])  # shape (3, 1): one microphone at the origin

# create noise source
n1 = WNoiseGenerator(sample_freq=sfreq, numsamples=10 * sfreq, seed=1)

t = PointSource(signal=n1, mics=m, loc=(1, 0, 1))

# create power spectrum
f = PowerSpectra(time_data=t, window='Hanning', overlap='50%', block_size=4096)

# get spectrum data
spectrum_data = real(f.csm[:, 0, 0])  # get power spectrum from cross-spectral matrix
freqs = f.fftfreq()  # FFT frequencies
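
The original plotting code, which presumably uses the imported barspectrum helper, is not part of this excerpt. As a minimal stand-in (an assumption, using only functions already imported above), the sound pressure level of channel 0 can be plotted over frequency:

# assumed continuation: plot the auto-power spectrum of channel 0 in dB
figure(1)
plot(freqs, L_p(spectrum_data))
xscale('log')
xlabel('frequency / Hz')
ylabel('sound pressure level / dB')
grid(True)
show()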
Example #8
    ])


def cart2spherical_dcase(x, y, z):
    phi = arctan2(y, x) * 180 / pi
    theta = arccos(z / (sqrt(x**2 + y**2 + z**2))) * 180 / pi
    return array([phi, 90 - theta])


# Theta is given in degrees in the interval [0, 180] instead of [90, -90] as in DCASE
M1 = spherical2cart(deg2rad(45), deg2rad(55), 0.042)
M2 = spherical2cart(deg2rad(315), deg2rad(125), 0.042)
M3 = spherical2cart(deg2rad(135), deg2rad(125), 0.042)
M4 = spherical2cart(deg2rad(225), deg2rad(55), 0.042)

mg = MicGeom()
mg.mpos_tot = array([M1, M2, M3,
                     M4]).T  # add microphone positions to MicGeom object

# define evaluation grid
# Here you could perhaps write a new spherical grid class or an ArbitraryGrid
# class so that we can use a sensible grid for localization.
# For inspiration, see: https://spaudiopy.readthedocs.io/en/latest/spaudiopy.grids.html
#
rg = SphericalGrid_Equiangular(NPOINTS_AZI, NPOINTS_ELE)
st = SteeringVector(grid=rg, mics=mg)

# analyze the data and generate map
name = AUDIO_DIR + TRACK
ts = WavSamples(name=name, start=STARTFRAME * NUM, stop=ENDFRAME * NUM)
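
The excerpt ends right after the audio samples are loaded. A plausible continuation, assumed here and mirroring the frequency-domain chain of Example #1 (frequency and band width are placeholders), would be:

from acoular import PowerSpectra, BeamformerBase, L_p

# assumed continuation of the truncated example
ps = PowerSpectra(time_data=ts, block_size=128, window='Hanning')
bb = BeamformerBase(freq_data=ps, steer=st)
Lm = L_p(bb.synthetic(4000, 3))   # level map on the spherical grid, in dB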
Example #9
import numpy as np
from acoular import __file__ as bpath, config, WNoiseGenerator, PointSource, MicGeom

config.global_caching = "none"

# If this flag is set to True, new data will be simulated and new source
# results will be generated for comparison during testing. It should always be
# False; set it to True only if the reference data have to be recalculated
# because of intended changes to the sources.
WRITE_NEW_REFERENCE_DATA = False

# Parameters
SFREQ = 1000
SEED = 1
NSAMPLES = 100
N1 = WNoiseGenerator(sample_freq=SFREQ, numsamples=NSAMPLES, seed=SEED)
MGEOM = MicGeom(mpos_tot=[[1], [1], [1]])


def get_source_result(Source, num=32):
    """
    returns the result for a given source

    Parameters
    ----------
    source : cls
        source class that is tested.
    num : int, optional
        number of samples to return. The default is 32.

    Returns
    -------
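
The excerpt cuts the function off inside its docstring. Judging from the return_result helper used in Example #3, the body presumably just collects the blocks produced by the source; a minimal sketch of such a function (an assumption, not the original code) is:

from acoular import tools

def get_source_result(Source, num=32):
    # sketch: concatenate all blocks yielded by Source.result(num) into one array
    return tools.return_result(Source, num=num)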
Example #10

import acoular
from acoular import L_p, TimeSamples, Calib, MicGeom, EigSpectra,\
RectGrid3D, BeamformerBase, BeamformerFunctional, BeamformerEig, BeamformerOrth, \
BeamformerCleansc, BeamformerCapon, BeamformerMusic, BeamformerCMF, PointSpreadFunction, BeamformerClean, BeamformerDamas

# other imports
from os import path
#from mayavi import mlab
from numpy import amax
#from cPickle import dump, load
from pickle import dump, load

# see example3
t = TimeSamples(name='example_data.h5')
cal = Calib(from_file='example_calib.xml')
m = MicGeom(from_file=path.join(\
    path.split(acoular.__file__)[0], 'xml', 'array_56.xml'))
g = RectGrid3D(x_min=-0.6, x_max=-0.0, y_min=-0.3, y_max=0.3, \
    z_min=0.48, z_max=0.88, increment=0.1)
f = EigSpectra(time_data=t,
               window='Hanning',
               overlap='50%',
               block_size=128,
               ind_low=5,
               ind_high=15)
csm = f.csm[:]
eva = f.eva[:]
eve = f.eve[:]

#""" Creating the beamformers
bb1Rem = BeamformerBase(freq_data=f,
                        grid=g,
Example #11
# construct the trajectory for the source
#===============================================================================

tr1 = Trajectory()
tmax = U / rps
delta_t = 1. / rps / 16.0  # 16 steps per revolution
for t in arange(0, tmax * 1.001, delta_t):
    i = t * rps * 2 * pi  #angle
    # define points for trajectory spline
    tr1.points[t] = (R * cos(i), R * sin(i), Z)  # anti-clockwise rotation

#===============================================================================
# define circular microphone array and load other array geometries
#===============================================================================

m = MicGeom()
# set 28 microphone positions
m.mpos_tot = array([(r*sin(2*pi*i+pi/4), r*cos(2*pi*i+pi/4), 0) \
    for i in linspace(0.0, 1.0, 28, False)]).T

mg_file = path.join(path.split(acoular.__file__)[0], 'xml', 'array_64.xml')
mg = MicGeom(from_file=mg_file)

#===============================================================================
# define the different signals
#===============================================================================

nsamples = int(sfreq * tmax)  # Python 3: int replaces the former long
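
The signal and source definitions that follow are not part of this excerpt. A sketch of how the trajectory is typically used (an assumption based on Acoular's MovingPointSource; the white-noise signal is a placeholder) could look like this:

from acoular import WNoiseGenerator, MovingPointSource

# assumed continuation: a rotating monopole that follows the trajectory tr1
n1 = WNoiseGenerator(sample_freq=sfreq, numsamples=nsamples, seed=1)
p1 = MovingPointSource(signal=n1, mics=m, trajectory=tr1)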
Example #12
try:
    import pyface.qt
except ImportError:
    os.environ['QT_API'] = 'pyqt'

# make sure that no OMP multithreading is used if OMP_NUM_THREADS is not defined
os.environ.setdefault('OMP_NUM_THREADS','1')

#from .fileimport import time_data_import, csv_import, td_import, \
#bk_mat_import, datx_import
try:
    from .nidaqimport import nidaq_import
except ImportError:
    pass

object_name.configure_traits()
m = MicGeom(from_file='UCA8.xml')



import matplotlib
matplotlib.use('TkAgg')
import matplotlib.pyplot as plt
from os import path
import acoular
from acoular import L_p, Calib, MicGeom, TimeSamples, \
RectGrid, BeamformerBase, EigSpectra, BeamformerOrth, BeamformerCleansc, \
MaskedTimeSamples, FiltFiltOctave, BeamformerTimeSq, TimeAverage, \
TimeCache, BeamformerTime, TimePower, BeamformerCMF, \
BeamformerCapon, BeamformerMusic, BeamformerDamas, BeamformerClean, \
BeamformerFunctional
Example #13
# If this flag is set to True, new beamformer results are generated for
# comparison during testing. It should always be False; set it to True only if
# the reference data have to be recalculated because of intended changes to the
# beamformers (or MovingPointSource).
WRITE_NEW_REFERENCE_DATA = False

# Parameters
FNAME = join('reference_data', 'beamformer_traj_time_data.h5')
SFREQ = 6000
SPEED = 10  # km/h
SEED = 1
D = .5
SOURCE_POS = (0.0, 0.0, D)
passby_dist = .5  # distance the source travels while passing in front of the array
CONV_AMP = True

# create linear mic geom
MGEOM = MicGeom()
N = 5
L = .5
MGEOM.mpos_tot = np.zeros((3, N), dtype=np.float64)
win = np.sin(np.arange(N) * np.pi / (N - 1))
b = 0.4
MGEOM.mpos_tot[0] = np.linspace(-L, L, N) * (1 - b) / (win * b + 1 - b)

# Monopole Trajectory
t_passby = passby_dist / SPEED / 3.6
nsamples = int(t_passby * SFREQ)
TRAJ = Trajectory()  # source center
TRAJ.points[0] = (-passby_dist / 2 + SOURCE_POS[0], SOURCE_POS[1],
                  SOURCE_POS[2])
TRAJ.points[t_passby] = (+passby_dist / 2, SOURCE_POS[1], SOURCE_POS[2])
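
The rest of the setup is not shown in this excerpt. A sketch of how these parameters are typically combined (an assumption; the signal generator and the conv_amp keyword are taken from common Acoular usage, not from the original) might be:

from acoular import WNoiseGenerator, MovingPointSource, WriteH5

# assumed continuation: moving white-noise source that follows TRAJ
SIG = WNoiseGenerator(sample_freq=SFREQ, numsamples=nsamples, seed=SEED)
SOURCE = MovingPointSource(signal=SIG, mics=MGEOM, trajectory=TRAJ,
                           conv_amp=CONV_AMP)   # toggle convective amplification
if WRITE_NEW_REFERENCE_DATA:
    WriteH5(source=SOURCE, name=FNAME).save()   # write reference data to FNAME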
Example #14
    BeamformerOrth, RectGrid, MaskedTimeSamples, Sector,\
        UncorrelatedNoiseSource, SourceMixer, SamplesGenerator, BeamformerTimeTraj, BeamformerTimeSqTraj,\
            BeamformerCleantTraj, BeamformerCleantSqTraj, IntegratorSectorTime, MaskedTimeInOut, ChannelMixer,\
                SpatialInterpolator, SpatialInterpolatorRotation, SpatialInterpolatorConstantRotation, Mixer,\
                    WriteWAV, MergeGrid, FiltWNoiseGenerator, SphericalHarmonicSource, PointSource
from numpy import array
from unittest import TestCase

# a dictionary containing all classes that should change their digest on
# changes of the following trait types:
#   * List
#   * CArray
UNEQUAL_DIGEST_TEST_DICT = {
    #    "MicGeom.mpos_tot item assignment" : (MicGeom(mpos_tot=[[1.,2.,3.]]), "obj.mpos_tot[:] = 0."),
    "MicGeom.mpos_tot new array assignment":
    (MicGeom(mpos_tot=[[1., 2., 3.]]), "obj.mpos_tot = array([0.])"),
    #    "MicGeom.invalid_channels item assignment" : (MicGeom(mpos_tot=[[1.,2.,3.]],invalid_channels=[1]), "obj.invalid_channels[0] = 0"),
    "MicGeom.invalid_channels new list assignment":
    (MicGeom(mpos_tot=[[1., 2., 3.]],
             invalid_channels=[1]), "obj.invalid_channels = [0]"),
    # environments.py
    #    "UniformFlowEnvironment.fdv item assignment": (UniformFlowEnvironment(), "obj.fdv[0] = 0."),
    "UniformFlowEnvironment.fdv array assignment":
    (UniformFlowEnvironment(), "obj.fdv = array((0., 0., 0.))"),
    #    "SlotJet.origin item assignment": (SlotJet(), "obj.origin[0] = 1."),
    "SlotJet.origin array assignment":
    (SlotJet(), "obj.origin = array((1., 0., 0.))"),
    #    "SlotJet.flow item assignment": (SlotJet(), "obj.flow[0] = 0."),
    "SlotJet.flow array assignment": (SlotJet(),
                                      "obj.flow = array((0., 0., 0.))"),
    #    "SlotJet.plane item assignment": (SlotJet(), "obj.plane[0] = 1."),