Example #1
def run():

    from os import path
    from acoular import __file__ as bpath, MicGeom, WNoiseGenerator, PointSource,\
     Mixer, WriteH5, TimeSamples, PowerSpectra, RectGrid, SteeringVector,\
     BeamformerBase, L_p
    from pylab import figure, plot, axis, imshow, colorbar, show

    # set up the parameters
    sfreq = 51200
    duration = 1
    nsamples = duration * sfreq
    micgeofile = path.join(path.split(bpath)[0], 'xml', 'array_64.xml')
    h5savefile = 'three_sources.h5'

    # generate test data, in real life this would come from an array measurement
    mg = MicGeom(from_file=micgeofile)
    n1 = WNoiseGenerator(sample_freq=sfreq, numsamples=nsamples, seed=1)
    n2 = WNoiseGenerator(sample_freq=sfreq,
                         numsamples=nsamples,
                         seed=2,
                         rms=0.7)
    n3 = WNoiseGenerator(sample_freq=sfreq,
                         numsamples=nsamples,
                         seed=3,
                         rms=0.5)
    p1 = PointSource(signal=n1, mics=mg, loc=(-0.1, -0.1, 0.3))
    p2 = PointSource(signal=n2, mics=mg, loc=(0.15, 0, 0.3))
    p3 = PointSource(signal=n3, mics=mg, loc=(0, 0.1, 0.3))
    pa = Mixer(source=p1, sources=[p2, p3])
    wh5 = WriteH5(source=pa, name=h5savefile)
    wh5.save()

    # analyze the data and generate map

    ts = TimeSamples(name=h5savefile)
    ps = PowerSpectra(time_data=ts, block_size=128, window='Hanning')

    rg = RectGrid( x_min=-0.2, x_max=0.2, y_min=-0.2, y_max=0.2, z=0.3, \
    increment=0.01 )
    st = SteeringVector(grid=rg, mics=mg)

    bb = BeamformerBase(freq_data=ps, steer=st)
    pm = bb.synthetic(8000, 3)
    Lm = L_p(pm)

    # show map
    imshow( Lm.T, origin='lower', vmin=Lm.max()-10, extent=rg.extend(), \
    interpolation='bicubic')
    colorbar()

    # plot microphone geometry
    figure(2)
    plot(mg.mpos[0], mg.mpos[1], 'o')
    axis('equal')

    show()
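
    # a minimal sketch (not part of the original example): since the synthesized
    # data only stands in for a measurement, the analysis half can equally be
    # pointed at a pre-recorded Acoular HDF5 file ('measurement.h5' is a placeholder)
    #ts = TimeSamples(name='measurement.h5')
    #ps = PowerSpectra(time_data=ts, block_size=128, window='Hanning')
    #st = SteeringVector(grid=rg, mics=mg)
    #bb = BeamformerBase(freq_data=ps, steer=st)
    #Lm = L_p(bb.synthetic(8000, 3))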
Example #2
def get_sql_sources(cursor, sources_id, sfreq, mpos):
    # sources_id -> id for a source mapping with a specific set of sources
    # source_id  -> id of each individual source
    # source_ids -> list of all ids of the individual sources belonging to one sources_id
    source_ids = fetchall(cursor, get_source_ids, sources_id)
    cursor.execute(sq(get_ap, 1))
    (ap) = cursor.fetchone()
    sourcelist = []  # list
    for source_id in source_ids:
        # get the source parameters one after the other
        cursor.execute(sq(get_source, sources_id, source_id))
        (signal_id, x1, x2, x3, pol_type, dipole_id, p_rms) = cursor.fetchone()
        # generate harmonic or noise source...
        sgnl = WNoiseGenerator(rms=p_rms,
                               sample_freq=sfreq,
                               numsamples=512000,
                               seed=(signal_id - 1))
        #    newsrc = PointSource(signal = sgnl,
        #                         mpos = m_error,
        #                         loc = (x1*ap, x2*ap, x3*ap))
        newsrc = PointSource(signal=sgnl,
                             mpos=mpos,
                             loc=(x1 * ap[0], x2 * ap[0], x3 * ap[0]))
        sourcelist.append(newsrc)  # add the new source to the list
    if len(source_ids) > 1:  # if there are multiple sources, they have to be mixed
        src = Mixer(source=sourcelist[0], sources=sourcelist[1:])
    else:  # if there's only one source, it is the only one in the list
        src = sourcelist[0]
    return src
Example #3
    def test_timeconvolve(self):
        """compare results of timeconvolve with numpy convolve"""
        # Parameters
        NSAMPLES = 25
        N1 = WNoiseGenerator(sample_freq=1000, numsamples=NSAMPLES, seed=1)
        MGEOM = MicGeom(mpos_tot=[[1], [1], [1]])
        P1 = PointSource(signal=N1, mics=MGEOM)
        KERNEL = np.random.rand(20)
        CONV = TimeConvolve(kernel=KERNEL, source=P1)

        SIG = tools.return_result(P1, num=NSAMPLES)
        RES = tools.return_result(CONV, num=100)

        for i in range(P1.numchannels):
            REF = np.convolve(np.squeeze(KERNEL), np.squeeze(SIG[:, i]))
            np.testing.assert_allclose(np.squeeze(RES[:, i]),
                                       REF,
                                       rtol=1e-5,
                                       atol=1e-8)
Example #4
from os import path
from numpy import array, pi, sin, cos
from acoular import __file__ as bpath, MicGeom, WriteH5, \
    TimeCache, FiltOctave, BeamformerTime, TimePower, IntegratorSectorTime, \
    PointSource, MovingPointSource, SineGenerator, WNoiseGenerator, Mixer, WriteWAV

from pylab import subplot, imshow, show, colorbar, plot, transpose, figure, \
psd, axis, xlim, ylim, title, tight_layout, text

freq = 48000
sfreq = freq / 2  #sampling frequency
duration = 1
nsamples = freq * duration
datafile = 'cry_n0000001.wav'
micgeofile = path.join(path.split(bpath)[0], 'xml', 'array_64_8mic.xml')
h5savefile = 'cry_n0000001.h5'
m = MicGeom(from_file=micgeofile)
n = WNoiseGenerator(sample_freq=sfreq, numsamples=nsamples, seed=1)  #1pascal
p = PointSource(signal=n, mpos=m, loc=(-0.1, -0.1, 0.3))
p1 = Mixer(source=p)
wh5 = WriteH5(source=p, name=h5savefile)
wh5.save()

# define the different source signals
r = 52.5
nsamples = int(sfreq * 0.3)
n1 = WNoiseGenerator(sample_freq=sfreq, numsamples=nsamples)
s1 = SineGenerator(sample_freq=sfreq, numsamples=nsamples, freq=freq)
s2 = SineGenerator(sample_freq=sfreq, numsamples=nsamples, freq=freq, \
    phase=pi)

#define a circular array of 8 microphones
m = MicGeom(from_file='array_64_8mic.xml')
m.mpos_tot = array([(r*sin(2*pi*i+pi/8), r*cos(2*pi*i+pi/8), 0) \
Example #5
from os import path
from acoular import __file__ as bpath, MicGeom, WNoiseGenerator, PointSource, \
    Mixer, WriteH5, TimeSamples, PowerSpectra, RectGrid, BeamformerBase, L_p
from pylab import figure, plot, axis, imshow, colorbar, show

# set up the parameters
sfreq = 51200 
duration = 1
nsamples = duration*sfreq
micgeofile = path.join(path.split(bpath)[0],'xml','array_64.xml')
h5savefile = 'three_sources.h5'

# generate test data, in real life this would come from an array measurement
mg = MicGeom( from_file=micgeofile )
n1 = WNoiseGenerator( sample_freq=sfreq, numsamples=nsamples, seed=1 )
n2 = WNoiseGenerator( sample_freq=sfreq, numsamples=nsamples, seed=2, rms=0.7 )
n3 = WNoiseGenerator( sample_freq=sfreq, numsamples=nsamples, seed=3, rms=0.5 )
p1 = PointSource( signal=n1, mpos=mg,  loc=(-0.1,-0.1,0.3) )
p2 = PointSource( signal=n2, mpos=mg,  loc=(0.15,0,0.3) )
p3 = PointSource( signal=n3, mpos=mg,  loc=(0,0.1,0.3) )
pa = Mixer( source=p1, sources=[p2,p3] )
wh5 = WriteH5( source=pa, name=h5savefile )
wh5.save()

# analyze the data and generate map
ts = TimeSamples( name=h5savefile )
ps = PowerSpectra( time_data=ts, block_size=128, window='Hanning' )
rg = RectGrid( x_min=-0.2, x_max=0.2, y_min=-0.2, y_max=0.2, z=0.3, \
increment=0.01 )
bb = BeamformerBase( freq_data=ps, grid=rg, mpos=mg )
pm = bb.synthetic( 8000, 3 )
Lm = L_p( pm )
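
# show map and microphone geometry (a minimal sketch mirroring Example #1,
# not part of the original snippet)
imshow(Lm.T, origin='lower', vmin=Lm.max()-10, extent=rg.extend(),
       interpolation='bicubic')
colorbar()
figure(2)
plot(mg.mpos[0], mg.mpos[1], 'o')
axis('equal')
show()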
Example #6
from acoular import MicGeom, WNoiseGenerator, PointSource, PowerSpectra
from acoular.tools import barspectrum
from numpy import array
from pylab import figure, plot, show, xlim, ylim, xscale, xticks, xlabel, ylabel, \
    grid, real, title, legend

# constants
sfreq = 12800  # sample frequency
band = 3  # octave: 1 ;   1/3-octave: 3 (for plotting)

# set up microphone at (0,0,0)
m = MicGeom()
m.mpos_tot = array([[0, 0, 0]])

# create noise source
n1 = WNoiseGenerator(sample_freq=sfreq, numsamples=10 * sfreq, seed=1)

t = PointSource(signal=n1, mics=m, loc=(1, 0, 1))

# create power spectrum
f = PowerSpectra(time_data=t, window='Hanning', overlap='50%', block_size=4096)

# get spectrum data
spectrum_data = real(f.csm[:, 0, 0])  # get power spectrum from cross-spectral matrix
freqs = f.fftfreq()  # FFT frequencies

# use barspectrum from acoular.tools to create third octave plot data
(f_borders, p, f_center) = barspectrum(spectrum_data, freqs, band, bar=True)
(f_borders_, p_, f_center_) = barspectrum(spectrum_data,
                                          freqs,
                                          band,
                                          bar=False)
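
# a minimal plotting sketch (not part of the original snippet): band levels over
# frequency on a logarithmic axis; assumes p and p_ hold squared sound pressures
# and that barspectrum(..., bar=True) returns border/value arrays of equal length
from numpy import log10

figure()
plot(f_borders, 10 * log10(p / 4e-10), label='bar=True')         # SPL re 20 µPa
plot(f_center_, 10 * log10(p_ / 4e-10), 'o', label='bar=False')
xscale('log')
xticks(f_center, [str(int(fc)) for fc in f_center])
xlabel('band center frequency / Hz')
ylabel('SPL / dB')
grid(True)
title('third-octave spectrum of the white noise point source')
legend()
show()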
Example #7
#===============================================================================
import sys

if sys.version_info > (3, ):
    long = int
nsamples = long(sfreq * tmax)
n1 = WNoiseGenerator(sample_freq=sfreq, numsamples=nsamples)
s1 = SineGenerator(sample_freq=sfreq, numsamples=nsamples, freq=freq)
s2 = SineGenerator(sample_freq=sfreq, numsamples=nsamples, freq=freq, \
    phase=pi)

#===============================================================================
# define the moving source and one fixed source
#===============================================================================

p0 = MovingPointSource(signal=s1, mpos=m, trajectory=tr1)
#t = p0 # use only moving source
p1 = PointSource(signal=n1, mpos=m, loc=(0, R, Z))
t = Mixer(source=p0, sources=[p1])  # mix both signals
#t = p1 # use only fix source

# uncomment to save the signal to a wave file
#ww = WriteWAV(source = t)
#ww.channels = [0,14]
#ww.save()

#===============================================================================
# fixed focus frequency domain beamforming
#===============================================================================

f = PowerSpectra(time_data=t, window='Hanning', overlap='50%', block_size=128)
Example #8
micgeofile = path.join(path.split(bpath)[0], 'xml', 'array_64.xml')
# generate test data, in real life this would come from an array measurement
m = MicGeom(from_file=micgeofile)

#===============================================================================
# Now, the sources (signals and types/positions) are defined.
#===============================================================================
sfreq = 51200
duration = 1
nsamples = duration * sfreq

n1 = WNoiseGenerator(sample_freq=sfreq, numsamples=nsamples, seed=1)
n2 = WNoiseGenerator(sample_freq=sfreq, numsamples=nsamples, seed=2, rms=0.5)
n3 = WNoiseGenerator(sample_freq=sfreq, numsamples=nsamples, seed=3, rms=0.25)
p1 = PointSource(signal=n1, mics=m, loc=(-0.1, -0.1, 0.3))
p2 = PointSource(signal=n2, mics=m, loc=(0.15, 0, 0.17))
p3 = PointSource(signal=n3, mics=m, loc=(0, 0.1, 0.25))
pa = SourceMixer(sources=[p1, p2, p3])

#===============================================================================
# the 3D grid (very coarse to enable fast computation for this example)
#===============================================================================

g = RectGrid3D(x_min=-0.2,
               x_max=0.2,
               y_min=-0.2,
               y_max=0.2,
               z_min=0.1,
               z_max=0.36,
               increment=0.02)
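
#===============================================================================
# a minimal sketch (not part of the original snippet) of how the coarse 3D grid
# could be used for frequency-domain beamforming, reusing the API from Example #1
#===============================================================================
from acoular import PowerSpectra, SteeringVector, BeamformerBase, L_p

ps = PowerSpectra(time_data=pa, block_size=128, window='Hanning')  # spectra of the mixed sources
st = SteeringVector(grid=g, mics=m)                                # steering vectors on the 3D grid
bb = BeamformerBase(freq_data=ps, steer=st)
Lm = L_p(bb.synthetic(8000, 3))  # 3D map (dB) for the 8 kHz third-octave band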
Example #9
#===============================================================================

ufe = UniformFlowEnvironment(ma=0.5, fdv=(0, 1, 0))
g = RectGrid(x_min=-3.0,
             x_max=+3.0,
             y_min=-3.0,
             y_max=+3.0,
             z=Z,
             increment=0.2)

#===============================================================================
# define some sources
#===============================================================================

mp = MovingPointSource(signal=s1, env=ufe, mpos=m, trajectory=tr1)
ps = PointSource(signal=s1, mpos=m, loc=(0, R, Z))
pd = PointSourceDipole(signal=s1,
                       mpos=m,
                       direction=(0.5, 0, 0),
                       loc=(0, -2, Z))
un = UncorrelatedNoiseSource(signal=n1, mpos=mg)
mix = SourceMixer(sources=[ps, pd])

#===============================================================================
# 3rd Octave Filter
#===============================================================================

fi = FiltFiltOctave(source=un, band=freq, fraction='Third octave')

#===============================================================================
# write to H5 file
Example #10
nsamples = duration * sfreq  # number of samples
#micgeofile = path.join(path.split(bpath)[0], 'xml', 'MyMicArray_56.xml')  # microphone array geometry data
micgeofile = '/home3/zengwh/VoiceRec/data/compute/MyMicArray_56.xml'
num_single_sound_data = 100  # number of single-source training samples
m = MicGeom(from_file=micgeofile)

# set the source positions and sound pressure levels
# n2 = WNoiseGenerator(sample_freq=sfreq, numsamples=nsamples, seed=2, rms=0.7)
# n3 = WNoiseGenerator(sample_freq=sfreq, numsamples=nsamples, seed=3, rms=0.5)
for i in range(num_single_sound_data):
    print(i)
    [Rms] = np.around(np.random.random(1), 2)
    n1 = WNoiseGenerator(sample_freq=sfreq, numsamples=nsamples, rms=Rms)
    # randomly draw numbers in [0, 1], round them, and use them as source coordinates
    [source_x, source_y] = np.dot(np.around(np.random.random(2), 2), 3) - 1.5
    p1 = PointSource(signal=n1, mpos=m, loc=(source_x, source_y, 2.5))
    # p2 = PointSource(signal=n2, mpos=m,  loc=(0.15,0,0.3))
    # p3 = PointSource(signal=n3, mpos=m,  loc=(0,0.1,0.3))
    # p = Mixer(source = p1, sources = [p2])
    p = Mixer(source=p1)
    # output file name
    os.makedirs(
        '/home3/zengwh/VoiceRec/data/compute/100000_data/Val/one_source',
        exist_ok=True)
    h5savefile = '/home3/zengwh/VoiceRec/data/compute/100000_data/Val/one_source/x_{:.2f}_y_{:.2f}_rms_{:.2f}_sources.h5'.format(
        source_x, source_y, Rms)
    wh5 = WriteH5(source=p, name=h5savefile)
    #print(wh5)
    wh5.save()
    h5 = h5py.File(h5savefile, 'r')  # reopen the saved file for reading
    h5 = h5['time_data'][::50, :]