Example 1
    # Source placed at the mean critical distance, at the given angular position
    azi = azi + np.pi  # TODO: fix in srs library!!!
    src_sph = np.array([azi, np.pi / 2 - incl, d_critical.mean()])
    src_cart = masp.sph2cart(src_sph)
    src = rec + src_cart
    nSrc = src.shape[0]
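    # Sanity check (sketch, not in the original script): the source should sit
    # at the mean critical distance from the receiver, e.g.
    # assert np.allclose(np.linalg.norm(src - rec, axis=-1), d_critical.mean())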

    # SH orders for receivers
    rec_orders = np.array([1])

    maxlim = rt60_0  # stop the echogram once it runs past this time (or set it to max(rt60))
    limits = np.ones(nBands) * maxlim  # hardcoded: the same limit for every band
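    # (Sketch, not in the original script) a per-band alternative, as used in
    # the second example below: limits = np.minimum(rt60, maxlim), assuming a
    # per-band rt60 array is available.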

    abs_echograms = srs.compute_echograms_sh(room, src, rec, abs_wall, limits,
                                             rec_orders)
    irs = srs.render_rirs_sh(abs_echograms, band_centerfreqs, fs).squeeze().T
    # Normalize as SN3D
    irs *= np.sqrt(4 * np.pi)
    irs *= np.asarray([1, 1. / np.sqrt(3), 1. / np.sqrt(3),
                       1. / np.sqrt(3)])[:, np.newaxis]  # ACN ordering, SN3D weights
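    # Note (sketch, not in the original script): the weights above are the
    # order-1 case of the general N3D -> SN3D conversion, which scales every
    # channel of ambisonic order n by 1 / sqrt(2 * n + 1), e.g. for ACN order
    # up to 1: n = np.array([0, 1, 1, 1]); weights = 1.0 / np.sqrt(2 * n + 1).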

    # %% SYNTHESIZE AUDIOS

    af = audio_files[1]
    # af = '/Volumes/Dinge/audio/410298__inspectorj__voice-request-26b-algeria-will-rise-again-serious.wav'
    # Open audio file and encode into ambisonics
    audio_file_length_samples = int(audio_file_length * fs)

    mono_s_t = librosa.core.load(af, sr=fs,
                                 mono=True)[0][:audio_file_length_samples]
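
A minimal sketch, not part of the original script, of how the loaded mono signal could then be spatialized with the rendered SH IRs, assuming irs has shape (n_channels, n_samples) as produced by the .squeeze().T above; the function name spatialize and the use of scipy.signal.fftconvolve are illustrative choices:

import numpy as np
from scipy.signal import fftconvolve


def spatialize(mono_sig, irs):
    # Convolve the mono source with each ambisonic-channel IR (ACN/SN3D)
    # and stack the results into an (n_channels, n_samples) array.
    return np.stack([fftconvolve(mono_sig, ir) for ir in irs])

# e.g.: ambi_sig = spatialize(mono_s_t, irs)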
Example 2
tic = time.time()

maxlim = 1.5  # stop the echogram once it runs past this time (or set it to max(rt60))
limits = np.minimum(rt60, maxlim)

# Compute echograms
# abs_echograms, rec_echograms, echograms = srs.compute_echograms_sh(room, src, rec, abs_wall, limits, rec_orders)
abs_echograms = srs.compute_echograms_sh(room, src, rec, abs_wall, limits, rec_orders)

# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# RENDERING

# In this case all the information (e.g. SH directivities) are already
# encoded in the echograms, hence they are rendered directly to discrete RIRs
fs = 48000
sh_rirs = srs.render_rirs_sh(abs_echograms, band_centerfreqs, fs)

toc = time.time()
print('Elapsed time is ' + str(toc - tic) + ' seconds.')


# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# GENERATE SOUND SCENES
# Each source is convolved with the respective mic IR, and summed with
# the rest of the sources to create the microphone mixed signals


sourcepath = '../../data/milk_cow_blues_4src.wav'
src_sigs = librosa.core.load(sourcepath, sr=None, mono=False)[0].T[:,:nSrc]

sh_sigs = srs.apply_source_signals_sh(sh_rirs, src_sigs)
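
A minimal sketch, not the library's implementation, of the convolve-and-sum that the comment above describes, under assumed array shapes (the actual sh_rirs returned by srs also carries a receiver axis, so its layout may differ):

import numpy as np
from scipy.signal import fftconvolve


def convolve_and_sum(rirs, sigs):
    """rirs: (ir_len, n_channels, n_src); sigs: (sig_len, n_src)."""
    out = np.zeros((sigs.shape[0] + rirs.shape[0] - 1, rirs.shape[1]))
    for s in range(sigs.shape[1]):        # each source signal...
        for c in range(rirs.shape[1]):    # ...convolved into each SH channel
            out[:, c] += fftconvolve(sigs[:, s], rirs[:, c, s])
    return out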
Example 3
    # Critical distance for the room
    abs_wall = srs.find_abs_coeffs_from_rt(room, np.asarray([rt60]))[0]
    _, d_critical, _ = srs.room_stats(room, abs_wall, verbose=False)
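    # (Note, not from the original script) the critical distance is the range
    # at which direct and reverberant energy are equal; for an omnidirectional
    # source it is roughly 0.057 * sqrt(V / RT60), with V the room volume in m^3.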

    # Random source position, half critical distance
    azi = np.random.rand() * 2 * np.pi
    incl = np.random.rand() * np.pi
    azi = azi + np.pi # TODO: fix in srs library!!!
    src_sph = np.array([azi, np.pi/2-incl, d_critical.mean()/2])
    src_cart = masp.sph2cart(src_sph)
    src = rec + src_cart
    nSrc = src.shape[0]

    # Render echogram
    abs_echograms = srs.compute_echograms_sh(room, src, rec, abs_wall, limits, rec_orders)
    irs = srs.render_rirs_sh(abs_echograms, rt60_f, fs).squeeze().T
    # Normalize as SN3D
    irs *= np.sqrt(4 * np.pi)

    # Write audio file
    audio_file_name = str(i)+'.wav'
    audio_file_path = os.path.join(IR_folder_path, audio_file_name)
    sf.write(audio_file_path, irs.T, samplerate=fs)

    # Write metadata file
    metadata_file_name = str(i)+'.csv'
    metadata_file_path = os.path.join(IR_folder_path, metadata_file_name)
    with open(metadata_file_path, 'w', newline='') as file:
        writer = csv.writer(file)
        writer.writerow(["azi", (azi - np.pi)*360/(2*np.pi)])
        writer.writerow(["ele", (np.pi/2 - incl)*360/(2*np.pi)])