def performMeasurement(n_runs, decay_time, noise_color, source_volume):
    """Play a two-channel excitation signal and record every microphone.

    Runs ``n_runs`` consecutive excite-and-record cycles on the NI DAQ
    hardware, driving two non-correlated sound sources, and returns the
    raw microphone pressure data.

    Args:
        n_runs: Number of measurement repetitions.
        decay_time: Recording time (s) after the excitation stops.
        noise_color: Noise colour forwarded to helpers.createExcitation.
            NOTE: ignored when a stored excitation is configured (that
            path always uses pink noise).
        source_volume: Output level in percent (0-100) scaling both
            analog output channels.

    Returns:
        numpy array of the recorded data, one entry per run
        (run x mic x sample).
    """
    # Load configuration file - this needs to be absolute path for exe file
    config_data = helpers.parseConfigFile(path=r'config.cfg')  # Nick PC

    ## The system names for the NI devices can be found using the NI-MAX
    ## package which is installed as part of the DAQ-mx installation.
    ## These names can be changed in NI-MAX, but need to be updated in the
    ## software configuration file.
    ao_device_name = config_data['aodevicename']  # 2-channel voltage output
    ai_device_name = config_data['aidevicename']  # 8-channel microphone input
    dio_device_name = config_data['diodevicename']  # digital input/output

    # Measurement parameters in config file
    fs = int(config_data['samplingfrequency'])  # Sampling frequency (Hz)
    # Rise time prevents sharp clicks on the sound sources
    rise_time = float(config_data['risetime'])
    excitation_time = float(config_data['excitationtime'])
    mics = config_data['micid']  # Mic IDs as listed in NI-MAX
    channel_name = config_data['micnames']  # Microphone names
    analog_output_id = config_data['outputid']  # Output IDs from NI-MAX
    # 'true' -> use prebuilt pseudo-random signal; otherwise generate a new
    # signal in code (slower, slightly less repeatable results)
    storedSignal = config_data['usestoredexcitation']

    # Generate excitation arrays - both signals are non-correlated so each
    # sound source gets an independent excitation.
    if storedSignal == 'true':
        excitation_array1, t_array = helpers.usePreExcitation(
            rise_time=rise_time,
            excitation_time=excitation_time,
            decay_time=decay_time,
            fs=fs,
            noise_color="pink",
            signal_num=1)
        excitation_array2, t_array = helpers.usePreExcitation(
            rise_time=rise_time,
            excitation_time=excitation_time,
            decay_time=decay_time,
            fs=fs,
            noise_color="pink",
            signal_num=2)
    else:
        excitation_array1, t_array = helpers.createExcitation(
            rise_time=rise_time,
            excitation_time=excitation_time,
            decay_time=decay_time,
            fs=fs,
            noise_color=noise_color)
        excitation_array2, t_array = helpers.createExcitation(
            rise_time=rise_time,
            excitation_time=excitation_time,
            decay_time=decay_time,
            fs=fs,
            noise_color=noise_color)

    # Build 2d array to excite both sources using 2x analog outputs
    twoChanEx = (source_volume / 100) * np.vstack(
        [excitation_array1, excitation_array2])

    # Total measurement duration and corresponding number of samples.
    # BUGFIX: was int(t_tot) * fs, which truncated any fractional second of
    # t_tot (e.g. rise_time=0.5) and dropped samples at the end of the run.
    t_tot = rise_time + excitation_time + decay_time
    N_samp = int(round(t_tot * fs))

    # Setup NI tasks and begin measurement; the context managers guarantee
    # the hardware tasks are released even if a run fails.
    with nidaqmx.task.Task("OutputTask") as ao_task, nidaqmx.task.Task(
            'InputTask') as ai_task:
        print("Setting up generator")
        ################## Setup analogue output ################
        for ao_channel in analog_output_id:
            # Build channel name based on config details
            ao_chan_name = ao_device_name + '/' + ao_channel
            print('AO channel name: {}'.format(ao_chan_name))
            ao_task.ao_channels.add_ao_voltage_chan(
                ao_chan_name,
                name_to_assign_to_channel=ao_channel,
                min_val=-3.0,
                max_val=3.0)
        # Setup task timing - use fixed sample length
        ao_task.timing.cfg_samp_clk_timing(fs,
                                           sample_mode=AcquisitionType.FINITE,
                                           samps_per_chan=N_samp)
        # Store outgoing data on chassis but do not start measurement
        ao_task.write(twoChanEx, auto_start=False)

        ############ Setup microphone inputs #############
        print("Setting up inputs")
        for micID, channel in zip(mics, channel_name):
            # Build channel name based on config details
            ai_chan_name = ai_device_name + '/' + micID
            print('AI channel name: {}'.format(ai_chan_name))
            ai_task.ai_channels.add_ai_microphone_chan(
                ai_chan_name,
                name_to_assign_to_channel=channel,
                mic_sensitivity=22.4,
                max_snd_press_level=140,
                units=nidaqmx.constants.SoundPressureUnits.PA)
        ai_task.timing.cfg_samp_clk_timing(fs,
                                           sample_mode=AcquisitionType.FINITE,
                                           samps_per_chan=N_samp)

        results = []
        print("Starting measurements")
        for nxd in range(n_runs):
            print("Run: {}".format(nxd))
            logger.add_text("Run: {}/{}".format(nxd + 1, n_runs))
            # Start output signal and measurement
            ao_task.start()
            ai_task.start()
            # Wait until both tasks are complete (5 s grace on top of t_tot)
            ao_task.wait_until_done(timeout=t_tot + 5)
            ai_task.wait_until_done(timeout=t_tot + 5)
            # Record data from mic
            data = ai_task.read(number_of_samples_per_channel=N_samp)
            print('Shape of data: {}'.format(np.shape(data)))
            # Stop both tasks so they can be restarted on the next run
            ao_task.stop()
            ai_task.stop()
            results.append(data)
        print("Measurement completed")
        print('Shape of results: {}'.format(np.shape(results)))
        # Store results in np array and return this data
        results_np = np.array(results)
        print(results_np)
        return results_np
def performMeasurement(n_runs, decay_time, noise_color):
    """Play a single-channel excitation signal and record all microphones.

    NOTE(review): this re-definition shadows the earlier four-argument
    performMeasurement above it in this module, so only this version is
    reachable at runtime (an apparent leftover from an older revision).

    Args:
        n_runs: Number of excite-and-record repetitions.
        decay_time: Recording time (s) after the excitation stops.
        noise_color: Noise colour forwarded to helpers.createExcitation.

    Returns:
        numpy array of the recorded microphone data, one entry per run.
    """
    # Read configuration file - needs to be absolute path for exe file
    config_data = helpers.parseConfigFile(
        path=r'D:\Scripts\reverb-tkinter-interface\config.cfg')
    # NI device system names as configured in NI-MAX
    ao_device_name = config_data['aodevicename']
    ai_device_name = config_data['aidevicename']
    dio_device_name = config_data['diodevicename']
    # Measurement parameters from the config file
    fs = int(config_data['samplingfrequency'])
    rise_time = float(config_data['risetime'])
    excitation_time = float(config_data['excitationtime'])
    # decay_time = float(config_data['decaytime'])
    mics = config_data['micid']
    channel_name = config_data['micnames']
    analog_output_id = config_data['outputid']
    # Generate excitation array
    excitation_array, t_array = helpers.createExcitation(
        rise_time=rise_time,
        excitation_time=excitation_time,
        decay_time=decay_time,
        fs=fs,
        noise_color=noise_color)
    t_tot = rise_time + excitation_time + decay_time
    # NOTE(review): int(t_tot) truncates fractional seconds BEFORE the
    # multiply - probably intended int(t_tot * fs); confirm config values
    # always give an integer t_tot.
    N_samp = int(t_tot) * fs
    # Setup NI task
    with nidaqmx.task.Task("OutputTask") as ao_task, nidaqmx.task.Task(
            'InputTask') as ai_task:
        print("Setting up generator")
        ################## Setup analogue output ################
        ao_chan_name = ao_device_name + '/' + analog_output_id
        print('AO channel name: {}'.format(ao_chan_name))
        ao_task.ao_channels.add_ao_voltage_chan(
            ao_chan_name,
            name_to_assign_to_channel='speaker_output',
            min_val=-3.0,
            max_val=3.0)
        # Fixed-length, hardware-timed output
        ao_task.timing.cfg_samp_clk_timing(fs,
                                           sample_mode=AcquisitionType.FINITE,
                                           samps_per_chan=N_samp)
        # Buffer the signal on the device but do not start yet
        ao_task.write(excitation_array, auto_start=False)
        ############ Setup microphone inputs #############
        print("Setting up inputs")
        for micID, channel in zip(mics, channel_name):
            ai_chan_name = ai_device_name + '/' + micID
            print('AI channel name: {}'.format(ai_chan_name))
            ai_task.ai_channels.add_ai_microphone_chan(
                ai_chan_name,
                name_to_assign_to_channel=channel,
                mic_sensitivity=22.4,
                max_snd_press_level=140,
                units=nidaqmx.constants.SoundPressureUnits.PA)
        ai_task.timing.cfg_samp_clk_timing(fs,
                                           sample_mode=AcquisitionType.FINITE,
                                           samps_per_chan=N_samp)
        results = []
        print("Starting measurements")
        for nxd in range(n_runs):
            print("Run: {}".format(nxd))
            # Start output signal and measurement
            ao_task.start()
            ai_task.start()
            # Wait until both tasks are complete (5 s grace period)
            ao_task.wait_until_done(timeout=t_tot + 5)
            ai_task.wait_until_done(timeout=t_tot + 5)
            # Record data from mic
            data = ai_task.read(number_of_samples_per_channel=N_samp)
            # Stop both tasks so they can be restarted on the next run
            ao_task.stop()
            ai_task.stop()
            results.append(data)
        print("Measurement completed")
        results_np = np.array(results)
        print(results_np)
        return results_np
def performRTcalculation(data, volume, temp, relativeHumidity, pressure,
                         db_decay='t20', decay_time=5):
    """Calculate per-band reverberation times and absorption areas.

    Ensemble-averages the recorded runs, 1/3-octave filters each mic's
    signal, fits the decay slope per band (T20/T30/'all') and derives the
    sound absorption area via iso354.

    NOTE(review): this definition is shadowed by a later re-definition of
    performRTcalculation in this module.

    Args:
        data: Recorded runs (run x mic x sample) - presumably the output
            of performMeasurement; TODO confirm against caller.
        volume: Room volume passed to iso354.soundAbsorptionArea.
        temp: Temperature for the absorption calculation.
        relativeHumidity: Relative humidity for the absorption calculation.
        pressure: Pressure; multiplied by 1000 before use (kPa -> Pa,
            presumably - verify units with iso354).
        db_decay: Evaluation range selector: 't20', 't30' or 'all'.
        decay_time: Decay portion of the measurement (s).

    Returns:
        pandas DataFrame indexed by frequency with per-mic RTs, their
        'avg' and the absorption area 'abs_area'.
    """
    # Set path to configuration file - needs to be absolute for executable
    # config_data = helpers.parseConfigFile(path=r'D:\Scripts\reverb-tkinter-interface\config.cfg')
    config_data = helpers.parseConfigFile(
        path=r'D:\ReverberationRoom\tkinter-interface\config.cfg')
    # Read all configuration data
    print('Reading config data from .config file')
    fs = int(config_data['samplingfrequency'])
    rise_time = float(config_data['risetime'])
    excitation_time = float(config_data['excitationtime'])
    mics = config_data['micid']
    channel_name = config_data['micnames']
    analog_output_id = config_data['outputid']
    estRT = float(config_data['estimatedrt'])
    pRef = float(config_data['referencepressure'])
    p_ref = pRef
    n_mics = int(config_data['nummics'])
    window_time = float(config_data['windowtime'])
    fLow = int(config_data['flow'])
    fHigh = int(config_data['fhigh'])
    averaging_type = config_data['avgtype']  # 'exp' or 'lin' averaging
    signal_type = config_data['usestoredexcitation']
    # General parameters
    t_tot = rise_time + excitation_time + decay_time
    # NOTE(review): int(t_tot) truncates fractional seconds - see
    # performMeasurement; probably intended int(t_tot * fs).
    N_samp = int(t_tot) * fs
    dt = 1.0 / fs
    t_array = np.arange(start=0, stop=t_tot, step=dt)
    # Averaging window derived from the estimated RT (RT/48 seconds)
    wT = estRT / 48
    windowN = int(wT * fs)
    window_length = windowN
    # NOTE(review): prints window_time from config, but the window actually
    # used is wT = estRT/48 - the printed duration can be wrong.
    print("Length of window: {}s : {} samples".format(window_time, windowN))
    total_reverb = {}
    mic_location = [1, 2, 3, 4, 5, 6]
    # Perform ensemble averaging of individual mics for number of runs
    # averaged_data = helpers.ensemble_average(raw_data=data, n_mics=n_mics)
    print('Performing ensemble averaging at each microphone')
    mean_data = np.mean(a=data, axis=0)  # average over runs (axis 0)
    # Build pandas df for data
    print('Inserting data into pandas dataFrame')
    df = pd.DataFrame()
    rt_df = pd.DataFrame(columns=['frequency_Hz'] + mics)
    for row, mic in zip(mean_data, mics):
        print('Inserted mic: {}'.format(mic))
        df['mean_{}'.format(mic)] = row
    # Step though mics and calculate RT
    for mic in mics:
        print('Calculating RT for mic: {}'.format(mic))
        # Perform 3rd octave filters
        print(
            'Applying 1/3rd Octave filters to data between {}Hz - {}Hz'.format(
                fLow, fHigh))
        filtered_data = filterAndBands.thirdOctFilters(
            data=df['mean_{}'.format(mic)], fs=fs, f_low=fLow, f_high=fHigh)
        rt_df['frequency_Hz'] = filtered_data.keys()
        # Convert each band to dB re p_ref
        for key in filtered_data:
            df['{}_{}Hz'.format(mic, key)] = 10 * np.log10(
                (abs(filtered_data[key])**2) / (p_ref**2))
        # df['{}_log_data'.format(mic)] = 10*np.log10(abs(df['mean_{}'.format(mic)])/p_ref)
        df['{}_samples'.format(mic)] = np.arange(len(
            df['mean_{}'.format(mic)]))
        df['{}_seconds'.format(mic)] = df['{}_samples'.format(mic)] / fs
        dummy_RT = []
        for key in filtered_data:
            print("Averaging {}Hz band for {}".format(key, mic))
            # Exponential time weighting of the band level
            df['{}_{}Hz_exp'.format(mic, key)] = df['{}_{}Hz'.format(
                mic, key)].ewm(span=window_length, adjust=False).mean()
            # df['{}_{}Hz_lin'.format(mic, key)] = df['{}_{}Hz'.format(mic,key)].rolling(window=window_length).mean()
            # averaging_type selects the '..._exp' or '..._lin' column
            windowed_data = df['{}_{}Hz_{}'.format(mic, key,
                                                   averaging_type)].values
            t_array = df['{}_seconds'.format(mic)].values
            print('Seperating signal into sections for evaluation')
            # [start, end] sample indices of the excitation section
            excitation_range = [
                int((rise_time) * fs),
                int((rise_time + excitation_time) * fs)
            ]
            # Steady excitation level, skipping 0.5 s at each edge
            excitation_level = filterAndBands.dBavg(
                windowed_data[excitation_range[0] +
                              int(0.5 * fs):excitation_range[1] -
                              int(0.5 * fs)])
            trigger_level = excitation_level - 5
            print('Excitation - 5dB: {}'.format(trigger_level))
            min_decay_level = min(windowed_data[excitation_range[1]:])
            # Background level estimated from the last 2 seconds
            bg_start = len(windowed_data) - 2 * fs
            bg_level = filterAndBands.dBavg(windowed_data[bg_start:])
            # End-of-fit level: 20/30 dB below trigger, but always at least
            # 5 dB above background (10 dB above for 'all')
            if db_decay == 't20':
                bg_trigger_level = max([trigger_level - 20, bg_level + 5])
                print('Trigger level - 20dB: {}'.format(bg_trigger_level))
            elif db_decay == 't30':
                bg_trigger_level = max([trigger_level - 30, bg_level + 5])
                print('Trigger level - 30dB: {}'.format(bg_trigger_level))
            elif db_decay == 'all':
                bg_trigger_level = bg_level + 10
                print('Background + 10dB: {}'.format(bg_trigger_level))
            # First sample after excitation that falls below each threshold
            ndx_start = np.where(
                windowed_data[excitation_range[1]:] <= trigger_level)
            decay_start = ndx_start[0][0]
            # NOTE(review): format string has one placeholder but two args;
            # it prints the SAMPLE index while saying "seconds".
            print('Start of evaluation range {} seconds'.format(
                decay_start, decay_start / fs))
            ndx_end = np.where(
                windowed_data[excitation_range[1]:] <= bg_trigger_level)
            decay_end = ndx_end[0][0]
            print('End of evaluation range {} samples / {} seconds'.format(
                decay_end, decay_end / fs))
            print("Calculating RT of {}Hz band for {}".format(key, mic))
            # Least-squares fit over the decay section
            fitting_level = windowed_data[excitation_range[1] +
                                          decay_start:excitation_range[1] +
                                          decay_end]
            fitting_times = t_array[excitation_range[1] +
                                    decay_start:excitation_range[1] +
                                    decay_end]
            slope, intercept, r_value, p_value, std_err = stats.linregress(
                x=fitting_times, y=fitting_level)
            print('Slope: {}, intercept: {}, R-squared: {}'.format(
                slope, intercept, r_value))
            t_plot = np.arange(start=0,
                               stop=rise_time + excitation_time + decay_time,
                               step=0.1)
            y_fitted = slope * t_plot + intercept
            print("R-squared: {}".format(r_value))
            # RT = time for a 60 dB drop at the fitted slope
            RT = -60 / slope
            dummy_RT.append(RT)
            print("Reverberation Time at {} Hz: {}s".format(key, RT))
        rt_df[mic] = dummy_RT
    print('Reverb time prior to averaging')
    print(rt_df)
    rt_df = rt_df.set_index('frequency_Hz')
    # Average RT across mics per frequency band
    rt_df['avg'] = rt_df.mean(axis=1)
    print('Averaged RT')
    print(rt_df)
    print('Calculating Absorption Area')
    a = []
    for freq in rt_df.index.array:
        print('Calculating absorption area for {} Hz'.format(freq))
        rt = rt_df.loc[freq]
        print(rt)
        rt = rt['avg']
        print('Average RT: {}'.format(rt))
        a.append(
            iso354.soundAbsorptionArea(V=volume,
                                       RT=rt,
                                       T=temp,
                                       f=int(freq),
                                       hr=relativeHumidity,
                                       Pa=pressure * 1000))
    rt_df['abs_area'] = a
    return rt_df
import tkinter as tk
import tkinter.ttk as ttk
from tkinter import filedialog
import os
import pandas as pd
import numpy as np
import nidaqmx
from nidaqmx.constants import AcquisitionType
import helpers

# NOTE(review): everything below runs at import time (config parsing and an
# NI input task) - side effects on import make this module hard to reuse.
# config_data = helpers.parseConfigFile(path=r'D:\Scripts\reverb-tkinter-interface\config.cfg') # Rob PC
config_data = helpers.parseConfigFile(
    path=r'D:\Scripts\reverb-tkinter-interface\config.cfg')  # Scrum PC
ao_device_name = config_data['aodevicename']
ai_device_name = config_data['aidevicename']
dio_device_name = config_data['diodevicename']
fs = int(config_data['samplingfrequency'])
# decay_time = float(config_data['decaytime'])
mics = config_data['micid']
channel_name = config_data['micnames']
t_tot = 1200  # hard-coded 20-minute capture (seconds)
N_samp = int(t_tot) * fs
n_runs = 1
ndx = 3  # NOTE(review): not used in the visible block - verify before removal
# NOTE(review): this task body only prints - looks like a truncated or
# scratch test snippet; confirm whether the input setup was meant to follow.
with nidaqmx.task.Task('InputTask') as ai_task:
    ############ Setup microphone inputs #############
    print("Setting up inputs")
def performRTcalculation(data, volume, temp, relativeHumidity, pressure,
                         db_decay='t20', decay_time=5):
    """Calculate per-band reverberation times and absorption areas (debug build).

    Same pipeline as the earlier variant but with verbose debug printing,
    a logger hook, and the averaging window taken directly from the
    'windowlength' config entry instead of estRT/48.

    NOTE(review): this definition shadows the earlier performRTcalculation
    and is itself shadowed by the later three-argument re-definition.

    Args:
        data: Recorded runs (run x mic x sample) - presumably the output
            of performMeasurement; TODO confirm against caller.
        volume: Room volume passed to iso354.soundAbsorptionArea.
        temp: Temperature for the absorption calculation.
        relativeHumidity: Relative humidity for the absorption calculation.
        pressure: Pressure; multiplied by 1000 before use (kPa -> Pa,
            presumably - verify units with iso354).
        db_decay: Evaluation range selector: 't20', 't30' or 'all'.
        decay_time: Decay portion of the measurement (s).

    Returns:
        pandas DataFrame indexed by frequency with per-mic RTs, their
        'avg' and the absorption area 'abs_area'.
    """
    print("Shape of data into RT calc: {}".format(np.shape(data)))
    # Load configuration file - path needs to be absolute for executable to find it
    config_data = helpers.parseConfigFile(path=r'config.cfg')
    print(config_data)
    # Read all configuration data
    print('Reading config data from .config file')
    fs = int(config_data['samplingfrequency'])  # Sampling frequency
    rise_time = float(config_data['risetime'])  # Rise time for excitation
    # Duration that excitation is played for
    excitation_time = float(config_data['excitationtime'])
    mics = config_data['micid']  # IDs for microphones as listed in NI-MAX
    channel_name = config_data['micnames']  # Microphone names
    analog_output_id = config_data['outputid']  # Output IDs from NI-MAX
    # Estimated reverberation time for room - allows selection of filtering time
    estRT = float(config_data['estimatedrt'])
    # Reference pressure value for conversion to dB
    pRef = float(config_data['referencepressure'])
    p_ref = pRef
    n_mics = int(config_data['nummics'])  # Number of microphones used
    window_length = float(config_data['windowlength'])  # Window length (s)
    fLow = int(config_data['flow'])  # Low limit for 1/3rd octave bands
    fHigh = int(config_data['fhigh'])  # High limit for 1/3rd octave bands
    averaging_type = config_data['avgtype']  # exponential or linear averaging
    # If a pre-calculated excitation signal was used in measurements
    signal_type = config_data['usestoredexcitation']
    # General parameters
    t_tot = rise_time + excitation_time + decay_time  # Total duration (s)
    # NOTE(review): int(t_tot) truncates fractional seconds - see
    # performMeasurement; probably intended int(t_tot * fs).
    N_samp = int(t_tot) * fs
    dt = 1.0 / fs  # Sample spacing
    t_array = np.arange(start=0, stop=t_tot,
                        step=dt)  # Array of time points (seconds)
    windowN = int(window_length * fs)  # Number of samples in window
    print("Length of window: {}s : {} samples".format(window_length, windowN))
    # Setup loop parameters
    total_reverb = {}
    mic_location = [1, 2, 3, 4, 5, 6]
    # Perform ensemble averaging of individual mics for number of runs
    # averaged_data = helpers.ensemble_average(raw_data=data, n_mics=n_mics)
    print('Performing ensemble averaging at each microphone')
    print("Shape of data before ensemble average: {}".format(np.shape(data)))
    mean_data = np.mean(a=data, axis=0)  # average over runs (axis 0)
    print("Shape of data after ensemble average: {}".format(
        np.shape(mean_data)))
    # Build pandas df for data
    print('Inserting data into pandas dataFrame')
    df = pd.DataFrame()
    rt_df = pd.DataFrame(columns=['frequency_Hz'] + mics)
    for row, mic in zip(mean_data, mics):
        print('Inserted mic: {}'.format(mic))
        df['mean_{}'.format(mic)] = row
    print(df)
    # Step though mics and calculate RT
    for mic in mics:
        print('Calculating RT for mic: {}'.format(mic))
        logger.add_text('Calculating RT for mic: {}'.format(mic))
        # Perform 3rd octave filters
        print(
            'Applying 1/3rd Octave filters to data between {}Hz - {}Hz'.format(
                fLow, fHigh))
        filtered_data = filterAndBands.thirdOctFilters(
            data=df['mean_{}'.format(mic)], fs=fs, f_low=fLow, f_high=fHigh)
        rt_df['frequency_Hz'] = filtered_data.keys()
        # Convert each band to dB re p_ref
        for key in filtered_data:
            df['{}_{}Hz'.format(mic, key)] = 10 * np.log10(
                (abs(filtered_data[key])**2) / (p_ref**2))
        # df['{}_log_data'.format(mic)] = 10*np.log10(abs(df['mean_{}'.format(mic)])/p_ref)
        df['{}_samples'.format(mic)] = np.arange(len(
            df['mean_{}'.format(mic)]))
        df['{}_seconds'.format(mic)] = df['{}_samples'.format(mic)] / fs
        dummy_RT = []
        for key in filtered_data:
            print("Averaging {}Hz band for {}".format(key, mic))
            # Exponential time weighting of the band level
            df['{}_{}Hz_exp'.format(mic, key)] = df['{}_{}Hz'.format(
                mic, key)].ewm(span=windowN, adjust=False).mean()
            print(df['{}_{}Hz_exp'.format(mic, key)])
            # exit()
            # df['{}_{}Hz_lin'.format(mic, key)] = df['{}_{}Hz'.format(mic,key)].rolling(window=windowN).mean()
            # averaging_type selects the '..._exp' or '..._lin' column
            windowed_data = df['{}_{}Hz_{}'.format(mic, key,
                                                   averaging_type)].values
            print(windowed_data)
            # exit()
            t_array = df['{}_seconds'.format(mic)].values
            print('Seperating signal into sections for evaluation')
            # [start, end] sample indices of the excitation section
            excitation_range = [
                int((rise_time) * fs),
                int((rise_time + excitation_time) * fs)
            ]
            print('Excitation range:')
            print(excitation_range)
            # Steady excitation level, skipping 0.5 s at each edge
            excitation_level = filterAndBands.dBavg(
                windowed_data[excitation_range[0] +
                              int(0.5 * fs):excitation_range[1] -
                              int(0.5 * fs)])
            print('Excitation level: {}'.format(excitation_level))
            trigger_level = excitation_level - 5
            print('Decay trigger level: {}'.format(trigger_level))
            min_decay_level = min(windowed_data[excitation_range[1]:])
            print('Minimum decay level: {}'.format(min_decay_level))
            # Background level estimated from the last 2 seconds
            bg_start = len(windowed_data) - 2 * fs
            print('Start of bg evaluation - last 2 sec: {}'.format(bg_start))
            bg_level = filterAndBands.dBavg(windowed_data[bg_start:])
            print('Background noise level: {}'.format(bg_level))
            # End-of-fit level: 20/30 dB below trigger, but always at least
            # 5 dB above background (10 dB above for 'all')
            if db_decay == 't20':
                bg_trigger_level = max([trigger_level - 20, bg_level + 5])
                print('Trigger level - 20dB: {}'.format(bg_trigger_level))
                print('Headroom: {}'.format(bg_trigger_level - bg_level))
            elif db_decay == 't30':
                bg_trigger_level = max([trigger_level - 30, bg_level + 5])
                print('Trigger level - 30dB: {}'.format(bg_trigger_level))
                print('Headroom: {}'.format(bg_trigger_level - bg_level))
            elif db_decay == 'all':
                bg_trigger_level = bg_level + 10
                print('Background + 10dB: {}'.format(bg_trigger_level))
                print('Headroom: {}'.format(bg_trigger_level - bg_level))
            # First sample after excitation that falls below each threshold
            ndx_start = np.where(
                windowed_data[excitation_range[1]:] <= trigger_level)
            decay_start = ndx_start[0][0]
            # NOTE(review): format string has one placeholder but two args;
            # it prints the SAMPLE index while saying "seconds".
            print('Start of evaluation range {} seconds'.format(
                decay_start, decay_start / fs))
            ndx_end = np.where(
                windowed_data[excitation_range[1]:] <= bg_trigger_level)
            decay_end = ndx_end[0][0]
            print('End of evaluation range {} samples / {} seconds'.format(
                decay_end, decay_end / fs))
            # exit()
            print("Calculating RT of {}Hz band for {}".format(key, mic))
            # Least-squares fit over the decay section
            fitting_level = windowed_data[excitation_range[1] +
                                          decay_start:excitation_range[1] +
                                          decay_end]
            print("Fitting level:")
            print(fitting_level)
            fitting_times = t_array[excitation_range[1] +
                                    decay_start:excitation_range[1] +
                                    decay_end]
            print("Fitting times:")
            print(fitting_times)
            slope, intercept, r_value, p_value, std_err = stats.linregress(
                x=fitting_times, y=fitting_level)
            print('Slope: {}, intercept: {}, R-squared: {}'.format(
                slope, intercept, r_value))
            t_plot = np.arange(start=0,
                               stop=rise_time + excitation_time + decay_time,
                               step=0.1)
            y_fitted = slope * t_plot + intercept
            print("R-squared: {}".format(r_value))
            # RT = time for a 60 dB drop at the fitted slope
            RT = -60 / slope
            dummy_RT.append(RT)
            print("Reverberation Time at {} Hz: {}s".format(key, RT))
            # exit()
        rt_df[mic] = dummy_RT
    print('Reverb time prior to averaging')
    print(rt_df)
    rt_df = rt_df.set_index('frequency_Hz')
    # Average RT across mics per frequency band
    rt_df['avg'] = rt_df.mean(axis=1)
    print('Averaged RT')
    print(rt_df)
    print('Calculating Absorption Area')
    a = []
    for freq in rt_df.index.array:
        print('Calculating absorption area for {} Hz'.format(freq))
        rt = rt_df.loc[freq]
        print(rt)
        rt = rt['avg']
        print('Average RT: {}'.format(rt))
        a.append(
            iso354.soundAbsorptionArea(V=volume,
                                       RT=rt,
                                       T=temp,
                                       f=int(freq),
                                       hr=relativeHumidity,
                                       Pa=pressure * 1000))
    rt_df['abs_area'] = a
    return rt_df
def performRTcalculation(data, db_decay='t20', decay_time=5):
    """Calculate reverberation times per 1/3-octave band and mic location.

    Ensemble-averages the recorded runs, filters each mic position into
    1/3-octave bands (100-5000 Hz), time-weights the level with a moving
    average and fits the decay slope to obtain the reverberation time.

    Args:
        data: Raw recorded runs fed to helpers.ensemble_average.
        db_decay: Evaluation range selector: 't20', 't30' or 'max'
            (NOTE: the other performRTcalculation variants call this
            last mode 'all' - inconsistent naming kept for callers).
        decay_time: Decay portion of the measurement (s).

    Returns:
        pandas DataFrame with one row per mic location plus 'Mean' and
        'StdDev' rows, columns being the band centre frequencies.
    """
    # Set path to configuration file - needs to be absolute for executable
    config_data = helpers.parseConfigFile(
        path=r'D:\Scripts\reverb-tkinter-interface\config.cfg')
    # Read all configuration data
    ao_device_name = config_data['aodevicename']
    ai_device_name = config_data['aidevicename']
    dio_device_name = config_data['diodevicename']
    fs = int(config_data['samplingfrequency'])
    rise_time = float(config_data['risetime'])
    excitation_time = float(config_data['excitationtime'])
    # decay_time = float(config_data['decaytime'])
    mics = config_data['micid']
    channel_name = config_data['micnames']
    analog_output_id = config_data['outputid']
    estRT = float(config_data['estimatedrt'])
    pRef = float(config_data['referencepressure'])
    n_mics = int(config_data['nummics'])

    # Perform ensemble averaging of individual mics for number of runs
    averaged_data = helpers.ensemble_average(raw_data=data, n_mics=n_mics)

    # General parameters.
    # BUGFIX: was int(t_tot) * fs, which truncated fractional seconds of
    # t_tot before multiplying by the sample rate.
    t_tot = rise_time + excitation_time + decay_time
    N_samp = int(round(t_tot * fs))
    dt = 1.0 / fs
    t_array = np.arange(start=0, stop=t_tot, step=dt)
    # Averaging window derived from the estimated RT (RT/48 seconds)
    wT = estRT / 48
    windowN = int(wT * fs)
    print("Length of window: {}".format(windowN))
    total_reverb = {}
    mic_location = [1, 2, 3, 4, 5, 6]

    # Loop through each mic location and calculate reverberation time.
    # 'colors' is a module-level sequence (plot colours) - unused here
    # beyond pairing with the mic data.
    for mic_location_data, color, mic_loc in zip(averaged_data, colors,
                                                 mic_location):
        # Convert signal into third octave banded signals
        filtered_data = filterAndBands.thirdOctFilters(data=mic_location_data,
                                                       fs=fs,
                                                       f_low=100,
                                                       f_high=5000)
        plots = []
        reverb_time = []
        freq_step = []
        # Step through third octave bands
        for key in filtered_data:
            print("Processing: {}Hz at mic {}".format(key, mic_loc))
            y = filtered_data[key]
            # Band level in dB re pRef
            y_log = 10 * np.log10((abs(y)**2) / (pRef**2))
            # Apply time weighting window (moving average via FFT convolution)
            # windowed_data = np.convolve(y_log, np.ones((windowN,))/windowN, mode='valid')
            windowed_data = signal.fftconvolve(in1=y_log,
                                               in2=np.ones(
                                                   (windowN, )) / windowN,
                                               mode='valid')
            t_array = np.arange(start=0,
                                stop=len(windowed_data) / fs,
                                step=dt)
            # Calculate level of excitation and start decay curve 5dB below
            # excitation level; skip 0.5 s at each edge of the excitation
            excitation_range = [
                int((rise_time) * fs),
                int((rise_time + excitation_time) * fs)
            ]
            excitation_level = filterAndBands.dBavg(
                windowed_data[excitation_range[0] +
                              int(0.5 * fs):excitation_range[1] -
                              int(0.5 * fs)])
            trigger_level = excitation_level - 5
            # Calculate background level (last 2 s) and keep the fit range
            # safely above it
            ################## Consider using t20/T30 in place of this - have a setting in GUI ######################
            min_decay_level = min(windowed_data[excitation_range[1]:])
            bg_start = len(windowed_data) - 2 * fs
            bg_level = filterAndBands.dBavg(windowed_data[bg_start:])
            if db_decay == 't20':
                bg_trigger_level = max([trigger_level - 20, bg_level + 5])
            elif db_decay == 't30':
                bg_trigger_level = max([trigger_level - 30, bg_level + 5])
            elif db_decay == 'max':
                bg_trigger_level = bg_level + 10
            # Locate the time where the decay curve should start and end from
            ndx_start = np.where(
                windowed_data[excitation_range[1]:] <= trigger_level)
            decay_start = ndx_start[0][0]
            ndx_end = np.where(
                windowed_data[excitation_range[1]:] <= bg_trigger_level)
            decay_end = ndx_end[0][0]
            # Perform least squares fit to the decay curve and calculate the
            # reverberation time (time for a 60 dB drop at the fitted slope)
            fitting_level = windowed_data[excitation_range[1] +
                                          decay_start:excitation_range[1] +
                                          decay_end]
            fitting_times = t_array[excitation_range[1] +
                                    decay_start:excitation_range[1] +
                                    decay_end]
            slope, intercept, r_value, p_value, std_err = stats.linregress(
                x=fitting_times, y=fitting_level)
            t_plot = np.arange(start=0,
                               stop=rise_time + excitation_time + decay_time,
                               step=0.1)
            y_fitted = slope * t_plot + intercept
            RT = -60 / slope
            reverb_time.append(RT)
            freq_step.append(key)
            print("Reverberation Time: {}s".format(RT))
        total_reverb[mic_loc] = reverb_time

    # Store reverb times and calculate mean statistics.
    # NOTE: freq_step holds the band keys from the last mic processed.
    RT_df = pd.DataFrame.from_dict(total_reverb,
                                   orient='index',
                                   columns=freq_step)
    temp_df = pd.DataFrame({
        'Mean': RT_df.mean(axis=0),
        'StdDev': RT_df.std(axis=0)
    })
    # BUGFIX: DataFrame.append was deprecated in pandas 1.4 and removed in
    # 2.0 - pd.concat is the supported replacement with identical output.
    final_df = pd.concat([RT_df, temp_df.transpose()])
    return final_df