# Assumed import paths for the ecosound library, based on its examples.
from ecosound.core.audiotools import Sound
from ecosound.core.spectrogram import Spectrogram
from ecosound.visualization.grapher_builder import GrapherFactory


def plot_spectrogram(audio_file, loc, t1_sec, t2_sec, geometry=(1, 1, 1)):
    # Spectrogram parameters (frame, nfft, and step are in seconds)
    fmin = 0
    fmax = 1000
    frame = 0.0625
    window_type = 'hann'
    nfft = 0.0853
    step = 0.01
    channel = 0
    chunk = [t1_sec, t2_sec]
    
    graph_spectros = GrapherFactory('SoundPlotter', title='Spectrograms', frequency_max=fmax)
    sound = Sound(audio_file)
    sound.read(channel=channel, chunk=chunk, unit='sec', detrend=True)
    # Compute the spectrogram
    spectro = Spectrogram(frame, window_type, nfft, step, sound.waveform_sampling_frequency, unit='sec')
    spectro.compute(sound, dB=True, use_dask=False)
    # Crop unused frequencies
    spectro.crop(frequency_min=fmin, frequency_max=fmax, inplace=True)
    # Plot
    graph_spectros.add_data(spectro)    
    
    #graph_spectros.add_annotation(loc, panel=0, color='burlywood',label='Detections')
    graph_spectros.add_annotation(loc, panel=0, color='peachpuff')
    
    graph_spectros.colormap = 'binary' #'jet'
    fig, ax = graph_spectros.show()

    if ax.get_geometry() != geometry :
        ax.change_geometry(*geometry)        
    return fig, ax
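
# A minimal usage sketch for plot_spectrogram; the file path and `loc` are
# hypothetical (`loc` is expected to be an ecosound annotation object, as
# accepted by add_annotation).
# fig, ax = plot_spectrogram('recording.wav', loc=detections,
#                            t1_sec=10, t2_sec=20, geometry=(2, 1, 1))
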
def plot_data(audio_files, frame, window_type, nfft, step, fmin, fmax,
              chunk=None, detections=None, detections_channel=0):
    graph_spectros = GrapherFactory('SoundPlotter', title='Spectrograms', frequency_max=fmax)
    graph_waveforms = GrapherFactory('SoundPlotter', title='Waveforms')
    for audio_file, channel in zip(audio_files['path'], audio_files['channel']):  # for each channel
        # load waveform
        sound = Sound(audio_file)
        sound.read(channel=channel, chunk=chunk, unit='sec', detrend=True)
        # Compute the spectrogram
        spectro = Spectrogram(frame, window_type, nfft, step, sound.waveform_sampling_frequency, unit='sec')
        spectro.compute(sound, dB=True, use_dask=False)
        # Crop unused frequencies
        spectro.crop(frequency_min=fmin, frequency_max=fmax, inplace=True)
        # Plot (offset by the chunk start time, if a chunk was specified)
        time_offset_sec = chunk[0] if chunk else 0
        graph_spectros.add_data(spectro, time_offset_sec=time_offset_sec)
        graph_waveforms.add_data(sound, time_offset_sec=time_offset_sec)

    graph_spectros.colormap = 'binary'
    if detections:
        graph_spectros.add_annotation(detections, panel=detections_channel, color='green',label='Detections')
        graph_waveforms.add_annotation(detections, panel=detections_channel, color='green',label='Detections')

    if chunk:
        graph_spectros.time_min = chunk[0]
        graph_spectros.time_max = chunk[1]
        graph_waveforms.time_min = chunk[0]
        graph_waveforms.time_max = chunk[1]

    graph_spectros.show()
    graph_waveforms.show()
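
# A hypothetical call illustrating the inputs plot_data expects: `audio_files`
# is a dict-like with parallel 'path' and 'channel' entries (one pair per
# panel), and `detections` is an ecosound annotation object.
# audio_files = {'path': ['hydrophone1.wav', 'hydrophone2.wav'], 'channel': [0, 0]}
# plot_data(audio_files, frame=0.0625, window_type='hann', nfft=0.0853,
#           step=0.01, fmin=0, fmax=1000, chunk=[10, 20],
#           detections=detections, detections_channel=0)
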
def stack_waveforms(audio_files, detec, TDOA_max_sec):
    waveform_stack = []
    for audio_file, channel in zip(audio_files['path'], audio_files['channel']):  # for each channel
        # load waveform
        chan_wav = Sound(audio_file)
        chan_wav.read(channel=channel,
                      chunk=[detec['time_min_offset']-TDOA_max_sec, detec['time_max_offset']+TDOA_max_sec],
                      unit='sec',
                      detrend=True)
        # bandpass filter
        chan_wav.filter('bandpass', [detec['frequency_min'], detec['frequency_max']])
        # stack
        waveform_stack.append(chan_wav.waveform)
    return waveform_stack
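
# Sketch of how stack_waveforms might be used: `detec` is one detection
# (a dict-like row with time and frequency bounds), and the stacked, aligned
# waveforms can then be cross-correlated to estimate time differences of
# arrival (TDOAs). Values below are hypothetical.
# detec = {'time_min_offset': 12.3, 'time_max_offset': 12.8,
#          'frequency_min': 50, 'frequency_max': 800}
# waveform_stack = stack_waveforms(audio_files, detec, TDOA_max_sec=0.02)
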
Example #4
# Assumed import paths for the ecosound library and standard modules.
import os
import platform

import numpy as np

import ecosound.core.tools
from ecosound.core.audiotools import Sound
from ecosound.core.spectrogram import Spectrogram
from ecosound.detection.detector_builder import DetectorFactory
from ecosound.measurements.measurer_builder import MeasurerFactory


def run_detector(infile, outdir, classif_model=None, deployment_file=None):
    ## Input parameters ##########################################################

    # Spectrogram parameters
    frame = 0.0625  #3000
    nfft = 0.0853  # 4096
    step = 0.01  # 5
    fmin = 0
    fmax = 1000
    window_type = 'hann'

    # start and stop time of wavfile to analyze
    #t1 = 0 # 24
    #t2 = 60 # 40
    ## ###########################################################################
    outfile = os.path.join(outdir, os.path.split(infile)[1] + '.nc')

    if not os.path.exists(outfile):
        # load audio data
        sound = Sound(infile)
        #sound.read(channel=0, chunk=[t1, t2], unit='sec')
        sound.read(channel=0, unit='sec')
        # Compute the spectrogram
        print('Spectrogram')
        spectro = Spectrogram(frame,
                              window_type,
                              nfft,
                              step,
                              sound.waveform_sampling_frequency,
                              unit='sec')
        spectro.compute(sound, dB=True, use_dask=True, dask_chunks=100)
        # Crop unused frequencies
        spectro.crop(frequency_min=fmin, frequency_max=fmax, inplace=True)
        # Denoise
        print('Denoise')
        spectro.denoise(
            'median_equalizer',
            window_duration=3,
            use_dask=True,
            dask_chunks=(50, 50000),  #'auto',#(87,10000),
            inplace=True)
        # Detector
        print('Detector')
        file_timestamp = ecosound.core.tools.filename_to_datetime(infile)[0]
        detector = DetectorFactory('BlobDetector',
                                   kernel_duration=0.1,
                                   kernel_bandwidth=300,
                                   threshold=10,
                                   duration_min=0.05,
                                   bandwidth_min=40)
        detections = detector.run(
            spectro,
            start_time=file_timestamp,
            use_dask=True,
            dask_chunks=(4096, 50000),  #'auto',
            debug=False)
        # Measurements
        print('Measurements')
        spectro_features = MeasurerFactory('SpectrogramFeatures',
                                           resolution_time=0.001,
                                           resolution_freq=0.1,
                                           interp='linear')
        measurements = spectro_features.compute(spectro,
                                                detections,
                                                debug=False,
                                                verbose=False,
                                                use_dask=True)

        # Add metadata
        if deployment_file:
            measurements.insert_metadata(deployment_file)

        # Add file informations
        file_name = os.path.splitext(os.path.basename(infile))[0]
        file_dir = os.path.dirname(infile)
        file_ext = os.path.splitext(infile)[1]
        measurements.insert_values(
            operator_name=platform.uname().node,
            audio_file_name=file_name,
            audio_file_dir=file_dir,
            audio_file_extension=file_ext,
            audio_file_start_date=ecosound.core.tools.filename_to_datetime(
                infile)[0])

        # Classification
        print('Classification')
        data = measurements.data  # defined outside the if so the sort below works without a model
        if classif_model:
            features = classif_model['features']
            model = classif_model['model']
            Norm_mean = classif_model['normalization_mean']
            Norm_std = classif_model['normalization_std']
            classes_encoder = classif_model['classes']
            n1 = len(data)
            # drop observations/rows with NaNs
            data = data.replace([np.inf, -np.inf], np.nan)
            data.dropna(subset=features,
                        axis=0,
                        how='any',
                        thresh=None,
                        inplace=True)
            n2 = len(data)
            print('Deleted observations (due to NaNs): ' + str(n1 - n2))
            # Classification - predictions
            X = data[features]
            X = (X - Norm_mean) / Norm_std
            pred_class = model.predict(X)
            pred_prob = model.predict_proba(X)
            pred_prob = pred_prob[range(0, len(pred_class)), pred_class]
            # Relabel
            for index, row in classes_encoder.iterrows():
                pred_class = [
                    row['label'] if i == row['ID'] else i for i in pred_class
                ]
            # update measurements
            data['label_class'] = pred_class
            data['confidence'] = pred_prob

        # sort detections by ascending start date/time
        data.sort_values('time_min_offset',
                         axis=0,
                         ascending=True,
                         inplace=True)
        # save result as NetCDF file
        print('Saving')
        measurements.data = data
        measurements.to_netcdf(outfile)
    else:
        print('Recording already processed.')
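
# A minimal batch-processing sketch, assuming a folder of .wav files; the
# paths below are hypothetical.
# indir = r'C:\data\audio'
# for f in os.listdir(indir):
#     if f.endswith('.wav'):
#         run_detector(os.path.join(indir, f), r'C:\data\results')
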
Example #5
# Assumed imports; `single_channel_file` is defined earlier in the original script.
import time

from ecosound.core.audiotools import Sound
from ecosound.core.spectrogram import Spectrogram

# Spectrogram parameters (in samples, since unit='samp' below)
frame = 3000  # assumed value; `frame` was undefined in the original snippet
nfft = 4096
step = 500
#ovlp = 2500
fmin = 0
fmax = 1000
window_type = 'hann'

# start and stop time of wavfile to analyze
t1 = 1
t2 = 50
## ###########################################################################
tic = time.perf_counter()

# load audio data
sound = Sound(single_channel_file)
sound.read(channel=0, chunk=[t1, t2], unit='sec', detrend=True)

# Compute the spectrogram
spectro = Spectrogram(frame,
                      window_type,
                      nfft,
                      step,
                      sound.waveform_sampling_frequency,
                      unit='samp')
spectro.compute(sound, dB=True, use_dask=True, dask_chunks=40)

# Crop unused frequencies
spectro.crop(frequency_min=fmin, frequency_max=fmax, inplace=True)

# Denoise
# The original call was truncated here; parameters below mirror Example #4.
spectro.denoise('median_equalizer',
                window_duration=3,
                use_dask=True,
                dask_chunks=(50, 50000),
                inplace=True)
Example #6
# Assumed import paths for the ecosound library and standard modules.
import datetime
import os
import platform

import ecosound.core.tools
from ecosound.core.audiotools import Sound
from ecosound.core.spectrogram import Spectrogram
from ecosound.detection.detector_builder import DetectorFactory


def run_detector(infile, channel, config, chunk=None, deployment_file=None):

    sound = Sound(infile)
    # load audio data
    if chunk:
        sound.read(channel=channel, chunk=chunk, unit='sec', detrend=True)
        time_offset_sec = chunk[0]
    else:
        sound.read(channel=channel, detrend=True)
        time_offset_sec = 0

    # Compute the spectrogram
    spectro = Spectrogram(config['SPECTROGRAM']['frame_sec'],
                          config['SPECTROGRAM']['window_type'],
                          config['SPECTROGRAM']['nfft_sec'],
                          config['SPECTROGRAM']['step_sec'],
                          sound.waveform_sampling_frequency,
                          unit='sec',
                          )
    spectro.compute(sound,
                    config['SPECTROGRAM']['dB'],
                    config['SPECTROGRAM']['use_dask'],
                    config['SPECTROGRAM']['dask_chunks'],)

    # Crop unused frequencies
    spectro.crop(frequency_min=config['SPECTROGRAM']['fmin_hz'],
                 frequency_max=config['SPECTROGRAM']['fmax_hz'],
                 inplace=True,
                 )
    # Denoise
    spectro.denoise(config['DENOISER']['denoiser_name'],
                    window_duration=config['DENOISER']['window_duration_sec'],
                    use_dask=config['DENOISER']['use_dask'],
                    dask_chunks=tuple(config['DENOISER']['dask_chunks']),
                    inplace=True)
    # Detector
    file_timestamp = ecosound.core.tools.filename_to_datetime(sound.file_full_path)[0]
    detector = DetectorFactory(config['DETECTOR']['detector_name'],
                               kernel_duration=config['DETECTOR']['kernel_duration_sec'],
                               kernel_bandwidth=config['DETECTOR']['kernel_bandwidth_hz'],
                               threshold=config['DETECTOR']['threshold'],
                               duration_min=config['DETECTOR']['duration_min_sec'],
                               bandwidth_min=config['DETECTOR']['bandwidth_min_hz']
                               )
    start_time = file_timestamp + datetime.timedelta(seconds=time_offset_sec)
    detections = detector.run(spectro,
                              start_time=start_time,
                              use_dask=config['DETECTOR']['use_dask'],
                              dask_chunks=tuple(config['DETECTOR']['dask_chunks']),
                              debug=False,
                              )
    # add time offset if only a section of the recording was analysed
    detections.data['time_min_offset'] = detections.data['time_min_offset'] + time_offset_sec
    detections.data['time_max_offset'] = detections.data['time_max_offset'] + time_offset_sec

    # add deployment metadata
    if deployment_file:
        detections.insert_metadata(deployment_file, channel=channel)

    # Add file information
    file_name = os.path.splitext(os.path.basename(sound.file_full_path))[0]
    file_dir = os.path.dirname(sound.file_full_path)
    file_ext = os.path.splitext(sound.file_full_path)[1]
    detections.insert_values(operator_name=platform.uname().node,
                             audio_file_name=file_name,
                             audio_file_dir=file_dir,
                             audio_file_extension=file_ext,
                             audio_file_start_date=ecosound.core.tools.filename_to_datetime(sound.file_full_path)[0]
                             )

    return detections
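
# Sketch of the config dict this function expects, inferred from the keys
# accessed above; the values shown are illustrative, not canonical.
example_config = {
    'SPECTROGRAM': {'frame_sec': 0.0625, 'window_type': 'hann',
                    'nfft_sec': 0.0853, 'step_sec': 0.01,
                    'fmin_hz': 0, 'fmax_hz': 1000,
                    'dB': True, 'use_dask': True, 'dask_chunks': 100},
    'DENOISER': {'denoiser_name': 'median_equalizer',
                 'window_duration_sec': 3,
                 'use_dask': True, 'dask_chunks': [50, 50000]},
    'DETECTOR': {'detector_name': 'BlobDetector',
                 'kernel_duration_sec': 0.1, 'kernel_bandwidth_hz': 300,
                 'threshold': 10, 'duration_min_sec': 0.05,
                 'bandwidth_min_hz': 40,
                 'use_dask': True, 'dask_chunks': [4096, 50000]},
}
# detections = run_detector('recording.wav', channel=0, config=example_config,
#                           chunk=[0, 60], deployment_file='deployment_info.csv')
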
Example #7
# Assumed import paths for the ecosound library and standard modules.
import logging
import os
import platform

import numpy as np

import ecosound.core.tools
from ecosound.core.audiotools import Sound
from ecosound.core.spectrogram import Spectrogram
from ecosound.detection.detector_builder import DetectorFactory
from ecosound.measurements.measurer_builder import MeasurerFactory


def run_detector(infile, outdir, classif_model, config, deployment_file=None,
                 extension=".wav", overwrite=False, netcdf=True, pamlab=False,
                 raven=False):
    """
    Run the fish sound detector.

        Parameters
        ----------
        infile : str
            Path of the audio file to process.
        outdir : str
            Path of the output folder where the results will be written.
        classif_model : str
            Path and name of the classification model to use (.sav pickle file)
        config : dict
            Dict with all parameters from the yaml file.
        deployment_file : str, optional
            Path and name of the csv file with all the deployment information.
            The default is None.
        extension : str, optional
            Extension of the audio files to process. The default is ".wav".
        overwrite : bool, optional
            If set to True, overwrites results (i.e. netcdf files) even if they
            already exist in the outdir folder. The default is False.
        netcdf : bool, optional
            If set to True, saves results as netcdf4 files (.nc).
            The default is True.
        pamlab : bool, optional
            If set to True, saves results as PAMlab files (.log).
            The default is False.
        raven : bool, optional
            If set to True, saves results as Raven files (.txt).
            The default is False.

        Returns
        -------
        None.

    """
    outfile = os.path.join(outdir, os.path.split(infile)[1] + '.nc')
    if not os.path.exists(outfile) or overwrite:
        # load audio data
        sound = Sound(infile)
        sound.read(channel=config['AUDIO']['channel'], unit='sec')
        # Compute the spectrogram
        print('Spectrogram')
        spectro = Spectrogram(config['SPECTROGRAM']['frame_sec'],
                              config['SPECTROGRAM']['window_type'],
                              config['SPECTROGRAM']['nfft_sec'],
                              config['SPECTROGRAM']['step_sec'],
                              sound.waveform_sampling_frequency,
                              unit='sec',
                              )
        spectro.compute(sound,
                        config['SPECTROGRAM']['dB'],
                        config['SPECTROGRAM']['use_dask'],
                        config['SPECTROGRAM']['dask_chunks'],
                        )
        # Crop unused frequencies
        spectro.crop(frequency_min=config['SPECTROGRAM']['fmin_hz'],
                     frequency_max=config['SPECTROGRAM']['fmax_hz'],
                     inplace=True,
                     )
        # Denoise
        print('Denoise')
        spectro.denoise(config['DENOISER']['denoiser_name'],
                        window_duration=config['DENOISER']['window_duration_sec'],
                        use_dask=config['DENOISER']['use_dask'],
                        dask_chunks=tuple(config['DENOISER']['dask_chunks']),
                        inplace=True)
        # Detector
        print('Detector')
        file_timestamp = ecosound.core.tools.filename_to_datetime(infile)[0]
        detector = DetectorFactory(config['DETECTOR']['detector_name'],
                                   kernel_duration=config['DETECTOR']['kernel_duration_sec'],
                                   kernel_bandwidth=config['DETECTOR']['kernel_bandwidth_hz'],
                                   threshold=config['DETECTOR']['threshold'],
                                   duration_min=config['DETECTOR']['duration_min_sec'],
                                   bandwidth_min=config['DETECTOR']['bandwidth_min_hz']
                                   )
        detections = detector.run(spectro,
                                  start_time=file_timestamp,
                                  use_dask=config['DETECTOR']['use_dask'],
                                  dask_chunks=tuple(config['DETECTOR']['dask_chunks']),
                                  debug=False,
                                  )
        # Measurements
        print('Measurements')
        spectro_features = MeasurerFactory(config['MEASURER']['measurer_name'],
                                           resolution_time=config['MEASURER']['resolution_time_sec'],
                                           resolution_freq=config['MEASURER']['resolution_freq_hz'],
                                           interp=config['MEASURER']['interp'],
                                           )
        measurements = spectro_features.compute(spectro,
                                                detections,
                                                debug=False,
                                                verbose=False,
                                                use_dask=config['MEASURER']['use_dask'])

        # Add metadata
        if deployment_file:
            measurements.insert_metadata(deployment_file)
        else:
            measurements.insert_values(audio_channel=0,
                                       UTC_offset=0,
                                       audio_sampling_frequency=0,
                                       audio_bit_depth=0,
                                       mooring_platform_name='',
                                       recorder_type='',
                                       recorder_SN='',
                                       hydrophone_model='',
                                       hydrophone_SN='',
                                       hydrophone_depth=0,
                                       location_name='',
                                       location_lat=0,
                                       location_lon=0,
                                       location_water_depth=0,
                                       deployment_ID='',
                                       )

        # Add file information
        file_name = os.path.splitext(os.path.basename(infile))[0]
        file_dir = os.path.dirname(infile)
        file_ext = os.path.splitext(infile)[1]
        measurements.insert_values(operator_name=platform.uname().node,
                                   audio_file_name=file_name,
                                   audio_file_dir=file_dir,
                                   audio_file_extension=file_ext,
                                   audio_file_start_date=ecosound.core.tools.filename_to_datetime(infile)[0]
                                   )
        # Classification
        print('Classification')
        data = measurements.data  # defined outside the if so the sort below works without a model
        if classif_model:
            features = classif_model['features']
            model = classif_model['model']
            Norm_mean = classif_model['normalization_mean']
            Norm_std = classif_model['normalization_std']
            classes_encoder = classif_model['classes']
            n1 = len(data)
            # drop observations/rows with NaNs
            data = data.replace([np.inf, -np.inf], np.nan)
            data.dropna(subset=features,
                        axis=0,
                        how='any',
                        thresh=None,
                        inplace=True)
            n2 = len(data)
            print('Deleted observations (due to NaNs): ' + str(n1-n2))
            # Classification - predictions
            X = data[features]
            X = (X-Norm_mean)/Norm_std
            pred_class = model.predict(X)
            pred_prob = model.predict_proba(X)
            pred_prob = pred_prob[range(0, len(pred_class)), pred_class]
            # Relabel
            for index, row in classes_encoder.iterrows():
                pred_class = [row['label'] if i == row['ID'] else i for i in pred_class]
            # update measurements
            data['label_class'] = pred_class
            data['confidence'] = pred_prob
        # sort detections by ascending start date/time
        data.sort_values('time_min_offset',
                         axis=0,
                         ascending=True,
                         inplace=True)
        # save result as NetCDF file
        print('Saving')
        measurements.data = data
        if netcdf:
            measurements.to_netcdf(outfile)
        if pamlab:
            measurements.to_pamlab(outdir)
        if raven:
            measurements.to_raven(outdir)

    else:
        print('Recording already processed.')
        logging.info('Recording already processed.')
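
# Sketch of how the classification model might be loaded and passed in; the
# pickle is assumed to hold the dict keys read above ('features', 'model',
# 'normalization_mean', 'normalization_std', 'classes'). Paths are hypothetical.
# import pickle
# with open(r'C:\models\fish_model.sav', 'rb') as f:
#     classif_model = pickle.load(f)
# run_detector(infile, outdir, classif_model, config,
#              deployment_file=r'C:\data\deployment_info.csv', netcdf=True)
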
Example #8
# Assumed imports; `single_channel_file` is defined earlier in the original script.
import time

from ecosound.core.audiotools import Sound
from ecosound.core.spectrogram import Spectrogram

# Spectrogram parameters (in samples, since unit='samp' below)
frame = 3000  # assumed value; `frame` was undefined in the original snippet
nfft = 4096
step = 500
#ovlp = 2500
fmin = 0
fmax = 1000
window_type = 'hann'

# start and stop time of wavfile to analyze
t1 = 1515
t2 = 1541
## ###########################################################################
tic = time.perf_counter()

# load audio data
sound = Sound(single_channel_file)
fs = sound.waveform_sampling_frequency
sound.read(channel=0, chunk=[round(t1 * fs), round(t2 * fs)])

# Compute the spectrogram
Spectro = Spectrogram(frame, window_type, nfft, step, fs, unit='samp')
Spectro.compute(sound, dB=True, use_dask=True)  # use_dask, as in the other examples

toc = time.perf_counter()
print(f"Executed in {toc - tic:0.4f} seconds")

# # Plot
# graph = GrapherFactory('SoundPlotter', title='Recording', frequency_max=1000)
# graph.add_data(sound)
# graph.add_data(Spectro)
# graph.colormap = 'jet'
# graph.show()
Example #9

# Assumed imports; the spectro_loop* helpers are custom functions defined
# elsewhere in the original script.
import numpy as np

from ecosound.core.audiotools import Sound

if __name__ == '__main__':

    # # Create random signal
    # fs = 48000
    # sig_dur = 60#1800
    # sig = np.random.rand(sig_dur*fs)

    single_channel_file = r"../ecosound/resources/67674121.181018013806.wav"

    t1 = 24
    t2 = 120
    sound = Sound(single_channel_file)
    #sound.read(channel=0, chunk=[t1, t2], unit='sec')
    sound.read(channel=0)
    fs = sound.file_sampling_frequency
    sig = sound.waveform
    sig = sig - np.mean(sig)

    # Compute the spectrogram
    frame_samp = 3000
    overlap_samp = 2500
    fft_samp = 4096

    #S1 = spectro_numpy(sig, fs, frame_samp, overlap_samp, fft_samp)
    #S2 = spectro_loop(sig, fs, frame_samp, overlap_samp, fft_samp)
    #S3 = spectro_loop_dask(sig, fs, frame_samp, overlap_samp, fft_samp)
    # The original call was truncated here; the remaining arguments are assumed
    # to match the spectro_loop variants above.
    F, T, S3 = spectro_loop_dask2(sig,
                                  fs,
                                  frame_samp,
                                  overlap_samp,
                                  fft_samp)
Example #10
"""
@author: xavier.mouy
"""
import sys
sys.path.append("..") # Adds higher directory to python modules path.
from ecosound.core.audiotools import Sound, Filter

#single_channel_file = r"../ecosound/resources/AMAR173.4.20190916T061248Z.wav"
multi_channel_file = r"../ecosound/resources/671404070.190722162836.wav"


# load part of the file and plot
print('------------------------------')
#sig = Sound() # should return error
sig = Sound(multi_channel_file)
print(len(sig))
sig.read(channel=0, chunk=[10, 1000])
print('------------------------------')
print(len(sig))
print('start sample: ', sig.waveform_start_sample)
print('stop sample: ', sig.waveform_stop_sample)
print('duration: ', sig.waveform_duration_sample)
print(len(sig))
sig.plot_waveform(newfig=True)

# extract a snippet from the data
sig2 = sig.select_snippet([100, 1000])
sig2.plot_waveform(newfig=True)
print('------------------------------')
print('start sample: ', sig2.waveform_start_sample)
print('stop sample: ', sig2.waveform_stop_sample)
print('duration: ', sig2.waveform_duration_sample)