                           mode_bin='relative', bin_std=8, bin_per=0.5,
                           verbose=False, display=False)

# Finally, we put together pixels that belong to the same acoustic event, and
# remove very small events (<= 25 pixel²)
im_rois, df_rois = rois.select_rois(im_mask, min_roi=25, max_roi=None,
                                    display=True, **{'extent': ext})

# Format the dataframe df_rois in order to convert pixels into time and frequency
df_rois = format_features(df_rois, tn, fn)

# Overlay the bounding boxes on the original spectrogram
ax0, fig0 = rois.overlay_rois(Sxx_db, df_rois,
                              **{'vmin': 0, 'vmax': dB_max, 'extent': ext})

# Compute and visualize centroids
df_centroid = features.centroid_features(Sxx_db, df_rois, im_rois)
df_centroid = format_features(df_centroid, tn, fn)
ax0, fig0 = features.overlay_centroid(Sxx_db, df_centroid, savefig=None, **{
# load/spectrogram and the feature helpers are imported here so the script is
# self-contained; plot_shape and overlay_centroid follow the maad.features
# namespace used elsewhere in this document (they live in maad.util in
# recent scikit-maad releases)
from maad.sound import load, spectrogram
from maad.features import shape_features, plot_shape, centroid_features, overlay_centroid
from maad.util import read_audacity_annot, linear_scale, format_features, get_unimode, running_mean
from maad.rois import overlay_rois, create_mask, select_rois, find_rois_cwt, remove_background, median_equalizer
from skimage import morphology
import numpy as np
import pandas as pd

###=============== load audio =================
s, fs = load('./data/spinetail.wav')
rois = read_audacity_annot('./data/spinetail.txt')  # annotations using Audacity

###=============== compute spectrogram =================
Sxx, tn, fn, ext = spectrogram(s, fs)
Sxx = 10 * np.log10(Sxx)
rois = format_features(rois, tn, fn)

###=============== from Audacity =================
### with all labels
ax, fig = overlay_rois(Sxx, ext, rois, vmin=-120, vmax=20)

# Compute and visualize features
shape, params = shape_features(Sxx, resolution='low', rois=rois)
plot_shape(shape.mean(), params)

# Compute and visualize centroids
centroid = centroid_features(Sxx, rois)
centroid = format_features(centroid, tn, fn)
ax, fig = overlay_centroid(Sxx, ext,
import numpy as np
import matplotlib.pyplot as plt
from sklearn.decomposition import PCA
from sklearn import preprocessing

s, fs = load('../data/spinetail.wav')
rois_tf = read_audacity_annot('../data/spinetail.txt')  # annotations using Audacity
rois_cr = rois_tf.loc[rois_tf.label == 'CRER']
rois_sp = rois_tf.loc[rois_tf.label == 'SP']

Sxx_power, ts, f, ext = spectrogram(s, fs)
Sxx_dB = power2dB(Sxx_power, db_range=90) + 96

# Visualize large vocalizations
rois_cr = format_features(rois_cr, ts, f)
overlay_rois(Sxx_dB, rois_cr, **{'extent': ext, 'vmin': 0, 'vmax': 80})

# Visualize short vocalizations
rois_sp = format_features(rois_sp, ts, f)
overlay_rois(Sxx_dB, rois_sp, **{'extent': ext, 'vmin': 0, 'vmax': 80})

# Compute and visualize features
shape_cr, params = shape_features(Sxx_dB, resolution='med', rois=rois_cr)
ax = plot_shape(shape_cr.mean(), params)
shape_sp, params = shape_features(Sxx_dB, resolution='med', rois=rois_sp)
ax = plot_shape(shape_sp.mean(), params)

######## Simple clustering with PCA
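# The lines below are a minimal sketch of this clustering step, not the
# original code: they assume the wavelet coefficients returned by
# shape_features are the columns prefixed with 'shp', and that shape_cr and
# shape_sp still hold the per-ROI features computed above.
import pandas as pd

# Stack the features of both vocalization types and keep a label per ROI
shape_all = pd.concat([shape_cr, shape_sp], ignore_index=True)
labels = np.array(['CRER'] * len(shape_cr) + ['SP'] * len(shape_sp))
X = shape_all.loc[:, shape_all.columns.str.startswith('shp')].values

# Standardize the features and project them onto the first two principal axes
X_std = preprocessing.StandardScaler().fit_transform(X)
Y = PCA(n_components=2).fit_transform(X_std)

# Scatter plot of the ROIs in PCA space, colored by their Audacity label.
# If the shape descriptors discriminate between the two call types, the two
# groups should separate along the first components.
fig, ax = plt.subplots()
for label in ['CRER', 'SP']:
    ax.scatter(Y[labels == label, 0], Y[labels == label, 1], label=label, alpha=0.7)
ax.set_xlabel('PC1')
ax.set_ylabel('PC2')
ax.legend()
plt.show()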
#%%
# 1. Find regions of interest
# ---------------------------
# To find regions of interest in the spectrogram, we will remove stationary
# background noise and then find isolated sounds using a double-threshold
# method. Small ROIs caused by noise in the signal will be removed.

Sxx_db_rmbg, _, _ = sound.remove_background(Sxx_db)
Sxx_db_smooth = sound.smooth(Sxx_db_rmbg, std=1.2)
im_mask = rois.create_mask(im=Sxx_db_smooth, mode_bin='relative',
                           bin_std=2, bin_per=0.25)
im_rois, df_rois = rois.select_rois(im_mask, min_roi=50, max_roi=None)

# Format ROIs and visualize the bounding boxes on the audio spectrogram.
df_rois = format_features(df_rois, tn, fn)
ax0, fig0 = overlay_rois(Sxx_db, df_rois,
                         **{'vmin': 0, 'vmax': 60, 'extent': ext})

#%%
# 2. Compute acoustic features
# ----------------------------
# The ``shape_features`` function uses bidimensional wavelets to get the
# texture and spectro-temporal shape coefficients of each ROI. Wavelets have
# the advantage of being robust when the signal-to-noise ratio is low and of
# deriving homogeneous descriptors, which facilitates the clustering process.
# The wavelet decomposition is performed on the complete spectrogram, hence
# the coefficients for ROIs do not vary much even when the time-frequency
# bounds are not exact. The centroid features give an estimate of the median
# frequency of the ROIs.

df_shape, params = features.shape_features(Sxx_db, resolution='low',
                                           rois=df_rois)
df_centroid = features.centroid_features(Sxx_db, df_rois)
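#%%
# A hedged sketch, not part of the original example: one way to assemble the
# feature matrix that a clustering step could consume. It assumes the column
# conventions used elsewhere in this document ('shp_*' for the wavelet
# coefficients, 'centroid_f' once format_features converts pixels to Hz), and
# the number of clusters is an arbitrary illustrative choice.

from sklearn.preprocessing import StandardScaler
from sklearn.cluster import KMeans

# Convert the centroid from pixel coordinates to time/frequency
df_centroid = format_features(df_centroid, tn, fn)

# Keep the wavelet shape coefficients and add the median-frequency estimate
X = df_shape.loc[:, df_shape.columns.str.startswith('shp')].copy()
X['centroid_f'] = df_centroid['centroid_f']

# Standardize so that no single descriptor dominates the distance metric,
# then group the ROIs into a small number of putative sound types
X_std = StandardScaler().fit_transform(X)
df_rois['cluster'] = KMeans(n_clusters=3, n_init=10, random_state=0).fit_predict(X_std)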
                           bin_std=6, bin_per=0.5, verbose=False,
                           display=False)

# Finally, we put together pixels that belong to the same acoustic event, and
# remove very small events (<= 25 pixel²)
im_rois, df_rois = rois.select_rois(im_mask, min_roi=25, max_roi=None,
                                    ext=ext, display=False,
                                    figsize=(4, (t1 - t0)))

# Format the dataframe df_rois in order to convert pixels into time and frequency
df_rois = format_features(df_rois, tn, fn)

# Overlay the bounding boxes on the original spectrogram
ax, fig = rois.overlay_rois(Sxx_db, ext, df_rois, vmin=0, vmax=96)

# Compute and visualize centroids
df_centroid = features.centroid_features(Sxx_db, df_rois, im_rois)
df_centroid = format_features(df_centroid, tn, fn)
ax, fig = features.overlay_centroid(Sxx_db, ext, df_centroid, savefig=None,
                                    vmin=0, vmax=96, marker='+', ms=2,