# Example 1
def reduce_noise(click, url, select_data):
    """Denoise the stored wave file using the selected regions as the noise profile.

    Args:
        click: click-event payload (unused; required by the callback signature).
        url: filename of the audio file under ``TEMP_STORAGE``.
        select_data: selection payload whose points carry ``(start_s, end_s)``
            onset pairs in their ``'customdata'``.

    Returns:
        1 after the denoised audio has been written back in place; ``None``
        when either input is missing (callback no-op).

    Raises:
        PreventUpdate: when the selection contains no points, since an empty
            noise profile cannot be built.
    """
    if url is not None and select_data is not None:
        fs, y = audio_io.read_wave_local(TEMP_STORAGE + url, as_float=True)
        onsets = [point['customdata'] for point in select_data['points']]
        # Guard: np.concatenate raises ValueError on an empty list.
        if not onsets:
            raise PreventUpdate
        noises = [
            y[int(start_s * fs):int(end_s * fs)] for start_s, end_s in onsets
        ]
        noises = np.concatenate(noises)
        y = nr.reduce_noise(audio_clip=y, noise_clip=noises)
        # Overwrite the original file so downstream callbacks see the result.
        audio_io.save_wav(y, fs, path=TEMP_STORAGE + url)
        return 1
# Example 2
def full_spectrogram_graph(select_data, url, selection, n_clicks, bandpass,
                           features, fig):
    """Build or update the full-file spectrogram figure.

    Branches, in priority order:
      1. A selection on the embedding graph overlays red shapes on the
         current figure at the selected onsets.
      2. An x-axis zoom (relayout) re-renders the cached spectrogram for
         the zoomed time range.
      3. A cached spectrum exists (and 'apply' was not clicked): re-render
         the cached spectrogram in full.
      4. Otherwise compute the spectrogram from the (band-pass filtered)
         audio and cache it to disk for the branches above.

    Args:
        select_data: selectedData payload from the embedding graph.
        url: filename of the audio file under ``TEMP_STORAGE``.
        selection: relayoutData payload carrying ``xaxis.range[...]`` keys.
        n_clicks: apply-button click count (unused directly; triggers recompute).
        bandpass: ``(lowcut, highcut)`` frequency pair in Hz.
        features: unused here; part of the callback signature.
        fig: current figure state, mutated in branch 1.

    Returns:
        A figure dict/object for the spectrogram graph.

    Raises:
        PreventUpdate: when no file is loaded or the event was only an
            autosize relayout.
    """
    if url is not None:
        if relayout_autosize_triggered():
            raise PreventUpdate
        # Cache paths for the precomputed spectrum/time arrays.
        temp_path = f'/tmp/{os.path.splitext(url)[0]}'
        spectrum_path = temp_path + '_spectrum.npy'
        time_path = temp_path + '_time.npy'

        if select_data and event_triggered('embedding-graph.selectedData'):
            # Current visible time range comes from the existing figure data.
            start = fig['data'][0]['x'][0]
            end = fig['data'][0]['x'][-1]
            onsets = [point['customdata'] for point in select_data['points']]
            shapes = visualize.shapes_from_onsets(onsets,
                                                  x_min=start,
                                                  x_max=end,
                                                  color='red')
            fig['layout']['shapes'] = shapes
        elif selection is not None and 'xaxis.range[0]' in selection and 'xaxis.range[1]' in selection:
            # User zoomed: redraw the cached spectrogram for the new range.
            start = selection['xaxis.range[0]']
            end = selection['xaxis.range[1]']
            Sxx = np.load(spectrum_path)
            time = np.load(time_path)
            fig = visualize.spectrogram_shaded(S=Sxx,
                                               time=time,
                                               fs=SAMPLING_RATE,
                                               start_time=start,
                                               end_time=end)
        elif os.path.exists(spectrum_path
                            ) and not event_triggered('apply-button.n_clicks'):
            # Cached spectrum available and no explicit recompute requested.
            Sxx = np.load(spectrum_path)
            time = np.load(time_path)
            fig = visualize.spectrogram_shaded(S=Sxx,
                                               time=time,
                                               fs=SAMPLING_RATE)
        else:
            # Full recompute: filter the audio, compute and cache the spectrogram.
            fs, y = audio_io.read_wave_local(TEMP_STORAGE + url)
            lowcut, highcut = bandpass
            y = filters.frequency_filter(y,
                                         fs=SAMPLING_RATE,
                                         lowcut=lowcut,
                                         highcut=highcut)
            freq, time, Sxx = visualize.calculate_spectrogram(y,
                                                              fs,
                                                              backend='yaafe')
            np.save(spectrum_path, Sxx)
            np.save(time_path, time)
            fig = visualize.spectrogram_shaded(S=Sxx,
                                               time=time,
                                               fs=SAMPLING_RATE)

        return fig
    else:
        raise PreventUpdate
# Example 3
def display_click_image(click_data, select_data, n_clicks, url, bandpass,
                        fft_size):
    """Render either a spectrogram image for a clicked sample or a power
    spectrum for the current selection / whole file.

    Branches:
      * Click on the embedding graph: read just that sample (with margin),
        band-pass filter it, and return a base64-encoded spectrogram image.
      * Otherwise: read the selected parts (or the whole file when nothing
        is selected), filter, and return a power-spectrum graph.

    Args:
        click_data: clickData payload; point 'customdata' holds (start, end).
        select_data: selectedData payload; point 'customdata' holds onsets.
        n_clicks: apply-button click count (unused directly; callback trigger).
        url: filename of the audio file under ``TEMP_STORAGE``.
        bandpass: ``(lowcut, highcut)`` frequency pair in Hz.
        fft_size: FFT block size for the power-spectrum estimate.

    Returns:
        ``html.Img`` for the click branch, ``dcc.Graph`` for the spectrum branch.

    Raises:
        PreventUpdate: when no file is loaded or the selection is empty.
    """
    if url:
        lowcut, highcut = bandpass
        if click_data is not None and event_triggered(
                'embedding-graph.clickData'):
            start, end = click_data['points'][0]['customdata']
            # Pad the clip so the spectrogram shows context around the onset.
            wav = audio_io.read_wav_part_from_local(path=TEMP_STORAGE + url,
                                                    start_s=start -
                                                    AUDIO_MARGIN,
                                                    end_s=end + AUDIO_MARGIN)
            wav = filters.frequency_filter(wav,
                                           fs=SAMPLING_RATE,
                                           lowcut=lowcut,
                                           highcut=highcut)
            im = visualize.specgram_base64(y=wav,
                                           fs=SAMPLING_RATE,
                                           start=start,
                                           end=end,
                                           margin=AUDIO_MARGIN)

            return html.Img(src='data:image/png;base64, ' + im,
                            style={
                                'height': '25vh',
                                'display': 'block',
                                'margin': 'auto'
                            })
        else:
            if select_data is not None:
                onsets = [
                    point['customdata'] for point in select_data['points']
                ]
                if onsets:
                    wavs = audio_io.read_wav_parts_from_local(
                        path=TEMP_STORAGE + url, onsets=onsets, as_float=True)
                else:
                    raise PreventUpdate
            else:
                # No selection: analyze the entire file.
                fs, wavs = audio_io.read_wave_local(TEMP_STORAGE + url,
                                                    as_float=True)

            wavs = filters.frequency_filter(wavs,
                                            fs=SAMPLING_RATE,
                                            lowcut=lowcut,
                                            highcut=highcut)
            fig = visualize.power_spectrum(wavs,
                                           fs=SAMPLING_RATE,
                                           block_size=fft_size,
                                           scaling='spectrum',
                                           cutoff=-90)
            return dcc.Graph(id='spectrum', figure=fig)
    else:
        raise PreventUpdate
# Example 4
def plot_embeddings(filename, n_clicks, embedding_type, fftsize, bandpass,
                    onset_threshold, sample_len, neighbours,
                    selected_features):
    """Extract per-onset features from the audio file, embed them in 2-D,
    and build the scatter plot plus the feature table.

    Args:
        filename: audio filename under ``TEMP_STORAGE``.
        n_clicks: apply-button click count (unused directly; callback trigger).
        embedding_type: which embedding algorithm to use (passed through).
        fftsize: FFT block size for feature extraction.
        bandpass: ``(lowcut, highcut)`` frequency pair in Hz.
        onset_threshold: onset-detector threshold.
        sample_len: sample length in seconds; min duration is 50 ms shorter.
        neighbours: neighbour count forwarded to the embedding parameters.
        selected_features: feature names to extract.

    Returns:
        ``(figure, table_rows, message, message_style)``; on failure the
        figure is empty, rows are ``None`` and the message is the error text.

    Raises:
        PreventUpdate: when no file is loaded.
    """
    if filename is not None:
        filepath = TEMP_STORAGE + filename
        lowpass, highpass = bandpass
        # Allow a small tolerance below the requested sample length.
        min_duration = sample_len - 0.05
        fs, X = audio_io.read_wave_local(filepath, as_float=True)
        features = get(X,
                       fs,
                       n_jobs=1,
                       selected_features=selected_features,
                       lowcut=lowpass,
                       highcut=highpass,
                       block_size=fftsize,
                       onset_detector_type='hfc',
                       onset_silence_threshold=-90,
                       onset_threshold=onset_threshold,
                       min_duration_s=min_duration,
                       sample_len=sample_len)

        params = map_parameters(embedding_type, neighbours)

        style = {
            'display': 'inline-block',
            'margin-left': 'auto',
            'margin-right': '20px',
            'float': 'right'
        }

        try:
            embeddings, algo, msg = get_embeddings(
                data=features.drop(columns=['onset', 'offset']),
                type=embedding_type,
                n_jobs=1,
                **params)

            extra_data = ['onset', 'offset']
            # Hover text: a frequency line (when available) plus the interval.
            if 'freq_mean' in features:
                mean_freq = features['freq_mean'].astype(int).astype(
                    str) + ' Hz<br>'
            elif 'pitch_median' in features:
                mean_freq = features['pitch_median'].astype(int).astype(
                    str) + ' Hz<br>'
            else:
                mean_freq = ''
            interval = features['onset'].round(2).astype(
                str) + ' - ' + features['offset'].round(2).astype(str) + 's'
            text = mean_freq + interval
            figure = visualize.scatter_plot(x=embeddings[:, 0],
                                            y=embeddings[:, 1],
                                            customdata=features[extra_data],
                                            text=text)

            if msg is None:
                msg = f'Found {len(embeddings)} samples'
            else:
                # A message from the embedding step signals a warning.
                style['color'] = 'red'

            features.insert(0, 'id', features.index)
            # 'records' is the documented orient; the old 'rows' alias was
            # removed from pandas DataFrame.to_dict.
            return figure, features.round(2).to_dict(
                orient='records'), msg, style
        except Exception as ex:
            # Surface the failure in the UI instead of crashing the callback.
            style['color'] = 'red'
            return go.Figure(), None, str(ex), style
    else:
        raise PreventUpdate