Example No. 1
import numpy as np
# "ap" and "ar" are assumed to be the project's audio-processing and
# audio-recognition modules (e.g. import audio_process as ap); ST_WIN,
# ST_STEP, F1, F2, thres and MIN_VOC_DUR are module-level constants.


def load_spectrograms(filename):
    train_data = []
    spectrogram, sp_time, sp_freq, fs = ap.get_spectrogram(
        filename, ST_WIN, ST_STEP)
    # These should change depending on the signal's size
    spec_resize_ratio_freq = 4
    spec_resize_ratio_time = 4

    # clamp the band edges to the Nyquist frequency
    f_low = F1 if F1 < fs / 2.0 else fs / 2.0
    f_high = F2 if F2 < fs / 2.0 else fs / 2.0

    # define feature sequence for vocalization detection
    f1 = np.argmin(np.abs(sp_freq - f_low))
    f2 = np.argmin(np.abs(sp_freq - f_high))

    # frame-wise energy over the full band and over the band of interest
    spectral_energy_1 = spectrogram.sum(axis=1)
    spectral_energy_2 = spectrogram[:, f1:f2].sum(axis=1)
    seg_limits, thres_sm, _ = ap.get_syllables(spectral_energy_2,
                                               spectral_energy_1,
                                               ST_STEP,
                                               threshold_per=thres * 100,
                                               min_duration=MIN_VOC_DUR)

    train_data += (ar.cluster_syllables(seg_limits,
                                        spectrogram,
                                        sp_freq,
                                        f_low,
                                        f_high,
                                        ST_STEP,
                                        train=True))
    return train_data
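
A minimal driver sketch for load_spectrograms(); the constant values below are illustrative assumptions, not taken from the original project:

# hypothetical configuration (all values are assumptions)
ST_WIN, ST_STEP = 0.002, 0.002    # short-term window / step (sec)
F1, F2 = 30000, 110000            # frequency band of interest (Hz)
thres = 1.0                       # detection threshold factor
MIN_VOC_DUR = 0.005               # minimum vocalization duration (sec)

train_data = load_spectrograms("recording.wav")   # hypothetical file name
print(f"collected {len(train_data)} training items")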
Example No. 2
# Assumed imports for this snippet: sys, numpy as np and
# scipy.io.wavfile; "ap" is the project's audio-processing module, and
# all_data, outstr, count_mid_bufs and mid_buffer_size are globals set
# by the recording loop.
def signal_handler(sig, frame):
    """
    Called when Ctrl + C is pressed; writes the final recording buffer
    to a WAV file, runs syllable detection on it, and exits.
    """
    # write final buffer to wav file
    global fs
    if len(all_data) > 1:
        wavfile.write(outstr + ".wav", fs, np.int16(all_data))

    spectrogram, sp_time, sp_freq, fs = ap.get_spectrogram(
        outstr + ".wav", ST_WIN, ST_STEP)

    f_low = F1 if F1 < fs / 2.0 else fs / 2.0
    f_high = F2 if F2 < fs / 2.0 else fs / 2.0

    # define feature sequence for vocalization detection
    f1 = np.argmin(np.abs(sp_freq - f_low))
    f2 = np.argmin(np.abs(sp_freq - f_high))

    spectral_energy, means, max_values = ap.prepare_features(
        spectrogram[:, f1:f2])

    time_sec = 100
    seg_limits, thres_sm = ap.get_syllables(
        spectral_energy,
        means,
        max_values,
        ST_STEP,
        threshold_per=thres * 100,
        min_duration=MIN_VOC_DUR,
        threshold_buf=means,
    )

    # log segment limits, offset to the start of the whole recording
    with open("debug_offline.csv", "a") as fp:
        for s in seg_limits:
            fp.write(f'{count_mid_bufs * mid_buffer_size + s[0]},'
                     f'{count_mid_bufs * mid_buffer_size + s[1]}\n')

    sys.exit(0)
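
For context, a handler like this is installed with the standard library's signal module; a minimal registration sketch (the recording loop itself is omitted):

import signal

# route Ctrl + C (SIGINT) to the handler so the final buffer is flushed
signal.signal(signal.SIGINT, signal_handler)
signal.pause()  # block until a signal arrives (POSIX only)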
Example No. 3
                        "--ground_truth_file",
                        required=True,
                        nargs=None,
                        help="Ground truth file")
    return parser.parse_args()


if __name__ == "__main__":
    args = parse_arguments()

    thres = args.threshold
    spec_resize_ratio_freq = args.resize_freq
    spec_resize_ratio_time = args.resize_time

    # feature (spectrogram) extraction:
    spectrogram, sp_time, sp_freq, fs = ap.get_spectrogram(
        args.input_file, args.win, args.step)
    duration = spectrogram.shape[0] * args.step

    f_low = F1 if F1 < fs / 2.0 else fs / 2.0
    f_high = F2 if F2 < fs / 2.0 else fs / 2.0

    # define feature sequence for vocalization detection
    f1 = np.argmin(np.abs(sp_freq - f_low))
    f2 = np.argmin(np.abs(sp_freq - f_high))

    spectral_energy_1 = spectrogram.sum(axis=1)
    spectral_energy_2 = spectrogram[:, f1:f2].sum(axis=1)

    # call completed to mirror Example No. 1; the original snippet is
    # truncated at this point, and MIN_VOC_DUR is assumed to be a
    # module-level constant
    segs, thres_sm, spectral_ratio = ap.get_syllables(spectral_energy_2,
                                                      spectral_energy_1,
                                                      args.step,
                                                      threshold_per=thres * 100,
                                                      min_duration=MIN_VOC_DUR)
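
A hedged invocation sketch, assuming the snippet lives in a script named main.py (hypothetical) and that the flag spellings match the args.* attributes read above:

python main.py --input_file recording.wav --win 0.002 --step 0.002 \
    --threshold 1.0 --resize_freq 4 --resize_time 4 \
    --ground_truth_file labels.csv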
Example No. 4
                # hidden Rows act as intermediate value stores for Dash
                # callbacks (the original snippet begins mid-layout)
                dbc.Row(id='intermediate_val_total_clusters',
                        style={'display': 'none'}),
                dbc.Row(id='intermediate_val_clusters',
                        style={'display': 'none'}),
                dbc.Row(id='clustering_info', style={'display': 'none'})
            ],
            style={"height": "100vh"})
    return layout
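
The hidden Rows above follow a common Dash pattern: an invisible component acts as an intermediate value store, so one callback's output can feed another callback's input. A self-contained sketch of the pattern (ids and values are illustrative; assumes a recent Dash with app.run):

import dash
from dash import html, Input, Output

app = dash.Dash(__name__)
app.layout = html.Div([
    html.Button("compute", id="run"),
    # hidden component used as an intermediate value store
    html.Div(id="intermediate_val", style={"display": "none"}),
    html.Div(id="result"),
])


@app.callback(Output("intermediate_val", "children"),
              Input("run", "n_clicks"))
def compute(n_clicks):
    # stash a derived value in the hidden component
    return str((n_clicks or 0) * 42)


@app.callback(Output("result", "children"),
              Input("intermediate_val", "children"))
def show(value):
    return f"intermediate value: {value}"


app.run(debug=True)  # start the development server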


if __name__ == "__main__":
    args = parse_arguments()
    global sp_time, sp_freq, moves, click_index
    click_index = -1
    time_start = time.time()
    spectrogram, sp_time, sp_freq, fs = ap.get_spectrogram(
        args.input_file, ST_WIN, ST_STEP)

    with open(
            'annotations_eval_{}.json'.format(
                (args.input_file.split('/')[-1]).split('.')[0]),
            'w') as outfile:
        x = json.dumps(
            {
                'input_file': args.input_file.split('/')[-1],
                'total_cluster_annotations': [],
                'cluster_annotations': [],
                'point_annotations': []
            },
            indent=4)
        outfile.write(x)
    # These should change depending on the signal's size
    spec_resize_ratio_freq = 4
    spec_resize_ratio_time = 4
    # (completed from Example No. 1; the original snippet is truncated here)