Example 1
        with multiprocessing.Pool(nproc) as p:
            p.map(reverb_mix_partial, range(nproc))
    else:
        # Easy case without multiprocessing
        reverberate_and_mix(reverb_folder,
                            input_folder,
                            rir_folder,
                            mix_info,
                            part=0,
                            nparts=1,
                            chat=True)

    # Check the data (fix sox bug)
    examples_list = make_example_list(reverb_folder)
    for example in examples_list:
        check_and_correct_example(example,
                                  reverb_folder,
                                  check_length=True,
                                  fix_length=True,
                                  check_mix=True,
                                  fix_mix=True,
                                  sample_rate=sample_rate,
                                  duration=clip_duration)

    rev_subset_folder = osp.join(reverb_folder, subset)
    post_process_txt_labels(rev_subset_folder,
                            output_folder=rev_subset_folder,
                            output_tsv=out_tsv,
                            rm_nOn_nOff=True)
    print(time.time() - t)
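
A plausible construction of reverb_mix_partial for the multiprocessing branch above (a hedged sketch: the argument order of reverberate_and_mix is assumed from the single-process call, and the functools import is added here):

import functools

# Bind everything except the part index; p.map(reverb_mix_partial, range(nproc))
# then calls reverberate_and_mix once per partition (part is assumed to be the
# next positional parameter after mix_info).
reverb_mix_partial = functools.partial(reverberate_and_mix,
                                       reverb_folder,
                                       input_folder,
                                       rir_folder,
                                       mix_info,
                                       nparts=nproc,
                                       chat=False)
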
Example 2
            event_time_std,
            event_time_min,
            event_time_max,
        ),
        events_duration=(event_duration_dist, event_duration_min,
                         event_duration_max),
        snrs=(snr_dist, snr_min, snr_max),
        pitch_shifts=(pitch_dist, pitch_min, pitch_max),
        time_stretches=(time_stretch_dist, time_stretch_min, time_stretch_max),
        txt_file=True,
    )

    rm_high_polyphony(out_folder_500, 2)
    out_tsv = osp.join(out_tsv_folder, "500ms.tsv")
    post_process_txt_labels(out_folder_500,
                            output_folder=out_folder_500,
                            output_tsv=out_tsv)

    # Generate 2 variants of this dataset
    jams_to_modify = glob.glob(osp.join(out_folder_500, "*.jams"))
    # Be careful: if you change the added onset value, you may want to rerun the
    # annotation post-processing to make sure there is no inconsistency.

    # 5.5s onset files
    out_folder_5500 = osp.join(base_out_folder, "5500ms")
    add_onset = 5.0
    modif_onset_5s = functools.partial(modify_fg_onset,
                                       slice_seconds=add_onset)
    modify_jams(jams_to_modify, modif_onset_5s, out_folder_5500)
    # We also need to generate a new DataFrame with the right (shifted) values.
    df = pd.read_csv(out_tsv, sep="\t")
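
A hedged continuation of the DataFrame step above (the onset/offset column names and the 10 s clip duration are assumptions about the TSV schema):

# Shift annotations by the same onset added to the audio, drop events that now
# start past the assumed 10 s clip end, clip offsets, and write the 5500ms TSV.
df_5500 = df.copy()
df_5500["onset"] = df_5500["onset"] + add_onset
df_5500["offset"] = (df_5500["offset"] + add_onset).clip(upper=10.0)
df_5500 = df_5500[df_5500["onset"] < 10.0]
df_5500.to_csv(osp.join(out_tsv_folder, "5500ms.tsv"), sep="\t", index=False)
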
Example 3
    pitch_max = 3.0

    time_stretch_dist = 'uniform'
    time_stretch_min = 1
    time_stretch_max = 1

    out_folder_ls_30 = osp.join(out_folder, "ls_30dB")
    create_folder(out_folder_ls_30)
    sg.generate(n_soundscapes, out_folder_ls_30, min_events, max_events, labels=('choose', []),
                source_files=('choose', []), sources_time=(source_time_dist, source_time),
                events_start=(evt_time_dist, evt_time_mean, evt_time_std, evt_time_min, evt_time_max),
                events_duration=(event_duration_dist, event_duration_min, event_duration_max),
                snrs=('const', 30), pitch_shifts=('uniform', -3.0, 3.0), time_stretches=('uniform', 1, 1),
                txt_file=True)

    rm_high_polyphony(out_folder_ls_30, 3)
    post_process_txt_labels(out_folder_ls_30, output_folder=out_folder_ls_30, output_tsv=args.out_tsv,
                            background_label=True)

    list_jams = glob.glob(osp.join(out_folder_ls_30, "*.jams"))
    # We create the same dataset with a different background SNR.
    # Be careful: 15 means the background SNR is 15 dB,
    # so the foreground-background SNR ratio is between -9 dB and 15 dB.
    out_folder_ls_15 = osp.join(out_folder, "ls_15dB")
    modify_jams(list_jams, change_snr, out_folder_ls_15, db_change=-15)

    # Same for 0 dB FBSNR: from the original FBSNR range of [6; 30] dB, we go to [-24; 0] dB FBSNR.
    out_folder_ls_0 = osp.join(out_folder, "ls_0dB")
    modify_jams(list_jams, change_snr, out_folder_ls_0, db_change=-30)
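
To make the SNR arithmetic in the comments above concrete (a minimal sketch using only the ranges quoted in those comments):

# Original foreground-background SNR (FBSNR) range quoted above: [6; 30] dB.
fbsnr_min, fbsnr_max = 6, 30
for db_change in (-15, -30):
    print(fbsnr_min + db_change, fbsnr_max + db_change)
# db_change=-15 -> [-9; 15] dB and db_change=-30 -> [-24; 0] dB, matching the comments.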

Example 4
    # General output folder, in args
    out_folder = args.out_folder
    create_folder(out_folder)
    create_folder(osp.dirname(args.out_tsv))

    # Default parameters
    clip_duration = 10.0

    sg = SoundscapesGenerator(duration=clip_duration,
                              fg_folder=args.fg_folder,
                              bg_folder=args.bg_folder,
                              ref_db=cfg.ref_db,
                              samplerate=cfg.samplerate)

    # Generate events the same way as for the training set
    out_folder_30 = osp.join(out_folder, "30dB")
    create_folder(out_folder_30)
    param_json = args.json_params
    with open(param_json) as json_file:
        co_occur_dict = json.load(json_file)
    sg.generate_by_label_occurence(label_occurences=co_occur_dict,
                                   number=args.number,
                                   out_folder=out_folder_30,
                                   save_isolated_events=True)

    # Remove files that have polyphony greater than 3
    rm_high_polyphony(out_folder_30, 3)
    # Combine labels of the same event happening at the same time or within a 150 ms interval
    post_process_txt_labels(out_folder_30, output_folder=out_folder_30, output_tsv=args.out_tsv)
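
To illustrate the label-merging rule described in the last comment (a toy sketch of the rule only, not the post_process_txt_labels implementation; the 150 ms gap comes from that comment):

from collections import defaultdict

def merge_same_label(events, max_gap=0.150):
    """Merge (label, onset, offset) events of the same label when the gap is < max_gap seconds."""
    by_label = defaultdict(list)
    for label, onset, offset in events:
        by_label[label].append((onset, offset))
    merged = []
    for label, spans in by_label.items():
        spans.sort()
        cur_on, cur_off = spans[0]
        for on, off in spans[1:]:
            if on - cur_off < max_gap:      # overlapping or close enough: extend
                cur_off = max(cur_off, off)
            else:
                merged.append((label, cur_on, cur_off))
                cur_on, cur_off = on, off
        merged.append((label, cur_on, cur_off))
    return merged

# merge_same_label([("Speech", 1.0, 2.0), ("Speech", 2.1, 3.0)]) -> [("Speech", 1.0, 3.0)]
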
Example 5
if __name__ == '__main__':
    dataset_path = osp.join("..", "..", "dataset")
    parser = argparse.ArgumentParser()
    parser.add_argument('--in_dir',
                        type=str,
                        default=osp.join(dataset_path, "audio", "train",
                                         "one_event_train"))
    parser.add_argument('--out_dir',
                        type=str,
                        default=osp.join(dataset_path, "audio", "train",
                                         "one_event_train_no_bg"))
    parser.add_argument('--outtsv',
                        type=str,
                        default=osp.join(dataset_path, "metadata", "train",
                                         "one_event_train_no_bg.tsv"))
    parser.add_argument('--soundbank_dir', type=str, default=None)
    args = parser.parse_args()
    print(pformat(vars(args)))

    fg_path = None
    bg_path = None
    if args.soundbank_dir is not None:
        fg_path = osp.join(args.soundbank_dir, "foreground")
        bg_path = osp.join(args.soundbank_dir, "background")

    generate_new_no_bg_files(args.in_dir, args.out_dir, fg_path, bg_path)
    post_process_txt_labels(args.out_dir,
                            wavdir=args.in_dir,
                            output_folder=args.out_dir,
                            output_tsv=args.outtsv)
Example 6
        if random_state is None:
            random_states = [None for i in range(nproc)]
        else:
            random_states = [random_state + i for i in range(nproc)]
        print(random_states)
        with multiprocessing.Pool(nproc) as p:
            p.starmap(generate_multiproc,
                      zip(list_start, numbers, random_states))
    # Single process
    else:
        sg = SoundscapesGenerator(duration=clip_duration,
                                  fg_folder=fg_folder,
                                  bg_folder=bg_folder,
                                  ref_db=ref_db,
                                  samplerate=sample_rate,
                                  random_state=random_state)
        sg.generate_by_label_occurence(label_occurences=co_occur_dict,
                                       number=n_soundscapes,
                                       out_folder=full_out_folder,
                                       save_isolated_events=True,
                                       pitch_shift=pitch_shift)

    # Post-processing
    rm_high_polyphony(full_out_folder, max_polyphony=2)
    # Concatenate overlapping events that share the same label
    post_process_txt_labels(full_out_folder,
                            output_folder=full_out_folder,
                            output_tsv=out_tsv,
                            rm_nOn_nOff=True)
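
A hedged sketch of how the per-process work split above could be built (the roles of list_start and numbers are assumed from the starmap call: each worker generates numbers[i] soundscapes starting at index list_start[i], so file names do not collide):

base, rem = divmod(n_soundscapes, nproc)
# Spread the remainder over the first `rem` workers.
numbers = [base + (1 if i < rem else 0) for i in range(nproc)]
list_start = [sum(numbers[:i]) for i in range(nproc)]
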
Example 7
def test_postprocessing():
    """ Test the preprocessing method
    Returns:
    Should output Fixed 3 problems
    """
    folder = osp.join(absolute_dir_path, "material", "post_processing")
    checked_folder = osp.join(absolute_dir_path, "generated",
                              "post_processing")
    out_tsv = osp.join(absolute_dir_path, "generated", "post.tsv")

    post_process_txt_labels(folder,
                            output_folder=checked_folder,
                            output_tsv=out_tsv)
    df = pd.read_csv(out_tsv, sep="\t")
    print(df.to_dict())
    valid_df = pd.DataFrame({
        "filename": {
            0: "5.wav",
            1: "5.wav",
            2: "7.wav",
            3: "7.wav",
            4: "7.wav",
            5: "7.wav",
            6: "7.wav",
            7: "7.wav",
        },
        "onset": {
            0: 0.008,
            1: 4.969,
            2: 2.183,
            3: 2.406,
            4: 3.099,
            5: 3.406,
            6: 3.684,
            7: 6.406,
        },
        "offset": {
            0: 5.546,
            1: 9.609,
            2: 2.488,
            3: 5.2,
            4: 3.36,
            5: 6.2,
            6: 5.624,
            7: 10.0,
        },
        "event_label": {
            0: "Cat",
            1: "Speech",
            2: "Dishes",
            3: "Speech",
            4: "Dishes",
            5: "Cat",
            6: "Dishes",
            7: "Frying",
        },
    })
    check = df.round(3).sort_values("onset").reset_index(
        drop=True) == valid_df.sort_values("onset").reset_index(drop=True)

    assert check.all(axis=None), "Problem with post_process_txt_labels"
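
An equivalent but often more informative check could use pandas' testing helper (a hedged alternative to the element-wise comparison above, not the original test):

import pandas.testing as pdt

pdt.assert_frame_equal(
    df.round(3).sort_values("onset").reset_index(drop=True),
    valid_df.sort_values("onset").reset_index(drop=True),
    check_dtype=False,  # tolerate int/float dtype differences
)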