Example #1
def download_sins(destination_folder):
    """ Download SINS database: see https://zenodo.org/record/1247102

    Args:
        destination_folder: str, the folder in which to download SINS (will create a `DCASE2018-task5-dev` folder in it)

    Returns:
        str, path of the extracted SINS database
    """
    logger = create_logger(
        __name__ + "/" + inspect.currentframe().f_code.co_name,
        terminal_level=logging.INFO,
    )
    create_folder(destination_folder)
    zip_file_url_meta = "https://zenodo.org/record/1247102/files/DCASE2018-task5-dev.meta.zip?download=1"
    download_and_unpack_archive(
        zip_file_url_meta, destination_folder, archive_format="zip"
    )

    for i in range(1, 24):
        logger.info(f"SINS downloading zip {i} / 23 ...")
        zip_file_url = f"https://zenodo.org/record/1247102/files/DCASE2018-task5-dev.audio.{i}.zip?download=1"
        download_and_unpack_archive(
            zip_file_url, destination_folder, archive_format="zip"
        )

    return os.path.join(destination_folder, "DCASE2018-task5-dev")
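
# Usage sketch (not part of the original listing): the destination folder below is a
# hypothetical placeholder; the call fetches the metadata archive plus 23 audio zips from Zenodo.
sins_root = download_sins(os.path.join("data", "soundbank", "sins"))
print(sins_root)  # <destination_folder>/DCASE2018-task5-dev
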
def generate_tsv_from_isolated_events(wav_folder, out_tsv=None):
    """ Generate list of separated wav files in a folder and export them in a tsv file
    Separated audio files considered are all wav files in 'subdirectories' of the 'wav_folder'
    Args:
        wav_folder: str, path of the folder containing subdirectories (one for each mixture separated)
        out_tsv: str, path of the csv in which to save the list of files
    Returns:
        pd.DataFrame, having only one column with the filename considered
    """
    if out_tsv is not None and os.path.exists(out_tsv):
        source_sep_df = pd.read_csv(out_tsv, sep="\t")
    else:
        source_sep_df = pd.DataFrame()
        list_dirs = [d for d in os.listdir(wav_folder) if osp.isdir(osp.join(wav_folder, d))]
        for dirname in list_dirs:
            list_isolated_files = []
            for directory, subdir, fnames in os.walk(osp.join(wav_folder, dirname)):
                for fname in fnames:
                    if osp.splitext(fname)[1] in [".wav"]:
                        # Keep the intermediate subfolder levels so they appear in the tsv
                        subfolder = directory.split(dirname + os.sep)[1:]
                        if len(subfolder) > 0:
                            subdirs = osp.join(*subfolder)
                        else:
                            subdirs = ""
                        # Append the subfolders and name in the list of files
                        list_isolated_files.append(osp.join(dirname, subdirs, fname))
                    else:
                        warnings.warn(f"Not only wav audio files in the separated source folder,"
                                      f"{fname} not added to the .tsv file")
            # DataFrame.append was removed in recent pandas versions; use pd.concat instead
            source_sep_df = pd.concat(
                [source_sep_df, pd.DataFrame(list_isolated_files, columns=["filename"])],
                ignore_index=True,
            )
        if out_tsv is not None:
            create_folder(os.path.dirname(out_tsv))
            source_sep_df.to_csv(out_tsv, sep="\t", index=False, float_format="%.3f")
    return source_sep_df
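
# Usage sketch (assumed paths): list the separated wav files produced for each mixture
# and cache the list as a tsv so later runs can reuse it.
df_sep = generate_tsv_from_isolated_events(
    "audio/separated_sources", out_tsv="metadata/separated_sources.tsv"
)
print(df_sep["filename"].head())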
Example #3
def test_short_background_only():
    foreground_fd = os.path.join(
        absolute_dir_path, "material", "soundbank", "foreground"
    )
    background_fd = os.path.join(
        absolute_dir_path, "material", "soundbank", "background"
    )

    sc = Scaper(10, foreground_fd, background_fd, random_state=2020)
    sc.sr = 16000
    sc.add_background(
        ("const", "label"),
        ("const", os.path.join(background_fd, "label", "noise-free-sound-0055.wav")),
        ("const", 0),
    )
    fname = "test_bg"
    fpath = os.path.join(absolute_dir_path, "generated", "short_bg_scaper", fname)
    create_folder(os.path.dirname(fpath))
    sc.generate(f"{fpath}.wav", f"{fpath}.jams")

    audio_g, sr_g = soundfile.read(f"{fpath}.wav")
    audio_s, sr_s = soundfile.read(
        os.path.join(absolute_dir_path, "material", "scaper", f"{fname}.wav")
    )
    print(f"audio gen: {audio_g}")
    print(f"audio source: {audio_s}")
    print(f"shapes, source: {audio_s.shape}, gen: {audio_g.shape}")
    assert audio_g.shape == audio_s.shape
Example #4
def generate_new_no_bg_files(in_dir, out_dir, fg_path=None, bg_path=None):
    """ Generate the new JAMS and audio files with a different background SNR
    Args:
        new_snr: float, Sound to noise ratio (SNR) of the background from the reference
        in_dir: str, folder containing JAMS file with background SNR to be changed
        out_dir: str, folder where to save the new audio and JAMS

    Returns:

    """
    logger = create_logger(__name__, "Desed.log")
    create_folder(out_dir)
    for jam_file in sorted(glob.glob(osp.join(in_dir, "*.jams"))):
        jams_obj = delete_bg(jam_file)
        out_jams = osp.join(out_dir, osp.basename(jam_file))
        jams_obj.save(out_jams)

        audiofile = osp.join(out_dir,
                             osp.splitext(osp.basename(jam_file))[0] + ".wav")
        logger.debug(audiofile)
        scaper.generate_from_jams(out_jams,
                                  audiofile,
                                  fg_path,
                                  bg_path,
                                  jams_outfile=out_jams)
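
# Usage sketch (paths are placeholders): regenerate existing soundscapes without their
# background, reusing the soundbank folders that produced the original JAMS files.
generate_new_no_bg_files(
    in_dir="audio/train/synthetic/soundscapes",
    out_dir="audio/train/synthetic/soundscapes_no_bg",
    fg_path="soundbank/foreground",
    bg_path="soundbank/background",
)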
Example #5
def download_tut(destination_folder):
    """ Download 'TUT Acoustic scenes 2017, Development dataset', see: https://zenodo.org/record/400515
    Args:
        destination_folder: str, the folder in which to download TUT (will create `TUT-acoustic-scenes-2017-development`
            folder in it)

    Returns:
        str, path of extracted TUT database
    """
    logger = create_logger(
        __name__ + "/" + inspect.currentframe().f_code.co_name,
        terminal_level=logging.INFO,
    )
    create_folder(destination_folder)
    zip_meta_tut = "https://zenodo.org/record/400515/files/TUT-acoustic-scenes-2017-development.meta.zip?download=1"
    download_and_unpack_archive(zip_meta_tut, destination_folder, archive_format="zip")

    for i in range(1, 11):
        logger.info(f"TUT (scenes-2017-dev) downloading zip {i} / 10 ...")
        zip_file_url = (
            f"https://zenodo.org/record/400515/files/"
            f"TUT-acoustic-scenes-2017-development.audio.{i}.zip?download=1"
        )
        download_and_unpack_archive(
            zip_file_url, destination_folder, archive_format="zip"
        )

    return os.path.join(destination_folder, "TUT-acoustic-scenes-2017-development")
Example #6
def split_desed_soundbank_train_val(basedir):
    """ Split the training into training and validation (pre-made, 90%/10%) of backgrounds and foregrounds
    Args:
        basedir: str, path where the soundbank is downloaded (parent folder of "audio")

    Returns:

    """
    logger = create_logger(
        __name__ + "/" + inspect.currentframe().f_code.co_name,
        terminal_level=logging.INFO,
    )
    fname_valid = (
        "https://zenodo.org/record/4307908/files/soundbank_validation.tsv?download=1"
    )
    fpath = os.path.join(basedir, "soundbank_validation.tsv")
    download_file_from_url(fname_valid, fpath)
    df = pd.read_csv(fpath, sep="\t")
    for fpath in df.filepath:
        source_path = os.path.join(basedir, fpath.replace("validation", "train"))
        if os.path.exists(source_path):
            destination_path = os.path.join(basedir, fpath)
            create_folder(os.path.dirname(destination_path))
            shutil.move(source_path, destination_path)
    logger.info("Soundbank splitted in train and validation (90%/10%)")
Example #7
def download_zenodo_soundbank(destination_folder):
    """ Be careful, there are only the foregrounds of training.
    Args:
        destination_folder: str, the path of the root of the soundbank (will create the structure inside)

    Returns:
    """
    create_folder(destination_folder)
    zip_meta_tut = "https://zenodo.org/record/4307908/files/DESED_synth_soundbank.tar.gz?download=1"
    download_and_unpack_archive(zip_meta_tut, destination_folder)
Example #8
def download_eval_public(dataset_folder):
    """ Download the public eval part of desed dataset from Zenodo.

    Args:
        dataset_folder: str, the path to the root of the dataset where to download the evaluation files (this folder
            contains audio and metadata folders).

    Returns:

    """
    create_folder(dataset_folder)
    url_public_eval = (
        f"https://zenodo.org/record/4560759/files/DESED_public_eval.tar.gz?download=1"
    )
    download_and_unpack_archive(url_public_eval, dataset_folder)
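
# Usage sketch (hypothetical dataset root): extracts the public eval archive into the
# folder that also holds the "audio" and "metadata" folders.
download_eval_public("data/dataset")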
Example #9
def test_download_file():
    fname_valid = (
        "https://zenodo.org/record/4307908/files/soundbank_validation.tsv?download=1"
    )
    fpath = os.path.join(
        absolute_dir_path, "generated", "utils", "soundbank_validation.tsv"
    )
    create_folder(osp.dirname(fpath))
    download_file_from_url(fname_valid, fpath)
    material = os.path.join(
        absolute_dir_path, "material", "utils", "soundbank_validation.tsv"
    )
    df_download = pd.read_csv(fpath, sep="\t")
    df_material = pd.read_csv(material, sep="\t")
    assert df_download.equals(
        df_material
    ), "Wrong file downloaded, not matching: soundbank_validation.tsv"
Example #10
def generate_training(n_soundscapes,
                      fg_folder,
                      bg_folder,
                      param_file,
                      outfolder,
                      duration=10.0,
                      ref_db=-50):
    create_folder(outfolder)

    with open(param_file) as json_file:
        params = json.load(json_file)

    sg = SoundscapesGenerator(duration, fg_folder, bg_folder, ref_db=ref_db)
    sg.generate_by_label_occurence(params,
                                   n_soundscapes,
                                   outfolder,
                                   min_events=1,
                                   max_events=1,
                                   pitch_shift=('uniform', -3, 3))
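
# Usage sketch (assumed paths and parameter file): generate 200 training soundscapes,
# one event per clip, from a JSON file describing per-label occurrence parameters.
generate_training(
    n_soundscapes=200,
    fg_folder="soundbank/foreground",
    bg_folder="soundbank/background",
    param_file="event_occurences_train.json",
    outfolder="audio/train/synthetic/soundscapes",
)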
Example #11
def download_and_unpack_archive(url, destination_folder, archive_format="gztar"):
    """ Download and unpack an archive from the internet. Useful for Zenodo archives.

    Args:
        url: str, URL to download.
        destination_folder: str, the folder in which to extract the content of the archive.
        archive_format: str, the format of the archive to unpack.

    Returns:

    """
    create_folder(destination_folder)
    # not using the default temp dir because the archives may be too big for some /tmp partitions
    archive_folder = tempfile.mkdtemp(prefix="tmp_", dir="./")
    path_dl_tar = tempfile.NamedTemporaryFile(
        dir=archive_folder, suffix=os.path.splitext(url.split("?")[0])[1]
    ).name
    download_file_from_url(url, path_dl_tar)
    shutil.unpack_archive(path_dl_tar, destination_folder, format=archive_format)
    shutil.rmtree(archive_folder)
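
# Usage sketch: works for any Zenodo archive URL as long as `archive_format` matches the
# archive type; the URL below is one already used in these examples.
download_and_unpack_archive(
    "https://zenodo.org/record/400515/files/TUT-acoustic-scenes-2017-development.meta.zip?download=1",
    "data/TUT",
    archive_format="zip",
)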
Example #12
def unsplit_desed_soundbank(basedir):
    """ UnSplit the the soundbank from training and validation (pre-made, 90%/10%) to training only folder.
    Args:
        basedir: str, path where the soundbank is downloaded (parent folder of "audio")

    Returns:

    """
    logger = create_logger(
        __name__ + "/" + inspect.currentframe().f_code.co_name,
        terminal_level=logging.INFO,
    )
    validation_path = os.path.join(basedir, "audio", "validation")
    for rootdir, subdirs, files in os.walk(validation_path):
        for fname in files:
            source_file = os.path.join(rootdir, fname)
            destination_file = source_file.replace("validation", "train")
            create_folder(os.path.dirname(destination_file))
            shutil.move(source_file, destination_file)
    shutil.rmtree(validation_path)
    logger.info("Unsplitted soundbank, validation moved back to train")
Example #13
    )
    parser.add_argument("--bg_folder", type=str, required=True)
    parser.add_argument("--out_folder", type=str, required=True)
    parser.add_argument(
        "--out_tsv_folder",
        type=str,
        default=osp.join("..", "..", "data", "generated",
                         "soundscapes_generated_var_onset"),
    )
    parser.add_argument("--number", type=int, default=1000)
    args = parser.parse_args()
    pformat(vars(args))

    # General output folder, in args
    base_out_folder = args.out_folder
    create_folder(base_out_folder)

    out_tsv_folder = args.out_tsv_folder
    create_folder(out_tsv_folder)

    # ################
    # Varying onset of a single event
    # ###########
    # SCAPER SETTINGS
    clip_duration = 10.0
    sg = SoundscapesGenerator(duration=clip_duration,
                              fg_folder=args.fg_folder,
                              bg_folder=args.bg_folder)
    n_soundscapes = args.number
    source_time_dist = "const"
    source_time = 0.0
Example #14
    sample_rate = cfg.samplerate
    clip_duration = cfg.clip_duration

    rir_folder = args.rir_folder
    input_folder = args.input_folder
    rir_subset = args.rir_subset
    reverb_folder = args.reverb_out_folder
    if reverb_folder is None:
        reverb_folder = input_folder + "_reverb"
    if args.reverb_out_tsv is None:
        out_tsv = osp.join(reverb_folder.replace("audio", "metadata"),
                           subset + ".tsv")
    else:
        out_tsv = args.reverb_out_tsv

    create_folder(reverb_folder)
    create_folder(osp.dirname(out_tsv))
    # ########
    # Make lists of examples, rir and mix_info needed to reverberate
    # (see reverberate_and_mix.py from Google folder for more info)
    # ########
    src_list_file = args.src_list_file
    rir_list_file = args.rir_list_file
    mix_info_file = args.mix_info_file
    np.random.seed(args.random_seed)
    mix_info = None
    source_dict = None
    rir_dict = None
    if mix_info_file != "":
        if osp.exists(mix_info_file):
            mix_info = read_mix_info(mix_info_file)
Example #15
if __name__ == '__main__':
    LOG = create_logger(__name__, terminal_level=logging.INFO)
    LOG.info(__file__)
    t = time.time()
    parser = argparse.ArgumentParser()
    parser.add_argument('--out_folder', type=str, default=osp.join(cfg.audio_path_eval, 'soundscapes_generated_ls'))
    parser.add_argument('--out_tsv', type=str, default=osp.join(cfg.meta_path_eval, "soundscapes_generated_ls", "XdB.tsv"))
    parser.add_argument('--number', type=int, default=1000)
    parser.add_argument('--fg_folder', type=str, default=osp.join(cfg.audio_path_eval, "soundbank", "foreground_short"))
    parser.add_argument('--bg_folder', type=str, default=osp.join(cfg.audio_path_eval, "soundbank", "background_long"))
    args = parser.parse_args()
    pformat(vars(args))

    # General output folder, in args
    out_folder = args.out_folder
    create_folder(out_folder)
    create_folder(osp.dirname(args.out_tsv))

    # ################
    # Long event as background, short events as foreground
    # ###########
    duration = 10.0
    sg = SoundscapesGenerator(duration=duration,
                              fg_folder=args.fg_folder,
                              bg_folder=args.bg_folder,
                              ref_db=cfg.ref_db,
                              samplerate=cfg.samplerate)

    n_soundscapes = args.number
    # Distribution of events
    min_events = 1
Example #16
    parser = argparse.ArgumentParser()
    parser.add_argument("--fg_folder", type=str, required=True)
    parser.add_argument("--bg_folder", type=str, required=True)
    parser.add_argument("--out_folder", type=str, required=True)
    parser.add_argument(
        "--out_tsv",
        type=str,
        default=osp.join("..", "..", "data", "generated", "FBSNR_XdB.tsv"),
    )
    parser.add_argument("--number", type=int, default=1000)
    args = parser.parse_args()
    pformat(vars(args))

    # General output folder, in args
    out_folder = args.out_folder
    create_folder(out_folder)
    out_tsv = args.out_tsv
    if out_tsv is not None:
        create_folder(osp.dirname(args.out_tsv))

    # Default parameters
    clip_duration = 10.0
    ref_db = -55
    samplerate = 16000

    sg = SoundscapesGenerator(
        duration=clip_duration,
        fg_folder=args.fg_folder,
        bg_folder=args.bg_folder,
        ref_db=ref_db,
        samplerate=samplerate,
Example #17
def download_audioset_files(
    filenames,
    result_dir,
    n_jobs=1,
    chunk_size=10,
    missing_files_tsv="..",
    platform="youtube",
):
    """ download files in parallel from youtube given a tsv file listing files to download.
    It also stores not downloaded files with their associated error in "missing_files_[tsv_file].tsv"

       Args:
           filenames : pandas Series, named "filename" listing AudioSet filenames to download
           result_dir : str, result directory which will contain downloaded files
           n_jobs : int, number of download to execute in parallel
           chunk_size : int, number of files to download before updating the progress bar. Bigger it is, faster it goes
                because data is filled in memory but progress bar only updates after a chunk is finished.
           missing_files_tsv: str, path of the tsv which will contain the files that couldn't have been downloaded.
           platform: str, the platform the filenames are coming from "youtube" or "vimeo"

       Returns:
           missing_files : pandas.DataFrame, files not downloaded whith associated error.

       """
    warnings.filterwarnings("ignore")
    create_folder(result_dir)
    TMP_FOLDER = "tmp/"
    create_folder(TMP_FOLDER)

    p = None
    files_error = []
    try:
        if n_jobs == 1:
            for filename in tqdm(filenames):
                files_error.append(
                    _download_audioset_file(filename, result_dir, platform)
                )
        # multiprocessing
        else:
            with closing(Pool(n_jobs)) as p:
                # Fix result_dir, platform and tmp_folder so only the filename varies for each worker
                download_file_alias = functools.partial(
                    _download_audioset_file,
                    result_dir=result_dir,
                    platform=platform,
                    tmp_folder=TMP_FOLDER,
                )

                for val in tqdm(
                    p.imap_unordered(download_file_alias, filenames, chunk_size),
                    total=len(filenames),
                ):
                    files_error.append(val)

        # Store files which gave error
        missing_files = pd.DataFrame(files_error).dropna()
        if not missing_files.empty:
            # Save missing_files to be able to ask them
            missing_files.columns = ["filename", "error"]
            missing_files.to_csv(missing_files_tsv, index=False, sep="\t")
            warnings.warn(
                f"There are missing files at {missing_files_tsv}, \n"
                f"see info on https://github.com/turpaultn/desed on how to get them",
                DesedWarning,
            )

    except KeyboardInterrupt:
        if p is not None:
            p.terminate()
        raise KeyboardInterrupt

    if os.path.exists(TMP_FOLDER):
        shutil.rmtree(TMP_FOLDER)
    warnings.resetwarnings()
    return missing_files
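
# Usage sketch (assumed tsv layout with a "filename" column): download the listed clips
# with 3 parallel workers and keep track of the ones that failed.
df_weak = pd.read_csv("metadata/train/weak.tsv", sep="\t")
missing = download_audioset_files(
    df_weak["filename"].drop_duplicates(),
    result_dir="audio/train/weak",
    n_jobs=3,
    missing_files_tsv="missing_files_weak.tsv",
)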
Example #18
def download_audioset_data(
    dataset_folder,
    weak=True,
    unlabel_in_domain=True,
    validation=True,
    n_jobs=3,
    chunk_size=10,
):
    """ Download the DESED dataset files from Audioset.

    Args:
        dataset_folder: str, the path to the root of the dataset where to download the AudioSet recordings (this folder
            contains audio and metadata folders).
        weak: bool, whether to download the weak set or not.
        unlabel_in_domain: bool, whether to download the unlabel_in_domain set or not.
        validation: bool, whether to download the validation set or not.
        n_jobs: int, number of downloads to execute in parallel
        chunk_size: int, number of files to download before updating the progress bar. The bigger it is,
            the faster the download goes, but the progress bar only updates once a chunk is finished.

    Returns:
        list, list of missing files paths
    """
    logger = create_logger(__name__ + "/" + inspect.currentframe().f_code.co_name)
    basedir_missing_files = "missing_files"
    create_folder(basedir_missing_files)
    create_folder(dataset_folder)

    # Metadata:
    url_metadata = (
        f"https://zenodo.org/record/4639776/files/audioset_metadata.tar.gz?download=1"
    )
    download_and_unpack_archive(url_metadata, dataset_folder)

    missing_files_paths = []
    if weak:
        logger.info("Downloading Weakly labeled data...")
        path_missing_files_weak = os.path.join(
            basedir_missing_files, "missing_files_" + "weak" + ".tsv"
        )
        download_audioset_files_from_csv(
            os.path.join(dataset_folder, "metadata", "train", "weak.tsv"),
            os.path.join(dataset_folder, "audio", "train", "weak"),
            missing_files_tsv=path_missing_files_weak,
            n_jobs=n_jobs,
            chunk_size=chunk_size,
        )
        missing_files_paths.append(path_missing_files_weak)

    if unlabel_in_domain:
        logger.info("Downloading Unlabeled (in_domain) labeled data...")
        path_missing_files_unlabel = os.path.join(
            basedir_missing_files, "missing_files_" + "unlabel_in_domain" + ".tsv"
        )
        download_audioset_files_from_csv(
            os.path.join(dataset_folder, "metadata", "train", "unlabel_in_domain.tsv"),
            os.path.join(dataset_folder, "audio", "train", "unlabel_in_domain"),
            missing_files_tsv=path_missing_files_unlabel,
            n_jobs=n_jobs,
            chunk_size=chunk_size,
        )
        missing_files_paths.append(path_missing_files_unlabel)

    if validation:
        logger.info("Downloading validation, strongly labeled data...")
        path_missing_files_valid = os.path.join(
            basedir_missing_files, "missing_files_" + "validation" + ".tsv"
        )
        download_audioset_files_from_csv(
            os.path.join(dataset_folder, "metadata", "validation", "validation.tsv"),
            os.path.join(dataset_folder, "audio", "validation", "validation"),
            missing_files_tsv=path_missing_files_valid,
            n_jobs=n_jobs,
            chunk_size=chunk_size,
        )
        missing_files_paths.append(path_missing_files_valid)

    logger.info(
        f"Please check your missing_files: {basedir_missing_files}, "
        f"you can relaunch 'download_audioset_sets' to try to recude them, "
        "then, send these missing_files to "
    )
    return missing_files_paths
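
# Usage sketch (hypothetical dataset root): download the weak and unlabel_in_domain
# recordings, skip the validation set, and collect the paths of the missing-files tsvs.
missing_tsvs = download_audioset_data(
    "data/dataset", weak=True, unlabel_in_domain=True, validation=False, n_jobs=3
)
print(missing_tsvs)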
Example #19
def test_create_folder():
    to_create = os.path.join(absolute_dir_path, "generated", "tmp")
    create_folder(to_create)
    create_folder(to_create)
Example #20
def download_fsd50k(destination_folder, gtruth_only=False):
    """ Download FSD50k dataset from Zenodo.
    Args:
        destination_folder: str, path where the FSD50k is extracted.
    """
    logger = create_logger(
        __name__ + "/" + inspect.currentframe().f_code.co_name,
        terminal_level=logging.INFO,
    )
    create_folder(destination_folder)
    if not gtruth_only:
        archive_folder = "tmp_fsd50k"
        create_folder(archive_folder)
        # Train
        for id in ["01", "02", "03", "04", "05", "ip"]:
            logger.info(f"Downloading zip file: FSD50K.dev_audio.z{id}")
            url_dev = f"https://zenodo.org/record/4060432/files/FSD50K.dev_audio.z{id}?download=1"
            download_file_from_url(
                url_dev, os.path.join(archive_folder, f"FSD50K.dev_audio.z{id}")
            )
        logger.info("Unpacking files")
        subprocess.call(
            [
                "zip",
                "-s",
                "0",
                os.path.join(archive_folder, "FSD50K.dev_audio.zip"),
                "--out",
                os.path.join(archive_folder, "unsplit_dev.zip"),
            ]
        )
        shutil.unpack_archive(
            os.path.join(archive_folder, "unsplit_dev.zip"), destination_folder
        )

        # Eval
        for id in ["01", "ip"]:
            logger.info(f"Downloading zip file: FSD50K.eval_audio.z{id}")
            url_eval = f"https://zenodo.org/record/4060432/files/FSD50K.eval_audio.z{id}?download=1"
            download_file_from_url(
                url_eval, os.path.join(archive_folder, f"FSD50K.eval_audio.z{id}")
            )
        logger.info("Unpacking files")
        subprocess.call(
            [
                "zip",
                "-s",
                "0",
                os.path.join(archive_folder, "FSD50K.eval_audio.zip"),
                "--out",
                os.path.join(archive_folder, "unsplit_eval.zip"),
            ]
        )
        shutil.unpack_archive(
            os.path.join(archive_folder, "unsplit_eval.zip"), destination_folder
        )
        shutil.rmtree(archive_folder)

    url_doc = "https://zenodo.org/record/4060432/files/FSD50K.doc.zip?download=1"
    url_gtruth = (
        "https://zenodo.org/record/4060432/files/FSD50K.ground_truth.zip?download=1"
    )
    url_meta = "https://zenodo.org/record/4060432/files/FSD50K.metadata.zip?download=1"
    for url in [url_doc, url_gtruth, url_meta]:
        download_and_unpack_archive(url, destination_folder, archive_format="zip")
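
# Usage sketch: fetch only the FSD50K ground truth, documentation and metadata (no audio)
# into a hypothetical local folder; the full audio download also requires the `zip` command.
download_fsd50k("data/fsd50k", gtruth_only=True)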
Example #21
    LOG.info(__file__)
    t = time.time()
    args = parse_args()

    # General parameters
    out_folder = args.out_folder
    soundbank_path = args.soundbank
    n_soundscapes = args.number
    random_state = args.random_seed

    subset = "soundscapes"  # Needed for RIR, so used everywhere (need to be the same in reverberate_data.py)
    full_out_folder = osp.join(out_folder, subset)
    out_tsv = args.out_tsv
    if out_tsv is None:
        out_tsv = full_out_folder.replace("audio", "metadata") + ".tsv"
    create_folder(full_out_folder)
    create_folder(osp.dirname(out_tsv))

    # ############
    # Generate soundscapes
    # ############
    # Parameters (default)
    clip_duration = cfg.clip_duration
    sample_rate = cfg.sample_rate
    ref_db = cfg.ref_db
    pitch_shift = cfg.pitch_shift

    # Defined
    fg_folder = osp.join(soundbank_path, "foreground")
    bg_folder = osp.join(soundbank_path, "background")
    with open(args.json_path) as json_file: