Example #1
def separate_and_evaluate(
    track,
    targets,
    model_name,
    niter,
    alpha,
    softmask,
    output_dir,
    eval_dir,
    device='cpu'
):
    estimates = test.separate(
        audio=track.audio,
        targets=targets,
        model_name=model_name,
        niter=niter,
        alpha=alpha,
        softmask=softmask,
        device=device
    )
    if output_dir:
        mus.save_estimates(estimates, track, output_dir)

    scores = museval.eval_mus_track(
        track, estimates, output_dir=eval_dir
    )
    return scores
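
For context, a minimal driver sketch for the Example #1 helper, assuming `mus` is the module-level `musdb.DB` test set used by `mus.save_estimates` and `test.separate` is the project's own separation entry point; the target list, the `umxhq` model name, and the loop itself are illustrative and not part of the original example:

import musdb

# hypothetical evaluation loop around separate_and_evaluate() from Example #1
mus = musdb.DB(subsets="test", download=True)  # downloads the 7-second preview tracks

all_scores = []
for track in mus.tracks:
    scores = separate_and_evaluate(
        track,
        targets=["vocals", "drums", "bass", "other"],
        model_name="umxhq",
        niter=1,
        alpha=1.0,
        softmask=False,
        output_dir=None,
        eval_dir="./eval",
        device="cpu",
    )
    all_scores.append(scores)
    print(track.name, scores)
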
Example #2
def separate_and_evaluate(track, args, ext):
    estimates = test.separate(track.audio, args)

    if args.out_dir:
        mus.save_estimates(estimates, track, args.out_dir)

    scores = museval.eval_mus_track(track, estimates, output_dir=args.out_dir)
    # clear cache memory
    ext.clear_memory_cache()
    return scores
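
Since this variant clears its cache after every track, it lends itself to being mapped over the test set, for example with a multiprocessing pool. A possible sketch, assuming `args` is an argparse.Namespace with a `nb_workers` field, `ext` is whatever helper object the caller uses to expose `clear_memory_cache()`, and `mus` is the module-level `musdb.DB` (all three are assumptions, not defined in the example):

import functools
import multiprocessing

# hypothetical parallel driver for the Example #2 variant;
# on spawn-based platforms this should live under `if __name__ == "__main__":`
pool = multiprocessing.Pool(args.nb_workers)
results = list(
    pool.imap_unordered(
        functools.partial(separate_and_evaluate, args=args, ext=ext),
        mus.tracks,
        chunksize=1,
    )
)
pool.close()
pool.join()
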
Example #3
def separate_and_evaluate(
    track,
    model,
    niter,
    alpha,
    softmask,
    output_dir,
    eval_dir,
):
    estimates = test.separate(audio=track.audio,
                              model_path=model,
                              niter=niter,
                              alpha=alpha,
                              softmask=softmask)

    if output_dir:
        mus.save_estimates(estimates, track, output_dir)

    scores = museval.eval_mus_track(track, estimates, output_dir=eval_dir)
    return scores
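
The per-track scores returned by `museval.eval_mus_track` can be aggregated over the whole test set. One way to do that, assuming a museval version that provides `EvalStore` and again a module-level `musdb.DB` named `mus`; the model path is a placeholder:

import museval

# hypothetical aggregation over the test set for the Example #3 variant
results = museval.EvalStore()
for track in mus.tracks:
    scores = separate_and_evaluate(
        track,
        model="path/to/model",  # placeholder model path
        niter=1,
        alpha=1.0,
        softmask=False,
        output_dir=None,
        eval_dir="./eval",
    )
    results.add_track(scores)

# prints aggregated SDR/SIR/SAR/ISR per target over frames and tracks
print(results)
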
Example #4
mus = musdb.DB(  # musdb test set
    root=root,
    download=root is None,
    subsets=subset,
    is_wav=is_wav
)


# iterate over all tracks present in the test folder
for track in mus.tracks:
    outdir = exp_model_res_path + track.name + '/'
    print(track.name)
    estimates = test.separate(
        audio=track.audio,
        targets=targets,
        model_name=model,
        niter=2,
        alpha=1,
        softmask=False,
        device=device
    )

    # write each separated target to <outdir>/<target>.wav
    for target, estimate in estimates.items():
        sf.write(
            outdir / Path(target).with_suffix('.wav'),
            estimate,
            samplerate
        )

    print("SAVED SEPARATED VOCALS AND ACCOMPANIMENTS!")

    audio_estimates = []
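
The snippet above is cut off after `audio_estimates = []`. Purely as an illustration (not the original author's continuation), the collected estimates could be scored directly with `museval.evaluate`, assuming every estimate key is also a valid musdb target name:

import numpy as np
import museval

# hypothetical scoring step: stack sources as (nsrc, nsamples, nchannels)
references = np.stack([track.targets[name].audio for name in estimates])
audio_estimates = np.stack([estimates[name] for name in estimates])

SDR, ISR, SIR, SAR = museval.evaluate(references, audio_estimates)
print("median SDR per source:", np.nanmedian(SDR, axis=1))
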
Example #5
        "oops, output directory {} already exists - delete this first or change outdir"
        .format(basename))
    sys.exit(1)

os.makedirs(basename)

# https://www.youtube.com/watch?v=DQLUygS0IAQ
# https://www.youtube.com/watch?v=J9gKyRmic20

use_cuda = torch.cuda.is_available()
device = torch.device("cuda" if use_cuda else "cpu")

audio, rate = librosa.load(infile, sr=44100, mono=False)

estimates = test.separate(audio=audio.T,
                          targets=['vocals', 'drums', 'bass', 'other'],
                          device=device,
                          residual_model=False)

# estimates['vocals'].shape = (11953152, 2)
# audio.shape = (2, 11954040)

# import soundfile as sf
# sf.available_formats()

# librosa.output.write_wav('out_west.wav', audio, rate)
# librosa.output.write_wav('out_west.mp3', audio, rate)

# note: librosa.output.write_wav was removed in librosa 0.8, so the calls
# below require librosa < 0.8 (soundfile.write is the modern replacement)
librosa.output.write_wav('{}/out0_all.wav'.format(basename), audio, rate)
librosa.output.write_wav('{}/out1_vocals.wav'.format(basename),
                         estimates['vocals'], rate)
librosa.output.write_wav('{}/out2_drums.wav'.format(basename),