def main(pool):
    """Run the full estimation pipeline and persist aggregated scores.

    Drives ``main_estimate`` with the supplied worker *pool*, sanity-checks
    that every expected task produced a result, prints the evaluation
    summary, and saves the scores as a museval method bundle.

    Parameters
    ----------
    pool : worker pool forwarded to ``main_estimate`` — presumably a
        multiprocessing pool; confirm against ``main_estimate``'s signature.

    Side effects: prints ``results`` and writes a bundle file to the path
    returned by ``main_estimate``.
    """
    import museval

    results, task_list, bundle_path, model_name = main_estimate(pool)
    # Third argument presumably enables strict checking — TODO confirm
    # against check_task_list's definition.
    check_task_list(results, task_list, True)
    print(results)

    # Wrap the per-track scores in a MethodStore so museval can serialize
    # them as a single comparable "method" entry.
    store = museval.MethodStore()
    store.add_evalstore(results, model_name)
    store.save(bundle_path)
pool.imap_unordered(func=functools.partial(separate_and_evaluate, model=args.model, niter=args.niter, alpha=args.alpha, softmask=args.softmask, output_dir=args.outdir, eval_dir=args.evaldir), iterable=mus.tracks, chunksize=1)) pool.close() pool.join() for scores in scores_list: results.add_track(scores) else: results = museval.EvalStore() for track in tqdm.tqdm(mus.tracks): scores = separate_and_evaluate(track=track, model=args.model, niter=args.niter, alpha=args.alpha, softmask=args.softmask, output_dir=args.outdir, eval_dir=args.evaldir) results.add_track(scores) print(results) method = museval.MethodStore() method.add_evalstore(results, 'x-umx') method.save('x-umx.pandas')
print(target) print('################') for tag in tags: if tag in exclude_tags: continue museval_path = os.path.join('exp', tag, 'bss_eval.pkl') if not os.path.exists(museval_path): continue museval_data = pd.read_pickle(museval_path) # ------------------------------------------------------------------------------------------------------------------ # median over frames, median over tracks method_median_median = museval.MethodStore(frames_agg='median', tracks_agg='median') method_median_median.df = museval_data agg_median_median = method_median_median.agg_frames_tracks_scores() agg_median_median = pd.DataFrame(agg_median_median, index=None) median_median = {'tag': [tag], 'target': [target]} # add museval metrics to method results dicts for row in agg_median_median.itertuples(): if target == 'all': metric = row.Index[2] median_median[metric] = [row.score] elif row.Index[1] == target: metric = row.Index[2] median_median[metric] = [row.score]
import seaborn as sns import pandas as pd import matplotlib.pyplot as plt import museval comparisons = museval.MethodStore() comparisons.add_sisec18() agg_df = comparisons.agg_frames_scores() sns.set() sns.set_context("notebook") metrics = ['SDR'] selected_targets = ['vocals', 'drums', 'bass', 'other'] oracles = ['IBM1', 'IBM2', 'IRM1', 'IRM2', 'MWF', 'IMSK'] # Convert to Pandas Dataframes agg_df['oracle'] = agg_df.method.isin(oracles) agg_df = agg_df[agg_df.target.isin(selected_targets)].dropna() # Get sorting keys (sorted by median of SDR:vocals) df_sort_by = agg_df[(agg_df.metric == "SDR") & (agg_df.target == "vocals")] methods_by_sdr = df_sort_by.score.groupby( df_sort_by.method).median().sort_values().index.tolist() # df = df[df.target == "vocals"] g = sns.FacetGrid(agg_df, row="target", col="metric", row_order=selected_targets,