def individual_composer_piano_solo_meta_accuracy(args):
    """Calculate the meta accuracy of piano solo pieces for one composer.

    Composers include: Bach, Johann Sebastian; Mozart, Wolfgang Amadeus;
    Beethoven, Ludwig van; Chopin, Frédéric; Liszt, Franz; Debussy, Claude.

    Args:
      workspace: str
      surname: str
      firstname: str
      surname_in_youtube_title: bool

    Returns:
      None
    """
    workspace = args.workspace
    surname = args.surname
    firstname = args.firstname
    surname_in_youtube_title = args.surname_in_youtube_title

    csv_path = os.path.join(
        workspace, 'full_music_pieces_youtube_similarity_pianosoloprob_split.csv')

    meta_dict = read_csv_to_meta_dict(csv_path)
    audios_num = len(meta_dict['surname'])

    # tp: YouTube titles whose meta matches the queried composer exactly;
    # fp: titles that merely contain the surname but belong to another composer.
    tp, fp = 0, 0

    for n in range(audios_num):
        # Only consider pieces that were selected into GiantMIDI-Piano.
        if meta_dict['giant_midi_piano'][n] != '' and int(
                meta_dict['giant_midi_piano'][n]) == 1:

            match_composer = (surname == meta_dict['surname'][n]
                and firstname == meta_dict['firstname'][n])

            # Optionally require the surname to appear in the YouTube title.
            if surname_in_youtube_title and int(
                    meta_dict['surname_in_youtube_title'][n]) == 0:
                flag = False
            else:
                flag = True

            if flag:
                if surname in meta_dict['youtube_title'][n] or match_composer:
                    if match_composer:
                        tp += 1
                    else:
                        fp += 1

    accuracy = tp / (tp + fp)
    # Fix: the second value printed here is the non-match count (fp), but the
    # original label called it "Accuracy".
    print('Match: {}, Non-match: {}'.format(tp, fp))
    print('Accuracy: {:.3f}'.format(accuracy))
def calculate_piano_solo_prob(args):
    """Calculate the piano solo probability of all downloaded mp3s, and append
    the probability to the meta csv file.

    Args:
      workspace: str
      mp3s_dir: str
      mini_data: bool

    Returns:
      None
    """
    # Arguments & parameters
    workspace = args.workspace
    mp3s_dir = args.mp3s_dir
    mini_data = args.mini_data
    sample_rate = piano_detection_model.SR

    prefix = 'minidata_' if mini_data else ''

    # Paths
    similarity_csv_path = os.path.join(workspace,
        '{}full_music_pieces_youtube_similarity.csv'.format(prefix))
    piano_prediction_path = os.path.join(workspace,
        '{}full_music_pieces_youtube_similarity_pianosoloprob.csv'.format(prefix))

    # Meta info; new columns are appended row by row below.
    meta_dict = read_csv_to_meta_dict(similarity_csv_path)
    meta_dict['piano_solo_prob'] = []
    meta_dict['audio_name'] = []
    meta_dict['audio_duration'] = []

    piano_solo_detector = piano_detection_model.PianoSoloDetector()

    for n in range(len(meta_dict['surname'])):
        mp3_path = os.path.join(mp3s_dir, '{}, {}, {}, {}.mp3'.format(
            meta_dict['surname'][n], meta_dict['firstname'][n],
            meta_dict['music'][n], meta_dict['youtube_id'][n]).replace('/', '_'))

        if os.path.exists(mp3_path):
            (audio, _) = librosa.core.load(mp3_path, sr=sample_rate, mono=True)

            try:
                probs = piano_solo_detector.predict(audio)
                prob = np.mean(probs)
            except Exception:
                # Best effort: treat audio the detector cannot process as
                # non piano solo. (Was a bare `except:`, which also swallowed
                # KeyboardInterrupt/SystemExit.)
                prob = 0

            print(n, mp3_path, prob)
            meta_dict['audio_name'].append(get_filename(mp3_path))
            meta_dict['piano_solo_prob'].append(prob)
            meta_dict['audio_duration'].append(len(audio) / sample_rate)
        else:
            # Audio was not downloaded; keep columns aligned with empty cells.
            meta_dict['piano_solo_prob'].append('')
            meta_dict['audio_name'].append('')
            meta_dict['audio_duration'].append('')

    write_meta_dict_to_csv(meta_dict, piano_prediction_path)
    print('Write out to {}'.format(piano_prediction_path))
def transcribe_piano(args):
    """Transcribe piano solo mp3s to midi files.

    Args:
      workspace: str
      mp3s_dir: str
      midis_dir: str
      begin_index: int
      end_index: int
      mini_data: bool

    Returns:
      None
    """
    # Arguments & parameters
    workspace = args.workspace
    mp3s_dir = args.mp3s_dir
    midis_dir = args.midis_dir
    begin_index = args.begin_index
    end_index = args.end_index

    device = 'cuda' if torch.cuda.is_available() else 'cpu'

    # Paths
    # NOTE(review): the csv path is hard-coded to ./resources, so
    # args.mini_data had no effect here (the original computed a 'minidata_'
    # prefix it never used) — confirm whether a minidata csv was intended.
    csv_path = os.path.join('./resources/full_music_pieces_youtube_similarity_pianosoloprob_split.csv')
    os.makedirs(midis_dir, exist_ok=True)

    # Meta info
    meta_dict = read_csv_to_meta_dict(csv_path)

    # Transcriptor
    transcriptor = piano_transcription_inference.PianoTranscription(device=device)

    transcribe_time = time.time()
    audios_num = len(meta_dict['surname'])

    for n in range(begin_index, min(end_index, audios_num)):
        # Only transcribe pieces that were selected into GiantMIDI-Piano.
        if meta_dict['giant_midi_piano'][n] and int(meta_dict['giant_midi_piano'][n]) == 1:
            mp3_path = os.path.join(mp3s_dir, '{}.mp3'.format(meta_dict['audio_name'][n]))
            print(n, mp3_path)
            midi_path = os.path.join(midis_dir, '{}.mid'.format(meta_dict['audio_name'][n]))

            (audio, _) = piano_transcription_inference.load_audio(mp3_path,
                sr=piano_transcription_inference.sample_rate, mono=True)

            try:
                # Transcribe; the midi file is written to midi_path as a side
                # effect. (Was a bare `except:`; narrowed to Exception so
                # Ctrl-C still aborts the batch.)
                transcriptor.transcribe(audio, midi_path)
            except Exception:
                print('Failed for this audio!')

    print('Time: {:.3f} s'.format(time.time() - transcribe_time))
def create_subset200_piano_solo_eval_csv(args):
    r"""Select 200 pieces from GiantMIDI-Piano to evaluate the music piece
    accuracy.

    Args:
      workspace: str

    Returns:
      None
    """
    # Arguments & parameters
    workspace = args.workspace
    eval_num = 200

    # Paths
    csv_path = os.path.join(
        workspace, 'full_music_pieces_youtube_similarity_pianosoloprob_split.csv')
    output_path = os.path.join('subset_csvs_for_evaluation', 'subset200_piano_solo_eval.csv')
    os.makedirs(os.path.dirname(output_path), exist_ok=True)

    meta_dict = read_csv_to_meta_dict(csv_path)
    audios_num = len(meta_dict['surname'])

    # Rows that were selected into GiantMIDI-Piano.
    indexes = [n for n in range(audios_num)
        if meta_dict['giant_midi_piano'][n] != ''
        and int(meta_dict['giant_midi_piano'][n]) == 1]

    # Evenly sample eval_num rows across the whole list.
    skip_num = len(indexes) // eval_num
    eval_indexes = indexes[0::skip_num][0:eval_num]

    # Copy the sampled rows into a fresh meta dict, remembering the source
    # row of each sample.
    new_meta_dict = {key: [meta_dict[key][index] for index in eval_indexes]
        for key in meta_dict.keys()}
    new_meta_dict['index_in_csv'] = list(eval_indexes)

    # Empty columns to be filled in by human annotators.
    new_meta_dict['meta_correct'] = [''] * eval_num
    new_meta_dict['sequenced'] = [''] * eval_num

    write_meta_dict_to_csv(new_meta_dict, output_path)
    print('Write out to {}'.format(output_path))
def create_subset200_eval_csv(args):
    r"""Select 200 files from 60,724 downloaded files to evaluate the
    precision, recall of piano solo detection.

    Args:
      workspace: str

    Returns:
      None
    """
    # Arguments & parameters
    workspace = args.workspace
    eval_num = 200

    # Paths
    csv_path = os.path.join(
        workspace, 'full_music_pieces_youtube_similarity_pianosoloprob_split.csv')
    output_path = os.path.join('subset_csvs_for_evaluation', 'subset200_eval.csv')
    os.makedirs(os.path.dirname(output_path), exist_ok=True)

    meta_dict = read_csv_to_meta_dict(csv_path)
    audios_num = len(meta_dict['surname'])

    # Rows whose title similarity passed the download threshold.
    indexes = [n for n in range(audios_num)
        if float(meta_dict['similarity'][n]) > 0.6]

    # Evenly sample eval_num rows across the whole list.
    skip_num = len(indexes) // eval_num
    eval_indexes = indexes[0::skip_num][0:eval_num]

    # Copy the sampled rows into a fresh meta dict, remembering the source
    # row of each sample.
    new_meta_dict = {key: [meta_dict[key][index] for index in eval_indexes]
        for key in meta_dict.keys()}
    new_meta_dict['index_in_csv'] = list(eval_indexes)

    # Empty columns to be filled in by human annotators.
    new_meta_dict['piano_solo'] = [''] * eval_num
    new_meta_dict['electronic_piano'] = [''] * eval_num
    new_meta_dict['sequenced'] = [''] * eval_num

    write_meta_dict_to_csv(new_meta_dict, output_path)
    print('Write out to {}'.format(output_path))
def create_piano_split(args):
    """Split piano solo pieces into validation, test, train subsets with
    ratio 1 : 1 : 8.

    NOTE(review): a second `create_piano_split` later in this module shadows
    this definition — consider removing one of them.

    Args:
      workspace: str

    Returns:
      None
    """
    # Arguments & parameters
    workspace = args.workspace

    # Paths
    piano_prediction_path = os.path.join(
        workspace, 'full_music_pieces_youtube_similarity_pianosoloprob.csv')
    split_path = os.path.join(
        workspace, 'full_music_pieces_youtube_similarity_pianosoloprob_split.csv')

    # Meta info to be downloaded
    meta_dict = read_csv_to_meta_dict(piano_prediction_path)

    splits = []
    i = 0   # Position within a rotating group of 10 pieces per composer.

    for n in range(len(meta_dict['surname'])):
        if float(meta_dict['piano_solo_prob'][n]) >= 0.5:
            # 1 : 1 : 8 split within every group of 10 piano solo pieces.
            if i == 0:
                splits.append('validation')
            elif i == 1:
                splits.append('test')
            else:
                splits.append('train')
            i += 1
        else:
            splits.append('none')

        # Reset i if moved to next composer.
        if n > 0:
            # Fix: compare full "surname, firstname" names. The original
            # formatted the surname twice, so different composers sharing a
            # surname were treated as one composer.
            previous_name = '{}, {}'.format(
                meta_dict['surname'][n - 1], meta_dict['firstname'][n - 1])
            current_name = '{}, {}'.format(
                meta_dict['surname'][n], meta_dict['firstname'][n])
            if previous_name != current_name:
                i = 0
        # NOTE(review): the reset runs after row n is assigned, so the first
        # piece of a new composer inherits the previous composer's counter —
        # confirm whether the reset should happen before the assignment.

        if i == 10:
            i = 0

    meta_dict['split'] = splits
    write_meta_dict_to_csv(meta_dict, split_path)
    print('Write csv to {}'.format(split_path))
def create_surname_checked_subset(args):
    """Select MIDI files whose YouTube titles must contain composer surnames.

    This procedure will select and filter 7,236 MIDI files from 10,854 MIDI
    files.

    Args:
      workspace: str

    Returns:
      NoReturn
    """
    import shutil

    # arguments & parameters
    workspace = args.workspace

    # paths
    midis_dir = os.path.join(workspace, "midis")
    surname_checked_midis_dir = os.path.join(workspace, "surname_checked_midis")
    csv_path = os.path.join(
        workspace, 'full_music_pieces_youtube_similarity_pianosoloprob_split.csv')
    os.makedirs(surname_checked_midis_dir, exist_ok=True)

    # Read csv file.
    meta_dict = read_csv_to_meta_dict(csv_path)
    audios_num = len(meta_dict['audio_name'])
    count = 0

    for n in range(audios_num):
        # Only pieces selected into GiantMIDI-Piano whose YouTube title
        # contains the composer surname.
        if meta_dict['giant_midi_piano'][n] != "" and int(
                meta_dict['giant_midi_piano'][n]) == 1:
            if int(meta_dict['surname_in_youtube_title'][n]) == 1:
                midi_name = "{}.mid".format(meta_dict['audio_name'][n])
                midi_path = os.path.join(midis_dir, midi_name)
                surname_checked_midi_path = os.path.join(
                    surname_checked_midis_dir, midi_name)
                # Fix: copy with shutil instead of `os.system('cp "{}" "{}"')`.
                # The shell version broke (or could execute injected commands)
                # on file names containing double quotes, and was not portable.
                print(count, midi_path, '->', surname_checked_midi_path)
                shutil.copyfile(midi_path, surname_checked_midi_path)
                count += 1

    print("Copy {} surname checked midi files to {}".format(
        count, surname_checked_midis_dir))
def piano_solo_performed_ratio(args):
    r"""Calculate the ratio of performed (not sequenced) pieces among the
    200 manually labelled files from GiantMIDI-Piano.

    (The original docstring was copied from the meta-accuracy function and
    incorrectly described this as a "piano piece accuracy" calculation.)

    Args:
      subset200_piano_solo_eval_with_labels_path: str
      surname_in_youtube_title: bool

    Returns:
      None
    """
    # arguments & parameters
    subset200_piano_solo_eval_with_labels_path = args.subset200_piano_solo_eval_with_labels_path
    surname_in_youtube_title = args.surname_in_youtube_title

    meta_dict = read_csv_to_meta_dict(
        subset200_piano_solo_eval_with_labels_path)
    audios_num = len(meta_dict['surname'])

    # Renamed from tp/fp: these are plain counts of sequenced vs. performed
    # pieces, not true/false positives.
    sequenced_num, performed_num = 0, 0

    for n in range(audios_num):
        if meta_dict['audio_name'][n] == '':
            # Audio was never downloaded; skip.
            flag = False
        else:
            # Optionally require the surname to appear in the YouTube title.
            if surname_in_youtube_title and int(
                    meta_dict['surname_in_youtube_title'][n]) == 0:
                flag = False
            else:
                flag = True

        if flag:
            if int(meta_dict['sequenced'][n]) == 1:
                sequenced_num += 1
            else:
                performed_num += 1

    sequenced_ratio = sequenced_num / (sequenced_num + performed_num)
    print('Performance ratio: {:.3f}'.format(1. - sequenced_ratio))
def piano_solo_meta_accuracy(args):
    r"""Calculate the meta-information accuracy over the 200 manually
    labelled files from GiantMIDI-Piano.

    Args:
      subset200_piano_solo_eval_with_labels_path: str
      surname_in_youtube_title: bool

    Returns:
      None
    """
    # arguments & parameters
    labels_path = args.subset200_piano_solo_eval_with_labels_path
    surname_in_youtube_title = args.surname_in_youtube_title

    meta_dict = read_csv_to_meta_dict(labels_path)
    audios_num = len(meta_dict['surname'])

    tp, fp = 0, 0

    for n in range(audios_num):
        # Skip audios that were never downloaded.
        if meta_dict['audio_name'][n] == '':
            continue

        # Optionally require the surname to appear in the YouTube title.
        if surname_in_youtube_title and int(
                meta_dict['surname_in_youtube_title'][n]) == 0:
            continue

        if int(meta_dict['meta_correct'][n]) == 1:
            tp += 1
        else:
            fp += 1

    precision = tp / (tp + fp)
    print('Correct rate: {} / {}, {}'.format(tp, tp + fp, precision))
def create_piano_split(args):
    """Add 'giant_midi_piano', 'split', and 'surname_in_youtube_title' flags
    to csv file and write out the csv file. The ratio of validation, test,
    train subsets are 1:1:8.

    NOTE(review): this redefines `create_piano_split` declared earlier in the
    module and shadows it — consider removing the older definition.

    Args:
      workspace: str

    Returns:
      NoReturn
    """
    # arguments & parameters
    workspace = args.workspace

    # Paths
    piano_prediction_path = os.path.join(
        workspace, 'full_music_pieces_youtube_similarity_pianosoloprob.csv')
    split_path = os.path.join(
        workspace, 'full_music_pieces_youtube_similarity_pianosoloprob_split.csv')

    # Meta info to be downloaded
    meta_dict = read_csv_to_meta_dict(piano_prediction_path)

    giant_midi_pianos = []
    splits = []
    surname_in_youtube_titles = []
    i = 0   # Position within a rotating group of 10 pieces per composer.

    # Add 'giant_midi_piano', 'split', and 'surname_in_youtube_title' flags
    # to .csv file.
    for n in range(len(meta_dict['surname'])):
        if meta_dict['piano_solo_prob'][n] == "":
            # Audio was never downloaded; leave all flags empty.
            giant_midi_pianos.append("")
            splits.append("")
            surname_in_youtube_titles.append("")
        else:
            if float(meta_dict['piano_solo_prob'][n]) >= 0.5:
                giant_midi_pianos.append(1)
                # 1 : 1 : 8 split within every group of 10 piano solo pieces.
                if i == 0:
                    splits.append('validation')
                elif i == 1:
                    splits.append('test')
                else:
                    splits.append('train')
                i += 1
            else:
                giant_midi_pianos.append(0)
                splits.append("")

            if meta_dict['surname'][n] in meta_dict['youtube_title'][n]:
                surname_in_youtube_titles.append(1)
            else:
                surname_in_youtube_titles.append(0)

        # Reset i if moved to next composer
        if n > 0:
            # Fix: compare full "surname, firstname" names. The original
            # formatted the surname twice, so different composers sharing a
            # surname were treated as one composer.
            previous_name = '{}, {}'.format(
                meta_dict['surname'][n - 1], meta_dict['firstname'][n - 1])
            current_name = '{}, {}'.format(
                meta_dict['surname'][n], meta_dict['firstname'][n])
            if previous_name != current_name:
                i = 0
        # NOTE(review): the reset runs after row n is assigned, so the first
        # piece of a new composer inherits the previous composer's counter —
        # confirm whether the reset should happen before the assignment.

        if i == 10:
            i = 0

    meta_dict['giant_midi_piano'] = giant_midi_pianos
    meta_dict['split'] = splits
    meta_dict['surname_in_youtube_title'] = surname_in_youtube_titles
    write_meta_dict_to_csv(meta_dict, split_path)
    print('Write csv to {}'.format(split_path))
def meta_info(args):
    """Calculate statistics of number of music pieces, nationalities, birth, etc."""

    # Arguments & parameters
    workspace = args.workspace

    # Paths
    csv_path = os.path.join(workspace, 'full_music_pieces_youtube_similarity_pianosoloprob.csv')
    statistics_path = os.path.join(workspace, 'statistics.pkl')
    os.makedirs(os.path.dirname(statistics_path), exist_ok=True)

    meta_dict = read_csv_to_meta_dict(csv_path)
    """keys: ['surname', 'firstname', 'music', 'nationality', 'birth', 'death', 'youtube_title', 'youtube_id', 'similarity', 'piano_solo_prob', 'audio_name']"""

    # Convert every column to an array for vectorized filtering below.
    for key in meta_dict.keys():
        meta_dict[key] = np.array(meta_dict[key])

    # Filter piano solo
    indexes = np.where(meta_dict['piano_solo_prob'].astype(np.float32) >= 0.5)[0]
    print('Music pieces num: {}'.format(len(indexes)))

    # Composers, deduplicated by "surname, firstname".
    full_names = []
    for idx in indexes:
        full_names.append('{}, {}'.format(meta_dict['surname'][idx], meta_dict['firstname'][idx]))
    composers = np.array(list(set(full_names)))
    print('Composers num: {}'.format(len(composers)))

    # Number of works. nationality/birth/death are overwritten per row, so
    # each composer keeps the values of their last row in the csv.
    works_dict = {composer: {'audio_names': [], 'nationality': None,
        'birth': None, 'death': None} for composer in composers}
    for idx in indexes:
        composer = '{}, {}'.format(meta_dict['surname'][idx], meta_dict['firstname'][idx])
        works_dict[composer]['audio_names'].append(meta_dict['audio_name'][idx])
        works_dict[composer]['nationality'] = meta_dict['nationality'][idx]
        works_dict[composer]['birth'] = meta_dict['birth'][idx]
        works_dict[composer]['death'] = meta_dict['death'][idx]
    number_of_works = np.array([len(works_dict[composer]['audio_names']) for composer in composers])
    statistics_dict = {'composers': composers, 'number_of_piano_works': number_of_works}

    # Sort by number of works
    sorted_idx = np.argsort(number_of_works)[::-1]
    sorted_list = []
    for idx in sorted_idx:
        composer = composers[idx]
        sorted_list.append([composer, len(works_dict[composer]['audio_names']),
            works_dict[composer]['nationality'], works_dict[composer]['birth'],
            works_dict[composer]['death']])
    """E.g., [..., ['Schmitt, Florent', 132, 'French', '1870', '1958'], ...]"""

    # Count by nationalities
    nationalities = [e[2] for e in sorted_list]
    unique_nationalities = list(set(nationalities))
    nationalities_count = []
    for na in unique_nationalities:
        nationalities_count.append(nationalities.count(na))
    # Order nationalities by descending composer count.
    _idxes = np.argsort(nationalities_count)[::-1]
    unique_nationalities = np.array(unique_nationalities)[_idxes]
    nationalities_count = np.array(nationalities_count)[_idxes]
    print('-------- Nationalities --------')
    print('Nationalities:', unique_nationalities)
    print('Count:', nationalities_count)

    # Plot nationalities
    fig_path = 'results/nationalities.pdf'
    N = len(nationalities_count)
    fig, ax = plt.subplots(1, 1, figsize=(8, 4))
    ax.set_xlim(-1, N - 1)
    ax.set_ylim(0, 200)
    ax.set_xlabel('Nationalities', fontsize=14)
    ax.set_ylabel('Number of composers', fontsize=14)
    # The bar at index 0 (the most frequent category) is deliberately skipped
    # — presumably an 'unknown' nationality bucket; confirm against the data.
    ax.bar(np.arange(N - 1), nationalities_count[1 : N], align='center', color='C0', alpha=1)
    ax.xaxis.set_ticks(np.arange(N - 1))
    ax.xaxis.set_ticklabels(unique_nationalities[1 : N], rotation=90, fontsize=12)
    ax.yaxis.grid(color='k', linestyle='--', linewidth=0.3)  # only horizontal grid
    plt.tight_layout()
    plt.savefig(fig_path)
    print('Save fig to {}'.format(fig_path))

    # Year: histogram of birth centuries (birth year // 100).
    births = [int(e[3]) // 100 for e in sorted_list if e[3] != 'unknown']
    unique_births = list(set(births))
    births_count = []
    for na in unique_births:
        births_count.append(births.count(na))
    _idxes = np.argsort(births_count)[::-1]
    unique_births = np.array(unique_births)[_idxes]
    births_count = np.array(births_count)[_idxes]
    print('-------- Birth centery --------')
    print('Birth centuries:', unique_births)
    print('Count:', births_count)

    # Lifespan = death year - birth year.
    # NOTE(review): only e[3] (birth) is checked for 'unknown'; a row with a
    # known birth but unknown death would raise ValueError — confirm the data
    # never contains that combination.
    lifespan = [int(e[4]) - int(e[3]) for e in sorted_list if e[3] != 'unknown']
    unique_lifespan = list(set(lifespan))
    lifespan_count = []
    for na in unique_lifespan:
        lifespan_count.append(lifespan.count(na))
    # Order by ascending lifespan (not by count, unlike the sections above).
    _idxes = np.argsort(unique_lifespan)
    unique_lifespan = np.array(unique_lifespan)[_idxes]
    lifespan_count = np.array(lifespan_count)[_idxes]
    print('-------- Lifespan --------')
    print('Life span (years):', unique_lifespan)
    print('Count:', lifespan_count)

    # Dump statistics to disk
    # NOTE(review): the file handle from open() is never closed explicitly;
    # consider a `with` block.
    pickle.dump(statistics_dict, open(statistics_path, 'wb'))
    print('Save to {}'.format(statistics_path))
def plot_composer_durations(args):
    """Plot total audio durations (full works vs. piano solo works) of the
    top 100 composers.

    Args:
      workspace: str

    Returns:
      None
    """
    def _get_composer_durations(meta_dict, indexes, composers):
        """Sum the audio duration (in hours) of each composer.

        Args:
          meta_dict: dict, keys: ['surname', 'firstname', 'music', 'nationality',
            'birth', 'death', 'youtube_title', 'youtube_id', 'similarity',
            'piano_solo_prob', 'audio_name', 'audio_duration']
          indexes: 1darray, e.g., [0, 2, 5, 6, ...]
          composers: list

        Returns:
          durations: (composers_num,)
          sorted_indexes: (composers_num,), composer positions sorted by
            descending duration
        """
        durations_dict = {composer: 0 for composer in composers}

        for idx in indexes:
            composer = '{}, {}'.format(meta_dict['surname'][idx], meta_dict['firstname'][idx])
            if composer in composers:
                # audio_duration is in seconds; accumulate hours.
                durations_dict[composer] += float(meta_dict['audio_duration'][idx]) / 3600

        durations = np.array([durations_dict[composer] for composer in composers])

        # Sort by total duration, descending.
        sorted_indexes = np.argsort(durations)[::-1]
        return durations, sorted_indexes

    # Arguments & parameters
    workspace = args.workspace

    # Paths
    csv_path = os.path.join(workspace, 'full_music_pieces_youtube_similarity_pianosoloprob.csv')
    fig_path = 'results/composer_durations.pdf'
    os.makedirs(os.path.dirname(fig_path), exist_ok=True)

    meta_dict = read_csv_to_meta_dict(csv_path)
    for key in meta_dict.keys():
        meta_dict[key] = np.array(meta_dict[key])

    # prob >= 1e-6 indicates the audio has been downloaded; prob >= 0.5
    # indicates piano solo.
    all_indexes = np.where(meta_dict['piano_solo_prob'].astype(np.float32) >= 1e-6)[0]
    piano_indexes = np.where(meta_dict['piano_solo_prob'].astype(np.float32) >= 0.5)[0]

    # Composers with at least one piano solo piece.
    piano_composers = list(set('{}, {}'.format(
        meta_dict['surname'][idx], meta_dict['firstname'][idx])
        for idx in piano_indexes))

    # Per-composer total durations for full works and piano works.
    composer_durations_full, _ = _get_composer_durations(meta_dict, all_indexes, piano_composers)
    composer_durations_piano, sorted_indexes = _get_composer_durations(meta_dict, piano_indexes, piano_composers)

    # Plot the top N composers ordered by piano-work duration.
    N = 100
    fig, ax = plt.subplots(1, 1, figsize=(20, 6))
    ax.set_xlim(-1, N)
    ax.set_ylim(0, 40)
    ax.set_ylabel('Durations (h)', fontsize=15)
    line1 = ax.bar(np.arange(N), np.array(composer_durations_full)[sorted_indexes[0 : N]],
        align='center', color='pink', alpha=0.5, label='Full works')
    line2 = ax.bar(np.arange(N), np.array(composer_durations_piano)[sorted_indexes[0 : N]],
        align='center', color='C0', alpha=1, label='Piano works')
    ax.xaxis.set_ticks(np.arange(N))
    ax.xaxis.set_ticklabels(np.array(piano_composers)[sorted_indexes[0 : N]], rotation=90, fontsize=13)
    ax.tick_params(axis="y", labelsize=13)
    ax.yaxis.grid(color='k', linestyle='--', linewidth=0.3)
    ax.legend(handles=[line1, line2], fontsize=15, loc=1, framealpha=1.)
    # Fix: tight_layout's pad arguments must be passed by keyword; the
    # positional form `plt.tight_layout(0, 0, 0)` raises TypeError on
    # Matplotlib >= 3.6.
    plt.tight_layout(pad=0, h_pad=0, w_pad=0)
    plt.savefig(fig_path)
    print('Save fig to {}'.format(fig_path))
def plot_piano_solo_p_r_f1(args):
    r"""Plot piano solo detection precision, recall, and F1 score over a
    range of detection thresholds.

    Args:
      subset200_eval_with_labels_path: str
      surname_in_youtube_title: bool

    Returns:
      None
    """
    # arguments & parameters
    subset200_eval_with_labels_path = args.subset200_eval_with_labels_path
    surname_in_youtube_title = args.surname_in_youtube_title

    # paths
    out_fig_path = os.path.join('results', 'piano_solo_p_r_f1.pdf')

    meta_dict = read_csv_to_meta_dict(subset200_eval_with_labels_path)
    audios_num = len(meta_dict['surname'])

    precs = []
    recalls = []
    thresholds = []
    f1s = []

    for threshold in np.arange(0, 0.99, 0.1):
        tp, fn, fp, tn = 0, 0, 0, 0

        for n in range(audios_num):
            if meta_dict['audio_name'][n] == '':
                # Audio was never downloaded; skip.
                flag = False
            else:
                # Optionally require the surname to appear in the YouTube title.
                if surname_in_youtube_title and int(
                        meta_dict['surname_in_youtube_title'][n]) == 0:
                    flag = False
                else:
                    flag = True

            if flag:
                if float(meta_dict['piano_solo_prob'][n]) >= threshold:
                    pred = 1
                else:
                    pred = 0
                target = int(meta_dict['piano_solo'][n])

                if target == 1 and pred == 1:
                    tp += 1
                if target == 1 and pred == 0:
                    fn += 1
                if target == 0 and pred == 1:
                    fp += 1
                if target == 0 and pred == 0:
                    tn += 1

        prec = tp / np.clip(tp + fp, 1e-8, np.inf)
        recall = tp / np.clip(tp + fn, 1e-8, np.inf)
        # Fix: clip the F1 denominator as well; when prec == recall == 0 the
        # original expression evaluated 0 / 0 and produced NaN.
        f1 = 2 * prec * recall / np.clip(prec + recall, 1e-8, np.inf)

        precs.append(prec)
        recalls.append(recall)
        thresholds.append(threshold)
        f1s.append(f1)

        if threshold == 0.5:
            print('Threshold: {:.3f}, TP: {}, FN: {}, FP: {}, TN: {}'.format(
                threshold, tp, fn, fp, tn))
            print('Total num: {}'.format(tp + fn + fp + tn))

    print('Thresholds: {}'.format(thresholds))
    print('Precisions: {}'.format(precs))
    print('Recalls: {}'.format(recalls))
    print('F1s: {}'.format(f1s))

    # Plot scores against thresholds.
    N = len(thresholds)
    fontsize = 14
    fig, axs = plt.subplots(1, 1, sharex=True, figsize=(5, 3))
    axs.scatter(np.arange(N), precs, color='blue')
    axs.scatter(np.arange(N), recalls, color='green')
    axs.scatter(np.arange(N), f1s, color='red')
    line_p, = axs.plot(precs, label='Precision', linestyle='--', color='blue')
    line_r, = axs.plot(recalls, label='Recall', linestyle='-.', color='green')
    line_f1, = axs.plot(f1s, label='F1', linestyle='-', color='red')
    axs.set_ylim(0., 1.02)
    axs.set_xlabel(r"Thresholds", fontsize=fontsize)
    axs.set_ylabel('Scores', fontsize=fontsize)
    axs.legend(handles=[line_p, line_r, line_f1], loc=4)
    axs.xaxis.set_ticks(np.arange(N))
    axs.xaxis.set_ticklabels(['{:.2f}'.format(e) for e in thresholds], rotation=0)
    plt.tight_layout(pad=0, h_pad=0, w_pad=0)
    os.makedirs(os.path.dirname(out_fig_path), exist_ok=True)
    plt.savefig(out_fig_path)
    print('Write out to {}'.format(out_fig_path))