def dataframe():
    """Build a side-by-side frame of baseline vs. finetuned test scores.

    Loads the scores file of each model package and concatenates the loss
    and accuracy columns of both, labelling the result with the flattened
    COMPARE_TEST_SCORES_COLUMNS headers.
    """
    baseline_df = load_dataframe(
        scores_file_path(package_path(baseline_model_package)))
    finetuned_df = load_dataframe(
        scores_file_path(package_path(finetuned_model_package)))
    columns = [
        baseline_df[Metric.LOSS.value],
        finetuned_df[Metric.LOSS.value],
        baseline_df[Metric.ACCURACY.value],
        finetuned_df[Metric.ACCURACY.value],
    ]
    return pd.concat(
        columns,
        axis=1,
        keys=list(itertools.chain(*COMPARE_TEST_SCORES_COLUMNS)))
def dataframe():
    """Return finetuned-minus-baseline score deltas, one row per metric.

    Loads both packages' scores files, subtracts the baseline column from
    the finetuned column for loss and accuracy, then transposes so each
    metric becomes a row.
    """
    baseline_df = load_dataframe(
        scores_file_path(package_path(baseline_model_package)))
    finetuned_df = load_dataframe(
        scores_file_path(package_path(finetuned_model_package)))
    metrics = [Metric.LOSS.value, Metric.ACCURACY.value]
    deltas = [finetuned_df[m] - baseline_df[m] for m in metrics]
    return pd.concat(deltas, axis=1, keys=metrics).transpose()
def dataframe(baseline_training_run, finetuned_training_run):
    """Build a side-by-side frame of baseline vs. finetuned training history.

    Arguments:
        baseline_training_run: Run identifier for the baseline package's
            history file.
        finetuned_training_run: Run identifier for the finetuned package's
            history file.

    Concatenates the train/validation accuracy columns of both histories,
    labelling the result with the flattened COMPARE_TRAINING_COLUMNS headers.
    """
    baseline_df = load_dataframe(
        history_path(package_path(baseline_model_package),
                     baseline_training_run))
    finetuned_df = load_dataframe(
        history_path(package_path(finetuned_model_package),
                     finetuned_training_run))
    columns = [
        baseline_df[Metric.ACCURACY.value],
        finetuned_df[Metric.ACCURACY.value],
        baseline_df[Metric.VAL_ACCURACY.value],
        finetuned_df[Metric.VAL_ACCURACY.value],
    ]
    return pd.concat(
        columns,
        axis=1,
        keys=list(itertools.chain(*COMPARE_TRAINING_COLUMNS)))
def display_scores(base_path: Path):
    """Print the scores file located under *base_path*.

    Arguments:
        base_path: Base directory where the testing artifacts are located.
    """
    scores = load_dataframe(scores_file_path(base_path))
    print(scores)
def plot_learning_curves():
    """Plot the loss/accuracy curves for every run in TRAINING_RUNS.

    History files are resolved relative to the current directory
    (``Path()``), one plot per training run.
    """
    for run in TRAINING_RUNS:
        history = load_dataframe(history_path(Path(), run))
        loss_accuracy_plot(history)