Example #1
def stylize_results(df):
    """Render a results DataFrame as an ANSI-styled string: bold header,
    blue experiment names, green metric values."""

    df = df.to_string()
    header, *content = df.split("\n")
    header = bold(header)

    stylized = []
    for item in content:
        # The experiment name (the index) runs up to the first space;
        # everything after it holds the metric columns.
        first_space = item.index(" ")
        experiment, metrics = item[:first_space], item[first_space:]
        stylized.append(bold(blue(experiment)) + bold(green(metrics)))

    df = "\n".join([header] + stylized)

    return df
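
The bold, blue, and green helpers are not part of this snippet. A minimal sketch, assuming they are plain ANSI-escape wrappers, could look like this:

# Hypothetical ANSI-escape helpers assumed by stylize_results (not shown above).
def _wrap(code, text):
    return "\033[{}m{}\033[0m".format(code, text)

def bold(text):
    return _wrap(1, text)

def blue(text):
    return _wrap(34, text)

def green(text):
    return _wrap(32, text)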
Example #2
def collect_results(directory, metrics):
    """Collect the requested metrics from each experiment's results.json
    under `directory`, returning a dict of equal-length columns."""

    experiments = os.listdir(directory)
    if experiments:
        fullpath = os.path.abspath(directory)
        print(bold("\nResults for {directory}:".format(directory=fullpath)))
        print()

    all_results = defaultdict(list)

    for experiment in experiments:
        results_file = os.path.join(directory, experiment, "results.json")
        if os.path.isfile(results_file):
            result = Config.from_json(results_file)
            result = result.as_flat_dict()
            all_results["experiment"].append(experiment)
            for metric in metrics:
                # Missing metrics become empty strings so the columns stay
                # aligned across experiments.
                all_results[metric].append(result.get(metric, ""))

    return all_results
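
collect_results assumes one sub-directory per experiment, each holding a results.json, plus a Config class that is not shown. A rough, purely illustrative stand-in that would satisfy the two calls used above:

# Illustrative layout assumed by collect_results:
#
#   <directory>/
#       experiment_a/results.json
#       experiment_b/results.json
#
# Hypothetical minimal stand-in for the Config class used above; the real
# class is not part of the snippet.
import json

class Config:

    def __init__(self, values):
        self._values = values

    @classmethod
    def from_json(cls, path):
        with open(path) as fp:
            return cls(json.load(fp))

    def as_flat_dict(self, prefix=""):
        # Flatten nested dicts into "outer.inner" style keys.
        flat = {}
        for key, value in self._values.items():
            name = prefix + key
            if isinstance(value, dict):
                flat.update(Config(value).as_flat_dict(name + "."))
            else:
                flat[name] = value
        return flat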
Example #3
                ]),
                clean_transform=Compose([
                    LoadAudio(),
                    MapLabels(class_map=class_map),
                ]),
            ),
            shuffle=False,
            batch_size=args.batch_size,
            collate_fn=make_collate_fn(
                {"signal": audio_transform.padding_value}),
            **loader_kwargs)

        # Restore the best checkpoint for this fold and switch to eval mode.
        model = TwoDimensionalCNNClassificationModel(experiment,
                                                     device=args.device)
        model.load_best_model(fold)
        model.eval()

        # Predict on the validation split with test-time augmentation.
        val_preds = model.predict(valid_loader, n_tta=args.n_tta)
        val_labels = np.array(
            [item["labels"] for item in valid_loader.dataset])

        # Store the out-of-fold labels and predictions at this fold's
        # validation indices.
        all_labels[valid] = val_labels
        all_predictions[valid] = val_preds

        metric = lwlrap(val_labels, val_preds)
        print("Fold metric:", metric)

    # Score the accumulated out-of-fold predictions across all folds.
    metric = lwlrap(all_labels, all_predictions)

    print("\nOverall metric:", green(bold(metric)))
Example #4
    stylized = []
    for item in content:
        first_space = item.index(" ")
        experiment, metrics = item[:first_space], item[first_space:]
        stylized.append(bold(blue(experiment)) + bold(green(metrics)))

    df = "\n".join([header] + stylized)

    return df


if __name__ == "__main__":

    parser = argparse.ArgumentParser(
        description=bold("Summarize results from different experiments."),
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)

    parser.add_argument("directory",
                        type=str,
                        help="directory to print summary for")
    parser.add_argument("--metrics",
                        type=str,
                        nargs="+",
                        help="Which metrics to use for summary")

    args = parser.parse_args()

    results = collect_results(args.directory, args.metrics)
    # Use the experiment names as the DataFrame index.
    index = results.pop("experiment")
    df = pd.DataFrame(results, index=index)
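
A plausible final step, not shown in the excerpt, would render the assembled DataFrame with the stylize_results helper from Example #1:

    # Hypothetical final step: print the ANSI-styled summary table.
    print(stylize_results(df))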
Example #5

def target(alphas, *args):
    # Negative lwlrap of the alpha-weighted blend; minimizing this
    # maximizes lwlrap.
    prediction = np.sum([a * p for a, p in zip(alphas, prediction_values)],
                        axis=0)
    return -lwlrap(actual_labels, prediction)
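
initial() and constraints() are not shown. One plausible implementation starts from an equal-weight blend and keeps every weight non-negative; note that COBYLA accepts inequality constraints only:

# Hypothetical helpers for the COBYLA search below (the real ones are not shown).
def initial():
    # Start from an equal-weight blend of all experiments.
    return np.ones(len(prediction_values)) / len(prediction_values)

def constraints():
    # COBYLA supports only inequality constraints: keep each weight >= 0.
    for i in range(len(prediction_values)):
        yield {"type": "ineq", "fun": lambda alphas, i=i: alphas[i]}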


# Search for blend weights that maximize lwlrap; COBYLA handles the
# inequality constraints yielded by constraints().
alphas = scipy.optimize.minimize(target,
                                 initial(),
                                 constraints=list(constraints()),
                                 method="COBYLA").x

print()
for experiment, alpha in zip(args.experiments, alphas):
    print("{}: {}".format(green(bold(experiment)), blue(bold(alpha))))

print()
print("Final lwlrap:", bold(green(-target(alphas))))


def load_test_predictions(experiment):
    """Load every per-fold test prediction CSV for an experiment, sorted by
    file name so that rows line up across folds."""

    prediction_files = ("experiments" / Path(experiment) /
                        "predictions").glob("test_preds*")
    dfs = [pd.read_csv(f) for f in prediction_files]
    dfs = [df.sort_values(by="fname") for df in dfs]
    return dfs


test_preds = []
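
A hypothetical continuation, assuming each prediction CSV holds an fname column plus one probability column per class, would blend the test predictions with the optimized weights:

# Hypothetical continuation: average each experiment's folds, then blend the
# experiments with the optimized alphas found above.
for experiment in args.experiments:
    fold_dfs = load_test_predictions(experiment)
    class_columns = [c for c in fold_dfs[0].columns if c != "fname"]
    fold_mean = np.mean([df[class_columns].values for df in fold_dfs], axis=0)
    test_preds.append(fold_mean)

blended = np.sum([a * p for a, p in zip(alphas, test_preds)], axis=0)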