def dataset_results_to_json(task_name, dataset_name, dataset_results):
    """Convert the raw dataset results to pretty JSON for web."""
    dataset = utils.get_function(task_name, "datasets", dataset_name)
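    # "names" holds the result table columns ("Rank" plus the metric, resource
    # and identity columns appended below); "fixed" is assumed to list the
    # columns the web table keeps pinned.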
    output = dict(
        name=dataset.metadata["dataset_name"],
        headers=dict(names=["Rank"],
                     fixed=["Name", "Paper", "Website", "Code"]),
        results=list(),
    )
    ranking = compute_ranking(task_name, dataset_results)
    metric_names = set()
    for method_name, method_results in dataset_results.items():
        method = utils.get_function(task_name, "methods", method_name)
        result = {
            "Name": method.metadata["method_name"],
            "Paper": method.metadata["paper_name"],
            "Paper URL": method.metadata["paper_url"],
            "Year": method.metadata["paper_year"],
            "Code": method.metadata["code_url"],
            "Version": method.metadata["code_version"],
            "Runtime (min)": parse_time_to_min(method_results["duration"]),
            "CPU (%)": float(method_results["%cpu"].replace("%", "")),
            "Memory (GB)": parse_size_to_gb(method_results["peak_rss"]),
            "Rank": ranking[method_name],
        }
        # Record each metric score under its human-readable display name
        for metric_name, metric_result in method_results["metrics"].items():
            metric = utils.get_function(task_name, "metrics", metric_name)
            result[metric.metadata["metric_name"]] = metric_result
            metric_names.add(metric.metadata["metric_name"])
        output["results"].append(result)
    output["headers"]["names"].extend(list(metric_names))
    output["headers"]["names"].extend([
        "Memory (GB)",
        "Runtime (min)",
        "CPU (%)",
        "Name",
        "Paper",
        "Code",
        "Year",
    ])
    return output
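
# A minimal usage sketch (hypothetical names; the exact string formats for
# "duration", "%cpu" and "peak_rss" come from the benchmark runner and are
# assumed here):
#
#   dataset_results = {
#       "my_method": {
#           "duration": "12m 30s",
#           "%cpu": "250%",
#           "peak_rss": "4.2 GB",
#           "metrics": {"my_metric": 0.87},
#       },
#   }
#   web_json = dataset_results_to_json("my_task", "my_dataset", dataset_results)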


def compute_ranking(task_name, dataset_results):
    """Rank all methods on a specific dataset."""
    # Accumulate, for each method, the sum of its ranks across all metrics.
    rankings = np.zeros(len(dataset_results))
    # Assumes every method was evaluated on the same set of metrics.
    metric_names = list(dataset_results.values())[0]["metrics"].keys()
    for metric_name in metric_names:
        metric = utils.get_function(task_name, "metrics", metric_name)
        # Order the methods by this metric, ascending
        sorted_order = np.argsort([
            dataset_results[method_name]["metrics"][metric_name]
            for method_name in dataset_results
        ])
        if metric.metadata["maximize"]:
            # Higher is better for this metric, so put the best score first
            sorted_order = sorted_order[::-1]
        # argsort of the sort order converts it into per-method ranks (0 = best)
        rankings += np.argsort(sorted_order)
    # Re-rank the summed ranks so the best overall method gets rank 1; cast to
    # int so the value is JSON-serializable downstream.
    final_ranking = {
        method_name: int(rank) + 1
        for method_name, rank in zip(dataset_results,
                                     np.argsort(np.argsort(rankings)))
    }
    return final_ranking
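
# Worked sketch of the double-argsort trick used above (illustrative values):
#
#   scores = [0.2, 0.9, 0.5]            # one metric across three methods
#   np.argsort(scores)                  # -> [0, 2, 1]  (sort order, ascending)
#   np.argsort(np.argsort(scores))      # -> [0, 2, 1]  (rank of each method)
#
# With maximize=True the sort order is reversed before the second argsort, so
# the highest score receives rank 0 and contributes least to the summed total.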