Example #1
import json
import shutil
from os.path import exists, join

from clinicadl import MapsManager  # clinicadl's MAPS interface


def test_predict(predict_commands):
    model_folder, use_labels, modes = predict_commands
    out_dir = join(model_folder, "split-0/best-loss/test-RANDOM")

    # Remove stale outputs from a previous run so predictions are recomputed
    if exists(out_dir):
        shutil.rmtree(out_dir)

    # Patch maps.json so ROI predictions use the hippocampus crops
    if "roi" in modes:
        json_path = join(model_folder, "maps.json")
        with open(json_path, "r") as f:
            parameters = json.load(f)
        parameters["roi_list"] = ["leftHippocampusBox", "rightHippocampusBox"]
        json_data = json.dumps(parameters, skipkeys=True, indent=4)
        with open(json_path, "w") as f:
            f.write(json_data)

    maps_manager = MapsManager(model_folder, verbose="debug")
    # Run inference on the test data group, overwriting any previous results
    maps_manager.predict(
        data_group="test-RANDOM",
        caps_directory="data/dataset/OasisCaps_example",
        tsv_path="data/dataset/OasisCaps_example/data.tsv",
        gpu=False,
        use_labels=use_labels,
        overwrite=True,
    )

    # Check that predictions (and metrics, when labels are available) were written
    for mode in modes:
        maps_manager.get_prediction(data_group="test-RANDOM", mode=mode)
        if use_labels:
            maps_manager.get_metrics(data_group="test-RANDOM", mode=mode)
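
For context, both tests rely on a pytest fixture named `predict_commands` that supplies the model folder, the `use_labels` flag, and the list of modes. A minimal sketch of such a fixture follows; the parameter tuples are illustrative placeholders, not the original suite's configuration:

import pytest

# Sketch only: each param is a (model_folder, use_labels, modes) tuple,
# matching the unpacking done in test_predict. Values are hypothetical.
@pytest.fixture(
    params=[
        ("results/maps_image", True, ["image"]),
        ("results/maps_roi", False, ["roi"]),
    ]
)
def predict_commands(request):
    return request.param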
Example #2
import os
import shutil
from os.path import join

from clinicadl import MapsManager  # clinicadl's MAPS interface


def test_predict(predict_commands):
    model_folder, use_labels, modes = predict_commands
    out_dir = join(model_folder, "fold-0/best-loss/test-RANDOM")

    if os.path.exists(out_dir):
        shutil.rmtree(out_dir)

    maps_manager = MapsManager(model_folder, verbose="debug")
    maps_manager.predict(
        data_group="test-RANDOM",
        caps_directory="data/dataset/OasisCaps_example",
        tsv_path="data/dataset/OasisCaps_example/data.tsv",
        use_cpu=True,
        use_labels=use_labels,
        overwrite=True,
    )

    for mode in modes:
        maps_manager.get_prediction(data_group="test-RANDOM", mode=mode)
        if use_labels:
            maps_manager.get_metrics(data_group="test-RANDOM", mode=mode)
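
The two tests are nearly identical but appear to target different versions of the clinicadl interface: Example #1 uses the split-0 layout and gpu=False, while Example #2 uses the older fold-0 layout and use_cpu=True. If one snippet had to run against either interface, a compatibility wrapper could absorb the keyword difference; which keyword exists depends on the installed clinicadl version, so this is a sketch, not a documented API:

def predict_on_cpu(maps_manager, **kwargs):
    # Try the newer keyword first, then fall back to the older one.
    try:
        maps_manager.predict(gpu=False, **kwargs)
    except TypeError:
        maps_manager.predict(use_cpu=True, **kwargs)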
Example #3
import os
from os import path

import pandas as pd

from clinicadl import MapsManager  # clinicadl's MAPS interface


def meta_maps_analysis(launch_dir, evaluation_metric="loss"):
    """
    This function summarizes the validation performance according to `evaluation_metric`
    of several MAPS stored in the folder `launch_dir`.
    The output TSV files are written in `launch_dir`.

    Args:
        launch_dir (str): Path to the directory containing several MAPS.
        evaluation_metric (str): Name of the metric used for validation evaluation.
    """

    # A MAPS is any subfolder of launch_dir containing a maps.json file
    jobs_list = [
        job
        for job in os.listdir(launch_dir)
        if path.exists(path.join(launch_dir, job, "maps.json"))
    ]

    selection_set = set()  # Set of all selection metrics seen
    folds_set = set()  # Set of all folds seen

    performances_dict = dict()
    for job in jobs_list:
        performances_dict[job] = dict()
        maps_manager = MapsManager(path.join(launch_dir, job))
        folds = maps_manager._find_folds()
        folds_set = folds_set | set(folds)
        for fold in folds:
            performances_dict[job][fold] = dict()
            selection_metrics = maps_manager._find_selection_metrics(fold)
            selection_set = selection_set | set(selection_metrics)
            for metric in selection_metrics:
                validation_metrics = maps_manager.get_metrics(
                    "validation", fold, metric
                )
                if evaluation_metric not in validation_metrics:
                    raise ValueError(
                        f"Evaluation metric {evaluation_metric} not found in "
                        f"MAPS {job}, for fold {fold} and selection {metric}."
                    )
                performances_dict[job][fold][metric] = validation_metrics[
                    evaluation_metric
                ]

    # Produce one analysis TSV per selection metric
    for metric in selection_set:
        df = pd.DataFrame()
        filename = f"analysis_metric-{evaluation_metric}_selection-{metric}.tsv"
        for job in jobs_list:
            for fold in folds_set:
                # Leave the cell empty (NaN) when a job lacks this fold or metric,
                # since folds_set is the union over all jobs
                df.loc[job, f"fold-{fold}"] = (
                    performances_dict[job].get(fold, {}).get(metric)
                )
        df.to_csv(path.join(launch_dir, filename), sep="\t")
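
A minimal usage sketch, assuming the illustrative path below points at a directory holding the MAPS of several training jobs:

# Hypothetical call; "results/launch_dir" is a placeholder path.
meta_maps_analysis("results/launch_dir", evaluation_metric="loss")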