Example no. 1
0
import json
import os
from multiprocessing import Pool

import numpy as np

import deep_sdf.workspace as ws

# evaluate_one_instance is the per-instance worker, assumed to be defined
# elsewhere in this module (see the sketch after this example).


def evaluate(experiment_directory, checkpoint, data_dir, split_filename):

    with open(split_filename, "r") as f:
        split = json.load(f)

    chamfer_results = []
    p = Pool(8)  # process pool for per-instance evaluation

    # Per-instance argument lists; they are zipped together below and
    # dispatched to the worker pool.
    ds = []   # dataset name
    cn = []   # class name
    inn = []  # instance name
    exd = []  # experiment directory (repeated for every instance)
    ckp = []  # checkpoint (repeated for every instance)
    dtd = []  # data directory (repeated for every instance)

    print("preparing data")
    for dataset in split:
        for class_name in split[dataset]:
            for idx, instance_name in enumerate(split[dataset][class_name]):
                # evaluate at most 300 instances per class
                if idx >= 300:
                    break
                ds.append(dataset)
                cn.append(class_name)
                inn.append(instance_name)
                exd.append(experiment_directory)
                ckp.append(checkpoint)
                dtd.append(data_dir)

    print("starting multiprocess evaluation")
    # The stdlib Pool.map takes a single iterable, so the per-instance
    # argument lists are zipped together and dispatched with starmap.
    chamfer_results = p.starmap(
        evaluate_one_instance, zip(ds, cn, inn, exd, ckp, dtd)
    )
    p.close()
    p.join()

    # Column 1 is the chamfer distance, column 2 the EMD (see the CSV below).
    print(np.mean([q[1] for q in chamfer_results]), np.median([q[1] for q in chamfer_results]))
    print(np.mean([q[2] for q in chamfer_results]), np.median([q[2] for q in chamfer_results]))


    with open(
        os.path.join(
            ws.get_evaluation_dir(experiment_directory, checkpoint, True), "chamfer_and_emd.csv"
        ),
        "w",
    ) as f:
        f.write("shape, chamfer_dist\n")
        for result in chamfer_results:
            f.write("{}, {}, {}\n".format(result[0], result[1], result[2]))
Example no. 2
0
import json
import logging
import os

import numpy as np

import deep_sdf.metrics.chamfer
import deep_sdf.workspace as ws


def evaluate(experiment_directory, checkpoint, data_dir, split_filename):

    with open(split_filename, "r") as f:
        split = json.load(f)

    chamfer_results = []

    for dataset in split:
        for class_name in split[dataset]:
            for instance_name in split[dataset][class_name]:
                logging.debug("evaluating " +
                              os.path.join(dataset, class_name, instance_name))

                reconstructed_mesh_filename = ws.get_reconstructed_mesh_filename(
                    experiment_directory, checkpoint, dataset, class_name,
                    instance_name)

                logging.debug('reconstructed mesh is "' +
                              reconstructed_mesh_filename + '"')

                ground_truth_samples_filename = os.path.join(
                    data_dir,
                    "SurfaceSamples",
                    dataset,
                    class_name,
                    instance_name + ".npy",
                )

                logging.debug("ground truth samples are " +
                              ground_truth_samples_filename)

                # normalization_params_filename = os.path.join(
                #     data_dir,
                #     "NormalizationParameters",
                #     dataset,
                #     class_name,
                #     instance_name + ".npz",
                # )

                # logging.debug(
                #     "normalization params are " + ground_truth_samples_filename
                # )

                # Both point sets are stored as flat .npy arrays and are
                # reshaped to (N, 2) 2-D point sets before comparison.
                ground_truth_points = np.load(ground_truth_samples_filename)
                ground_truth_points = ground_truth_points.reshape(-1, 2)
                reconstruction = np.load(reconstructed_mesh_filename)
                reconstruction = reconstruction.reshape(-1, 2)

                # normalization_params = np.load(normalization_params_filename)

                chamfer_dist = deep_sdf.metrics.chamfer.compute_trimesh_chamfer(
                    ground_truth_points, reconstruction)

                logging.debug("chamfer distance: " + str(chamfer_dist))

                chamfer_results.append(
                    (os.path.join(dataset, class_name,
                                  instance_name), chamfer_dist))

    with open(
            os.path.join(
                ws.get_evaluation_dir(experiment_directory, checkpoint, True),
                "chamfer.csv"),
            "w",
    ) as f:
        f.write("shape, chamfer_dist\n")
        for result in chamfer_results:
            f.write("{}, {}\n".format(result[0], result[1]))