# Example 1
# Get all files from the dataset (recursive glob for *.p pickle files).
print("Find all files for evaluation...")
pickle_files = glob.glob(os.path.join(dataset_path, "**", "*.p"))

# Evaluate all files.
# TODO parallelize this.
print("Evaluate all files...")
data = {"results": []}
for index, pickle_file in enumerate(pickle_files):
    # File stem (basename up to the first dot) identifies the sample.
    name = os.path.basename(pickle_file).split(".")[0]

    # Load and preprocess the data.
    # NOTE(review): pickle.load executes arbitrary code from the file —
    # only run this on trusted dataset files.
    with open(pickle_file, "rb") as handle:  # close the handle deterministically
        pointcloud, targets = pickle.load(handle)
    # Subsample to 1024 points, keep channels 0-2; wrap in a batch of 1.
    pointcloud = np.array(
        [preprocess_pointcloud(pointcloud, 1024, list(range(3)))])
    targets = preprocess_targets(targets, [1])[0]  # 1 is weight
    predicted_targets = model.predict(pointcloud)[0][0]

    # Store results (stringified so the dict is JSON-serializable).
    result = {
        "name": name,
        "targets": str(targets),
        "predicted_targets": str(predicted_targets),
        "error": str(np.abs(predicted_targets - targets))
    }
    data["results"].append(result)
    # Progress report every 1000 files.
    if index % 1000 == 0:
        print("{} per cent".format(int(100 * (index / len(pickle_files)))))
# Example 2
 def py_load_pickle(path):
     """Load a (pointcloud, targets) pair from a pickle file and preprocess it.

     Args:
         path: tensor-like object whose ``.numpy()`` yields the file path
             bytes (presumably called via ``tf.py_function`` — TODO confirm).

     Returns:
         Tuple ``(pointcloud, targets)`` after preprocessing.
     """
     # Use a context manager so the file handle is closed even if
     # pickle.load raises; the original leaked the open handle.
     with open(path.numpy(), "rb") as handle:
         pointcloud, targets = pickle.load(handle)
     pointcloud = preprocess_pointcloud(pointcloud, subsample_size,
                                        channels)
     targets = preprocess_targets(targets, targets_indices)
     return pointcloud, targets