Example #1
0
    def __init__(self, camera_path):
        """Open the default Kinect v2 device and attach a frame listener.

        :param camera_path: path to the camera calibration json consumed by
            Camera.load_from_json.
        """
        # Resolve the first connected device and open it by serial number.
        serial = pyfreenect2.getDefaultDeviceSerialNumber()
        self.serial_number = serial
        self.device = pyfreenect2.Freenect2Device(serial)

        # A single synchronized listener receives color, IR and depth frames.
        listener = pyfreenect2.SyncMultiFrameListener(
            pyfreenect2.Frame.COLOR,
            pyfreenect2.Frame.IR,
            pyfreenect2.Frame.DEPTH)
        self.frame_listener = listener
        self.device.setColorFrameListener(listener)
        self.device.setIrAndDepthFrameListener(listener)

        # Calibration comes from the json file, not from the device itself.
        self.camera = Camera.load_from_json(camera_path)
Example #2
0
 def intrinsics(self):
     """Build a Camera from the device's color-stream intrinsics.

     Reads focal length, principal point, image size and the five
     distortion coefficients from ``self.device.colour_intrinsics``.

     :return: a Camera describing the color sensor.
     """
     # Hoist the repeated attribute chain out of every expression.
     intr = self.device.colour_intrinsics
     distortion = [intr.coeffs[i] for i in range(5)]
     # NOTE: dropped the debug print(camera) that wrote to stdout on
     # every call.
     return Camera((intr.fx, intr.fy),
                   (intr.ppx, intr.ppy),
                   (intr.width, intr.height),
                   distortion)
Example #3
0
    def load(self):
        """
        Load a viewpoints.json into the dataset's structure.

        Reads ``<path>/viewpoints.json`` and the camera calibration file,
        then registers every pose (and its paired poses) found in the file.
        Todo: datastructure should be more similar to json structure...
        :return: False if the dataset files are missing, True otherwise.
        """
        # Load viewpoints file and camera file
        try:
            with open(os.path.join(self.path, "viewpoints.json")) as data_file:
                data = json.load(data_file)
            self.camera = Camera.load_from_json(self.path)
        except FileNotFoundError:
            return False
        self.metadata = data["metaData"]
        self.set_save_type(self.metadata["save_type"])

        def parse_pose(key):
            # A 6-DOF pose is stored as {"0": ..., ..., "5": ...} under
            # the entry's "vector" field.
            return Transform.from_parameters(
                *[float(data[key]["vector"][str(x)]) for x in range(6)])

        count = 0
        # Frames live under stringified indices "0", "1", ...; the loop is
        # exception-driven and stops at the first missing key.
        # todo this is not clean! (a malformed entry also ends the loop
        # silently via KeyError)
        while True:
            try:
                frame_id = str(count)  # renamed from `id` (shadowed builtin)
                self.add_pose(None, None, parse_pose(frame_id))
                if "pairs" in data[frame_id]:
                    for i in range(int(data[frame_id]["pairs"])):
                        pair_id = "{}n{}".format(frame_id, i)
                        self.add_pair(None, None, parse_pose(pair_id), count)
                count += 1

            except KeyError:
                break
        return True
"""
    Visual tests of modelrenderer

"""
from deeptracking.data.modelrenderer import ModelRenderer, InitOpenGL
from deeptracking.utils.uniform_sphere_sampler import UniformSphereSampler
from deeptracking.utils.camera import Camera
import cv2
import numpy as np

if __name__ == '__main__':
    camera = Camera.load_from_json("../data/camera.json")
    window = InitOpenGL(camera.width, camera.height)

    # Sample camera poses uniformly on a spherical shell around the object.
    sampler = UniformSphereSampler(0.5, 0.7)
    # Removed unused locals `renderers`, `dict` and `id` (the last two
    # shadowed builtins).
    renderer = ModelRenderer("../data/test.ply", "../../data/shaders", camera, window)
    renderer.load_ambiant_occlusion_map("../data/test_ao.ply")
    for sample in sampler:
        rgb, depth = renderer.render(sample.transpose())
        # The renderer outputs RGB; OpenCV displays BGR, hence the flip.
        cv2.imshow("rgb", rgb[:, :, ::-1])
        # Scale depth to 0-255 for display.
        cv2.imshow("depth", (depth / np.max(depth) * 255).astype(np.uint8))
        cv2.waitKey()  # wait for a key press between views
Example #5
0
    # Paths and sampling parameters read from the loaded config dict.
    # `data`, `REAL_PATH`, `MODELS` and `SHADER_PATH` are defined before
    # this chunk — TODO confirm against the full file.
    OUTPUT_PATH = data["output_path"]
    SAMPLE_QUANTITY = int(data["sample_quantity"])
    TRANSLATION_RANGE = float(data["translation_range"])
    # Config stores degrees; converted to radians here.
    ROTATION_RANGE = math.radians(float(data["rotation_range"]))
    SPHERE_MIN_RADIUS = float(data["sphere_min_radius"])
    SPHERE_MAX_RADIUS = float(data["sphere_max_radius"])
    IMAGE_SIZE = (int(data["image_size"]), int(data["image_size"]))
    # Config values are strings; only the exact spelling "True" enables it.
    PRELOAD = data["preload"] == "True"
    SATURATION_THRESHOLD = int(data["saturation_threshold"])

    if not os.path.exists(OUTPUT_PATH):
        os.mkdir(OUTPUT_PATH)

    # The real dataset supplies the camera calibration reused for the output.
    real_dataset = Dataset(REAL_PATH)
    real_dataset.load()
    camera = Camera.load_from_json(real_dataset.path)
    real_dataset.camera = camera
    output_dataset = Dataset(OUTPUT_PATH, frame_class=data["save_type"])
    output_dataset.camera = camera
    window_size = (real_dataset.camera.width, real_dataset.camera.height)
    window = InitOpenGL(*window_size)

    # NOTE(review): only the first configured model is used here — confirm
    # this is intentional.
    model = MODELS[0]
    vpRender = ModelRenderer(model["model_path"], SHADER_PATH,
                             real_dataset.camera, window, window_size)
    vpRender.load_ambiant_occlusion_map(model["ambiant_occlusion_model"])
    OBJECT_WIDTH = int(model["object_width"])

    # Metadata saved alongside the generated samples (chunk cut off below).
    metadata = {}
    metadata["translation_range"] = str(TRANSLATION_RANGE)
    metadata["rotation_range"] = str(ROTATION_RANGE)
Example #6
0
    if not os.path.exists(OUTPUT_PATH):
        os.mkdir(OUTPUT_PATH)

    # Write important misc data to file
    metadata = {}
    metadata["translation_range"] = str(TRANSLATION_RANGE)
    metadata["rotation_range"] = str(ROTATION_RANGE)  # radians
    metadata["image_size"] = str(IMAGE_SIZE[0])
    metadata["save_type"] = data["save_type"]
    # Per-model object width, keyed by model name.
    metadata["object_width"] = {}
    for model in MODELS:
        metadata["object_width"][model["name"]] = str(model["object_width"])
    metadata["min_radius"] = str(SPHERE_MIN_RADIUS)
    metadata["max_radius"] = str(SPHERE_MAX_RADIUS)

    camera = Camera.load_from_json(data["camera_path"])
    dataset = Dataset(OUTPUT_PATH, frame_class=data["save_type"])
    dataset.camera = camera
    window_size = (camera.width, camera.height)
    window = InitOpenGL(*window_size)
    # Viewpoints are drawn uniformly from a spherical shell.
    sphere_sampler = UniformSphereSampler(SPHERE_MIN_RADIUS, SPHERE_MAX_RADIUS)
    preload_count = 0
    # Resume mode: keep already-generated samples and continue after them.
    if PRELOAD:
        if dataset.load():
            preload_count = dataset.size()
            print("This Dataset already contains {} samples".format(
                preload_count))
    # Iterate over all models from config files
    # (loop body continues past this chunk)
    for model in MODELS:
        vpRender = ModelRenderer(model["model_path"], SHADER_PATH,
                                 dataset.camera, window, window_size)
import numpy as np

from deeptracking.detector.detector_aruco import ArucoDetector

if __name__ == '__main__':
    # Hard-coded machine-specific paths — adjust before running.
    dataset_path = "/media/mathieu/e912e715-2be7-4fa2-8295-5c3ef1369dd0/dataset/deeptracking/sequences/skull"
    detector_path = "../deeptracking/detector/aruco_layout.xml"
    model_path = "/home/mathieu/Dataset/3D_models/skull/skull.ply"
    model_ao_path = "/home/mathieu/Dataset/3D_models/skull/skull_ao.ply"
    shader_path = "../deeptracking/data/shaders"

    # Dataset, Transform, Camera, InitOpenGL, ModelRenderer, os and cv2 are
    # not imported in this chunk — presumably imported earlier in the file;
    # verify against the full source.
    dataset = Dataset(dataset_path)
    # Rigid offset between the detected marker layout and the object model.
    offset = Transform.from_matrix(
        np.load(os.path.join(dataset.path, "offset.npy")))

    camera = Camera.load_from_json(dataset_path)
    dataset.camera = camera
    # Keep color frames only: "<i>.png" but not depth frames whose stem
    # contains 'd'.
    files = [
        f for f in os.listdir(dataset_path) if
        os.path.splitext(f)[-1] == ".png" and 'd' not in os.path.splitext(f)[0]
    ]
    detector = ArucoDetector(camera, detector_path)
    window = InitOpenGL(camera.width, camera.height)
    vpRender = ModelRenderer(model_path, shader_path, camera, window,
                             (camera.width, camera.height))
    vpRender.load_ambiant_occlusion_map(model_ao_path)
    ground_truth_pose = None

    # Run the marker detector on every frame (loop continues past this chunk).
    for i in range(len(files)):
        img = cv2.imread(os.path.join(dataset.path, "{}.png".format(i)))
        detection = detector.detect(img)