コード例 #1
0
    def fromConfig(cls, config):
        """Build an instance from a configuration dict.

        Returns None when the 'camera' entry is missing, when the camera or
        calibrator cannot be constructed, or when any key in cls.CONFIG_KEYS
        is absent from `config`. Otherwise returns a fully-wired instance.
        """
        if 'camera' not in config:
            return None

        camera = PinholeCamera.fromConfig(config['camera'])
        if not camera:
            # Camera construction failed; original code fell through to an
            # implicit None — make that explicit.
            return None

        monocular = MonocularWrapper()

        # A calibrator config section is optional; fall back to defaults.
        if 'calibrator' in config:
            calibrator = Calibrator.fromConfig(camera, monocular,
                                               config['calibrator'])
        else:
            calibrator = Calibrator(camera, monocular)

        if not calibrator:
            return None

        # Every mandatory key must be present.
        if any(key not in config for key in cls.CONFIG_KEYS):
            return None

        roi = config['roi']
        velocitaCrociera = config['velocitaCrociera']
        # Optional parameters: dict.get() returns None when the key is absent,
        # replacing the verbose `config[k] if k in config else None` pattern.
        distanzaMinima = config.get('distanzaMinima')
        distanzaLimite = config.get('distanzaLimite')
        windowSize = config.get('windowSize')
        skipThreshold = config.get('skipThreshold')

        return cls(camera, monocular, calibrator, roi,
                   velocitaCrociera, distanzaMinima, distanzaLimite,
                   windowSize, skipThreshold)
コード例 #2
0
def load_data(data_file):
    """Load camera intrinsics, viewport and colored point clouds from a .mat file.

    Parameters
    ----------
    data_file : path to a MATLAB file containing the keys "K", "crop_region",
        "ForegroundPointCloudRGB" and "BackgroundPointCloudRGB".

    Returns
    -------
    (camera, viewport, points3D, rgb) where `points3D` is Nx3 and `rgb` is
    Nx3 with values in [0, 1].
    """
    data = loadmat(data_file)

    # 3x3 intrinsic camera matrix; converts normalized camera coordinates into
    # pixel coordinates
    K = data["K"]

    # The viewport defines the pixel space for our sensor.
    # NOTE: `.flat` is a view, so the in-place edits below also mutate the
    # array held inside `data`.
    viewport = data["crop_region"].flat  # top, left, height, width
    viewport[0] -= 1  # the original file used Matlab's 1-based indexing
    viewport[1] -= 1

    # note, foreground statue is placed closer to the camera by a factor of two
    fg = data["ForegroundPointCloudRGB"].T
    bg = data["BackgroundPointCloudRGB"].T
    # np.vstack replaces np.row_stack, a deprecated alias removed in NumPy 2.0;
    # behavior is identical.
    points3D = np.vstack((fg[:, :3], bg[:, :3]))  # Nx3
    rgb = np.vstack((fg[:, 3:], bg[:, 3:]))  # Nx3, [0,1]^3

    camera = PinholeCamera(K)

    return camera, viewport, points3D, rgb
コード例 #3
0
    def __init__(self,
                 parent_actor,
                 width,
                 height,
                 gamma_correction,
                 queueLength=None,
                 fov=None):
        """Attach an RGB + depth camera pair to a CARLA actor.

        Parameters
        ----------
        parent_actor : CARLA actor the sensors are attached to.
        width, height : image resolution in pixels.
        gamma_correction : value applied to the 'gamma' blueprint attribute
            when the sensor supports it.
        queueLength : frame-queue capacity; falls back to
            Defaults.queueLength when None.
        fov : horizontal field of view in degrees; falls back to
            Defaults.fov when None.
        """
        # PEP 8: compare against the None singleton with `is`, not `==`
        # (== can be hijacked by a custom __eq__).
        if queueLength is None:
            queueLength = Defaults.queueLength

        if fov is None:
            fov = Defaults.fov

        self.width = width
        self.height = height
        self.fov = fov

        # Pinhole focal length from the horizontal FOV:
        # f = w / (2 * tan(fov/2)), with fov converted to radians.
        self.focal = self.width / (2.0 * np.tan(self.fov * np.pi / 360.0))
        # Five zeroed distortion coefficients: an ideal, undistorted lens.
        self.distCoeff = [0.0, 0.0, 0.0, 0.0, 0.0]

        self.sensor = None
        self.surface = None

        self._parent = parent_actor

        self.queueLength = queueLength

        self.cameraQueue = []
        self.depthQueue = []

        # Principal point is assumed at the image center.
        self.camera = PinholeCamera(width, height, self.focal, self.focal,
                                    width / 2.0, height / 2.0, self.distCoeff)

        self.camera_transform = (carla.Transform(carla.Location(x=2.5, z=1.2)),
                                 carla.AttachmentType.Rigid)
        """ ['sensor.camera.rgb', cc.Raw, 'Camera RGB Distorted',
                {'lens_circle_multiplier': '3.0',
                'lens_circle_falloff': '3.0',
                'chromatic_aberration_intensity': '0.5',
                'chromatic_aberration_offset': '0'}] """

        # Blueprint id, color converter, display name, extra attributes.
        self.sensors = [
            [
                'sensor.camera.rgb', cc.Raw, 'Camera RGB', {
                    'bloom_intensity': '0.0',
                    'lens_flare_intensity': '0.0',
                    'motion_blur_intensity': '0.0'
                }
            ],
            ['sensor.camera.depth', cc.Raw, 'Camera Depth', {}],
        ]

        self.rgbaSensor = None
        self.depthSensor = None

        world = self._parent.get_world()
        bp_library = world.get_blueprint_library()

        # Configure each blueprint and append it to its sensor entry so
        # init_sensors() can spawn from it later.
        for item in self.sensors:
            bp = bp_library.find(item[0])
            if item[0].startswith('sensor.camera'):
                bp.set_attribute('image_size_x', str(self.width))
                bp.set_attribute('image_size_y', str(self.height))
                bp.set_attribute('fov', str(self.fov))

                if bp.has_attribute('gamma'):
                    bp.set_attribute('gamma', str(gamma_correction))

                for attr_name, attr_value in item[3].items():
                    bp.set_attribute(attr_name, attr_value)

            item.append(bp)

        self.init_sensors()
コード例 #4
0
from feature_tracker_configs import FeatureTrackerConfigs

#-------------

config = Config()
# Force the camera settings so they stay coherent with the input file below.
config.config_parser[
    config.dataset_type]['cam_settings'] = 'settings/KITTI04-12.yaml'
config.get_cam_settings()

#dataset = dataset_factory(config.dataset_settings)
#groundtruth = groundtruth_factory(config.dataset_settings)

# Hoist the settings dict into a short alias to keep the call readable.
cs = config.cam_settings
cam = PinholeCamera(cs['Camera.width'], cs['Camera.height'],
                    cs['Camera.fx'], cs['Camera.fy'],
                    cs['Camera.cx'], cs['Camera.cy'],
                    config.DistCoef, cs['Camera.fps'])

#============================================
# Init Feature Tracker
#============================================

num_features = 2000

# Descriptor-based tracking, brute-force knn matching.
tracker_type = FeatureTrackerTypes.DES_BF
#tracker_type = FeatureTrackerTypes.DES_FLANN  # descriptor-based, FLANN-based matching

# Select your tracker configuration (see the file feature_tracker_configs.py).
tracker_config = FeatureTrackerConfigs.TEST
tracker_config['num_features'] = num_features
コード例 #5
0
ファイル: main.py プロジェクト: Vinohith/VO_pipeline
from visual_odometry import VisualOdometry
from camera import PinholeCamera
import cv2
import glob
import numpy as np

# KITTI sequence-00 intrinsics; NOTE(review): only five arguments are passed —
# presumably (width, height, fx, cx, cy) with fy defaulting — confirm against
# the PinholeCamera signature in camera.py.
camera = PinholeCamera(1241.0, 376.0, 718.8560, 607.1928, 185.2157)
vo = VisualOdometry(camera, "00.txt")

frame_paths = sorted(glob.glob("data/*"))
# Black 600x600 canvas the trajectory is drawn onto.
canvas = np.zeros((600, 600, 3), dtype=np.uint8)

for idx, frame_path in enumerate(frame_paths):
    gray = cv2.imread(frame_path, 0)
    vo.process_image(gray, idx)
    t = vo.t

    # The pose is only meaningful from the third frame onwards.
    if idx > 1:
        x, y, z = t[0], t[1], t[2]
    else:
        x, y, z = 0, 0, 0

    # Offset the (x, z) ground-plane coordinates into canvas pixel space.
    estimated_px = (int(x) + 290, int(z) + 90)
    groundtruth_px = (int(vo.trueX) + 290, int(vo.trueZ) + 90)

    cv2.circle(canvas, groundtruth_px, 1, (255, 0, 0), 2)  # blue: ground truth
    cv2.circle(canvas, estimated_px, 1, (0, 0, 255), 2)    # red: estimate
    cv2.imshow('Road facing camera', gray)
    cv2.imshow('Trajectory', canvas)
    cv2.waitKey(1)

cv2.imwrite('map2.png', canvas)