Example #1
    def __init__(self, intrinsic: Intrinsic, se3: np.ndarray):
        if intrinsic.extract_fx() > 0 or intrinsic.extract_fy() > 0:
            warnings.warn(
                "focal length is positive in right handed coordinate system, may lead to inverted image",
                RuntimeWarning)

        self.intrinsic = intrinsic
        self.se3 = se3
        self.se3_inv = SE3.invert(se3)
        # the camera's world-space origin is the translation of the inverted pose
        self.origin_ws = SE3.extract_translation(self.se3_inv)
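
The constructor leans on two SE3 helpers that are not shown here. A minimal sketch, assuming the usual 4x4 rigid-transform layout with rotation R and translation t (the function names come from the snippet; the bodies are assumptions):

import numpy as np

def invert(se3):
    # Rigid-transform inverse: [R | t]^-1 = [R^T | -R^T t], cheaper and more
    # numerically stable than a general 4x4 matrix inversion.
    inv = np.identity(4, dtype=se3.dtype)
    rot = se3[0:3, 0:3]
    inv[0:3, 0:3] = rot.T
    inv[0:3, 3] = -rot.T @ se3[0:3, 3]
    return inv

def extract_translation(se3):
    # The translation lives in the top three entries of the last column.
    return se3[0:3, 3].copy()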
Example #2
    @classmethod
    def setUpClass(cls):
        cls.pixels_uint8 = np.array([[1, 2], [3, 4]]).astype(np.uint8)
        cls.pixels_float32 = np.array([[1, 2], [3, 4]]).astype(Utils.image_data_type)
        cls.depth_float32 = np.array([[0.1, 0.5], [1, 2]]).astype(Utils.image_data_type)
        cls.se3_identity = np.identity(4, dtype=Utils.matrix_data_type)
        cls.intrinsic_identity = Intrinsic.Intrinsic(-1, -1, 0, 0)
        cls.camera_identity = Camera.Camera(cls.intrinsic_identity, cls.se3_identity)
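
A hypothetical test method consuming these fixtures, to illustrate why the identity pose is a useful baseline (the assertion is illustrative, not from the source):

    def test_identity_pose_leaves_points_unchanged(self):
        # With an identity extrinsic, transforming a homogeneous point is a no-op.
        point = np.array([[1.0], [2.0], [3.0], [1.0]], dtype=Utils.matrix_data_type)
        transformed = np.matmul(self.camera_identity.se3, point)
        np.testing.assert_array_almost_equal(transformed, point)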
Example #3
def normalized_camera_with_look_at(camera_position, camera_target, camera_up,
                                   o_x, o_y):
    look_at = look_at_matrix(camera_position, camera_target, camera_up)
    intrinsics = Intrinsic.Intrinsic(-1, -1, o_x, o_y)
    return Camera(intrinsics, look_at)
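
look_at_matrix itself is not shown. Below is a common right-handed construction that would be consistent with the negative focal lengths used throughout these examples (the camera looks down -Z); the exact axis conventions of the original codebase are an assumption:

import numpy as np

def look_at_matrix(camera_position, camera_target, camera_up):
    # Build a camera-to-world pose whose -Z axis points at the target.
    forward = camera_position - camera_target   # +Z axis of the camera frame
    forward = forward / np.linalg.norm(forward)
    right = np.cross(camera_up, forward)
    right = right / np.linalg.norm(right)
    up = np.cross(forward, right)
    pose = np.identity(4)
    pose[0:3, 0] = right
    pose[0:3, 1] = up
    pose[0:3, 2] = forward
    pose[0:3, 3] = camera_position
    return pose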
Example #4
def normalized_camera(x_trans, y_trans, o_x, o_y):
    look_at = look_at_matrix(np.array([x_trans, y_trans, 0]),
                             np.array([x_trans, y_trans, -1]),
                             np.array([0, 1, 0]))
    intrinsics = Intrinsic.Intrinsic(-1, -1, o_x, o_y)
    return Camera(intrinsics, look_at)
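
Usage is then a one-liner; the principal-point offsets of 1/2 match the NDC convention used in the other examples (the argument values are illustrative):

camera = normalized_camera(x_trans=0.0, y_trans=0.0, o_x=1/2, o_y=1/2)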
Example #5
    SE3_ref_target = Parser.generate_ground_truth_se3(groundtruth_text,image_groundtruth_dict,ref_id,target_id,None)
    im_greyscale_reference, im_depth_reference = Parser.generate_image_depth_pair(dataset_root,rgb_text,depth_text,match_text,ref_id)
    im_greyscale_target, im_depth_target = Parser.generate_image_depth_pair(dataset_root,rgb_text,depth_text,match_text,target_id)

    ground_truth_acc = np.matmul(SE3_ref_target,ground_truth_acc)

    ground_truth_list.append(ground_truth_acc)
    ref_image_list.append((im_greyscale_reference, im_depth_reference))
    target_image_list.append((im_greyscale_target, im_depth_target))


im_greyscale_reference_1, im_depth_reference_1 = ref_image_list[0]
(image_height, image_width) = im_greyscale_reference_1.shape
se3_identity = np.identity(4, dtype=Utils.matrix_data_type)
# the image gradient induces a coordinate system where y is flipped, i.e. we have to flip it here
intrinsic_identity = Intrinsic.Intrinsic(-517.3, -516.5, 318.6, 239.5) # freiburg_1
if use_ndc:
    #intrinsic_identity = Intrinsic.Intrinsic(1, 1, 1/2, 1/2) # for ndc
    intrinsic_identity = Intrinsic.Intrinsic(-1, -516.5/517.3, 318.6/image_width, 239.5/image_height) # for ndc


camera_reference = Camera.Camera(intrinsic_identity, se3_identity)
camera_target = Camera.Camera(intrinsic_identity, se3_identity)

visualizer = Visualizer.Visualizer(ground_truth_list)

motion_cov_inv = np.identity(6,dtype=Utils.matrix_data_type)
twist_prior = np.zeros((6,1),dtype=Utils.matrix_data_type)

for i in range(0, len(ref_image_list)):
    im_greyscale_reference, im_depth_reference = ref_image_list[i]
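
The NDC branch above rescales the pixel-space freiburg_1 intrinsics so the image spans a unit range. As a worked example, assuming the standard 640x480 resolution of that dataset:

fx_ndc = -517.3 / 517.3   # = -1.0, sign kept from the flipped-y convention
fy_ndc = -516.5 / 517.3   # ~ -0.998, fy expressed relative to fx
cx_ndc = 318.6 / 640      # ~ 0.498, principal point near the image centre
cy_ndc = 239.5 / 480      # ~ 0.499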
Example #6
        #acceleration_values_avg_x = (float(acceleration_values[0]) + float(acceleration_values_prev[0]))/2.0
        #acceleration_values_avg_y = (float(acceleration_values[1]) + float(acceleration_values_prev[1]))/2.0
        #acceleration_values_avg_z = (float(acceleration_values[2]) + float(acceleration_values_prev[2]))/2.0
        #acceleration_command = AccelerationCommand(acceleration_values_avg_x,acceleration_values_avg_y,acceleration_values_avg_z)
    #else:
    acceleration_command = AccelerationCommand(float(acceleration_values[0]),float(acceleration_values[1]),float(acceleration_values[2]))

    acceleration_list.append(acceleration_command)


im_greyscale_reference_1, im_depth_reference_1 = ref_image_list[0]
(image_height, image_width) = im_greyscale_reference_1.shape
se3_identity = np.identity(4, dtype=Utils.matrix_data_type)

intrinsic_identity = Intrinsic.Intrinsic(520.9, 521.0, 321.5, 249.7) # freiburg_2
if use_ndc:
    #intrinsic_identity = Intrinsic.Intrinsic(1, 1, 1/2, 1/2) # for ndc
    intrinsic_identity = Intrinsic.Intrinsic(1, 521.0/520.9, 321.5/image_width, 249.7/image_height) # for ndc


camera_reference = Camera.Camera(intrinsic_identity, se3_identity)
camera_target = Camera.Camera(intrinsic_identity, se3_identity)

linear_motion = Linear.Linear(acceleration_list, dt_list)
linear_cov_list = linear_motion.covariance_for_command_list(acceleration_list, dt_list)

visualizer = Visualizer.Visualizer(ground_truth_list)

motion_cov_inv = np.identity(6,dtype=Utils.matrix_data_type)
#motion_cov_inv = np.zeros((6,6),dtype=Utils.matrix_data_type)
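
The commented-out block at the top of this example averages two consecutive IMU samples before building the command; as a hypothetical helper (the name is made up):

def averaged_acceleration_command(curr, prev):
    # Midpoint of two consecutive raw acceleration samples, per axis.
    return AccelerationCommand(
        (float(curr[0]) + float(prev[0])) / 2.0,
        (float(curr[1]) + float(prev[1])) / 2.0,
        (float(curr[2]) + float(prev[2])) / 2.0)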
Example #7
#depth_target = cv2.imread('/Users/marchaubenstock/Workspace/Diplomarbeit_Resources/VO_Synthetic/depthbuffer_translated_fov_90_square_Y.png',0).astype(
#    Utils.depth_data_type)
depth_target = cv2.imread(
    '/Users/marchaubenstock/Workspace/Diplomarbeit_Resources/VO_Synthetic/depthbuffer_translated_fov_90_square.png',
    0).astype(Utils.depth_data_type)
#depth_target = cv2.imread('/Users/marchaubenstock/Workspace/Diplomarbeit_Resources/VO_Synthetic/depthbuffer_left_90_fov_90_square.png',0).astype(Utils.depth_data_type)
#depth_target = cv2.imread('/Users/marchaubenstock/Workspace/Diplomarbeit_Resources/VO_Synthetic/depthbuffer_translated_fov_90_square_negative.png',0).astype(Utils.depth_data_type)
#depth_target = cv2.imread('/Users/marchaubenstock/Workspace/Diplomarbeit_Resources/VO_Home_Images/Images_ZR300_X_Trans_Depth/image_depth_30_small.png',0).astype(Utils.depth_data_type)
#depth_target = ImageProcessing.z_standardise(depth_target)

(image_height, image_width) = im_greyscale_reference.shape

se3_identity = np.identity(4, dtype=Utils.matrix_data_type)
# fx and fy affect the resulting coordinate system of the se3 matrix
intrinsic_identity = Intrinsic.Intrinsic(-1, 1, image_width / 2,
                                         image_height / 2)
#intrinsic_identity = Intrinsic.Intrinsic(-1, -1, 1/2, 1/2) # for ndc

# reference frame is assumed to be the origin
# target frame SE3 is unknown i.e. what we are trying to solve
camera_reference = Camera.Camera(intrinsic_identity, se3_identity)
camera_target = Camera.Camera(intrinsic_identity, se3_identity)

# We only need the gradients of the target frame
frame_reference = Frame.Frame(im_greyscale_reference, depth_reference,
                              camera_reference, False)
frame_target = Frame.Frame(im_greyscale_target, depth_target, camera_target,
                           True)

SE3_est = Solver_Cython.solve_photometric(frame_reference,
                                          frame_target,
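
Frame computes image gradients when its last constructor argument is True, and only the target frame needs them here. A plausible sketch of that computation, based on the Sobel references in Example #9 (kernel size and output depth are assumptions):

import cv2

def compute_gradients(image):
    # Horizontal and vertical Sobel derivatives; the y derivative is what
    # induces the flipped coordinate system mentioned in these examples.
    grad_x = cv2.Sobel(image, cv2.CV_64F, 1, 0, ksize=3)
    grad_y = cv2.Sobel(image, cv2.CV_64F, 0, 1, ksize=3)
    return grad_x, grad_y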
Example #8
#depth_target = cv2.imread('/Users/marchaubenstock/Workspace/Diplomarbeit_Resources/VO_Home_Images/Images_ZR300_Board_X_Trans_Depth_Aligned_Scaled/image_depth_aligned_127.png',cv2.IMREAD_ANYDEPTH).astype(Utils.depth_data_type)
#depth_target = ImageProcessing.z_standardise(depth_target)

(image_height, image_width) = im_greyscale_reference.shape

# Some depth images were acquired without scaling, i.e. we scale here
depth_factor = 1000
use_ndc = True

#depth_target *= depth_factor
#depth_reference *= depth_factor

se3_identity = np.identity(4, dtype=Utils.matrix_data_type)

# fx and fy affect the resulting coordinate system of the se3 matrix
intrinsic_identity = Intrinsic.Intrinsic(1, 1, image_width / 2,
                                         image_height / 2)
if use_ndc:
    intrinsic_identity = Intrinsic.Intrinsic(-1, -1, 1 / 2, 1 / 2)  # for ndc

# reference frame is assumed to be the origin
# target frame SE3 is unknown i.e. what we are trying to solve
camera_reference = Camera.Camera(intrinsic_identity, se3_identity)
camera_target = Camera.Camera(intrinsic_identity, se3_identity)

# We only need the gradients of the target frame
frame_reference = Frame.Frame(im_greyscale_reference, depth_reference,
                              camera_reference, False)
frame_target = Frame.Frame(im_greyscale_target, depth_target, camera_target,
                           True)

#visualizer = Visualizer.Visualizer(photometric_solver)
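
For reference, the commented-out load at the top of this example reads an aligned 16-bit depth image; a self-contained version of that pattern (the file name is a placeholder):

import cv2
import numpy as np

# IMREAD_ANYDEPTH preserves 16-bit depth values instead of clamping to 8 bit.
depth_raw = cv2.imread('image_depth_aligned.png', cv2.IMREAD_ANYDEPTH)
depth = depth_raw.astype(np.float64)  # np.float64 stands in for Utils.depth_data_type
depth *= 1000  # apply depth_factor only when the file was stored unscaled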
Example #9
#im_greyscale = cv2.imread('/Users/marchaubenstock/Workspace/Diplomarbeit_Resources/VO_Home_Images/Images_ZR300_XTrans/image_1.png',0)
#im_greyscale = cv2.imread('/Users/marchaubenstock/Workspace/Diplomarbeit_Resources/rccar_26_09_18/marc_1_full/color/966816.173323313.png',cv2.IMREAD_GRAYSCALE)
#im_greyscale = cv2.imread('/Users/marchaubenstock/Workspace/Diplomarbeit_Resources/VO_Bench/rgbd_dataset_freiburg2_desk/rgb/1311868164.363181.png',cv2.IMREAD_GRAYSCALE)
im_greyscale = cv2.imread('/Users/marchaubenstock/Workspace/Rust/open-cv/images/calib.png',cv2.IMREAD_GRAYSCALE)
#im_greyscale = im_greyscale.astype(Utils.image_data_type)

pixels_standardised = ImageProcessing.z_standardise(im_greyscale)
pixels_norm = im_greyscale.astype(np.float64)

pixels_normalized_disp = ImageProcessing.normalize_to_image_space(pixels_standardised)
pixels_disp = ImageProcessing.normalize_to_image_space(pixels_norm)
depth_image = pixels_standardised.astype(Utils.depth_data_type_int)

se3_identity = np.identity(4, dtype=Utils.matrix_data_type)
intrinsic_identity = Intrinsic.Intrinsic(-1, -1, 0, 0)
camera_identity = Camera.Camera(intrinsic_identity, se3_identity)

frame = Frame.Frame(pixels_standardised, depth_image, camera_identity, True)

#cv2.imshow('grad x',frame.grad_x)
cv2.imshow('grad x abs',np.abs(frame.grad_x))
#cv2.imshow('neg sobel x',-frame.grad_x)
#cv2.imshow('sobel y',frame.grad_y)
#cv2.imshow('image',pixels_disp)
#cv2.imshow('image z-standard',pixels_normalized_disp)


#grayscale_image = ImageProcessing.normalize_to_image_space(frame.grad_x)
#abs = np.absolute(frame.grad_x)
#normed = cv2.normalize(abs, None, alpha=0, beta=65535, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_16UC1)
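
ImageProcessing.z_standardise is used across these examples but never shown; a straightforward z-score implementation consistent with that usage (an assumption):

import numpy as np

def z_standardise(image):
    # Zero-mean, unit-variance standardisation of the pixel intensities.
    pixels = image.astype(np.float64)
    return (pixels - pixels.mean()) / pixels.std()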
Example #10
    ref_image_list.append((im_greyscale_reference, im_depth_reference))
    target_image_list.append((im_greyscale_target, im_depth_target))

    encoder_ts = float(rgb_encoder_dict[ref_id][0])
    encoder_values = encoder_dict[encoder_ts]
    encoder_values_float = [float(encoder_values[0]), float(encoder_values[1])]

    encoder_list.append(encoder_values_float)

im_greyscale_reference_1, im_depth_reference_1 = ref_image_list[0]
(image_height, image_width) = im_greyscale_reference_1.shape
se3_identity = np.identity(4, dtype=Utils.matrix_data_type)
# the image gradient induces a coordinate system where y is flipped, i.e. we have to flip it here

intrinsic_identity = Intrinsic.Intrinsic(619.225, 618.836, 317.603, 244.876)
if use_ndc:
    #intrinsic_identity = Intrinsic.Intrinsic(1, 1, 1/2, 1/2) # for ndc
    intrinsic_identity = Intrinsic.Intrinsic(1, 619.225 / 618.836,
                                             317.603 / image_width,
                                             244.876 / image_height)  # for ndc

camera_reference = Camera.Camera(intrinsic_identity, se3_identity)
camera_target = Camera.Camera(intrinsic_identity, se3_identity)

steering_commands = list(
    map(lambda cmd: SteeringCommand.SteeringCommands(cmd[0], cmd[1]),
        encoder_list))
ackermann_motion = Ackermann.Ackermann(steering_commands, dt_list)
ackermann_cov_list = ackermann_motion.covariance_dead_reckoning_for_command_list(
    steering_commands, dt_list)
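
SteeringCommand.SteeringCommands simply wraps the two encoder channels read above; a minimal stand-in (the field names and their physical interpretation are assumptions):

class SteeringCommands:
    def __init__(self, wheel_velocity, steering_angle):
        # First encoder channel taken as wheel velocity, second as steering
        # angle; the actual dataset convention may differ.
        self.wheel_velocity = wheel_velocity
        self.steering_angle = steering_angle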
Example #11
    ref_image_list.append((im_greyscale_reference, im_depth_reference))
    target_image_list.append((im_greyscale_target, im_depth_target))

    encoder_ts = float(rgb_encoder_dict[ref_id][0])
    encoder_values = encoder_dict[encoder_ts]
    encoder_values_float = [float(encoder_values[0]), float(encoder_values[1])]

    encoder_list.append(encoder_values_float)

im_greyscale_reference_1, im_depth_reference_1 = ref_image_list[0]
(image_height, image_width) = im_greyscale_reference_1.shape
se3_identity = np.identity(4, dtype=Utils.matrix_data_type)
# the image gradient induces a coordinate system where y is flipped, i.e. we have to flip it here

intrinsic_identity = Intrinsic.Intrinsic(606.585, 612.009, 340.509, 226.075)
if use_ndc:
    #intrinsic_identity = Intrinsic.Intrinsic(1, 1, 1/2, 1/2) # for ndc
    intrinsic_identity = Intrinsic.Intrinsic(1, 612.009 / 606.585,
                                             340.509 / image_width,
                                             226.075 / image_height)  # for ndc

camera_reference = Camera.Camera(intrinsic_identity, se3_identity)
camera_target = Camera.Camera(intrinsic_identity, se3_identity)

steering_commands = list(
    map(lambda cmd: SteeringCommand.SteeringCommands(cmd[0], cmd[1]),
        encoder_list))
ackermann_motion = Ackermann.Ackermann(steering_commands, dt_list)
ackermann_cov_list = ackermann_motion.covariance_dead_reckoning_for_command_list(
    steering_commands, dt_list)
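
Downstream, each covariance in ackermann_cov_list would typically be inverted to weight the motion prior, mirroring the motion_cov_inv identity initialisation in the earlier examples. A hedged sketch:

import numpy as np

def to_information_matrices(cov_list, eps=1e-8):
    # Invert each 6x6 covariance; the small regulariser guards against a
    # rank-deficient covariance (e.g. a command interval with zero steering).
    return [np.linalg.inv(cov + eps * np.identity(6)) for cov in cov_list]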