Example #1
    def post_process_in_mem(self, se3):
        # Flip the yaw angle and mirror the y-translation in place
        # (coordinate-convention fix-up between datasets).
        rot = SE3.extract_rotation(se3)
        euler = SE3.rotationMatrixToEulerAngles(rot)
        rot_new = SE3.makeS03(euler[0], euler[1], -euler[2])
        se3[0:3, 0:3] = rot_new
        #se3[0, 3] = -se3[0, 3]
        se3[1, 3] = -se3[1, 3]
Example #2
def lie_ackermann_correction(gradient_step, motion_cov_inv, ackermann_twist,
                             vo_twist, twist_size):
    # Computes a correction twist that pulls the visual-odometry estimate
    # towards the Ackermann motion prior, weighted by the inverse motion
    # covariance and scaled by the gradient step.
    # ack_prior = np.multiply(Gradient_step_manager.current_alpha,ackermann_pose_prior)
    ack_prior = ackermann_twist

    #ack_prior = np.matmul(motion_cov_inv, ack_prior) # 1
    #ack_prior = np.multiply(gradient_step, ack_prior) # 2

    # Map both twists onto the manifold via the exponential map.
    R_w, t_w = exp(vo_twist, twist_size)
    R_ack, t_ack = exp(ack_prior, twist_size)

    SE_3_w = np.append(np.append(R_w, t_w, axis=1),
                       Utils.homogenous_for_SE3(),
                       axis=0)
    SE_3_ack = np.append(np.append(R_ack, t_ack, axis=1),
                         Utils.homogenous_for_SE3(),
                         axis=0)

    # Relative pose between the VO estimate and the Ackermann prior,
    # mapped back to a twist via the logarithm.
    SE3_w_ack = SE3.pose_pose_composition_inverse(SE_3_w, SE_3_ack)

    w_inc = ln(SE3.extract_rotation(SE3_w_ack),
               SE3.extract_translation(SE3_w_ack), twist_size)

    # Weight by the inverse motion covariance, then scale by the step size.
    w_inc = np.multiply(gradient_step, np.matmul(motion_cov_inv, w_inc))  # 1
    #w_inc = np.matmul(motion_cov_inv, w_inc) # 2
    #w_inc = np.multiply(gradient_step, w_inc)

    return w_inc
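The composition-inverse step above is the standard relative-pose construction on SE(3). A minimal numpy sketch of that idea, assuming 4x4 homogeneous transforms (the project's exp/ln Lie helpers are not reproduced here, and the function name simply mirrors the call used above):

import numpy as np

def pose_pose_composition_inverse(T_a, T_b):
    # Relative transform taking frame a to frame b: T_a^-1 @ T_b.
    # Assumes both arguments are 4x4 rigid-body transforms.
    return np.matmul(np.linalg.inv(T_a), T_b)

# Hypothetical usage with two poses:
T_rel = pose_pose_composition_inverse(np.eye(4), np.eye(4))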
Example #3
def generate_se3_from_groundtruth(groundtruth_list):
    # Expected layout: tx ty tz qx qy qz qw (position plus unit quaternion).
    tx = float(groundtruth_list[0])
    ty = float(groundtruth_list[1])
    tz = float(groundtruth_list[2])

    qx = float(groundtruth_list[3])
    qy = float(groundtruth_list[4])
    qz = float(groundtruth_list[5])
    qw = float(groundtruth_list[6])

    #qx *= -1
    #qy *= -1
    #qz *= -1
    #qw *= -1

    se3 = np.identity(4)

    roll, pitch, yaw = SE3.Quaternion_toEulerianRadians(qx, qy, qz, qw)
    #roll*=-1
    #pitch*=-1
    #yaw*=-1
    SO3 = SE3.makeS03(roll, pitch, yaw)  #  seems to be more precise
    #SO3 = SE3.quaternion_to_s03(qx,qy,qz,qw)

    se3[0:3, 0:3] = SO3[0:3, 0:3]
    se3[0, 3] = tx
    se3[1, 3] = ty
    se3[2, 3] = tz

    return se3
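For reference, a sketch of the direct quaternion-to-rotation conversion that the commented-out SE3.quaternion_to_s03 call presumably performs; this is the standard unit-quaternion formula in plain numpy:

import numpy as np

def quaternion_to_rotation(qx, qy, qz, qw):
    # Normalize first so the result is a proper rotation matrix.
    n = np.sqrt(qx*qx + qy*qy + qz*qz + qw*qw)
    qx, qy, qz, qw = qx / n, qy / n, qz / n, qw / n
    return np.array([
        [1 - 2*(qy*qy + qz*qz), 2*(qx*qy - qz*qw),     2*(qx*qz + qy*qw)],
        [2*(qx*qy + qz*qw),     1 - 2*(qx*qx + qz*qz), 2*(qy*qz - qx*qw)],
        [2*(qx*qz - qy*qw),     2*(qy*qz + qx*qw),     1 - 2*(qx*qx + qy*qy)],
    ])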
Example #4
    def __init__(self, intrinsic: Intrinsic, se3: np.ndarray):
        if intrinsic.extract_fx() > 0 or intrinsic.extract_fy() > 0:
            warnings.warn(
                "focal length is positive in a right-handed coordinate system; this may lead to an inverted image",
                RuntimeWarning)

        self.intrinsic = intrinsic
        self.se3 = se3
        self.se3_inv = SE3.invert(se3)
        self.origin_ws = SE3.extract_translation(self.se3_inv)
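origin_ws is the camera centre expressed in world space. A short sketch of the underlying identity, assuming se3 is the world-to-camera transform [R|t] (this mirrors what SE3.invert followed by SE3.extract_translation presumably computes):

import numpy as np

def camera_origin_world(se3):
    # The inverse of [R|t] is [R^T | -R^T t]; its translation
    # is the camera centre in world coordinates.
    R = se3[0:3, 0:3]
    t = se3[0:3, 3:4]
    return -np.matmul(R.T, t)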
Example #5
    def post_process_in_mem(self, se3):
        # Remap the translation and Euler-angle axes in place
        # (another coordinate-convention fix-up).
        x = se3[0, 3]
        y = se3[1, 3]
        z = se3[2, 3]
        rot = SE3.extract_rotation(se3)
        euler = SE3.rotationMatrixToEulerAngles(rot)
        rot_new = SE3.makeS03(euler[2], -euler[1], euler[0])
        #se3[0:4,0:4] = se3_new[0:4,0:4]

        se3[0:3, 0:3] = rot_new[0:3, 0:3]

        se3[0, 3] = z
        #se3[1, 3] = y
        se3[2, 3] = -x
Example #6
def load_vo_from_file(file_path):
    SE3_list = []
    encoder_list = []
    # delimiter is a module-level constant naming the column separator.
    with open(file_path) as f:
        data = f.read()
    lines = data.replace(delimiter, " ").replace("\t", " ").split("\n")
    parsed = [[v.strip() for v in line.split(" ") if v.strip() != ""]
              for line in lines if len(line) > 0 and line[0] != "#"]
    for data_line in parsed:
        data_line_len = len(data_line)
        # Each line holds a 6-vector twist, optionally followed by
        # two wheel-encoder values.
        assert data_line_len == 6 or data_line_len == 8

        twist = np.zeros((6, 1), dtype=Utils.matrix_data_type)
        twist[0, 0] = data_line[0]
        twist[1, 0] = data_line[1]
        twist[2, 0] = data_line[2]
        twist[3, 0] = data_line[3]
        twist[4, 0] = data_line[4]
        twist[5, 0] = data_line[5]
        SE3_mat = SE3.twist_to_SE3(twist)
        SE3_list.append(SE3_mat)

        if data_line_len > 6:
            enc = np.zeros(2, dtype=Utils.matrix_data_type)
            enc[0] = data_line[6]
            enc[1] = data_line[7]
            encoder_list.append(enc)

    return SE3_list, encoder_list
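A minimal usage sketch, assuming a text file in the layout the assert above enforces (the file name is hypothetical):

# vo_output.txt, one line per frame:
#   vx vy vz wx wy wz [enc_left enc_right]
se3_list, encoder_list = load_vo_from_file('vo_output.txt')
print(len(se3_list), 'poses loaded,', len(encoder_list), 'encoder pairs')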
Example #7
    def post_process_in_mem(self, se3):
        # Coordinate-convention fix-up: remap the Euler angles and the
        # translation axes in place. Note that rot_new is currently not
        # written back; only the translation is remapped.
        rot = SE3.extract_rotation(se3)
        euler = SE3.rotationMatrixToEulerAngles(rot)
        #rot_new = SE3.makeS03(euler[1], -euler[2], euler[0])
        rot_new = SE3.makeS03(euler[0], euler[2], euler[1])
        #se3[0:3, 0:3] = rot_new
        x = se3[0, 3]
        y = se3[1, 3]
        z = se3[2, 3]
        #se3[0, 3] = -y
        #se3[1, 3] = z
        #se3[2, 3] = -x

        se3[0, 3] = x
        se3[1, 3] = z
        se3[2, 3] = y
Example #8
def plot_rmse(se3_gt_list, se3_est_list, ax, style='bx', clear=False,
              draw=True, offset=1):
    if clear:
        ax.clear()

    #rmse_list = SE3.root_mean_square_error_for_entire_list(se3_gt_list,se3_est_list)
    rmse_list = SE3.root_mean_square_error_for_consecutive_frames_accumulated(se3_gt_list, se3_est_list, offset=offset)

    for i in range(len(rmse_list)):
        rmse = rmse_list[i]
        ax.plot([i], [rmse], style)

    if draw:
        plt.draw()
        plt.pause(1)
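A hypothetical usage sketch, assuming matching ground-truth and estimate pose lists are already loaded:

import matplotlib.pyplot as plt

fig, ax = plt.subplots()
plot_rmse(se3_gt_list, se3_est_list, ax, style='rx', offset=1)
plt.show()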
Example #9
    def render(self):
        (height, width) = self.resolution
        # The camera-to-world rotation is constant for the frame, so it is
        # hoisted out of the per-pixel loop.
        camera_to_world = self.camera.se3_inv
        rot = SE3.extract_rotation(camera_to_world)
        for x in range(0, width):
            for y in range(0, height):
                ray_direction_camera_space = self.camera.camera_ray_direction_camera_space(
                    x, y, width, height, self.fov)
                ray_world_space = Utils.normalize(
                    np.matmul(rot, ray_direction_camera_space))
                ray = Geometry.Ray(self.camera.origin_ws[0:3], ray_world_space)
                (b, t, sphere) = self.find_closest_intersection(ray)
                if sphere.is_intersection_acceptable(b, t):
                    intersection_point = Geometry.point_for_ray(ray, t)
                    normal = sphere.normal_for_point(intersection_point)
                    depth = intersection_point[2]
                    self.depth_buffer[y, x] = fabs(depth)
                    self.frame_buffer[y, x] = phong_shading(
                        self.light_ws, intersection_point, normal)
Example #10
def generate_ground_truth_se3(ground_truth_dict,
                              image_groundtruth_dict,
                              reference_id,
                              target_id,
                              post_process_object=None):
    groundtruth_ts_ref = image_groundtruth_dict[reference_id]
    #groundtruth_data_ref = associate.return_dictionary_data(ground_truth_file_path, groundtruth_ts_ref)
    groundtruth_data_ref = ground_truth_dict[groundtruth_ts_ref]
    SE3_ref = generate_se3_from_groundtruth(groundtruth_data_ref)

    groundtruth_ts_target = image_groundtruth_dict[target_id]
    #groundtruth_data_target = associate.return_dictionary_data(ground_truth_file_path, groundtruth_ts_target)
    groundtruth_data_target = ground_truth_dict[groundtruth_ts_target]
    SE3_target = generate_se3_from_groundtruth(groundtruth_data_target)

    if post_process_object:
        post_process_object.post_process_in_mem(SE3_ref)
        post_process_object.post_process_in_mem(SE3_target)

    # Relative ground-truth pose from the reference to the target frame.
    SE3_ref_target = SE3.pose_pose_composition_inverse(SE3_ref, SE3_target)

    return SE3_ref_target
Example #11
dt = 1.0
pose = Pose.Pose()
steering_command_straight = SteeringCommand.SteeringCommands(1.0, 0.0)

ackermann_motion = Ackermann.Ackermann()

new_motion_delta = ackermann_motion.ackermann_dead_reckoning_delta(steering_command_straight)
pose.apply_motion(new_motion_delta, dt)

# TODO investigate which theta to use.
# The per-timestep delta might actually be better, since we are interested
# in the uncertainty of this timestep only.
theta = new_motion_delta.delta_theta
# The traditional formulation uses the accumulated theta:
#theta = pose.theta

motion_cov_small = ackermann_motion.covariance_dead_reckoning(steering_command_straight, theta, dt)

se3 = SE3.generate_se3_from_motion_delta(new_motion_delta)
# origin is assumed to be a 4xN array of homogeneous points defined earlier.
origin_transformed = np.matmul(se3, origin)

points = np.append(origin, origin_transformed, axis=1)
points_xyz = points[0:3, :]

fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')

Plot3D.plot_array_lines(points_xyz, ax)


Example #12
ground_truth_acc = np.identity(4, Utils.matrix_data_type)
se3_estimate_acc = np.identity(4, Utils.matrix_data_type)
ground_truth_list = []
pose_estimate_list = []
ref_image_list = []
target_image_list = []

depth_factor = 5000.0
#depth_factor = 1.0
use_ndc = True


image_groundtruth_dict = dict(associate.match(rgb_text, groundtruth_text))
#se3_ground_truth_prior = np.transpose(SE3.quaternion_to_s03(0.6132, 0.5962, -0.3311, -0.3986))
# Prior: a pi rotation about z with zero translation.
se3_ground_truth_prior = SE3.makeS03(0, 0, pi)
se3_ground_truth_prior = np.append(se3_ground_truth_prior, np.zeros((3, 1), dtype=Utils.matrix_data_type), axis=1)
se3_ground_truth_prior = SE3.append_homogeneous_along_y(se3_ground_truth_prior)
#se3_ground_truth_prior = SE3.invert(se3_ground_truth_prior)
se3_ground_truth_prior[0:3, 3] = 0


for i in range(0, len(ref_id_list)):

    ref_id = ref_id_list[i]
    target_id = target_id_list[i]

    SE3_ref_target = Parser.generate_ground_truth_se3(groundtruth_text, image_groundtruth_dict, ref_id, target_id, None)
    im_greyscale_reference, im_depth_reference = Parser.generate_image_depth_pair(dataset_root, rgb_text, depth_text, match_text, ref_id)
    im_greyscale_target, im_depth_target = Parser.generate_image_depth_pair(dataset_root, rgb_text, depth_text, match_text, target_id)
Example #13
        se3_estimate_acc_2 = np.matmul(se3_estimate_acc_2, SE3_est_2)
        pose_estimate_list_2.append(se3_estimate_acc_2)

    if data_file_3:
        SE3_est_3 = pose_estimate_list_loaded_3[i]
        se3_estimate_acc_3 = np.matmul(se3_estimate_acc_3, SE3_est_3)
        pose_estimate_list_3.append(se3_estimate_acc_3)

    if data_file_4:
        SE3_est_4 = pose_estimate_list_loaded_4[i]
        se3_estimate_acc_4 = np.matmul(se3_estimate_acc_4, SE3_est_4)
        pose_estimate_list_4.append(se3_estimate_acc_4)


delta = 30
if (count - 1) - start_count >= delta:

    print(SE3.rmse_avg_raw(ground_truth_list, pose_estimate_list, delta))

visualizer = Visualizer.Visualizer(ground_truth_list, plot_steering=False, plot_trajectory=False)
visualizer.visualize_ground_truth(clear=True, draw=False)
if plot_vo:
    visualizer.visualize_poses(pose_estimate_list, draw=False)
    if data_file_2:
        visualizer.visualize_poses(pose_estimate_list_2, draw=False, style='-ro')
    if data_file_3:
        visualizer.visualize_poses(pose_estimate_list_3, draw=False, style='-bx')
    if data_file_4:
        visualizer.visualize_poses(pose_estimate_list_4, draw=False, style='-bo')
print('visualizing..')
visualizer.show()
Example #14
origin = Utils.to_homogeneous_positions(X, Y, Z, H)

dt = 1.0
steering_command_straight = SteeringCommand.SteeringCommands(1.5, 0.0)
steering_commands = [steering_command_straight, steering_command_straight]
dt_list = list(map(lambda _: dt, steering_commands))
ackermann_motion = Ackermann.Ackermann(steering_commands, dt_list)

cov_list = ackermann_motion.covariance_dead_reckoning_for_command_list(
    steering_commands, dt_list)
ellipse_factor_list = Ackermann.get_standard_deviation_factors_from_covaraince_list(
    cov_list)

# TODO put this in visualizer
se3_list = SE3.generate_se3_from_motion_delta_list(
    ackermann_motion.pose_delta_list)
motion_delta = ackermann_motion.pose_delta_list[0]
se3 = SE3.generate_se3_from_motion_delta(motion_delta)
origin_transformed = np.matmul(se3, origin)
origin_transformed_2 = np.matmul(se3, origin_transformed)

points = np.append(np.append(origin, origin_transformed, axis=1),
                   origin_transformed_2,
                   axis=1)
points_xyz = points[0:3, :]

# testing inverse
#cov_inv = np.linalg.inv(motion_cov_small_1)
#t = np.matmul(cov_inv,motion_cov_small_1)

fig = plt.figure()
Example #15
# We only need the gradients of the target frame
frame_reference = Frame.Frame(im_greyscale_reference, depth_reference,
                              camera_reference, False)
frame_target = Frame.Frame(im_greyscale_target, depth_target, camera_target,
                           True)

#visualizer = Visualizer.Visualizer(photometric_solver)

SE3_est = Solver.solve_photometric(frame_reference,
                                   frame_target,
                                   threadLock=None,
                                   pose_estimate_list=None,
                                   max_its=20000,
                                   eps=0.001,
                                   alpha_step=1.0,
                                   gradient_monitoring_window_start=5.0,
                                   image_range_offset_start=0,
                                   twist_prior=None,
                                   hessian_prior=None,
                                   use_ndc=use_ndc,
                                   use_robust=True,
                                   track_pose_estimates=False,
                                   debug=False)
euler_angles_XYZ = SE3.rotationMatrixToEulerAngles(
    SE3.extract_rotation(SE3_est))

print(SE3_est)
print(Utils.radians_to_degrees(euler_angles_XYZ[0]),
      Utils.radians_to_degrees(euler_angles_XYZ[1]),
      Utils.radians_to_degrees(euler_angles_XYZ[2]))
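The Euler extraction above follows the common rotation-matrix-to-Euler decomposition. A self-contained sketch of what SE3.rotationMatrixToEulerAngles presumably implements (standard XYZ convention with the usual near-gimbal-lock guard):

import math

def rotation_matrix_to_euler_angles(R):
    # Returns (roll, pitch, yaw) for a 3x3 rotation matrix R.
    sy = math.sqrt(R[0][0] * R[0][0] + R[1][0] * R[1][0])
    if sy > 1e-6:
        roll = math.atan2(R[2][1], R[2][2])
        pitch = math.atan2(-R[2][0], sy)
        yaw = math.atan2(R[1][0], R[0][0])
    else:  # pitch near +/-90 degrees: yaw is not observable
        roll = math.atan2(-R[1][2], R[1][1])
        pitch = math.atan2(-R[2][0], sy)
        yaw = 0.0
    return roll, pitch, yaw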
Example #16
#rgb_id_ref = 1305031106.675279
#rgb_id_target = 1305031106.711508

image_groundtruth_dict = dict(associate.match(rgb_text, groundtruth_text))

groundtruth_ts_ref = image_groundtruth_dict[rgb_id_ref]
groundtruth_data_ref = associate.return_dictionary_data(
    groundtruth_text, groundtruth_ts_ref)
SE3_ref = Parser.generate_se3_from_groundtruth(groundtruth_data_ref)

groundtruth_ts_target = image_groundtruth_dict[rgb_id_target]
groundtruth_data_target = associate.return_dictionary_data(
    groundtruth_text, groundtruth_ts_target)
SE3_target = Parser.generate_se3_from_groundtruth(groundtruth_data_target)

SE3_ref_target = SE3.pose_pose_composition_inverse(SE3_ref, SE3_target)

rgb_ref_file_path, depth_ref_file_path = associate.return_rgb_depth_from_rgb_selection(
    rgb_text, depth_text, match_text, dataset_root, rgb_id_ref)
rgb_target_file_path, depth_target_file_path = associate.return_rgb_depth_from_rgb_selection(
    rgb_text, depth_text, match_text, dataset_root, rgb_id_target)

im_greyscale_reference = cv2.imread(
    rgb_ref_file_path, cv2.IMREAD_GRAYSCALE).astype(Utils.image_data_type)
im_depth_reference = cv2.imread(depth_ref_file_path,
                                cv2.IMREAD_ANYDEPTH).astype(
                                    Utils.depth_data_type_float)

im_greyscale_target = cv2.imread(
    rgb_target_file_path, cv2.IMREAD_GRAYSCALE).astype(Utils.image_data_type)
im_depth_target = cv2.imread(depth_target_file_path,
                             cv2.IMREAD_ANYDEPTH).astype(
                                 Utils.depth_data_type_float)
Example #17
#rgb_id_target = 1305031102.211214

rgb_id_ref = 1305031108.743502
rgb_id_target = 1305031108.775493

#rgb_id_ref = 1305031119.615017
#rgb_id_target = 1305031119.647903

image_groundtruth_dict = dict(associate.match(rgb_text, groundtruth_text))

groundtruth_ts_ref = image_groundtruth_dict[rgb_id_ref]
groundtruth_data_ref = associate.return_dictionary_data(groundtruth_text, groundtruth_ts_ref)
SE3_ref = Parser.generate_se3_from_groundtruth(groundtruth_data_ref)

groundtruth_ts_target = image_groundtruth_dict[rgb_id_target]
groundtruth_data_target = associate.return_dictionary_data(groundtruth_text, groundtruth_ts_target)
SE3_target = Parser.generate_se3_from_groundtruth(groundtruth_data_target)

SE3_ref_target = SE3.pose_pose_composition_inverse(SE3_ref, SE3_target)

print('*'*80)
print('GROUND TRUTH\n')
print(SE3_ref_target)
print('*'*80)
Example #18
match_text = dataset_root + 'matches.txt'
groundtruth_text = dataset_root + 'groundtruth.txt'

groundtruth_dict = associate.read_file_list(groundtruth_text)

rgb_folder = dataset_root + rgb_folder
depth_folder = dataset_root + depth_folder

rgb_files = ListGenerator.get_files_from_directory(rgb_folder, delimiter='.')
depth_files = ListGenerator.get_files_from_directory(depth_folder,
                                                     delimiter='.')

rgb_file_total = len(rgb_files)
depth_file_total = len(depth_files)

euler = SE3.Quaternion_toEulerianRadians(0.8772, -0.1170, 0.0666, -0.4608)

ground_truth_acc = np.identity(4, Utils.matrix_data_type)
se3_estimate_acc = np.identity(4, Utils.matrix_data_type)
ground_truth_list = []
pose_estimate_list = []
ref_image_list = []
target_image_list = []
vo_twist_list = []

depth_factor = 5000.0
#depth_factor = 1.0
use_ndc = True
calc_vo = True
plot_steering = True
Example #19
    def __init__(self, intrinsic: Intrinsic, se3: np.ndarray):

        self.intrinsic = intrinsic
        self.se3 = se3
        self.se3_inv = SE3.invert(se3)
        self.origin_ws = SE3.extract_translation(self.se3_inv)
Example #20
        pose_estimate_list_2.append(se3_estimate_acc_2)

    if data_file_3:
        SE3_est_3 = pose_estimate_list_loaded_3[i]
        se3_estimate_acc_3 = np.matmul(se3_estimate_acc_3, SE3_est_3)
        pose_estimate_list_3.append(se3_estimate_acc_3)

    if data_file_4:
        SE3_est_4 = pose_estimate_list_loaded_4[i]
        se3_estimate_acc_4 = np.matmul(se3_estimate_acc_4, SE3_est_4)
        pose_estimate_list_4.append(se3_estimate_acc_4)

delta = 30
if (count - 1) - start_count >= delta:
    print("1: " +
          str(SE3.rmse_avg_raw(ground_truth_list, pose_estimate_list, delta)))
    if data_file_2:
        print("2: " + str(
            SE3.rmse_avg_raw(ground_truth_list, pose_estimate_list_2, delta)))
    if data_file_3:
        print("3: " + str(
            SE3.rmse_avg_raw(ground_truth_list, pose_estimate_list_3, delta)))
    if data_file_4:
        print("4: " + str(
            SE3.rmse_avg_raw(ground_truth_list, pose_estimate_list_4, delta)))

handles = []

visualizer = Visualizer.Visualizer(ground_truth_list,
                                   plot_steering=False,
                                   plot_trajectory=False,
Example #21
def solve_photometric(frame_reference,
                      frame_target,
                      threadLock,
                      pose_estimate_list,
                      max_its,
                      eps,
                      alpha_step,
                      gradient_monitoring_window_start,
                      image_range_offset_start,
                      max_depth,
                      twist_prior=None,
                      motion_cov_inv_in=None,
                      use_ndc=False,
                      use_robust=False,
                      track_pose_estimates=False,
                      use_motion_prior=False,
                      ackermann_pose_prior=None,
                      use_ackermann=False,
                      debug=False):

    if track_pose_estimates and (threadLock is None
                                 or pose_estimate_list is None):
        raise RuntimeError(
            'Visualization flag is set, but no list and lock were supplied')

    # init
    # array for twist values x, y, z, roll, pitch, yaw
    t_est = np.array([0, 0, 0], dtype=matrix_data_type).reshape((3, 1))
    #R_est = np.array([[0.0, -1.0, 0],
    #                  [1.0, 0.0, 0],
    #                  [0, 0, 1]], dtype=matrix_data_type)
    R_est = np.identity(3, dtype=matrix_data_type)
    I_3 = np.identity(3, dtype=matrix_data_type)
    I_4 = np.identity(4, dtype=matrix_data_type)
    I_6 = np.identity(6, dtype=matrix_data_type)
    zero_cov = np.zeros((6, 6), dtype=matrix_data_type)
    #SE3_best = np.identity(4,dtype=matrix_data_type)
    (height, width) = frame_target.pixel_image.shape
    N = height * width
    position_vector_size = 3
    twist_size = 6
    stacked_obs_size = position_vector_size * N
    homogeneous_se3_padding = Utils.homogenous_for_SE3()
    variance = -1
    v_mean = maxsize
    v_diff = maxsize  # initialized so the early pixel-ratio break can print it
    image_range_offset = image_range_offset_start
    degrees_of_freedom = 5.0  # empirically derived: see paper
    normal_matrix_ret = np.identity(6, dtype=Utils.matrix_data_type)
    motion_cov_inv = motion_cov_inv_in
    #motion_cov_inv = np.linalg.inv(motion_cov_inv_in)
    w = np.zeros((twist_size, 1), dtype=Utils.matrix_data_type)
    w_empty = np.zeros((twist_size, 1), dtype=Utils.matrix_data_type)
    w_prev = np.zeros((twist_size, 1), dtype=Utils.matrix_data_type)
    w_acc = np.zeros((twist_size, 1), dtype=Utils.matrix_data_type)
    v_id = np.zeros((N, 1), dtype=matrix_data_type, order='F')
    pseudo_inv = np.identity(twist_size, dtype=matrix_data_type)
    not_better = False
    valid_pixel_ratio = 1.0
    motion_cov_inv_norm = Utils.norm_covariance_row(motion_cov_inv_in)

    fx = frame_reference.camera.intrinsic.extract_fx()
    fy = frame_reference.camera.intrinsic.extract_fy()

    depth_factor = np.sign(fx)
    #depth_factor = -np.sign(fx)

    Gradient_step_manager = GradientStepManager.GradientStepManager(
        alpha_start=alpha_step,
        alpha_min=-0.7,
        alpha_step=-0.01,
        alpha_change_rate=0,
        gradient_monitoring_window_start=gradient_monitoring_window_start,
        gradient_monitoring_window_size=0)

    SE_3_est = np.append(np.append(R_est, t_est, axis=1),
                         Utils.homogenous_for_SE3(),
                         axis=0)
    SE_3_prev = np.append(np.append(R_est, t_est, axis=1),
                          Utils.homogenous_for_SE3(),
                          axis=0)
    #SE_3_est_orig = np.append(np.append(R_est, t_est, axis=1), Utils.homogenous_for_SE3(), axis=0)
    #SE_3_est_last_valid = np.append(np.append(R_est, t_est, axis=1), Utils.homogenous_for_SE3(), axis=0)

    generator_x = Lie.generator_x_3_4()
    #generator_x = Lie.generator_x_3_4_neg()
    generator_y = Lie.generator_y_3_4()
    #generator_y = Lie.generator_y_3_4_neg()
    #generator_z = Lie.generator_z_3_4()
    generator_z = Lie.generator_z_3_4_neg()

    # A depth factor of -1.0 leads to inverted roll and pitch when displaying.
    # Why? My hypothesis: the generator defines the direction of increase.
    generator_roll = Lie.generator_roll_3_4()
    #generator_roll = Lie.generator_roll_3_4_neg()
    #generator_pitch = Lie.generator_pitch_3_4()
    generator_pitch = Lie.generator_pitch_3_4_neg()
    generator_yaw = Lie.generator_yaw_3_4()
    #generator_yaw = Lie.generator_yaw_3_4_neg()

    X_back_projection = depth_factor * np.ones((4, N), Utils.matrix_data_type)
    X_back_projection[3, :] = 1.0
    #X_back_projection_last_valid = np.ones((4, N), Utils.matrix_data_type)
    valid_measurements_reference = np.full(N, False)
    valid_measurements_target = np.full(N, False)
    #valid_measurements_last = np.full(N,False)
    #valid_measurements_target = np.full(N,False)
    valid_measurements = valid_measurements_reference
    number_of_valid_measurements = N
    #v = np.zeros((N, 1), dtype=matrix_data_type, order='F')

    # Precompute back projection of pixels
    GaussNewtonRoutines.back_project_image(
        width, height, image_range_offset, frame_reference.camera,
        frame_reference.pixel_depth, frame_target.pixel_depth,
        X_back_projection, valid_measurements, valid_measurements_target,
        use_ndc, depth_factor, max_depth)

    count = np.sum(valid_measurements)
    count_target = np.sum(valid_measurements_target)

    z_rot = SE3.makeS03(0, 0, math.pi)
    se3_rot = np.identity(4, dtype=matrix_data_type)
    se3_rot[0:3, 0:3] = z_rot
    #X_back_projection = np.matmul(se3_rot,X_back_projection)

    if debug:
        Plot3D.save_projection_of_back_projected(height, width,
                                                 frame_reference,
                                                 X_back_projection)

    # Precompute the Jacobian of SE3 around the identity
    J_lie = JacobianGenerator.get_jacobians_lie(generator_x,
                                                generator_y,
                                                generator_z,
                                                generator_yaw,
                                                generator_pitch,
                                                generator_roll,
                                                X_back_projection,
                                                N,
                                                stacked_obs_size,
                                                coefficient=1.0)

    # Precompute the Jacobian of the projection function
    J_pi = JacobianGenerator.get_jacobian_camera_model(
        frame_reference.camera.intrinsic, X_back_projection)

    # count the number of true
    #valid_measurements_total = np.logical_and(valid_measurements_reference,valid_measurements_target)

    #number_of_valid_reference = np.sum(valid_measurements_reference)
    #number_of_valid_total = np.sum(valid_measurements_total)
    #number_of_valid_measurements = number_of_valid_reference

    #target_index_projections_id = frame_target.camera.apply_perspective_pipeline(I_4)

    target_index_projections = frame_target.camera.apply_perspective_pipeline(
        X_back_projection, use_ndc, width, height)

    GaussNewtonRoutines.compute_residual(
        width, height, target_index_projections, valid_measurements,
        valid_measurements_target, frame_target.pixel_image,
        frame_reference.pixel_image, frame_target.pixel_depth,
        frame_reference.pixel_depth, v_id, image_range_offset)

    v = np.copy(v_id)
    W = np.ones((1, N), dtype=matrix_data_type, order='F')

    for it in range(max_its):
        start = time.time()
        # accumulators
        # TODO: investigate preallocating these once and clearing them per iteration
        g = np.zeros((twist_size, 1))
        normal_matrix = np.identity(twist_size, dtype=matrix_data_type)

        # TODO investigate performance impact
        if track_pose_estimates:
            threadLock.acquire()
            pose_estimate_list.append(SE_3_est)
            threadLock.release()

        #v_diff = math.fabs(Gradient_step_manager.last_error_mean_abs - v_mean)
        #v_diff = Gradient_step_manager.last_error_mean_abs - v_mean

        #Gradient_step_manager.track_gradient(v_mean,it)

        # TODO investigate an absolute error threshold as well?
        #if ((v_diff <= eps)) and Gradient_step_manager.check_iteration(it) :
        #    print('done, mean error:', v_mean, 'diff: ', v_diff, 'pixel ratio:', valid_pixel_ratio)
        #    break

        # no if statement means solver 2
        #if v_mean <= Gradient_step_manager.last_error_mean_abs:
        not_better = False
        # The prior is "empty" when all six twist components are zero.
        prior_empty = twist_prior is not None and bool(np.all(twist_prior == 0))

        if use_motion_prior:
            converged = GaussNewtonRoutines.gauss_newton_step_motion_prior(
                width, height, valid_measurements, valid_measurements_target,
                W, J_pi, J_lie, frame_target.grad_x, frame_target.grad_y, v, g,
                normal_matrix, motion_cov_inv, twist_prior, w,
                image_range_offset)
        else:
            converged = GaussNewtonRoutines.gauss_newton_step(
                width, height, valid_measurements, valid_measurements_target,
                W, J_pi, J_lie, frame_target.grad_x, frame_target.grad_y, v, g,
                normal_matrix, image_range_offset)
        normal_matrix_ret = normal_matrix

        #try:
        #    pseudo_inv = linalg.inv(normal_matrix)
        #except:
        #    print('Cant invert')
        #    return SE_3_est

        #w_new = np.matmul(pseudo_inv, g)

        try:
            w_new = linalg.solve(normal_matrix, g)
        except linalg.LinAlgError:
            print('Cannot solve the normal equations')
            return SE_3_est, w, normal_matrix_ret

        # initial step with empty motion prior seems to be quite large
        #if use_motion_prior and prior_empty:
        #    w_new = np.multiply(Gradient_step_manager.current_alpha/2.0, w_new)
        #else:
        w_new = np.multiply(Gradient_step_manager.current_alpha, w_new)

        # Apply the Ackermann motion prior correction
        if use_ackermann:
            # V1
            # inc = ackermann_pose_prior - w
            # w_new += np.matmul(motion_cov_inv,inc)
            # w_new += inc

            # V2
            #factor = 0.1*Gradient_step_manager.current_alpha
            factor = 0.1
            #factor = math.pow(Gradient_step_manager.current_alpha,it)
            # ack_prior = np.multiply(Gradient_step_manager.current_alpha,ackermann_pose_prior)
            ack_prior = ackermann_pose_prior

            w_new += Lie.lie_ackermann_correction(factor, motion_cov_inv,
                                                  ack_prior, w, twist_size)

        #else:
        #    not_better = True
        #    w_new = w_empty

        R_cur, t_cur = Lie.exp(w, twist_size)
        R_new, t_new = Lie.exp(w_new, twist_size)

        # C_new . C_cur
        #t_est = np.add(np.matmul(R_new, t_cur), t_new)
        #R_est = np.matmul(R_new, R_cur)

        # C_Cur . C_new
        t_est = np.add(np.matmul(R_cur, t_new), t_cur)
        R_est = np.matmul(R_cur, R_new)

        w = Lie.ln(R_est, t_est, twist_size)

        #SE_3_current = np.append(np.append(R_cur, t_cur, axis=1), homogeneous_se3_padding, axis=0)
        SE_3_est = np.append(np.append(R_est, t_est, axis=1),
                             homogeneous_se3_padding,
                             axis=0)

        #debug_list  = [i for i, x in enumerate(valid_measurements) if x]

        Y_est = np.matmul(SE_3_est, X_back_projection)

        target_index_projections = frame_target.camera.apply_perspective_pipeline(
            Y_est, use_ndc, width, height)
        #target_index_projections[2,:] -= depth_factor*1

        v = GaussNewtonRoutines.compute_residual(
            width, height, target_index_projections, valid_measurements,
            valid_measurements_target, frame_target.pixel_image,
            frame_reference.pixel_image, frame_target.pixel_depth,
            frame_reference.pixel_depth, v, image_range_offset)

        number_of_valid_measurements = np.sum(valid_measurements)
        valid_pixel_ratio = number_of_valid_measurements / N

        if number_of_valid_measurements <= 0 and Gradient_step_manager.check_iteration(
                it):
            print('pixel ratio break')
            print('done, mean error:', v_mean, 'diff: ', v_diff,
                  'pixel ratio:', valid_pixel_ratio)
            #SE_3_est = SE3_best
            break

        if use_robust:
            variance = GaussNewtonRoutines.compute_t_dist_variance(
                v,
                degrees_of_freedom,
                N,
                valid_measurements,
                valid_measurements_target,
                number_of_valid_measurements,
                variance_min=1000,
                eps=0.001)
            if variance > 0.0:
                # clear old weights
                for i in range(0, N):
                    W[0, i] = 1
                GaussNewtonRoutines.generate_weight_matrix(
                    W, v, variance, degrees_of_freedom, N)

        v_weighted = np.copy(v)
        GaussNewtonRoutines.multiply_v_by_diagonal_matrix(
            W, v_weighted, N, valid_measurements)

        v_sum = np.matmul(np.transpose(v), v_weighted)[0][0]

        end = time.time()

        #if v_mean < Gradient_step_manager.last_error_mean_abs:
        #    SE3_best = np.copy(SE_3_est)
        #if not not_better: # solver 6
        Gradient_step_manager.save_previous_mean_error(v_mean)

        if number_of_valid_measurements > 0:
            v_mean = v_sum / number_of_valid_measurements
        else:
            v_mean = maxsize

        v_diff = math.fabs(Gradient_step_manager.last_error_mean_abs - v_mean)
        print('mean error:', v_mean, 'error diff: ', v_diff, 'iteration: ', it,
              'valid pixel ratio: ', valid_pixel_ratio, 'runtime: ',
              end - start, 'variance: ', variance)
        #v_diff = Gradient_step_manager.last_error_mean_abs - v_mean

        #Gradient_step_manager.track_gradient(v_mean,it)

        # TODO investigate an absolute error threshold as well?
        if ((v_diff <= eps)) and Gradient_step_manager.check_iteration(it):
            print('done, mean error:', v_mean, 'diff: ', v_diff,
                  'pixel ratio:', valid_pixel_ratio)
            break

    motion_cov_inv = normal_matrix_ret

    return SE_3_est, w, motion_cov_inv
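The core of each iteration above is a weighted Gauss-Newton step: accumulate the normal matrix J^T W J and the gradient J^T W v, solve for the twist increment, and scale it by the step size. A minimal, self-contained numpy sketch of that update (J, W, v and alpha are placeholders, not the solver's actual buffers):

import numpy as np

def weighted_gauss_newton_update(J, W, v, alpha=1.0):
    # J: (N, 6) stacked residual Jacobian, W: (N,) per-pixel weights,
    # v: (N, 1) residual vector. The sign of the increment depends on
    # how the residual is defined, as in the solver above.
    JtW = J.T * W                    # (6, N), columns scaled by the weights
    normal_matrix = JtW @ J          # (6, 6) approximate Hessian
    g = JtW @ v                      # (6, 1) gradient
    return alpha * np.linalg.solve(normal_matrix, g)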