Example #1
def V_GGX(n_dot_v, n_dot_l, n_dot_h, v_dot_h, alpha):
    #                                                      2
    # V := -------------------------------------------------------------------------------------------------
    #      n_dot_v sqrt(alpha^2 + (1 - alpha^2) n_dot_l^2) + n_dot_l sqrt(alpha^2 + (1 - alpha^2) n_dot_v^2)
    
    alpha2   = sqr(alpha)
    lambda_v = sqrt(alpha2 + (1.0 - alpha2) * sqr(n_dot_v))
    lambda_l = sqrt(alpha2 + (1.0 - alpha2) * sqr(n_dot_l))
    
    return 2.0 / (n_dot_v * lambda_l + n_dot_l * lambda_v)
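
This snippet (and the ones below) relies on sqr and sqrt helpers from the surrounding project. A minimal sketch of those helpers plus a sample call, assuming Python's standard math module; the input values are illustrative only:

# Minimal sketch of the sqr/sqrt helpers these snippets rely on, plus a sample call.
# The math-module binding and the input values are assumptions for illustration only.
from math import sqrt

def sqr(x):
    # square of x
    return x * x

# evaluate the visibility term for an arbitrary roughness and geometry
v = V_GGX(n_dot_v=0.8, n_dot_l=0.6, n_dot_h=0.9, v_dot_h=0.7, alpha=0.25)
print(v)
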
Example #2
def G1_GGX(n_dot_vl, alpha):
    #                          2 n_dot_vl                                              2
    # G1 := --------------------------------------------------- = --------------------------------------------
    #       n_dot_vl + sqrt(alpha^2 + (1 - alpha^2) n_dot_vl^2)   1 + sqrt((alpha/n_dot_vl)^2 + (1 - alpha^2))
    
    alpha2    = sqr(alpha)
    n_dot_vl2 = sqr(n_dot_vl)

    return 2.0 * n_dot_vl / (n_dot_vl + sqrt(alpha2 + (1.0 - alpha2) * n_dot_vl2))
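
The comment above lists two algebraically equivalent forms of G1. A small check that the returned value matches the second form; the inputs are arbitrary illustrative values, and sqr/sqrt are assumed to be the same helpers as in the sketch above:

# Check the snippet against the second form of G1 given in its comment
# (arbitrary illustrative inputs; sqr/sqrt as in the helper sketch above).
n, a = 0.7, 0.3
alt_form = 2.0 / (1.0 + sqrt(sqr(a / n) + (1.0 - sqr(a))))
assert abs(G1_GGX(n, a) - alt_form) < 1e-12
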
Example #3
def V1_GGX(n_dot_vl, alpha):
    #                               2
    # V1 := ---------------------------------------------------
    #       n_dot_vl + sqrt(alpha^2 + (1 - alpha^2) n_dot_vl^2)
    
    alpha2    = sqr(alpha)
    n_dot_vl2 = sqr(n_dot_vl)
    
    return 2.0 / (n_dot_vl + sqrt(alpha2 + (1.0 - alpha2) * n_dot_vl2))
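
From the two commented formulas, V1 is simply G1 divided by n_dot_vl. A quick consistency check, assuming the G1_GGX snippet above is available in the same module and using illustrative inputs:

# V1 = G1 / n_dot_vl follows from the two commented formulas; illustrative inputs.
n, a = 0.7, 0.3
assert abs(V1_GGX(n, a) - G1_GGX(n, a) / n) < 1e-12
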
Example #4
def G_GGX(n_dot_v, n_dot_l, n_dot_h, v_dot_h, alpha):
    #                                           2 (n_dot_l) (n_dot_v)
    # G := -------------------------------------------------------------------------------------------------
    #      n_dot_v sqrt(alpha^2 + (1 - alpha^2) n_dot_l^2) + n_dot_l sqrt(alpha^2 + (1 - alpha^2) n_dot_v^2)
    #
    #                1
    #    = -----------------------
    #      1 + Lambda_v + Lambda_l
    #
    #            sqrt(alpha^2 + (1 - alpha^2) (n_dot_v)^2)   1
    # Lambda_v = ----------------------------------------- - -
    #                           2 n_dot_v                    2
    #
    # (Lambda_l is defined analogously, with n_dot_l in place of n_dot_v.)
    
    alpha2   = sqr(alpha)
    lambda_v = sqrt(alpha2 + (1.0 - alpha2) * sqr(n_dot_v))
    lambda_l = sqrt(alpha2 + (1.0 - alpha2) * sqr(n_dot_l))
    
    return (2.0 * n_dot_l * n_dot_v) / (n_dot_v * lambda_l + n_dot_l * lambda_v)
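
The geometry term above and the visibility term of Example #1 share the same denominator, so G_GGX should equal (n_dot_l * n_dot_v) * V_GGX. A small check with arbitrary illustrative inputs:

# Consistency check between G_GGX and V_GGX (same denominator, different numerator);
# the inputs are arbitrary illustrative values.
n_v, n_l, n_h, v_h, a = 0.8, 0.6, 0.9, 0.7, 0.25
assert abs(G_GGX(n_v, n_l, n_h, v_h, a) - n_l * n_v * V_GGX(n_v, n_l, n_h, v_h, a)) < 1e-12
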
Example #5
def F_CookTorrance(v_dot_h, F0):
    # c   := v_dot_h
    #
    #        1 + sqrt(F0)
    # eta := ------------
    #        1 - sqrt(F0)
    #
    # g   := sqrt(eta^2 + c^2 - 1)
    #
    #        1 [g - c]^2 [    [(g + c) c - 1]^2]
    # F   := - [-----]   [1 + [-------------]  ]
    #        2 [g + c]   [    [(g - c) c + 1]  ]
    
    sqrt_F0  = sqrt(F0)
    eta      = (1.0 + sqrt_F0) / (1.0 - sqrt_F0)
    g        = sqrt(sqr(eta) + sqr(v_dot_h) - 1.0)
    g1       = g + v_dot_h
    g2       = g - v_dot_h
    
    return 0.5 * sqr(g2 / g1) * (1.0 + sqr((g1 * v_dot_h - 1.0) / (g2 * v_dot_h + 1.0)))
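
At normal incidence (v_dot_h = 1) the expression reduces to F0, which makes for an easy sanity check; the F0 value below is an arbitrary dielectric-like choice:

# At v_dot_h = 1: g = eta, (g - c)/(g + c) = sqrt(F0), and the bracketed factor is 2,
# so the whole expression collapses to F0. F0 = 0.04 is an illustrative value.
F0 = 0.04
assert abs(F_CookTorrance(1.0, F0) - F0) < 1e-12
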
Example #6
    def find_reprojection_error(self, i_usable_frame, object_points,
                                intrinsics):
        rotation_vector = self.poses[i_usable_frame].rvec
        translation_vector = self.poses[i_usable_frame].tvec
        img_pts = self.image_points[i_usable_frame]

        est_pts = cv2.projectPoints(object_points, rotation_vector,
                                    translation_vector,
                                    intrinsics.intrinsic_mat,
                                    intrinsics.distortion_coeffs)[0]

        rms = math_utils.sqrt(
            ((img_pts - est_pts)**2).sum() / len(object_points))
        return rms
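
The error metric is the root of the per-corner mean of the summed squared pixel differences. A self-contained sketch of the same computation on made-up arrays; the shapes mirror cv2.projectPoints output (N x 1 x 2) and the corner count is hypothetical:

# Standalone sketch of the RMS computation above on dummy data; the arrays and the
# corner count (54, e.g. a 9x6 board) are made up for illustration.
import numpy as np

img_pts = np.random.rand(54, 1, 2)                      # "observed" corners
est_pts = img_pts + 0.01 * np.random.randn(54, 1, 2)    # "reprojected" corners
rms = np.sqrt(((img_pts - est_pts) ** 2).sum() / len(img_pts))
print(rms)
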
Example #7
def G1_Beckmann(n_dot_vl, alpha):
    #                n_dot_vl
    # c  := --------------------------
    #       alpha sqrt(1 - n_dot_vl^2)
    #
    #         3.535 c + 2.181 c^2
    # G1 := ----------------------- (if c < 1.6) | 1 (otherwise)
    #       1 + 2.276 c + 2.577 c^2
    
    n_dot_vl2 = sqr(n_dot_vl)
    c         = n_dot_vl / (alpha * sqrt(1.0 - n_dot_vl2))
    c2        = sqr(c)
    
    if c < 1.6:
        return (3.535 * c + 2.181 * c2) / (1.0 + 2.276 * c + 2.577 * c2)
    else:
        return 1.0
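
In the separable Smith form, the full shadowing-masking term is often approximated as the product of the two one-directional terms. A sketch under that assumption; the combined function is not part of the snippet above:

# Separable Smith approximation (an assumption, not taken from the snippet above):
# G(v, l) ~= G1(n.v) * G1(n.l)
def G_Beckmann(n_dot_v, n_dot_l, alpha):
    return G1_Beckmann(n_dot_v, alpha) * G1_Beckmann(n_dot_l, alpha)
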
Example #8
    def calibrate_time_reprojection(self,
                                    sample_count=1000,
                                    verbose=2,
                                    save_data=False,
                                    min_offset_datapoints=10):
        if isinstance(verbose, bool):
            verbose = int(verbose)
        logging.basicConfig(stream=sys.stderr)

        max_offset = self.args.max_frame_offset
        source_video = self.videos[0]

        source_poses = []
        sample_frame_numbers = []

        source_video.frame_offset = 0

        if sample_count > 0:
            sampling_interval = len(source_video.usable_frames) // sample_count
            start = (len(source_video.usable_frames) % sample_count) // 2

            # source poses array will be parallel to sample_frame_numbers, i.e. source_poses[i] is the pose
            # of the source camera at the frame position sample_frame_numbers[i] in the original source video

            usable_frames = list(source_video.usable_frames.keys())
            usable_frames.sort()

            for i_usable_frame in range(start,
                                        len(usable_frames) - start - 1,
                                        sampling_interval):
                usable_frame_num = usable_frames[i_usable_frame]
                sample_frame_numbers.append(usable_frame_num)
                source_pose = source_video.poses[
                    source_video.usable_frames[usable_frame_num]]
                source_poses.append(source_pose)
            if verbose:
                print("Sample count: {:d}".format(len(sample_frame_numbers)))
                # DEBUG LINE
                logging.debug(
                    "Calib interval: {:s}, first usable frame: {:d}".format(
                        str(source_video.calibration_interval),
                        usable_frames[0]))
                logging.debug("Sample frames: {:s}".format(
                    str(sample_frame_numbers)))
        else:
            sample_frame_numbers = list(source_video.usable_frames.keys())
            sample_frame_numbers.sort()
            source_poses = source_video.poses
            sample_count = len(sample_frame_numbers)

        offsets = [0]
        ix_camera = 1
        for target_video in self.videos[1:]:
            target_camera = self.cameras[ix_camera]
            if verbose:
                print(
                    "========================================================")
                print(
                    "Processing time shift between cameras '{:s}' and '{:s}'.".
                    format(source_video.name, target_video.name))
                print(
                    "========================================================")
            possible_offset_count = max_offset * 2 + 1

            # flag_array to remember unfilled entries
            flag_array = np.zeros((possible_offset_count, sample_count),
                                  dtype=bool)
            # transforms = np.zeros((possible_offset_count, sample_count, 4, 4), dtype=np.float64)
            pose_differences = np.zeros((possible_offset_count, sample_count),
                                        dtype=np.float64)
            projection_rms_mat = np.zeros(
                (possible_offset_count, sample_count), dtype=np.float64)

            offset_sample_counts = np.zeros(possible_offset_count,
                                            dtype=np.float64)
            offset_mean_pose_diffs = np.zeros(possible_offset_count,
                                              dtype=np.float64)
            offset_pt_rms = np.zeros(possible_offset_count, dtype=np.float64)

            offset_range = range(-max_offset, max_offset + 1)

            best_offset = 0
            best_offset_rms = float(sys.maxsize)
            # traverse all possible offsets
            for offset in offset_range:

                if verbose > 1:
                    print("Processing offset {:d}. ".format(offset), end="")

                ix_offset = offset + max_offset

                # per-offset cumulative things
                offset_comparison_count = 0
                offset_cumulative_pt_counts = 0
                offset_cumulative_pose_counts = 0
                offset_cumulative_pose_error = 0.0
                offset_cumulative_pt_squared_error = 0.0

                # for each offset, traverse all source frame samples
                for j_sample in range(0, len(sample_frame_numbers)):
                    source_frame = sample_frame_numbers[j_sample]
                    j_target_frame = source_frame + offset

                    # check whether the target video frame at this particular offset from the source sample
                    # frame has a usable calibration board
                    if j_target_frame in target_video.usable_frames:
                        flag_array[ix_offset, j_sample] = True
                        source_pose = source_poses[j_sample]
                        j_target_usable_frame = target_video.usable_frames[
                            j_target_frame]
                        target_pose = target_video.poses[j_target_usable_frame]
                        '''
                        Transform between this camera and the other one.
                        Future notation:
                        T(x, y, f, o) denotes estimated transform from camera x at frame f to camera y at frame f + o
                        '''
                        transform = target_pose.T.dot(source_pose.T_inv)

                        rms = target_video.find_reprojection_error(
                            j_target_usable_frame,
                            self.board_object_corner_set,
                            target_camera.intrinsics)

                        if rms > 1.0:
                            continue

                        cumulative_pose_error = 0.0
                        cumulative_squared_point_error = 0.0
                        pose_count = 0
                        point_count = 0
                        # for each sample, traverse all other samples
                        for i_sample in range(0, len(sample_frame_numbers)):
                            source_frame = sample_frame_numbers[i_sample]
                            i_target_frame = source_frame + offset
                            if i_sample != j_sample and i_target_frame in target_video.usable_frames:
                                offset_comparison_count += 1
                                '''
                                use the same estimated transform between source & target cameras for specific offset
                                on other frame samples
                                's' means source camera, 't' means target camera
                                [R|t]_(x,z) means camera x extrinsics at frame z
                                [R|t]'_(x,z) means estimated camera x extrinsics at frame z
                                T(x, y, f, o) denotes estimated transform from camera x at frame f
                                  to camera y at frame f + o
                                X_im denotes image coordinates
                                X_im' denotes estimated image coordinates
                                X_w denotes corresponding world (object) coordinates

                                Is the estimate at this offset a good estimate of the transform between cameras
                                for all frames?

                                Firstly, we can apply the transform to the source camera pose and see how far we
                                end up from the target camera pose:
                                i != j,
                                [R|t]_(t,i)' = T(s,t,j,k).dot([R|t]_(s,i+k))
                                [R|t]_(t,i) =?= [R|t]_(t,i)'
                                '''
                                target_pose = target_video.poses[
                                    target_video.usable_frames[i_target_frame]]
                                est_target_pose = Pose(
                                    transform.dot(source_poses[i_sample].T))
                                cumulative_pose_error += est_target_pose.diff(
                                    target_pose)
                                '''
                                Secondly, we can use the transform applied to the source camera pose, together with
                                the target intrinsics, to project the world (object) coordinates onto the image
                                plane, and then compare them with the empirical observations:

                                X_im(t,i) = K_t.dot(dist_t([R|t]_(t,i).dot(X_w)))
                                X_im'(t,i) = K_t.dot(dist_t([R|t]_(t,i)'.dot(X_w)))
                                X_im(t,i) =?= X_im'(t,i)

                                Note: the X_im(t,i) computation above is for reference only; there is no need to
                                reproject, since we already have empirical observations of the image points
                                '''
                                target_points = target_video.image_points[
                                    target_video.usable_frames[i_target_frame]]
                                est_target_points = \
                                    cv2.projectPoints(objectPoints=self.board_object_corner_set,
                                                      rvec=est_target_pose.rvec,
                                                      tvec=est_target_pose.tvec,
                                                      cameraMatrix=target_camera.intrinsics.intrinsic_mat,
                                                      distCoeffs=target_camera.intrinsics.distortion_coeffs)[0]

                                # np.linalg.norm(target_points - est_target_points, axis=2).flatten()
                                cumulative_squared_point_error += (
                                    (target_points -
                                     est_target_points)**2).sum()
                                pose_count += 1
                                point_count += len(
                                    self.board_object_corner_set)

                        if pose_count > 0:
                            mean_pose_error = cumulative_pose_error / pose_count
                            root_mean_square_pt_error = math_utils.sqrt(
                                cumulative_squared_point_error / point_count)
                            pose_differences[ix_offset,
                                             j_sample] = mean_pose_error
                            projection_rms_mat[
                                ix_offset,
                                j_sample] = root_mean_square_pt_error

                            offset_cumulative_pose_error += cumulative_pose_error
                            offset_cumulative_pose_counts += pose_count
                            offset_cumulative_pt_counts += point_count
                            offset_cumulative_pt_squared_error += cumulative_squared_point_error
                if verbose > 1:
                    print("Total comparison count: {:d} ".format(
                        offset_comparison_count),
                          end="")
                offset_sample_counts[ix_offset] = offset_comparison_count
                if offset_cumulative_pose_counts > min_offset_datapoints:
                    offset_pose_error = offset_cumulative_pose_error / offset_cumulative_pose_counts
                    offset_mean_pose_diffs[ix_offset] = offset_pose_error
                    rms = math_utils.sqrt(offset_cumulative_pt_squared_error /
                                          offset_cumulative_pt_counts)
                    offset_pt_rms[ix_offset] = rms
                    if verbose > 1:
                        print("RMS error: {:.3f}; pose error: {:.3f}".format(
                            rms, offset_pose_error),
                              end="")
                    if rms < best_offset_rms:
                        best_offset = offset
                        best_offset_rms = rms
                if verbose > 1:
                    print("\n", end="")

            if save_data:
                np.savez_compressed(os.path.join(
                    self.args.folder, target_video.name + "_tc_data.npz"),
                                    sample_counts=offset_sample_counts,
                                    mean_pose_diffs=offset_mean_pose_diffs,
                                    point_rms=offset_pt_rms,
                                    flag_array=flag_array,
                                    pose_diff_mat=pose_differences,
                                    point_rms_mat=projection_rms_mat)

            target_video.offset = best_offset
            target_video.offset_error = best_offset_rms
            if verbose:
                print("Offset for {:s}-->{:s}: {d}, RMS error: {:.5f}".format(
                    source_video.name, target_video.name, best_offset,
                    best_offset_rms))
            offsets.append(best_offset)
            ix_camera += 1

        if save_data:
            np.savetxt('autooffset.txt', offsets)
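
When save_data is enabled, each target video gets a compressed archive with the per-offset statistics. A sketch of reading one such archive back for inspection; the file name is a placeholder, while the keys follow the savez_compressed call above:

# Reload one of the archives written above for offline inspection.
# "camera2_tc_data.npz" is a placeholder path; the keys follow the savez_compressed call.
import numpy as np

data = np.load("camera2_tc_data.npz")
print(data["sample_counts"])
print(data["mean_pose_diffs"])
print(data["point_rms"])
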
Example #9
def residual(feature_vec, point, camera_quat, camera_tr):
    point_cap = ma.Quat(*camera_quat)(point) + camera_tr
    observed_dir = point_cap.normalized()
    chordal_length = ma.sqrt((observed_dir - feature_vec).squared_norm() +
                             1e-32)
    return chordal_length
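
The same chordal-distance residual can be written with plain NumPy when the camera rotation is available as a 3x3 matrix instead of the ma.Quat wrapper; a sketch under that assumption:

# NumPy-only variant of the chordal residual (assumes the rotation is a 3x3 matrix R
# rather than an ma.Quat; written for illustration only).
import numpy as np

def residual_np(feature_vec, point, R, camera_tr):
    point_cap = R.dot(point) + camera_tr
    observed_dir = point_cap / np.linalg.norm(point_cap)
    return np.sqrt(((observed_dir - feature_vec) ** 2).sum() + 1e-32)
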
Example #10
#
#  Unless required by applicable law or agreed to in writing, software
#  distributed under the License is distributed on an "AS IS" BASIS,
#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#  See the License for the specific language governing permissions and
#  limitations under the License.
#  ================================================================

import numpy as np
import sys
import cv2
import math_utils

EXIT_CODE_SUCCESS = 0
EXIT_CODE_FAILURE = 1
SQUARE_ROOT_OF_TWO = math_utils.sqrt(2)


def main():
    image = cv2.imread("test_image1.png")
    step_size_px = 10
    vertex_row_count = image.shape[0] // step_size_px
    vertex_col_count = image.shape[1] // step_size_px
    vertex_count = vertex_row_count * vertex_col_count

    face_row_count = vertex_row_count - 1
    face_col_count = vertex_col_count - 1

    print("Grid size: ", vertex_row_count, " x ", vertex_col_count)
    print("Vertex count: ", vertex_count)
    warp_coefficient_count = 2 * vertex_count