import sys

import numpy as np
import pandas as pd
import matplotlib.path as mplPath
import sympy.geometry as spg


def footprints(cam, sensor, base_elev):
    """
    This function calculates the instantaneous field of view (IFOV) for 
    the camera(s) that are passed.\n
    Vars:\n
    \t cam = pandas dataframe (n x ~6, fields: x,y,z,yaw,pitch,roll)\n
    \t sensor = pandas dataframe (1 x 3, fields: focal, sensor_x, sensor_y):
    \t focal length (mm), sensor x dim (mm), sensor y dim (mm)\n
    \t base_elev = average elevation of your site (meters, or in the same
    \t measure as your coordinates)\n
    Creates approx. coordinates for sensor
    corners (north-oriented and zero pitch) at the camera's x,y,z. Rotates
    the sensor coords in 3D space to the camera's pitch, roll, and yaw
    angles and projects corner rays through the camera x,y,z to an
    approximate ground plane. The intersection of the rays with the
    ground are the corners of the photo footprint.\n
    *** Photos with pitch angles that cause the horizon to be visible will
    cause the UL and UR path coordinates to be wrong. These cameras are
    disregarded and the footprint will be set to NaN in the output.***\n
    RETURNS: footprints = Pandas dataframe (n x 1) of Matplotlib Path objects
    """
    # Setup DF to house camera footprint polygons
    footprints = pd.DataFrame(index=range(cam.shape[0]), columns=['fov'],
                              dtype=object)

    # convert sensor dimensions to meters, divide x/y for corner coord calc
    f = sensor.focal[0] * 0.001
    sx = sensor.sensor_x[0] / 2 * 0.001
    sy = sensor.sensor_y[0] / 2 * 0.001

    # calculate the critical pitch (in degrees) where the horizon will be
    #   visible; with the horizon visible, the ray projections go backward
    #   and produce erroneous IFOV polygons (crit_pitch = 90 - 0.5*vert_fov)
    crit_pitch = 90 - np.rad2deg(np.arctan(sy / f))
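    #   e.g. f = 24 mm, sensor_y = 24 mm: vert_fov = 2 * arctan(12 / 24)
    #   = 53.13 deg, so crit_pitch = 90 - 26.57 = 63.43 deg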

    # User Feedback
    print("Proccesing Camera IFOVs (%i total)..." % (cam.shape[0]))
    sys.stdout.flush()

    # for each camera...
    for idx, row in cam.iterrows():

        # check that the camera pitch is below the critical value
        if row.pitch < crit_pitch:

            # sensor corners (UR,LR,LL,UL), north-oriented and zero pitch
            corners = np.array([[row.x + sx, row.y - f, row.z + sy],
                                [row.x + sx, row.y - f, row.z - sy],
                                [row.x - sx, row.y - f, row.z - sy],
                                [row.x - sx, row.y - f, row.z + sy]])

            # offset corner points by cam x,y,z for rotation
            cam_pt = np.atleast_2d(np.array([row.x, row.y, row.z]))
            corner_p = corners - cam_pt

            # get pitch, roll, and yaw from the camera, convert to radians
            pitch = np.deg2rad(90.0 - row.pitch)
            roll = np.deg2rad(row.roll)
            yaw = np.deg2rad(row.yaw)

            # set up pitch (r_x), roll (r_y), and yaw (r_z) rotation matrices
            r_x = np.array([[1.0, 0.0, 0.0],
                            [0.0, np.cos(pitch), -np.sin(pitch)],
                            [0.0, np.sin(pitch), np.cos(pitch)]])

            r_y = np.array([[np.cos(roll), 0.0, np.sin(roll)],
                            [0.0, 1.0, 0.0],
                            [-np.sin(roll), 0.0, np.cos(roll)]])

            r_z = np.array([[np.cos(yaw), -np.sin(yaw), 0.0],
                            [np.sin(yaw), np.cos(yaw), 0.0],
                            [0.0, 0.0, 1.0]])

            # rotate corner_p by r_x, then r_y, then r_z; add back cam x,y,z
            #   offsets to produce corner coords rotated for pitch, roll, and yaw
            p_pr = np.matmul(np.matmul(corner_p, r_x), r_y)
            p_out = np.matmul(p_pr, r_z) + cam_pt

            # GEOMETRY
            # Set Sympy 3D point for the camera and a 3D plane for intersection
            cam_sp = spg.Point3D(row.x, row.y, row.z)
            plane = spg.Plane(spg.Point3D(row.x, row.y, base_elev),
                              normal_vector=(0, 0, 1))

            # blank array for footprint intersection coords
            inter_points = np.zeros((corners.shape[0], 2))

            # for each sensor corner point
            idx_b = 0
            for pt in np.asarray(p_out):

                # create a Sympy 3D point and create a Sympy 3D ray from
                #   corner point through camera point
                pt_sp = spg.Point3D(pt[0], pt[1], pt[2])
                ray = spg.Ray3D(pt_sp, cam_sp)

                # calculate the intersection of the ray with the plane
                inter_pt = plane.intersection(ray)

                # Extract the X,Y coords of the intersection point
                #   ground intersect points will be in this order (LL,UL,UR,LR)
                inter_points[idx_b, 0] = inter_pt[0].x.evalf()
                inter_points[idx_b, 1] = inter_pt[0].y.evalf()

                idx_b += 1

        # if crit_pitch is exceeded set inter_points to NaN
        else:
            inter_points = np.full((4, 2), np.nan)

        # store inter_points in footprints as a matplotlib Path object
        footprints.loc[idx, 'fov'] = mplPath.Path(inter_points)

        # User feedback
        if (idx + 1) % 10 == 0:
            print("%i cameras processed..." % (idx + 1))
            sys.stdout.flush()

    return footprints
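
A minimal usage sketch for the function above (an illustration, not from the
original project; the sample values are assumptions that follow the docstring):

cam = pd.DataFrame({'x': [500000.0], 'y': [4500000.0], 'z': [120.0],
                    'yaw': [90.0], 'pitch': [20.0], 'roll': [0.0]})
sensor = pd.DataFrame({'focal': [24.0],      # focal length, mm
                       'sensor_x': [35.9],   # sensor width, mm
                       'sensor_y': [24.0]})  # sensor height, mm
fov = footprints(cam, sensor, base_elev=100.0)
print(fov.fov[0].vertices)  # 4 x 2 array of footprint corner coordinates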
Example 2
import sys

import numpy as np
import pandas as pd
import matplotlib.path as mplPath
import sympy.geometry as spg
from scipy.spatial.transform import Rotation as R
from PyQt5.QtWidgets import QApplication  # Qt binding assumed to be PyQt5


def footprints(cam, sensor, base_elev, gui):
    """
    This function calculates the instantaneous field of view (IFOV) for 
    the camera(s) that are passed.\n
    Vars:\n
    \t cam = pandas dataframe (n x ~6, fields: x,y,z,yaw,pitch,roll)\n
    \t sensor = pandas dataframe (1 x 3, fields: focal, sensor_x, sensor_y):
    \t focal length (mm), sensor x dim (mm), sensor y dim (mm)\n
    \t base_elev = average elevation of your site (meters, or in the same
    \t measure as your coordinates)\n
    \t gui = Qt GUI object that exposes the progress bar widgets\n
    Creates approx. coordinates for sensor
    corners (north-oriented and zero pitch) at the camera's x,y,z. Rotates
    the sensor coords in 3D space to the camera's pitch, roll, and yaw
    angles and projects corner rays through the camera x,y,z to an
    approximate ground plane. The intersection of the rays with the
    ground are the corners of the photo footprint.\n
    *** Photos with pitch angles that cause the horizon to be visible will
    cause the UL and UR path coordinates to be wrong. These cameras are
    disregarded and the footprint will be set to NaN in the output.***\n
    RETURNS: footprints = Pandas dataframe (n x 1) of Matplotlib Path objects
    """

    # Qt progress bar setup
    gui.top_progBar.setValue(0)
    gui.top_progBar.setMaximum(cam.shape[0])

    # Setup DF to house camera footprint polygons
    footprints = pd.DataFrame(index=range(cam.shape[0]), columns=['fov'],
                              dtype=object)

    # convert sensor dimensions to meters, divide x/y for corner coord calc
    f = sensor.focal[0] * 0.001
    sx = sensor.sensor_x[0] / 2 * 0.001
    sy = sensor.sensor_y[0] / 2 * 0.001

    # calculate the critical pitch (in degrees) where the horizon will be
    #   visible; with the horizon visible, the ray projections go backward
    #   and produce erroneous IFOV polygons (crit_pitch = 90 - 0.5*vert_fov)
    crit_pitch = 90 - np.rad2deg(np.arctan(sy / f))

    # User Feedback
    print("Proccesing Camera IFOVs (%i total)..." % (cam.shape[0]))
    sys.stdout.flush()

    # for each camera...
    for idx, row in cam.iterrows():

        # check that the camera pitch is below the critical value
        if row.pitch < crit_pitch:

            # sensor corners, north-oriented and zero pitch (down look)
            #   [LL,UL,UR,LR,frame center]
            # X & Y flipped for the Euler rotation frame (switched back later)
            corner_p = np.array([[-f, -sx, -sy], [-f, -sx, sy], [-f, sx, sy],
                                 [-f, sx, -sy], [-f, 0, 0]])

            # cam x,y,z for adding to rotation
            cam_pt = np.array([row.x, row.y, row.z])

            # get pitch, roll, and yaw from the camera, in degrees;
            #   translate to fit the Euler frame
            pitch_e = 90.0 - row.pitch
            roll_e = -row.roll
            if 0 <= row.yaw <= 180.0:
                yaw_e = row.yaw
            else:
                yaw_e = -1 * (360.0 - row.yaw)
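            #   e.g. row.yaw = 270 deg wraps to yaw_e = -90 deg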

            # create an Euler rotation object from yaw, pitch, roll;
            #   apply it to the sensor corner points, then
            #   add the camera point coords to translate
            r_e = R.from_euler('ZYX', [[yaw_e, pitch_e, roll_e]], degrees=True)
            p_eC = r_e.apply(corner_p) + [cam_pt[1], cam_pt[0], cam_pt[2]]
            reord = [1, 0, 2]
            s_pts = p_eC[:, reord]

            # GEOMETRY
            # Set Sympy 3D point for the camera and a 3D plane for intersection
            cam_sp = spg.Point3D(row.x, row.y, row.z)
            plane = spg.Plane(spg.Point3D(row.x, row.y, base_elev),
                              normal_vector=(0, 0, 1))

            # blank array for footprint intersection coords
            inter_points = np.zeros((corner_p.shape[0] - 1, 2))

            # for each sensor corner point
            idx_b = 0
            for pt in np.asarray(s_pts[0:4]):

                # create a Sympy 3D point and create a Sympy 3D ray from
                #   corner point through camera point
                pt_sp = spg.Point3D(pt[0], pt[1], pt[2])
                ray = spg.Ray3D(pt_sp, cam_sp)

                # calculate the intersection of the ray with the plane
                inter_pt = plane.intersection(ray)

                # Extract the X,Y coords of the intersection point
                #   ground intersect points will be in this order (LL,UL,UR,LR)
                inter_points[idx_b, 0] = inter_pt[0].x.evalf()
                inter_points[idx_b, 1] = inter_pt[0].y.evalf()

                idx_b += 1

        # if crit_pitch is exceeded set inter_points to NaN
        else:
            inter_points = np.full((4, 2), np.nan)

        # store inter_points in footprints as a matplotlib Path object
        footprints.loc[idx, 'fov'] = mplPath.Path(inter_points)

        # User feedback and progress bar
        if (idx + 1) % 10 == 0:
            print("%i cameras processed..." % (idx + 1))
            gui.top_progBar.setValue(idx + 1)
            gui.topProg_Lbl.setText(
                "Calculating Camera Footprints - %i of %i" %
                (idx + 1, cam.shape[0]))
            QApplication.processEvents()

    return footprints
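
A quick standalone sanity check of the Euler convention used above (an
illustration; the numbers are assumptions): for a nadir camera row.pitch = 0
gives pitch_e = 90, and the rotated frame-center point lands directly above
the camera, so the ray cast from it through the camera points straight down
at the ground.

from scipy.spatial.transform import Rotation as R
import numpy as np

r = R.from_euler('ZYX', [[0.0, 90.0, 0.0]], degrees=True)  # yaw, pitch, roll
# the frame-center point (-1, 0, 0) rotates to (0, 0, 1), i.e. straight up
print(np.allclose(r.apply([[-1.0, 0.0, 0.0]]), [[0.0, 0.0, 1.0]]))  # True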
Example 3
from sympy import geometry
import numpy as np
from sphere import Sphere
import collections.abc

camera_placement = geometry.Point3D(0, 0, 0)
matrix_size = (151, 151)
light_position = np.array([0, 0, 0])


def pixel_coordinate(matrix_placement):
    return (
        matrix_placement[0] - matrix_size[0] // 2,
        matrix_placement[1] - matrix_size[1] // 2,
    )


def vector_to_object(pixel):
    assert matrix_size[0] % 2 != 0 and matrix_size[1] % 2 != 0
    return np.array((100.0, *pixel_coordinate(pixel)))


def luminescence_from_objects(objects):
    luminescence_matrix = np.zeros(matrix_size)
    assert isinstance(objects, collections.abc.Iterable)
    for i in objects:
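        # vectors_through_matrix is not part of this snippet; it is defined
        #   elsewhere in the source module and, judging from its use here,
        #   returns a matrix_size-shaped luminance array for one object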
        luminescence_matrix = np.maximum(vectors_through_matrix(i),
                                         luminescence_matrix)
    return luminescence_matrix
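
A small demonstration of the helpers above (an illustration, not from the
original project): pixel_coordinate recenters matrix indices on the optical
axis, and vector_to_object turns a pixel into a ray direction with a fixed
100-unit depth component.

print(pixel_coordinate((0, 0)))    # (-75, -75): corner of the 151 x 151 grid
print(pixel_coordinate((75, 75)))  # (0, 0): center pixel
print(vector_to_object((75, 75)))  # [100.   0.   0.]: ray along the optical axis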

Example 4
import sys

import numpy as np
import pandas as pd
import matplotlib.path as mplPath
import sympy.geometry as spg


def footprint(sensor):
    '''
    Calculates the footprint of the off-nadir camera by projecting rays from
    the sensor corners through the "lens" (focal length) out onto the ground.
    It's a lot of fun linear algebra that the SymPy library handles.
    '''

    # Setup DF to house camera footprint polygons
    footprint = pd.DataFrame(index=[0], dtype=object,
                             columns=['fov_h', 'fov_v', 'path', 'pp_x', 'pp_y'])

    # convert sensor dimensions to meters, divide x/y for corner coord calc
    print("SENSOR", sensor)
    f = sensor['focal'] * 0.001
    sx = sensor['sensor_x'] / 2 * 0.001
    sy = sensor['sensor_y'] / 2 * 0.001

    # calculate the critical pitch (in degrees) where the horizon will be
    #   visible; with the horizon visible, the ray projections go backward
    #   and produce erroneous IFOV polygons (crit_pitch = 90 - 0.5*vert_fov)
    #   exit with an error message if the critical pitch is exceeded
    crit_pitch = 90 - np.rad2deg(np.arctan(sy / f))

    if sensor['gimy'] >= crit_pitch:
        print('!!! The provided parameters indicate that the vertical field')
        print('\t of view extends above the horizon. Please start over and')
        print('\t try a shallower camera angle. The maximum angle for this')
        print('\t camera is %0.2f' % (crit_pitch))
        sys.exit()

    # calculate horz and vert field of view angles
    footprint.fov_h = 2 * np.rad2deg(np.arctan(sx / f))
    footprint.fov_v = 2 * np.rad2deg(np.arctan(sy / f))

    # sensor corners (UR,LR,LL,UL), north-oriented and zero pitch
    corners = np.array([[0 + sx, 0 - f, sensor['alt'] + sy],
                        [0 + sx, 0 - f, sensor['alt'] - sy],
                        [0 - sx, 0 - f, sensor['alt'] - sy],
                        [0 - sx, 0 - f, sensor['alt'] + sy]])

    # offset corner points by cam x,y,z for rotation
    cam_pt = np.atleast_2d(np.array([0, 0, sensor['alt']]))
    corner_p = corners - cam_pt

    # convert off nadir angle to radians
    pitch = np.deg2rad(90.0 - sensor['gimy'])

    # setup pitch rotation matrix (r_x)
    r_x = np.array([[1.0, 0.0, 0.0],
                    [0.0, np.cos(pitch), -np.sin(pitch)],
                    [0.0, np.sin(pitch), np.cos(pitch)]])

    # rotate corner_p by r_x, add back cam x,y,z offsets
    p_out = np.matmul(corner_p, r_x) + cam_pt

    # GEOMETRY
    # Set Sympy 3D point for the camera and a 3D plane for intersection
    cam_sp = spg.Point3D(0, 0, sensor['alt'])
    plane = spg.Plane(spg.Point3D(0, 0, 0), normal_vector=(0, 0, 1))

    # blank array for footprint intersection coords
    inter_points = np.zeros((corners.shape[0], 2))

    # for each sensor corner point
    idx_b = 0
    for pt in np.asarray(p_out):
        # create a Sympy 3D point and create a Sympy 3D ray from
        #   corner point through camera point
        pt_sp = spg.Point3D(pt[0], pt[1], pt[2])
        ray = spg.Ray3D(pt_sp, cam_sp)

        # calculate the intersection of the ray with the plane
        inter_pt = plane.intersection(ray)

        # Extract the X,Y coords of the intersection point
        #   ground intersect points will be in this order (LL,UL,UR,LR)
        inter_points[idx_b, 0] = inter_pt[0].x.evalf()
        inter_points[idx_b, 1] = inter_pt[0].y.evalf()

        idx_b += 1

    # store inter_points in footprint as a matplotlib Path object
    footprint.loc[0, 'path'] = mplPath.Path(inter_points)

    # calculate the principal point by intersecting the diagonals of the ifov path
    ll_pt = spg.Point(inter_points[0, 0], inter_points[0, 1])
    ul_pt = spg.Point(inter_points[1, 0], inter_points[1, 1])
    ur_pt = spg.Point(inter_points[2, 0], inter_points[2, 1])
    lr_pt = spg.Point(inter_points[3, 0], inter_points[3, 1])
    line_ll_ur = spg.Line(ll_pt, ur_pt)
    line_lr_ul = spg.Line(lr_pt, ul_pt)
    pp_inter = line_ll_ur.intersection(line_lr_ul)
    footprint.pp_x = pp_inter[0].x.evalf()
    footprint.pp_y = pp_inter[0].y.evalf()
    print("LL", ll_pt, "UL", ul_pt, "UR", ur_pt, "LR", lr_pt)
    return footprint
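
A minimal usage sketch for footprint() (an illustration; the parameter values
are assumptions): the sensor argument needs the focal, sensor_x, sensor_y,
gimy, and alt fields used above, so a plain dict works.

sensor = {'focal': 24.0,     # focal length, mm
          'sensor_x': 35.9,  # sensor width, mm
          'sensor_y': 24.0,  # sensor height, mm
          'gimy': 30.0,      # gimbal pitch, deg (must stay below crit_pitch)
          'alt': 100.0}      # flying height above the ground plane, m
fp = footprint(sensor)
print(fp.fov_h[0], fp.fov_v[0])  # horizontal / vertical FOV in degrees
print(fp.path[0].vertices)       # footprint corners (LL,UL,UR,LR) on the ground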