Example #1
    def test_create_ellipsoid_stl(self):
        adju = Peseudo_3D_intersection_adjustment()
        # the integer test matrix below is immediately overwritten; only the
        # second (covariance) matrix is actually used
        m = PhotoScan.Matrix([[504, 360, 180], [360, 360, 0], [180, 0, 720]])
        m = PhotoScan.Matrix([[24.66697238419596, 11.102022651894911, 29.082023223173206],
                              [11.10202265189491, 10.052229488742526, 14.941828405336427],
                              [29.082023223173206, 14.941828405336427, 42.78791682803554]])

        eig_valu, eig_vec = adju._get_eigen_vel_vec(m)

        stl_handler = STL_Handler()
        stl_handler.importSTL()
        stl_handler.importSTL("sp_exp_for_test.stl")
        ellipsoid_stl = "solid OpenSCAD_Model\n"

        ellipsoid_stl += stl_handler.create_ellipsoid_stl(eig_vec, eig_valu, [10, 0, 0], 1, False)
        # print(ellipsoid_stl)
        self.assertEqual('vertex 11.997  0.635 -1.716', ellipsoid_stl.splitlines()[3])

        ellipsoid_stl += "endsolid OpenSCAD_Model"

        path = os.path.dirname(os.path.realpath(__file__))

        with open(os.path.join(path, 'stl_ell.stl'), 'w') as f:
            f.write(ellipsoid_stl)
Example #2
def bbox_to_cs():
    print("Script started...")

    doc = PhotoScan.app.document
    chunk = doc.chunk

    T = chunk.transform.matrix

    v_t = T.mulp(PhotoScan.Vector([0, 0, 0]))

    if chunk.crs:
        m = chunk.crs.localframe(v_t)
    else:
        m = PhotoScan.Matrix().Diag([1, 1, 1, 1])

    m = m * T
    s = math.sqrt(m[0, 0] ** 2 + m[0, 1] ** 2 + m[0, 2] ** 2)  # scale factor # s = m.scale()
    R = PhotoScan.Matrix([[m[0, 0], m[0, 1], m[0, 2]],
                          [m[1, 0], m[1, 1], m[1, 2]],
                          [m[2, 0], m[2, 1], m[2, 2]]])
    # R = m.rotation()

    R = R * (1. / s)

    reg = chunk.region
    reg.rot = R.t()
    chunk.region = reg

    print("Script finished!")
Example #3
def main():

    doc = PhotoScan.app.document

    chunk = doc.chunk
    T0 = chunk.transform.matrix

    region = chunk.region
    R0 = region.rot
    C0 = region.center
    s0 = region.size

    for chunk in doc.chunks:

        if chunk == doc.chunk:
            continue

        T = chunk.transform.matrix.inv() * T0

        R = PhotoScan.Matrix( [[T[0,0],T[0,1],T[0,2]], [T[1,0],T[1,1],T[1,2]], [T[2,0],T[2,1],T[2,2]]])

        scale = R.row(0).norm()
        R = R * (1/scale)

        region.rot = R * R0
        c = T.mulp(C0)
        region.center = c
        region.size = s0 * scale

        chunk.region = region

    print("Script finished. Bounding box copied.\n")
Example #4
def transform_chunck():
    text = input_field.get("1.0", tk.END)
    try:
        # text = '1.000000 0.000000 0.000000 0.000000\n0.000000 1.000000 0.000000 0.000000\n0.000000 0.000000 1.000000 0.000000\n0.000000 0.000000 0.000000 1.000000\n'
        lines = text.splitlines()
        lines = [line for line in lines if len(line) > 2]  # drop empty/near-empty lines

        print("Do Transform")
        matrix_list = []
        if len(lines) != 4:
            raise ValueError("invalid number of rows")

        for line in lines:
            line_value = line.split()
            if len(line_value) != 4:
                raise ValueError("invalid number of columns: %s" % line_value)

            matrix_list.append([float(x) for x in line_value])

        trafo_matrix = PhotoScan.Matrix(matrix_list)
        PhotoScan.app.document.chunk.transform.matrix = trafo_matrix
        print(PhotoScan.app.document.chunk.transform.matrix)
        label.config(text='transformation successful!')
    except Exception as e:
        print(e)
        label.config(text='The matrix is not valid.\n'
                          'Please use a 4x4 matrix with blanks as separator')
Example #5
def rotY(angle):
    sinAngle = sin(angle)
    cosAngle = cos(angle)

    mat = PhotoScan.Matrix([[cosAngle, 0., sinAngle, 0.],
                            [0., 1., 0., 0.],
                            [-sinAngle, 0., cosAngle, 0.],
                            [0., 0., 0., 1.]])
    # print("matY " + str(mat))
    return mat
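
For reference, rotations about the X and Z axes follow the same pattern as rotY above. A minimal companion sketch (rotX and rotZ are not part of the original and assume the same "from math import sin, cos" context):

def rotX(angle):
    sinAngle = sin(angle)
    cosAngle = cos(angle)
    # standard right-handed rotation about the X axis
    return PhotoScan.Matrix([[1., 0., 0., 0.],
                             [0., cosAngle, -sinAngle, 0.],
                             [0., sinAngle, cosAngle, 0.],
                             [0., 0., 0., 1.]])

def rotZ(angle):
    sinAngle = sin(angle)
    cosAngle = cos(angle)
    # standard right-handed rotation about the Z axis
    return PhotoScan.Matrix([[cosAngle, -sinAngle, 0., 0.],
                             [sinAngle, cosAngle, 0., 0.],
                             [0., 0., 1., 0.],
                             [0., 0., 0., 1.]])
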
Example #6
def align_cameras(chunk, min_latitude, min_longitude):
    if chunk.transform.scale is None:
        chunk.transform.scale = 1
        chunk.transform.rotation = ps.Matrix([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
        chunk.transform.translation = ps.Vector([0, 0, 0])

    i, j, k = get_chunk_vectors(min_latitude, min_longitude)  # i || North
    estimate_rotation_matrices(chunk, i, j)

    for c in chunk.cameras:
        if c.transform is not None:
            continue

        location = c.reference.location
        if location is None:
            continue
        chunk_coordinates = wgs_to_chunk(chunk, location)
        fi = c.reference.rotation.x + 90
        fi = math.radians(fi)
        roll = math.radians(c.reference.rotation.z)
        pitch = math.radians(c.reference.rotation.y)

        roll_mat = ps.Matrix([[1, 0, 0],
                              [0, math.cos(roll), -math.sin(roll)],
                              [0, math.sin(roll), math.cos(roll)]])
        pitch_mat = ps.Matrix([[math.cos(pitch), 0, math.sin(pitch)],
                               [0, 1, 0],
                               [-math.sin(pitch), 0, math.cos(pitch)]])
        yaw_mat = ps.Matrix([[math.cos(fi), -math.sin(fi), 0],
                             [math.sin(fi), math.cos(fi), 0],
                             [0, 0, 1]])

        r = roll_mat * pitch_mat * yaw_mat

        ii = r[0, 0] * i + r[1, 0] * j + r[2, 0] * k
        jj = r[0, 1] * i + r[1, 1] * j + r[2, 1] * k
        kk = r[0, 2] * i + r[1, 2] * j + r[2, 2] * k

        c.transform = ps.Matrix([[ii.x, jj.x, kk.x, chunk_coordinates[0]],
                                 [ii.y, jj.y, kk.y, chunk_coordinates[1]],
                                 [ii.z, jj.z, kk.z, chunk_coordinates[2]],
                                 [0, 0, 0, 1]])
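
The roll, pitch, and yaw factors above are the standard axis rotations, so their product r should itself be a rotation matrix. A small standalone sanity check (plain Python, no PhotoScan required; the angle values are arbitrary):

import math

def matmul(a, b):
    # 3x3 matrix product on nested lists
    return [[sum(a[i][k] * b[k][j] for k in range(3)) for j in range(3)]
            for i in range(3)]

roll, pitch, fi = 0.1, -0.2, 1.3
cr, sr = math.cos(roll), math.sin(roll)
cp, sp = math.cos(pitch), math.sin(pitch)
cf, sf = math.cos(fi), math.sin(fi)
roll_mat = [[1, 0, 0], [0, cr, -sr], [0, sr, cr]]
pitch_mat = [[cp, 0, sp], [0, 1, 0], [-sp, 0, cp]]
yaw_mat = [[cf, -sf, 0], [sf, cf, 0], [0, 0, 1]]

r = matmul(matmul(roll_mat, pitch_mat), yaw_mat)
rt = [[r[j][i] for j in range(3)] for i in range(3)]  # transpose
ident = matmul(r, rt)
# r * r^T must be (numerically) the identity, confirming r is a rotation
for i in range(3):
    for j in range(3):
        assert abs(ident[i][j] - (1 if i == j else 0)) < 1e-9
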
Example #7
def center_bbox_xyz():
    """Centers bounding box to XYZ center."""
    chunk = PhotoScan.app.document.chunk
    transform_matrix = chunk.transform.matrix
    if chunk.crs:
        vect_tm = transform_matrix * PhotoScan.Vector([0, 0, 0, 1])
        vect_tm.size = 3
        locfrm = chunk.crs.localframe(vect_tm)
    else:
        locfrm = PhotoScan.Matrix().diag([1, 1, 1, 1])
    locfrm = locfrm * transform_matrix
    scale = math.sqrt(locfrm[0, 0]**2 + locfrm[0, 1]**2 + locfrm[0, 2]**2)
    mat = PhotoScan.Matrix([[locfrm[0, 0], locfrm[0, 1], locfrm[0, 2]],
                            [locfrm[1, 0], locfrm[1, 1], locfrm[1, 2]],
                            [locfrm[2, 0], locfrm[2, 1], locfrm[2, 2]]])
    mat = mat * (1. / scale)
    reg = chunk.region
    reg.rot = mat.t()
    chunk.region = reg
Example #8
def photoscan_alignphotos(images):
    EOs = []
    start_time = time.time()

    doc = PhotoScan.app.document
    chunk = doc.addChunk()
    chunk.addPhotos(images)
    for camera in chunk.cameras:
        if not camera.reference.location:
            continue
        if ("DJI/RelativeAltitude" in camera.photo.meta.keys()) and camera.reference.location:
            z = float(camera.photo.meta["DJI/RelativeAltitude"])
            camera.reference.location = (camera.reference.location.x, camera.reference.location.y, z)
        gimbal_roll = float(camera.photo.meta["DJI/GimbalRollDegree"])
        gimbal_pitch = float(camera.photo.meta["DJI/GimbalPitchDegree"])
        gimbal_yaw = float(camera.photo.meta["DJI/GimbalYawDegree"])
        camera.reference.rotation = (gimbal_yaw, 90 + gimbal_pitch, gimbal_roll)

    chunk.matchPhotos(accuracy=PhotoScan.MediumAccuracy)
    chunk.alignCameras()

    doc.save("test.psz")

    camera = chunk.cameras[-1]
    if not camera.transform:
        print("There is no transformation matrix")
        return  # camera.center is undefined without a transform

    estimated_coord = chunk.crs.project(
        chunk.transform.matrix.mulp(camera.center))  # estimated XYZ in coordinate system units
    T = chunk.transform.matrix
    m = chunk.crs.localframe(
        T.mulp(camera.center))  # transformation matrix to the LSE coordinates in the given point
    R = (m * T * camera.transform * PhotoScan.Matrix().Diag([1, -1, -1, 1])).rotation()
    estimated_ypr = PhotoScan.utils.mat2ypr(R)  # estimated orientation angles - yaw, pitch, roll
    estimated_opk = PhotoScan.utils.mat2opk(R)  # estimated orientation angles - omega, phi, kappa

    pos = list(estimated_coord)
    ori = list(estimated_opk)
    eo = [pos[0], pos[1], pos[2], ori[0], ori[1], ori[2]]
    EOs.append(eo)
    print("======================================================================================================")
    print(images[-1].split("/")[-1], eo)
    print("======================================================================================================")
    print("process time of each image = ", time.time() - start_time)

    print(estimated_coord, estimated_opk)
    print(estimated_coord[0])
    print(estimated_coord[1])
    print(estimated_coord[2])
    print(estimated_opk[0])
    print(estimated_opk[1])
    print(estimated_opk[2])
Example #9
    def test_errorEllipse_from_eig(self):
        adju = Peseudo_3D_intersection_adjustment()
        # the integer test matrix below is immediately overwritten; only the
        # second (covariance) matrix is actually used
        m = PhotoScan.Matrix([[504, 360, 180], [360, 360, 0], [180, 0, 720]])
        m = PhotoScan.Matrix([[24.66697238419596, 11.102022651894911, 29.082023223173206],
                              [11.10202265189491, 10.052229488742526, 14.941828405336427],
                              [29.082023223173206, 14.941828405336427, 42.78791682803554]])
        eig_valu, eig_vec = adju._get_eigen_vel_vec(m)
        py2scad = Py_2_OpenScad()

        scad_string = py2scad.errorEllipse_from_eig(eig_vec, eig_valu, [0, 0, 0])

        ref_string = "render(){translate([ 0.000, 0.000, 0.000])" + \
                     "rotate([-7.740,-50.259,27.649])" + \
                     "scale([ 8.365, 2.068, 1.806])" + \
                     "sphere(r =  1.000)};\n"

        self.assertTrue(True)  # placeholder; ref_string above is never actually compared

        path = os.path.dirname(os.path.realpath(__file__))

        with open(os.path.join(path, 'scad_ell.scad'), 'w') as f:
            f.write(scad_string)
Example #10
def align_cameras(chunk, min_latitude, min_longitude):
    if chunk.transform.scale is None:
        chunk.transform.scale = 1
        chunk.transform.rotation = ps.Matrix([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
        chunk.transform.translation = ps.Vector([0, 0, 0])

    same_yaw_bound = 40  # within this bound all yaws are considered to be for same-direction flights
    yaws_deltas, first_class_yaw = get_camera_calibration(chunk,
                                                          min_latitude,
                                                          min_longitude,
                                                          same_yaw_bound=same_yaw_bound)

    print('Estimated yaw offsets {}'.format(yaws_deltas))

    i, j, k = get_chunk_vectors(min_latitude, min_longitude)  # i || North

    for c in chunk.cameras:
        group_index = chunk.camera_groups.index(c.group) if c.group is not None else -1

        location = c.reference.location
        if location is None:
            continue
        chunk_coordinates = wgs_to_chunk(chunk, location)
        fi = c.reference.rotation.x + 90
        idx = 0 if math.fabs(c.reference.rotation.x -
                             first_class_yaw[group_index]) < same_yaw_bound else 1
        fi += yaws_deltas[group_index][idx]
        fi = math.radians(fi)

        ii = i * math.cos(fi) + j * math.sin(fi)
        jj = j * math.cos(fi) - i * math.sin(fi)
        c.transform = ps.Matrix([[ii.x, jj.x, k.x, chunk_coordinates[0]],
                                 [ii.y, jj.y, k.y, chunk_coordinates[1]],
                                 [ii.z, jj.z, k.z, chunk_coordinates[2]],
                                 [0, 0, 0, 1]])
Example #11
def cs_to_bbox():
    print("Script started...")

    doc = PhotoScan.app.document
    chunk = doc.chunk

    R = chunk.region.rot     # Bounding box rotation matrix
    C = chunk.region.center  # Bounding box center vector

    if chunk.transform.matrix:
        T = chunk.transform.matrix
        s = math.sqrt(T[0, 0] ** 2 + T[0, 1] ** 2 + T[0, 2] ** 2)  # scaling # T.scale()
        S = PhotoScan.Matrix().Diag([s, s, s, 1])                  # scale matrix
    else:
        S = PhotoScan.Matrix().Diag([1, 1, 1, 1])

    T = PhotoScan.Matrix([[R[0, 0], R[0, 1], R[0, 2], C[0]],
                          [R[1, 0], R[1, 1], R[1, 2], C[1]],
                          [R[2, 0], R[2, 1], R[2, 2], C[2]],
                          [      0,       0,       0,    1]])

    chunk.transform.matrix = S * T.inv()  # resulting chunk transformation matrix

    print("Script finished!")
Example #12
    def test_eig(self):
        adju = Peseudo_3D_intersection_adjustment()
        m = PhotoScan.Matrix([[504, 360, 180], [360, 360, 0], [180, 0, 720]])
        eig_valu, eig_vec = adju._get_eigen_vel_vec(m)
        # print(eig_vec[0])
        # print(eig_vec[1])
        # print(eig_vec[2])
        # print(eig_valu)
        self.assertAlmostEqual(eig_valu[0], 910.06995, 4)
        self.assertAlmostEqual(eig_valu[1], 44.81966, 4)
        self.assertAlmostEqual(eig_valu[2], 629.11038, 4)

        self.assertAlmostEqual(eig_vec[0][0], -0.65580, 4)
        self.assertAlmostEqual(eig_vec[0][1], 0.64879, 4)
        self.assertAlmostEqual(eig_vec[0][2], 0.38600, 4)
Example #13
def export_camera_pose(chunk, output_path):
    file = open(output_path, "wt")
    if chunk.transform:
        T = chunk.transform.matrix
    else:
        T = PhotoScan.Matrix().diag([1, 1, 1, 1])
    print("Exporting camera poses to ", output_path)

    for camera in chunk.cameras:
        if camera.transform:
            coords = T.mulp(camera.center)
            file.write(camera.label + "\t{:.5f}".format(coords[0]) +
                       "\t{:.5f}".format(coords[1]) +
                       "\t{:.5f}".format(coords[2]) + "\n")

    file.close()
    print("Script finished")
Example #14
def photoscan_alignphotos(images, reference_eo, sequence):
    start_time = time.time()

    doc = PhotoScan.app.document
    chunk = doc.addChunk()
    chunk.addPhotos(images)
    for i in range(len(chunk.cameras)):
        chunk.cameras[i].reference.location = (float(reference_eo[6 * i]),
                                               float(reference_eo[6 * i + 1]),
                                               float(reference_eo[6 * i + 2]))
        chunk.cameras[i].reference.rotation = (float(reference_eo[6 * i + 5]),
                                               float(reference_eo[6 * i + 4]),
                                               float(reference_eo[6 * i + 3]))

    chunk.camera_location_accuracy = PhotoScan.Vector([0.001, 0.001, 0.001])
    chunk.camera_rotation_accuracy = PhotoScan.Vector([0.01, 0.01, 0.01])
    # chunk.cameras[-1].reference.location_accuracy = PhotoScan.Vector([10, 10, 10])
    # chunk.cameras[-1].reference.rotation_accuracy = PhotoScan.Vector([10, 10, 10])
    chunk.cameras[-1].reference.accuracy = PhotoScan.Vector([10, 10, 10])
    chunk.cameras[-1].reference.accuracy_ypr = PhotoScan.Vector([10, 10, 10])

    chunk.matchPhotos(accuracy=PhotoScan.MediumAccuracy)
    chunk.alignCameras()

    # doc.save("test_" + str(int(sequence)+1) + ".psz")

    camera = chunk.cameras[-1]
    if not camera.transform:
        print("There is no transformation matrix")
        return  # camera.center is undefined without a transform

    estimated_coord = chunk.crs.project(
        chunk.transform.matrix.mulp(camera.center))  # estimated XYZ in coordinate system units
    T = chunk.transform.matrix
    m = chunk.crs.localframe(
        T.mulp(camera.center))  # transformation matrix to the LSE coordinates in the given point
    R = (m * T * camera.transform * PhotoScan.Matrix().Diag([1, -1, -1, 1])).rotation()
    estimated_ypr = PhotoScan.utils.mat2ypr(R)  # estimated orientation angles - yaw, pitch, roll
    estimated_opk = PhotoScan.utils.mat2opk(R)  # estimated orientation angles - omega, phi, kappa

    print(estimated_coord[0])
    print(estimated_coord[1])
    print(estimated_coord[2])
    print(estimated_ypr[0])
    print(estimated_ypr[1])
    print(estimated_ypr[2])
    print(estimated_opk[0])
    print(estimated_opk[1])
    print(estimated_opk[2])
Example #15
def main():

    doc = PhotoScan.app.document

    for chunk in doc.chunks:

        R = chunk.region.rot
        C = chunk.region.center

        if chunk.transform:
            T = chunk.transform
            s = math.sqrt(T[0, 0] * T[0, 0] + T[0, 1] * T[0, 1] +
                          T[0, 2] * T[0, 2])
            S = PhotoScan.Matrix([[s, 0, 0, 0], [0, s, 0, 0], [0, 0, s, 0],
                                  [0, 0, 0, 1]])
        else:
            S = PhotoScan.Matrix([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0],
                                  [0, 0, 0, 1]])

        T = PhotoScan.Matrix([[R[0, 0], R[0, 1], R[0, 2], C[0]],
                              [R[1, 0], R[1, 1], R[1, 2], C[1]],
                              [R[2, 0], R[2, 1], R[2, 2], C[2]], [0, 0, 0, 1]])

        xm = PhotoScan.Matrix([[1, 0, 0, 0], [0, 0, -1, 0], [0, 1, 0, 0],
                               [0, 0, 0, 1]])
        ym = PhotoScan.Matrix([[0, 0, -1, 0], [0, 1, 0, 0], [1, 0, 0, 0],
                               [0, 0, 0, 1]])
        zm = PhotoScan.Matrix([[0, -1, 0, 0], [1, 0, 0, 0], [0, 0, 1, 0],
                               [0, 0, 0, 1]])

        chunk.transform = ym * xm * S * T.inv()

        # xm, ym, zm rotate by 90 degrees about X, Y, Z respectively;
        # to rotate about Z instead, use chunk.transform = zm * S * T.inv()
    print(
        "Script finished. Coordinate system is now parallel to Bounding Box\n")
Example #16
import math

doc = PhotoScan.app.document

chnk = doc.chunk

T = chnk.transform

v = PhotoScan.Vector([0, 0, 0, 1])

v_t = T.matrix * v

v_t.size = 3

m = chnk.crs.localframe(v_t)

m = m * T.matrix

s = math.sqrt(m[0, 0] * m[0, 0] + m[0, 1] * m[0, 1] +
              m[0, 2] * m[0, 2])  #scale factor
# S = PhotoScan.Matrix( [[s, 0, 0], [0, s, 0], [0, 0, s]] ) #scale matrix

R = PhotoScan.Matrix([[m[0, 0], m[0, 1], m[0, 2]], [m[1, 0], m[1, 1], m[1, 2]],
                      [m[2, 0], m[2, 1], m[2, 2]]])

R = R * (1. / s)

reg = chnk.region
reg.rot = R.t()
chnk.region = reg
Example #17
def processscan(scanfile):
    configfile = MonitorDirectory + scanfile
    log("JSON file: " + configfile)
    config = json.loads(open(configfile).read())
    scanid = config["scanid"]
    normaldir = config["normaldir"]
    projectdir = config["projectdir"]
    savedir = config["savedir"]

    try:
        SKETCHFAB_ENABLE = config["SKETCHFAB_ENABLE"]
        log("Taking JSON setting for sketchfab enable")
    except KeyError:
        log("Taking default sketchfab setting from main script")

    try:
        SKETCHFAB_DESCRIPTION = config["SKETCHFAB_DESCRIPTION"]
        log("Taking JSON setting for sketchfab description")
    except KeyError:
        log("Taking sketchfab description from main script")

    # STEP 1 - Load Images
    doc = PhotoScan.app.document
    doc.clear()
    chunk = doc.addChunk()
    photos = os.listdir(normaldir)  # Get the photos filenames
    photos = [os.path.join(normaldir, p)
              for p in photos]  # Make them into a full path
    log("Found {} photos in {}".format(len(photos), normaldir))
    if not chunk.addPhotos(photos):
        log("ERROR: Failed to add photos: " + str(photos))

    # STEP 2 - Detect Markers
    log("Detecting markers on non-projected images")
    chunk.detectMarkers(PhotoScan.TargetType.CircularTarget12bit, 50)

    # STEP 3 - Create auto mask, if empty directory is specified in JSON file
    try:
        emptydir = config["emptydir"]
        log("Mask directory found, going to create auto mask")
    except KeyError:
        emptydir = ""
        log("No mask directory set, no auto masking will take place")
    if (emptydir != ""):
        log("Creating auto mask based on non-projected images")
        maskpath = emptydir + "{filename}.jpg"
        chunk.importMasks(maskpath,
                          method='background',
                          tolerance=MaskTolerence)

    # STEP 4 - Change images to projection images
    log("Switching to projection images")
    for camera in chunk.cameras:
        photo = camera.photo.copy()
        photo.path = projectdir + camera.label
        camera.photo = photo

    # STEP 5 - Align Images
    chunk.matchPhotos(accuracy=PhotoScan.HighAccuracy,
                      preselection=PhotoScan.NoPreselection,
                      filter_mask=True,
                      keypoint_limit=keypointLimit,
                      tiepoint_limit=tiepointLimit)
    chunk.alignCameras()

    # STEP 6 - Create Auto Bounding box
    mp0 = 0
    mpy = 0
    mpx = 0
    fp0 = 0
    fpy = 0
    fpx = 0
    #setting for Y up, Z forward -> needed for mixamo/unity
    vector0 = PhotoScan.Vector((0, 0, 0))
    vectorY = PhotoScan.Vector((0, 0, distancepy))  # Specify Y Distance
    vectorX = PhotoScan.Vector((distancepx, 0, 0))  # Specify X Distance
    c1 = 0
    c2 = 0
    c3 = 0
    c4 = 0
    for c, m in enumerate(chunk.markers):
        if m.label == c1target:
            log("Center 1 point found")
            c1 = c
        if m.label == c2target:
            log("Center 2 point found")
            c2 = c
        if m.label == c3target:
            log("Center 3 point found")
            c3 = c
        if m.label == c4target:
            log("Center 4 point found")
            c4 = c
        if m.label == p0:
            mp0 = c
            fp0 = 1
            m.reference.location = vector0
            m.reference.enabled = 1
            log("Found floormat center point")
        if m.label == py:
            mpy = c
            fpy = 1
            m.reference.location = vectorY
            m.reference.enabled = 1
            log("Found floormat Y point")
        if m.label == px:
            mpx = c
            fpx = 1
            m.reference.location = vectorX
            m.reference.enabled = 1
            log("Found floormat X point")

    if fp0 and fpx and fpy:
        log("Found all markers")
        chunk.updateTransform()
    else:
        log("Error: not all markers found")

    newregion = chunk.region

    T = chunk.transform.matrix
    v_t = T * PhotoScan.Vector([0, 0, 0, 1])  # not used below; kept from the CRS-aware variant of this block
    m = PhotoScan.Matrix().diag([1, 1, 1, 1])

    m = m * T
    s = math.sqrt(m[0, 0]**2 + m[0, 1]**2 + m[0, 2]**2)  #scale factor
    R = PhotoScan.Matrix([[m[0, 0], m[0, 1], m[0, 2]],
                          [m[1, 0], m[1, 1], m[1, 2]],
                          [m[2, 0], m[2, 1], m[2, 2]]])
    R = R * (1. / s)
    newregion.rot = R.t()

    # Calculate center point of the bounding box, by taking the average of 2 left and 2 right markers
    mx = (chunk.markers[c1].position + chunk.markers[c2].position +
          chunk.markers[c3].position + chunk.markers[c4].position) / 4

    mx = PhotoScan.Vector([mx[0], mx[1], mx[2]])
    newregion.center = mx

    dist = chunk.markers[mp0].position - chunk.markers[mpy].position
    dist = dist.norm()

    ratio = dist / distancepy

    newregion.size = PhotoScan.Vector(
        [boxwidth * ratio, boxheight * ratio, boxdepth * ratio])

    chunk.region = newregion
    chunk.updateTransform()

    log("Bounding box should be aligned now")

    # STEP 7 - Create Dense Cloud
    chunk.buildDenseCloud(quality=PhotoScan.HighQuality,
                          filter=PhotoScan.AggressiveFiltering)

    # STEP 8 - Create MESH
    chunk.buildModel(surface=PhotoScan.Arbitrary,
                     interpolation=PhotoScan.EnabledInterpolation,
                     face_count=PhotoScan.HighFaceCount)

    # STEP 9 - Switch projection images back to normal images
    for camera in chunk.cameras:
        photo = camera.photo.copy()
        photo.path = normaldir + camera.label
        camera.photo = photo

    # STEP 10 - Do some basic clean up operations
    try:
        chunk.model.removeComponents(removecomponentsmax)
    except Exception:
        log("Error removing small components")
    try:
        chunk.model.fixTopology()
    except Exception:
        log("Error fixing topology")
    try:
        chunk.model.closeHoles(100)
    except Exception:
        log("Error closing holes")
    try:
        if smoothmodellevel > 1:
            chunk.smoothModel(smoothmodellevel)
    except Exception:
        log("Error smoothing model")

    # STEP 11 - Create UVmap and Texture
    chunk.buildUV(mapping=PhotoScan.GenericMapping)
    chunk.buildTexture(blending=PhotoScan.MosaicBlending, size=8196)

    # STEP 12 - Save files
    doc.save(savedir + scanid + ".psz")
    modelpath = savedir + scanid + ".obj"
    chunk.exportModel(modelpath,
                      binary=True,
                      precision=6,
                      texture_format="jpg",
                      texture=True,
                      normals=True,
                      colors=True,
                      cameras=False,
                      format="obj")

    # STEP 13 - Zip files
    if SKETCHFAB_ENABLE:
        log("Zipping files to upload to sketchfab")
        zf = zipfile.ZipFile(savedir + scanid + ".zip", mode="w")
        try:
            zf.write(savedir + scanid + ".mtl")
            zf.write(savedir + scanid + ".obj")
            zf.write(savedir + scanid + ".jpg")
        finally:
            zf.close()
        zip_file = savedir + scanid + ".zip"

        # STEP 14 - Upload to sketchfab
        data = {
            'token': SKETCHFAB_API_TOKEN,
            'name': scanid,
            'description': SKETCHFAB_DESCRIPTION,
            'tags': SKETCHFAB_TAGS,
            'private': SKETCHFAB_PRIVATE,
            'password': SKETCHFAB_PASSWORD
        }

        f = open(zip_file, 'rb')
        files = {'modelFile': f}

        try:
            log("Uploading... Agisoft will appear to hang while uploading, please wait")
            PhotoScan.app.update()
            model_url = upload(data, files)
            sfile = open(savedir + scanid + "_sketchfabURL.txt", "w")
            sfile.write(model_url)
            sfile.close()
            log("Uploaded to Sketchfab")
        finally:
            f.close()

    log("=============================================================================="
        )
    log(" Completeted processing: " + scanid)
    log("=============================================================================="
        )
Example #18
# Rotates the model coordinate system to match the bounding box of the active chunk.
# Scale is kept.
# Compatibility: Agisoft PhotoScan Professional 1.1.0

import PhotoScan
import math

doc = PhotoScan.app.document
chunk = doc.chunk

R = chunk.region.rot  #Bounding box rotation matrix
C = chunk.region.center  #Bounding box center vector

if chunk.transform.matrix:
    T = chunk.transform.matrix
    s = math.sqrt(T[0, 0]**2 + T[0, 1]**2 + T[0, 2]**2)  #scaling
    S = PhotoScan.Matrix([[s, 0, 0, 0], [0, s, 0, 0], [0, 0, s, 0],
                          [0, 0, 0, 1]])  #scale matrix
else:
    S = PhotoScan.Matrix([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0],
                          [0, 0, 0, 1]])

T = PhotoScan.Matrix([[R[0, 0], R[0, 1], R[0, 2], C[0]],
                      [R[1, 0], R[1, 1], R[1, 2], C[1]],
                      [R[2, 0], R[2, 1], R[2, 2], C[2]], [0, 0, 0, 1]])

chunk.transform.matrix = S * T.inv()  #resulting chunk transformation matrix
Example #19
    def exp_ortho(self):

        doc = PhotoScan.app.document
        chunk = doc.chunk
        path = doc.path.rsplit("\\", 1)[0]

        if not chunk.model:
            PhotoScan.app.messageBox("No mesh generated!\n")
            return False

        try:
            resolution = float(self.resEdt.text())
        except ValueError:
            PhotoScan.app.messageBox(
                "Incorrect export resolution! Please use point delimiter.\n")
            print("Script aborted.")
            return False

        print("Export started...")  #information message

        self.btnP1.setDisabled(True)
        self.btnQuit.setDisabled(True)
        self.pBar.setMinimum(0)
        self.pBar.setMaximum(100)

        export_list = list()
        if self.radioBtn_sel.isChecked():
            for photo in chunk.cameras:
                if photo.selected:
                    export_list.append(photo)
        elif self.radioBtn_all.isChecked():
            export_list = list(chunk.cameras)
        elif self.radioBtn_rnd.isChecked():
            p_num = 10  # number of random cameras to export (was undefined in the original)
            random_cams = random.sample(range(len(chunk.cameras)), p_num)
            for i in range(0, p_num):
                export_list.append(chunk.cameras[random_cams[i]])
        for photo in chunk.cameras:
            photo.enabled = False

        blending_mode = self.blend_types[self.blendCmb.currentText()]

        processed = 0
        t0 = time.time()

        for photo in chunk.cameras:
            photo.enabled = False

        PhotoScan.app.update()

        for photo in export_list:

            if not photo.transform:
                continue

            x0 = x1 = x2 = x3 = PhotoScan.Vector((0.0, 0.0, 0.0))

            width = photo.sensor.width
            height = photo.sensor.height
            calibration = photo.sensor.calibration

            # vectors corresponding to photo corners

            v0 = PhotoScan.Vector((-calibration.cx / calibration.fx,
                                   -calibration.cy / calibration.fy, 1))
            v1 = PhotoScan.Vector(((width - calibration.cx) / calibration.fx,
                                   -calibration.cy / calibration.fy, 1))
            v2 = PhotoScan.Vector(
                (-calibration.cx / calibration.fx,
                 (height - calibration.cy) / calibration.fy, 1))
            v3 = PhotoScan.Vector(
                ((width - calibration.cx) / calibration.fx,
                 (height - calibration.cy) / calibration.fy, 1))
            vc = photo.center

            v0.size = v1.size = v2.size = v3.size = vc.size = 4
            v0[3] = v1[3] = v2[3] = v3[3] = 0
            vc[3] = 1

            M = chunk.transform.matrix * photo.transform

            v0_gc = M * v0
            v1_gc = M * v1
            v2_gc = M * v2
            v3_gc = M * v3
            vc_gc = chunk.transform.matrix * vc

            v0_gc.size = v1_gc.size = v2_gc.size = v3_gc.size = vc_gc.size = 3

            # surface normal

            cen_p = photo.center
            cen_t = chunk.transform.matrix.mulp(cen_p)
            if chunk.crs:
                cen_t = chunk.crs.project(cen_t)

            h = self.surf_height(chunk, photo)

            vloc = PhotoScan.Vector((cen_t[0], cen_t[1], h))
            vloc_h = PhotoScan.Vector((cen_t[0], cen_t[1], h))
            vloc_h[2] += 1

            if chunk.crs:
                vloc_gc = chunk.crs.unproject(vloc)
                vloc_h_gc = chunk.crs.unproject(vloc_h)
                surf_n = vloc_h_gc - vloc_gc
            else:
                vloc_gc = vloc
                vloc_h_gc = vloc_h
                surf_n = vloc_h - vloc

            surf_n.normalize()
            v0_gc.normalize()
            v1_gc.normalize()
            v2_gc.normalize()
            v3_gc.normalize()

            #intersection with the surface

            x0 = intersect(vloc_gc, surf_n, vc_gc, v0_gc)
            x1 = intersect(vloc_gc, surf_n, vc_gc, v1_gc)
            x2 = intersect(vloc_gc, surf_n, vc_gc, v2_gc)
            x3 = intersect(vloc_gc, surf_n, vc_gc, v3_gc)

            if chunk.crs:
                x0 = chunk.crs.project(x0)
                x1 = chunk.crs.project(x1)
                x2 = chunk.crs.project(x2)
                x3 = chunk.crs.project(x3)

            x_0 = min(x0[0], x1[0], x2[0], x3[0])
            x_1 = max(x0[0], x1[0], x2[0], x3[0])
            y_0 = min(x0[1], x1[1], x2[1], x3[1])
            y_1 = max(x0[1], x1[1], x2[1], x3[1])

            x_0 -= (x_1 - x_0) / 20.
            x_1 += (x_1 - x_0) / 20.
            y_0 -= (y_1 - y_0) / 20.
            y_1 += (y_1 - y_0) / 20.

            reg = (x_0, y_0, x_1, y_1)

            photo.enabled = True
            PhotoScan.app.update()
            p_name = photo.photo.path.rsplit("/", 1)[1].rsplit(".", 1)[0]
            p_name = "ortho_" + p_name

            if chunk.crs:
                proj = chunk.crs  ##export in chunk coordinate system
            else:
                proj = PhotoScan.Matrix().diag([1, 1, 1, 1])  #TopXY
            d_x = d_y = resolution

            #recalculating WGS84 resolution from degrees into meters if required
            if chunk.crs:
                if ('UNIT["degree"' in proj.wkt):
                    crd = photo.reference.location

                    #longitude
                    v1 = PhotoScan.Vector((crd[0], crd[1], 0))
                    v2 = PhotoScan.Vector((crd[0] + 0.001, crd[1], 0))
                    vm1 = chunk.crs.unproject(v1)
                    vm2 = chunk.crs.unproject(v2)
                    res_x = (vm2 - vm1).norm() * 1000

                    #latitude
                    v2 = PhotoScan.Vector((crd[0], crd[1] + 0.001, 0))
                    vm2 = chunk.crs.unproject(v2)
                    res_y = (vm2 - vm1).norm() * 1000

                    pixel_x = pixel_y = resolution  #export resolution (meters/pix)
                    d_x = pixel_x / res_x
                    d_y = pixel_y / res_y

            if chunk.exportOrthophoto(path + "\\" + p_name + ".tif",
                                      format="tif",
                                      blending=blending_mode,
                                      color_correction=False,
                                      projection=proj,
                                      region=reg,
                                      dx=d_x,
                                      dy=d_y,
                                      write_world=True):
                processed += 1
            photo.enabled = False
            self.pBar.setValue(int(processed / len(export_list) * 100))

        for photo in chunk.cameras:
            photo.enabled = True

        PhotoScan.app.update()

        self.btnP1.setDisabled(False)
        self.btnQuit.setDisabled(False)

        t1 = time.time()

        t1 -= t0
        t1 = int(t1)

        PhotoScan.app.messageBox(
            "Processing finished.\nProcessed " + str(processed) +
            " images to orthophotos.\nProcessing time: " + str(t1) +
            " seconds.\nPress OK.")  #information message

        return 1
Example #20
def run_script(points, img_name):
    global chunk
    photo_name = img_name.split('/')[-1]
    chunk = PhotoScan.app.document.chunk # set the chunk
    model = chunk.model
    vertices = chunk.model.vertices

    print(chunk)

    if chunk.transform.matrix:
        T0 = chunk.transform.matrix
    else:
        T0 = PhotoScan.Matrix().diag([1, 1, 1, 1])

    for point in points:
        x = point[0]
        y = point[1]
        marker_2D = (x, y) # pixel coordinates on the image for marker
        findIndex(photo_name)  # assumed to set the global cam_num for this photo
        camera = chunk.cameras[cam_num]  # sets the camera to create the marker on
        marker = chunk.addMarker() # creates a marker on the chunk
        marker.projections[camera] = marker_2D # moves that marker to appropriate location on image

        point_2D = marker.projections[camera].coord
        vect = camera.sensor.calibration.unproject(point_2D)
        vect = camera.transform.mulv(vect)
        center = camera.center


        # estimating ray and surface intersection (Möller-Trumbore ray-triangle test per face)
        for face in model.faces:

            v = face.vertices

            E1 = PhotoScan.Vector(vertices[v[1]].coord - vertices[v[0]].coord)
            E2 = PhotoScan.Vector(vertices[v[2]].coord - vertices[v[0]].coord)
            D = PhotoScan.Vector(vect)
            T = PhotoScan.Vector(center - vertices[v[0]].coord)
            P = cross(D, E2)
            Q = cross(T, E1)
            result = PhotoScan.Vector([Q * E2, P * T, Q * D]) / (P * E1)

            if (0 < result[1]) and (0 < result[2]) and (result[1] + result[2] <= 1):
                t = (1 - result[1] - result[2]) * vertices[v[0]].coord
                u = result[1] * vertices[v[1]].coord
                v_ = result[2] * vertices[v[2]].coord

                point_3D = T0.mulp(u + v_ + t)
                point_3D = chunk.crs.project(point_3D)
                break

        point = chunk.crs.unproject(point_3D)
        point = T0.inv().mulp(point)

        for cur_camera in chunk.cameras:

            if (cur_camera == camera) or not cur_camera.transform:
                continue
            cur_proj = cur_camera.project(point)

            if (0 <= cur_proj[0] < camera.sensor.width) and (0 <= cur_proj[1] < camera.sensor.height):
                marker.projections[cur_camera] = cur_proj
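
The snippet above calls a cross(...) helper that is not shown. A minimal sketch of that missing helper for PhotoScan.Vector, mirroring the vect function defined in Example #25 below (an assumption about the original definition):

def cross(a, b):
    # 3-vector cross product, returned as a PhotoScan.Vector
    return PhotoScan.Vector([a.y * b.z - a.z * b.y,
                             a.z * b.x - a.x * b.z,
                             a.x * b.y - a.y * b.x])
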
Example #21
    def photoscan_alignphotos(self, ImgList):

        start_time = time.time()

        doc = PhotoScan.app.document
        chunk = doc.addChunk()
        chunk.addPhotos(ImgList)

        # Set pixel size to 0.017mm
        doc.chunk.sensors[0].pixel_height = 0.017
        doc.chunk.sensors[0].pixel_width = 0.017

        # Import RPY from EO file
        for img_fname in ImgList:
            doc.chunk.loadReference(
                img_fname.split('.')[0] + '.txt', PhotoScan.ReferenceFormatCSV,
                'nxyzabc', '\t')

        print("==match Photos=================================================")
        print("===============================================================")
        chunk.matchPhotos(accuracy=PhotoScan.MediumAccuracy)

        print("==align photo==================================================")
        print("===============================================================")
        chunk.alignCameras()

        # print("==save project=================================================")
        # #저장 파일 이름을 center_image_name + 시간 으로 변경
        # path = "./test.psz"
        # doc.save(path)
        # print("===============================================================")

        center_photo_index = len(chunk.cameras) // 2
        print("center image number = ", center_photo_index)

        photo1 = chunk.cameras[center_photo_index]  # the middle image of the 5-image list

        # printing basic image info (work in progress)
        FocalLength = photo1.photo.meta["Exif/FocalLength"]
        print(FocalLength)
        image_width = photo1.photo.meta["Exif/Width"]
        image_height = photo1.photo.meta["Exif/Height"]

        IO = [FocalLength, image_width, image_height]
        print(IO)

        if not photo1.transform:
            print("There is no transformation matrix")

        print("==extract X(E), Y(N), Z(Altitude), Yaw, Pitch, Roll=============================")
        XYZ = chunk.crs.project(chunk.transform.matrix.mulp(photo1.center))
        T = chunk.transform.matrix
        m = chunk.crs.localframe(
            T.mulp(photo1.center)
        )  # transformation matrix to the LSE coordinates in the given point
        R = m * T * photo1.transform * PhotoScan.Matrix().Diag([1, -1, -1, 1])

        row = list()

        for j in range(0, 3):  # creating normalized rotation matrix 3x3
            row.append(R.row(j))
            row[j].size = 3
            row[j].normalize()

        R = PhotoScan.Matrix([row[0], row[1], row[2]])
        omega, phi, kappa = PhotoScan.utils.mat2opk(R)  # estimated orientation angles

        # print("EO(XYZ) = ", XYZ)
        # print("T = ", T)
        # print("m = ", m)
        # print("R = ", R)
        # print("R = ", R)

        fname = ImgList[center_photo_index]
        # print(type(XYZ))
        XYZ_list = list(XYZ)
        EO = [XYZ_list[0], XYZ_list[1], XYZ_list[2], kappa, phi, omega]

        # print("File Name: ", fname, "X(Longitude) = ", EO[0], "Y(Latitude) = ", EO[1], "Z(Altitude) = ", EO[2],
        #       "yaw = ", EO[3], "pitch = ", EO[4],  "roll = ", EO[5])

        print("process time = ", time.time() - start_time)

        return fname, EO
Example #22
    def photoscan_alignphotos(self, ImgList):
        start_time = time.time()

        # Prepare a document
        doc = PhotoScan.app.document
        chunk = doc.addChunk()
        chunk.addPhotos(ImgList)
        chunk.crs = self.my_crs

        # Retrieve georeferencing data of reference images
        doc.chunk.loadReference('reference_query_merged.txt',
                                PhotoScan.ReferenceFormatCSV, 'n[XYZ]xyz', ',')

        # Start aerial triangulation
        print("==match Photos=================================================")
        print("===============================================================")
        chunk.matchPhotos(accuracy=PhotoScan.MediumAccuracy)

        print("==align photo==================================================")
        print("===============================================================")
        chunk.alignCameras()

        doc.save(path='result.psz', chunks=[doc.chunk])

        # Last image
        photo1 = chunk.cameras[-1]

        if not photo1.transform:
            print("There is no transformation matrix")

        XYZ = chunk.crs.project(chunk.transform.matrix.mulp(photo1.center))
        T = chunk.transform.matrix
        m = chunk.crs.localframe(
            T.mulp(photo1.center)
        )  # transformation matrix to the LSE coordinates in the given point
        R = m * T * photo1.transform * PhotoScan.Matrix().Diag([1, -1, -1, 1])

        row = list()

        for j in range(0, 3):  # creating normalized rotation matrix 3x3
            row.append(R.row(j))
            row[j].size = 3
            row[j].normalize()

        R = PhotoScan.Matrix([row[0], row[1], row[2]])
        omega, phi, kappa = PhotoScan.utils.mat2opk(R)  # estimated orientation angles

        XYZ_list = list(XYZ)
        EO = [XYZ_list[0], XYZ_list[1], XYZ_list[2], kappa, phi, omega]

        print("===============================================================")
        print("===============================================================")
        print("===============================================================")
        print("==RESULT=======================================================")
        print("===============================================================")
        print("===============================================================")
        print("===============================================================")

        print('Query image name: %s' % photo1.label)
        print('Adjusted EO (X, Y, Z, kappa, phi, omega)')
        print(EO)

        print("process time = ", time.time() - start_time)

        return EO
Example #23
#bounding box size is kept
#compatibility: Agisoft PhotoScan Professional 1.1.0

import PhotoScan
import math

doc = PhotoScan.app.document
chunk = doc.chunk

T = chunk.transform.matrix

v_t = T * PhotoScan.Vector([0, 0, 0, 1])
v_t.size = 3

if chunk.crs:
    m = chunk.crs.localframe(v_t)
else:
    m = PhotoScan.Matrix().diag([1, 1, 1, 1])

m = m * T

s = math.sqrt(m[0, 0] ** 2 + m[0, 1] ** 2 + m[0, 2] ** 2)  # scale factor

R = PhotoScan.Matrix([[m[0, 0], m[0, 1], m[0, 2]],
                      [m[1, 0], m[1, 1], m[1, 2]],
                      [m[2, 0], m[2, 1], m[2, 2]]])

R = R * (1. / s)

reg = chunk.region
reg.rot = R.t()
chunk.region = reg
Example #24
def photoscanProcess(sampleid,
                     camType,
                     path,
                     export_path,
                     scaletxt="scalebars.csv",
                     proj_path="projects",
                     data_path="data/LTMP"):
    '''
    path: relative directory path to each transect. Eventually this will come from reefmon
    export_path: folder name where data will be exported to (not built in here yet)
    scaletxt: text file containing the scale bar measurements per trip
    proj_path: root folder where projects will be saved
    data_path: root directory where data is stored. This will help using the relative path in "path"
    calfile: calibration parameter files from cameras. This should be stored in the calibration folder
    stereo: logical value for activating stereo scaling
    '''
    ### Set GPU environment ####
    PhotoScan.app.gpu_mask = 2**len(
        PhotoScan.app.enumGPUDevices()) - 1  #setting GPU mask
    if PhotoScan.app.gpu_mask:
        PhotoScan.app.cpu_enable = False
    else:
        PhotoScan.app.cpu_enable = True
    ## end of set GPU environment

    ##Set parameter environment
    #load camera parameters
    camdict = cdict[camType]
    # processing parameters
    #TODO Move this to camdict <mgr>
    accuracy = PhotoScan.Accuracy.HighAccuracy  #align photos accuracy
    reference_preselection = False
    generic_preselection = True
    keypoints = 40000  #align photos key point limit
    tiepoints = 4000  #align photos tie point limit
    source = PhotoScan.DataSource.DenseCloudData  #build mesh/DEM source
    surface = PhotoScan.SurfaceType.HeightField  #build mesh surface type
    quality = PhotoScan.Quality.LowQuality  #build dense cloud quality
    filtering = PhotoScan.FilterMode.AggressiveFiltering  #depth filtering
    interpolation = PhotoScan.Interpolation.EnabledInterpolation  #build mesh interpolation
    blending = PhotoScan.BlendingMode.MosaicBlending  #blending mode
    face_num = PhotoScan.FaceCount.HighFaceCount  #build mesh polygon count
    mapping = PhotoScan.MappingMode.GenericMapping  #build texture mapping
    atlas_size = 4096
    TYPES = ["jpg", "jpeg", "tif", "tiff"]

    print("Processing " + path)

    ## Load images
    doc = PhotoScan.app.document
    # docpath=doc.path
    # c=docpath.split('/projects')[0]
    list_files = os.listdir(os.path.join('.', data_path, path))
    imlist = list()
    for entry in list_files:  #finding image files
        file = os.path.join('.', data_path, path, entry)
        if os.path.isfile(file):
            if file[-3:].lower() in TYPES:
                imlist.append(file)

    if not imlist:
        print("No images in " + path)
        return False

    imdate = []
    for i in imlist:
        # imlist entries are already full relative paths, so open them directly
        d = Image.open(i)._getexif()[36867]  ## TODO: change this to exiftool. Maybe it is faster <mgr>
        imdate.append(datetime.strptime(d, camdict['dateformat']))  # get image date time

    im = pd.DataFrame({'im': imlist, 'date': imdate})
    im = im.sort_values(by='date')

    ### Synchronise Left and Right camera
    Lidx = misc.first_substring(im, 'im', camdict['lstring'], contains=True)
    Ridx = misc.first_substring(im, 'im', camdict['lstring'], contains=False)
    Tdiff = im.date[Lidx] - im.date[Ridx]
    idx = im.im.str.contains(camdict['lstring'])
    im.loc[idx, 'date'] = im.date[im.im.str.contains(
        camdict['lstring'])] + Tdiff - timedelta(seconds=5)
    imlist = im.im

    ##Split images into chunks
    n = camdict['chunk_size']  #group size
    m = camdict['overlap']  #overlap
    imlist = [imlist[i:i + n] for i in range(0, len(imlist), n - m)]

    ## SAVE AS project to reset editing permissions
    doc.save('./' + proj_path + '/' + path + '.psx')

    ## Process chunks
    with open(os.path.join('.', export_path, 'reports', sampleid + ".csv"),
              "w") as csvFile:
        fieldnames = [
            'SAMPLEID', 'NO_IMAGES', 'ALIGNED', 'pALIGNED', 'SCALED',
            'NO_SCALEBARS', 'SCALE_ERROR', 'NO_MARKERS', 'MARKER_ERROR'
        ]
        writer = csv.writer(csvFile, delimiter=',')
        writer.writerow(fieldnames)

        for i in range(0, len(imlist)):
            chunk = doc.addChunk()
            chunk.label = sampleid + '_' + str(i)
            chunk.addPhotos(imlist[i])
            preProcess(doc, chunk, scaletxt, camdict)

            ### align photos ###
            chunk.matchPhotos(accuracy=accuracy,
                              generic_preselection=generic_preselection,
                              reference_preselection=reference_preselection,
                              filter_mask=False,
                              keypoint_limit=keypoints,
                              tiepoint_limit=tiepoints)
            chunk.alignCameras()
            chunk.optimizeCameras()
            chunk.resetRegion()
            doc.save()

            ### build dense cloud ###
            chunk.buildDepthMaps(quality=quality, filter=filtering)
            chunk.buildDenseCloud(point_colors=True, keep_depth=False)
            doc.save()

            ###building mesh and stereo scaling
            if camdict['stereo']:
                chunk.buildModel(surface=surface,
                                 source=source,
                                 interpolation=interpolation,
                                 face_count=PhotoScan.FaceCount.LowFaceCount)
                doc.save()
                scale_cams(chunk, camdict=camdict)
                chunk.buildModel(surface=surface,
                                 source=source,
                                 interpolation=interpolation,
                                 face_count=face_num)
            else:
                chunk.buildModel(surface=surface,
                                 source=source,
                                 interpolation=interpolation,
                                 face_count=face_num)

            ###build mesh texture
            chunk.buildUV(mapping=mapping, count=4)
            chunk.buildTexture(blending=blending, size=atlas_size)
            doc.save()

            ##Build orthomosaic
            XYproj = PhotoScan.Matrix([[1.0, 0.0, 0.0, 0.0],
                                       [0.0, 1.0, 0.0, 0.0],
                                       [0.0, 0.0, 1.0, 0.0],
                                       [0.0, 0.0, 0.0, 1.0]])
            chunk.buildOrthomosaic(
                surface=PhotoScan.DataSource.ModelData,
                blending=PhotoScan.BlendingMode.MosaicBlending,
                projection=XYproj)

            ### Write report
            noimgs = len(chunk.cameras)  # total number of images per chunk
            aligned = len(pe.checkalign(chunk))  # number of images aligned
            paligned = aligned / noimgs  # proportion of images aligned
            clength = chunk.orthomosaic.height * chunk.orthomosaic.resolution  # length of reconstructed chunk
            cwidth = chunk.orthomosaic.width * chunk.orthomosaic.resolution  # width of reconstructed chunk
            nomarkers = len(chunk.markers)

            if chunk.transform:
                scaled = True
            else:
                scaled = False
            nmarkers = 0
            nscalebars = 0
            for m in chunk.markers:
                if m.selected:
                    nmarkers += 1
            for s in chunk.scalebars:
                if s.selected:
                    nscalebars += 1
            if nscalebars <= 1:
                serror = 'NULL'
            else:
                serror = np.mean(pe.scale_error(chunk))  # measurement error

            if nmarkers > 0:
                merror = np.mean(pe.markerProjError(chunk))
            else:
                merror = 'NULL'

            csvData = [
                sampleid, noimgs, aligned, paligned, scaled, nscalebars,
                serror, nmarkers, merror
            ]
            csvData = [str(f) for f in csvData]
            writer.writerow(csvData)  # writerows would split each string into characters

            ##TODO: 1)export cameras, mosaics, models. 2) include check gate using model evaluation metics. <mgr>
    print("Processed " + chunk.label)
Example #25
def bounding_box(chunk):
    CORNERS = ["target 18", "target 5", "target 10", "target 4"]

    # target 5----------target 10
    #     |               |
    #     |               |
    # target 18---------target 4
    # x,y-plane
    def vect(a, b):

        result = PhotoScan.Vector([
            a.y * b.z - a.z * b.y, a.z * b.x - a.x * b.z, a.x * b.y - a.y * b.x
        ])
        return result.normalized()

    def get_marker(label, chunk):

        for marker in chunk.markers:
            if marker.label.lower() == label.lower():
                return marker
        return None

    doc = PhotoScan.app.document
    chunk = doc.chunk  # active chunk (note: this overrides the chunk argument)
    if chunk.transform.matrix:
        T = chunk.transform.matrix
        s = chunk.transform.scale
    else:
        T = PhotoScan.Matrix().Diag([1, 1, 1, 1])
        s = 1

    for x in CORNERS:
        print('get_marker: ', get_marker(x, chunk))
        if get_marker(x, chunk) is None:
            return
    points2 = [get_marker(x, chunk).position
               for x in CORNERS]  # holds the marker positions

    counter = 0
    for x in points2:
        if x:
            counter += 1
    print("Counter: ", counter)
    print("1Points2 Vektor", points2)  #
    if counter == 4:
        print("2Points2 Vektor", points2)  #
        new_region = chunk.region
        new_center = (points2[0] + points2[1] + points2[2] + points2[3]) / 4.

        side1 = points2[0] - points2[1]
        side2 = points2[0] - points2[-1]
        side1g = T.mulp(points2[0]) - T.mulp(points2[1])
        side2g = T.mulp(points2[0]) - T.mulp(points2[-1])

        new_size = PhotoScan.Vector([
            side2g.norm() / s * 1.4,
            side1g.norm() / s * 1.4, new_region.size.z * 2.5
        ])

        horizontal = side2
        vertical = side1
        normal = vect(vertical, horizontal)  # cross product: unit vector perpendicular to vertical and horizontal
        horizontal = -vect(vertical, normal)
        vertical = vertical.normalized()

        R = PhotoScan.Matrix([horizontal, vertical, -normal])
        new_region.rot = R.t()
        new_region.center = new_center
        new_region.size = new_size
        chunk.region = new_region  # write the updated region back (as the other region scripts above do)
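
A minimal usage sketch, assuming the four corner targets listed in CORNERS have already been detected in the active chunk:

chunk = PhotoScan.app.document.chunk
bounding_box(chunk)  # realigns the region to the floor-plane targets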