示例#1
0
def align_capture(capture, warp_mode, img_type):
    """Co-register the bands of a MicaSense capture and return the aligned stack.

    Parameters
    ----------
    capture : micasense Capture whose bands should be aligned.
    warp_mode : OpenCV motion model (e.g. cv2.MOTION_HOMOGRAPHY or
        cv2.MOTION_AFFINE). For Altum images only use MOTION_HOMOGRAPHY.
    img_type : image type forwarded to imageutils.aligned_capture
        (presumably 'reflectance' or 'radiance' — confirm against micasense docs).

    Returns
    -------
    tuple
        (im_aligned, warp_matrices): the cropped, band-aligned image array and
        the per-band warp matrices used to produce it.
    """
    ## Alignment settings
    match_index = 1  # Index of the reference band
    # Can potentially increase max_iterations for better results, but longer runtimes
    max_alignment_iterations = 50
    pyramid_levels = 0  # for images with RigRelatives, setting this to 0 or 1 may improve alignment
    epsilon_threshold = 1e-10
    print(
        "Aligning images. Depending on settings this can take from a few seconds to many minutes"
    )
    warp_matrices, alignment_pairs = imageutils.align_capture(
        capture,
        ref_index=match_index,
        max_iterations=max_alignment_iterations,
        epsilon_threshold=epsilon_threshold,
        warp_mode=warp_mode,
        pyramid_levels=pyramid_levels)

    print("Finished Aligning, warp matrices={}".format(warp_matrices))

    # Crop away edge regions not covered by every band after warping, then
    # build the aligned multi-band image.
    cropped_dimensions, edges = imageutils.find_crop_bounds(
        capture, warp_matrices, warp_mode=warp_mode)
    im_aligned = imageutils.aligned_capture(capture,
                                            warp_matrices,
                                            warp_mode,
                                            cropped_dimensions,
                                            match_index,
                                            img_type=img_type)

    return im_aligned, warp_matrices
def GetAllignmentMatrix(images):
    """Compute per-band warp matrices for *images*, using the green band as reference."""
    # Alignment settings: band index 4 (green) is the reference band.
    # For 10-band imagery a 3-level pyramid is used; in some cases fewer
    # levels may work better. For Altum images only use MOTION_HOMOGRAPHY.
    align_kwargs = {
        'ref_index': 4,
        'max_iterations': 20,
        'warp_mode': cv2.MOTION_HOMOGRAPHY,
        'pyramid_levels': 3,
    }
    print("Calculating")
    warp_matrices, _alignment_pairs = imageutils.align_capture(images, **align_kwargs)
    print("Done")
    return warp_matrices
示例#3
0
def getAlignment(imageToGetAlignmentRoot, panelRoot):
    """Derive warp matrices and panel irradiance from panel and alignment images.

    Returns (warp_matrices, alignment_pairs, panel_irradiance).
    """
    import micasense.capture as capture

    # Expand the glob patterns into concrete file lists.
    imageNames = glob.glob(imageToGetAlignmentRoot)
    panelNames = glob.glob(panelRoot)

    panelCap = capture.Capture.from_filelist(panelNames)
    imgCap = capture.Capture.from_filelist(imageNames)

    # Known panel albedo per band, in RedEdge band_index order.
    panel_reflectance_by_band = [0.67, 0.69, 0.68, 0.61, 0.67]
    panel_irradiance = panelCap.panel_irradiance(panel_reflectance_by_band)
    imgCap.compute_reflectance(panel_irradiance)

    # Increase max_iterations to 1000+ for better results, but much longer
    # runtimes; the library default is kept here for speed.
    warp_matrices, alignment_pairs = imageutils.align_capture(imgCap, ref_index=3)

    return warp_matrices, alignment_pairs, panel_irradiance
def run():
    import sys
    import os, glob
    import argparse
    import csv
    import imutils
    import cv2
    import numpy as np
    import math
    import json
    import random
    import matplotlib.pyplot as plt
    from multiprocessing import Process, freeze_support
    from PIL import Image
    import micasense.imageutils as imageutils
    import micasense.plotutils as plotutils
    from micasense.image import Image
    from micasense.panel import Panel
    import micasense.utils as msutils
    from micasense.capture import Capture
    import pickle

    freeze_support()

    # construct the argument parse and parse the arguments
    ap = argparse.ArgumentParser()
    ap.add_argument(
        "-l",
        "--log_file_path",
        required=False,
        help=
        "file path to write log to. useful for using from the web interface")
    ap.add_argument(
        "-i",
        "--file_with_image_paths",
        required=True,
        help="file with file paths to the Micasense images in order")
    ap.add_argument(
        "-p",
        "--file_with_panel_image_paths",
        required=True,
        help="file with file paths to the Micasense panel images in order")
    ap.add_argument("-a",
                    "--field_layout_path",
                    required=True,
                    help="file with field layout")
    ap.add_argument("-r",
                    "--field_layout_params",
                    required=True,
                    help="file with layout params")
    ap.add_argument("-o",
                    "--output_path",
                    required=True,
                    help="file path where the output will be saved")
    ap.add_argument(
        "-u",
        "--temporary_development_path",
        required=False,
        help="file path for saving warp matrices. only useful for development")
    args = vars(ap.parse_args())

    log_file_path = args["log_file_path"]
    file_with_image_paths = args["file_with_image_paths"]
    file_with_panel_image_paths = args["file_with_panel_image_paths"]
    field_layout_path = args["field_layout_path"]
    field_layout_params = args["field_layout_params"]
    output_path = args["output_path"]
    temporary_development_path = args["temporary_development_path"]

    if sys.version_info[0] < 3:
        raise Exception("Must use Python3. Use python3 in your command line.")

    if log_file_path is not None:
        sys.stderr = open(log_file_path, 'a')

    def eprint(*args, **kwargs):
        print(*args, file=sys.stderr, **kwargs)

    basePath = ''
    imageNamesAll = []
    imageTempNamesBlue = []
    imageTempNamesGreen = []
    imageTempNamesRed = []
    imageTempNamesNIR = []
    imageTempNamesRedEdge = []
    with open(file_with_image_paths) as fp:
        for line in fp:
            imageName, basePath, tempImageNameBlue, tempImageNameGreen, tempImageNameRed, tempImageNameNIR, tempImageNameRedEdge = line.strip(
            ).split(",")
            imageNamesAll.append(imageName)
            imageTempNamesBlue.append(tempImageNameBlue)
            imageTempNamesGreen.append(tempImageNameGreen)
            imageTempNamesRed.append(tempImageNameRed)
            imageTempNamesNIR.append(tempImageNameNIR)
            imageTempNamesRedEdge.append(tempImageNameRedEdge)

    panelNames = []
    with open(file_with_panel_image_paths) as fp:
        for line in fp:
            imageName = line.strip()
            panelNames.append(imageName)

    field_layout = []
    with open(field_layout_path) as fp:
        for line in fp:
            plot_id, plot_name, plot_number = line.strip().split(",")
            field_layout.append([plot_id, plot_name, plot_number])

    field_params = []
    with open(field_layout_params) as fp:
        for line in fp:
            param = line.strip()
            field_params.append(param)

    first_plot_corner = field_params[
        0]  #north_west, north_east, south_west, south_east
    second_plot_direction = field_params[
        1]  #north_to_south, south_to_north, east_to_west, west_to_east
    plot_orientation = field_params[2]  #serpentine, zigzag
    corners_obj = json.loads(field_params[3])
    corner_gps_obj = json.loads(field_params[4])
    rotate_angle = float(field_params[5])
    num_rows = int(field_params[6])
    num_columns = int(field_params[7])
    flight_direction = field_params[8]  #rows, columns, #DEPRECATED
    plot_width_m = float(field_params[9])
    plot_length_m = float(field_params[10])
    plot_corners_pixels = json.loads(field_params[11])
    gps_precision_to_mm = float(field_params[12])
    start_direction = field_params[
        13]  #north_to_south, south_to_north, east_to_west, west_to_east
    turn_direction = field_params[
        14]  #north_to_south, south_to_north, east_to_west, west_to_east
    geographic_position = field_params[15]  #Q1, Q2, Q3, Q4
    image_top_direction = field_params[16]  #north, south, east, west
    # row_alley_width_m = float(field_params[17])
    # column_alley_width_m = float(field_params[18])

    panelCap = Capture.from_filelist(panelNames)
    if panelCap.panel_albedo() is not None:
        panel_reflectance_by_band = panelCap.panel_albedo()
    else:
        panel_reflectance_by_band = [0.58, 0.59, 0.59, 0.54,
                                     0.58]  #RedEdge band_index order
    panel_irradiance = panelCap.panel_irradiance(panel_reflectance_by_band)

    imageNamesDict = {}
    for i in imageNamesAll:
        s = i.split("_")
        k = s[-1].split(".")
        if s[-2] not in imageNamesDict:
            imageNamesDict[s[-2]] = {}
        imageNamesDict[s[-2]][k[0]] = i

    match_index = 3  # Index of the band. NIR band
    imageNameCaptures = []
    imageNameMatchIndexImages = []
    for i in sorted(imageNamesDict.keys()):
        im = []
        for j in sorted(imageNamesDict[i].keys()):
            imageName = imageNamesDict[i][j]
            img = Image(imageName)
            im.append(img)
        if len(im) > 0:
            imageNameMatchIndexImages.append(im[match_index])
            imageNameCaptures.append(im)

    captures = []
    for i in imageNameCaptures:
        im = Capture(i)
        captures.append(im)

    max_alignment_iterations = 1000
    warp_mode = cv2.MOTION_HOMOGRAPHY  # MOTION_HOMOGRAPHY or MOTION_AFFINE. For Altum images only use HOMOGRAPHY
    pyramid_levels = None  # for images with RigRelatives, setting this to 0 or 1 may improve alignment

    if log_file_path is not None:
        eprint(
            "Aligning images. Depending on settings this can take from a few seconds to many minutes"
        )
    else:
        print(
            "Aligning images. Depending on settings this can take from a few seconds to many minutes"
        )

    warp_matrices = None
    if temporary_development_path is not None:
        if os.path.exists(
                os.path.join(temporary_development_path,
                             'capturealignment.pkl')):
            with open(
                    os.path.join(temporary_development_path,
                                 'capturealignment.pkl'), 'rb') as f:
                warp_matrices, alignment_pairs = pickle.load(f)

    if warp_matrices is None:
        warp_matrices, alignment_pairs = imageutils.align_capture(
            captures[0],
            ref_index=match_index,
            max_iterations=max_alignment_iterations,
            warp_mode=warp_mode,
            pyramid_levels=pyramid_levels,
            multithreaded=True)

    if temporary_development_path is not None:
        with open(
                os.path.join(temporary_development_path,
                             'capturealignment.pkl'), 'wb') as f:
            pickle.dump([warp_matrices, alignment_pairs], f)

    if log_file_path is not None:
        eprint("Finished Aligning, warp matrices={}".format(warp_matrices))
    else:
        print("Finished Aligning, warp matrices={}".format(warp_matrices))

    rotated_imgs = []
    img_gps_locations = []
    counter = 0
    for x in captures:
        im_aligned = x.create_aligned_capture(irradiance_list=panel_irradiance,
                                              warp_matrices=warp_matrices,
                                              match_index=match_index,
                                              warp_mode=warp_mode)

        img = imageNameMatchIndexImages[counter]
        latitude = img.latitude
        longitude = img.longitude
        altitude = img.altitude
        # GSD resolution for Micasenes camera in m/p
        img_gps_locations.append(
            [latitude, longitude, altitude, 0.06857 * 100 * altitude / 10])

        rows, cols, d = im_aligned.shape
        M = cv2.getRotationMatrix2D((cols / 2, rows / 2), rotate_angle, 1)
        rotated_img = cv2.warpAffine(im_aligned, M, (cols, rows))

        if log_file_path is not None:
            eprint(rotated_img.shape)
        else:
            print(rotated_img.shape)

        rotated_imgs.append(rotated_img)

        counter += 1

    img_rows_pixels, img_columns_pixels, d = rotated_imgs[0].shape
    img_rows_pixels_half = img_columns_pixels / 2
    img_columns_pixels_half = img_rows_pixels / 2
    print(img_rows_pixels_half)
    print(img_columns_pixels_half)

    plot_width_1_pixel_nw = int(
        plot_corners_pixels['north_west'][1]['x']) - int(
            plot_corners_pixels['north_west'][0]['x'])
    plot_width_1_pixel_ne = int(
        plot_corners_pixels['north_east'][1]['x']) - int(
            plot_corners_pixels['north_east'][0]['x'])
    plot_width_1_pixel_sw = int(
        plot_corners_pixels['south_west'][1]['x']) - int(
            plot_corners_pixels['south_west'][0]['x'])
    plot_width_1_pixel_se = int(
        plot_corners_pixels['south_east'][1]['x']) - int(
            plot_corners_pixels['south_east'][0]['x'])
    plot_width_2_pixel_nw = int(
        plot_corners_pixels['north_west'][2]['x']) - int(
            plot_corners_pixels['north_west'][3]['x'])
    plot_width_2_pixel_ne = int(
        plot_corners_pixels['north_east'][2]['x']) - int(
            plot_corners_pixels['north_east'][3]['x'])
    plot_width_2_pixel_sw = int(
        plot_corners_pixels['south_west'][2]['x']) - int(
            plot_corners_pixels['south_west'][3]['x'])
    plot_width_2_pixel_se = int(
        plot_corners_pixels['south_east'][2]['x']) - int(
            plot_corners_pixels['south_east'][3]['x'])
    plot_length_1_pixel_nw = int(
        plot_corners_pixels['north_west'][2]['y']) - int(
            plot_corners_pixels['north_west'][1]['y'])
    plot_length_1_pixel_ne = int(
        plot_corners_pixels['north_east'][2]['y']) - int(
            plot_corners_pixels['north_east'][1]['y'])
    plot_length_1_pixel_sw = int(
        plot_corners_pixels['south_west'][2]['y']) - int(
            plot_corners_pixels['south_west'][1]['y'])
    plot_length_1_pixel_se = int(
        plot_corners_pixels['south_east'][2]['y']) - int(
            plot_corners_pixels['south_east'][1]['y'])
    plot_length_2_pixel_nw = int(
        plot_corners_pixels['north_west'][3]['y']) - int(
            plot_corners_pixels['north_west'][0]['y'])
    plot_length_2_pixel_ne = int(
        plot_corners_pixels['north_east'][3]['y']) - int(
            plot_corners_pixels['north_east'][0]['y'])
    plot_length_2_pixel_sw = int(
        plot_corners_pixels['south_west'][3]['y']) - int(
            plot_corners_pixels['south_west'][0]['y'])
    plot_length_2_pixel_se = int(
        plot_corners_pixels['south_east'][3]['y']) - int(
            plot_corners_pixels['south_east'][0]['y'])

    plot_width_pixel_avg = int(
        (plot_width_1_pixel_nw + plot_width_1_pixel_ne +
         plot_width_1_pixel_sw + plot_width_1_pixel_se +
         plot_width_2_pixel_nw + plot_width_2_pixel_ne +
         plot_width_2_pixel_sw + plot_width_2_pixel_se) / 8)
    plot_length_pixel_avg = int(
        (plot_length_1_pixel_nw + plot_length_1_pixel_ne +
         plot_length_1_pixel_sw + plot_length_1_pixel_se +
         plot_length_2_pixel_nw + plot_length_2_pixel_ne +
         plot_length_2_pixel_sw + plot_length_2_pixel_se) / 8)
    print(plot_width_pixel_avg)
    print(plot_length_pixel_avg)

    plot_width_pixels_per_m = plot_width_pixel_avg / plot_width_m
    plot_length_pixels_per_m = plot_length_pixel_avg / plot_length_m
    print(plot_width_pixels_per_m)
    print(plot_length_pixels_per_m)

    gps_precision_to_mm = gps_precision_to_mm * 10

    plot_pixels_per_gps_width = int(plot_width_pixels_per_m *
                                    gps_precision_to_mm * 1000)
    plot_pixels_per_gps_length = int(plot_length_pixels_per_m *
                                     gps_precision_to_mm * 1000)
    print(plot_pixels_per_gps_width)
    print(plot_pixels_per_gps_length)

    column_width_gps = 0
    column_height_gps = 0
    row_width_gps = 0
    row_height_gps = 0
    column_width_pixels = 0
    column_height_pixels = 0
    row_width_pixels = 0
    row_height_pixels = 0
    latitude_to_pixel_sign = 1
    longitude_to_pixel_sign = 1

    nw_pixel_x_diff = int(
        corners_obj['north_west']['x']) - img_rows_pixels_half
    nw_pixel_y_diff = int(
        corners_obj['north_west']['y']) - img_columns_pixels_half
    ne_pixel_x_diff = int(
        corners_obj['north_east']['x']) - img_rows_pixels_half
    ne_pixel_y_diff = int(
        corners_obj['north_east']['y']) - img_columns_pixels_half
    sw_pixel_x_diff = int(
        corners_obj['south_west']['x']) - img_rows_pixels_half
    sw_pixel_y_diff = int(
        corners_obj['south_west']['y']) - img_columns_pixels_half
    se_pixel_x_diff = int(
        corners_obj['south_east']['x']) - img_rows_pixels_half
    se_pixel_y_diff = int(
        corners_obj['south_east']['y']) - img_columns_pixels_half

    def distance(lat1, lon1, lat2, lon2):
        p = 0.017453292519943295
        a = 0.5 - math.cos((lat2 - lat1) * p) / 2 + math.cos(
            lat1 * p) * math.cos(lat2 * p) * (1 - math.cos(
                (lon2 - lon1) * p)) / 2
        return 12742 * math.asin(math.sqrt(a))

    def distances(data, v):
        distances = []
        for d in data:
            distances.append(distance(d[0], d[1], v['lat'], v['lon']))
        return distances

    def min_distance(data, v):
        d = distances(data, v)
        val, idx = min((val, idx) for (idx, val) in enumerate(d))
        return (val, idx)

    def crop_polygon(input_image, polygon):
        pts_array = []
        for point in polygon:
            x = point['x']
            y = point['y']

            x = int(round(x))
            y = int(round(y))
            pts_array.append([x, y])

        pts = np.array(pts_array)
        rect = cv2.boundingRect(pts)
        x, y, w, h = rect
        finalImage = input_image[y:y + h, x:x + w, :]
        return finalImage

    plot_polygons_gps = []
    plot_polygons_pixels = []

    output_lines = []
    print(corner_gps_obj)

    # Q1 is north of 0 and west of 0 e.g. North America
    if geographic_position == 'Q1':

        if image_top_direction == 'north':

            latitude_to_pixel_sign = -1
            latitude_to_pixel_sign = -1

            if first_plot_corner == 'north_west' and second_plot_direction == 'west_to_east':
                # field_nw_longitude_gps = float(corner_gps_obj['north_west'][1]) + (nw_pixel_x_diff*longitude_to_pixel_sign*float(corner_gps_obj['north_west'][3])/gps_precision_to_mm)
                # field_nw_latitude_gps = float(corner_gps_obj['north_west'][0]) + (nw_pixel_y_diff*latitude_to_pixel_sign*float(corner_gps_obj['north_west'][3])/gps_precision_to_mm)
                # field_ne_longitude_gps = float(corner_gps_obj['north_east'][1]) + (ne_pixel_x_diff*longitude_to_pixel_sign*float(corner_gps_obj['north_east'][3])/gps_precision_to_mm)
                # field_ne_latitude_gps = float(corner_gps_obj['north_east'][0]) + (ne_pixel_y_diff*latitude_to_pixel_sign*float(corner_gps_obj['north_east'][3])/gps_precision_to_mm)
                # field_sw_longitude_gps = float(corner_gps_obj['south_west'][1]) + (sw_pixel_x_diff*longitude_to_pixel_sign*float(corner_gps_obj['south_west'][3])/gps_precision_to_mm)
                # field_sw_latitude_gps = float(corner_gps_obj['south_west'][0]) + (sw_pixel_y_diff*latitude_to_pixel_sign*float(corner_gps_obj['south_west'][3])/gps_precision_to_mm)
                # field_se_longitude_gps = float(corner_gps_obj['south_east'][1]) + (se_pixel_x_diff*longitude_to_pixel_sign*float(corner_gps_obj['south_east'][3])/gps_precision_to_mm)
                # field_se_latitude_gps = float(corner_gps_obj['south_east'][0]) + (se_pixel_y_diff*latitude_to_pixel_sign*float(corner_gps_obj['south_east'][3])/gps_precision_to_mm)

                field_nw_longitude_gps = float(
                    corner_gps_obj['north_west'][1]) + (
                        nw_pixel_x_diff * longitude_to_pixel_sign /
                        plot_pixels_per_gps_width)
                field_nw_latitude_gps = float(
                    corner_gps_obj['north_west'][0]) + (
                        nw_pixel_y_diff * latitude_to_pixel_sign /
                        plot_pixels_per_gps_length)
                field_ne_longitude_gps = float(
                    corner_gps_obj['north_east'][1]) + (
                        ne_pixel_x_diff * longitude_to_pixel_sign /
                        plot_pixels_per_gps_width)
                field_ne_latitude_gps = float(
                    corner_gps_obj['north_east'][0]) + (
                        ne_pixel_y_diff * latitude_to_pixel_sign /
                        plot_pixels_per_gps_length)
                field_sw_longitude_gps = float(
                    corner_gps_obj['south_west'][1]) + (
                        sw_pixel_x_diff * longitude_to_pixel_sign /
                        plot_pixels_per_gps_width)
                field_sw_latitude_gps = float(
                    corner_gps_obj['south_west'][0]) + (
                        sw_pixel_y_diff * latitude_to_pixel_sign /
                        plot_pixels_per_gps_length)
                field_se_longitude_gps = float(
                    corner_gps_obj['south_east'][1]) + (
                        se_pixel_x_diff * longitude_to_pixel_sign /
                        plot_pixels_per_gps_width)
                field_se_latitude_gps = float(
                    corner_gps_obj['south_east'][0]) + (
                        se_pixel_y_diff * latitude_to_pixel_sign /
                        plot_pixels_per_gps_length)

                plot_width_top_gps = (field_ne_longitude_gps -
                                      field_nw_longitude_gps) / num_columns
                plot_width_bottom_gps = (field_se_longitude_gps -
                                         field_sw_longitude_gps) / num_columns
                plot_width_gps_avg = (plot_width_top_gps +
                                      plot_width_bottom_gps) / 2

                plot_length_left_gps = (field_nw_latitude_gps -
                                        field_sw_latitude_gps) / num_rows
                plot_length_right_gps = (field_ne_latitude_gps -
                                         field_se_latitude_gps) / num_rows
                plot_length_gps_avg = (plot_length_left_gps +
                                       plot_length_right_gps) / 2

        if image_top_direction == 'west':

            # field_nw_longitude_gps = float(corner_gps_obj['north_west'][1]) + (nw_pixel_y_diff*longitude_to_pixel_sign*float(corner_gps_obj['north_west'][3])/gps_precision_to_mm)
            # field_nw_latitude_gps = float(corner_gps_obj['north_west'][0]) + (nw_pixel_x_diff*latitude_to_pixel_sign*float(corner_gps_obj['north_west'][3])/gps_precision_to_mm)
            # field_ne_longitude_gps = float(corner_gps_obj['north_east'][1]) + (ne_pixel_y_diff*longitude_to_pixel_sign*float(corner_gps_obj['north_east'][3])/gps_precision_to_mm)
            # field_ne_latitude_gps = float(corner_gps_obj['north_east'][0]) + (ne_pixel_x_diff*latitude_to_pixel_sign*float(corner_gps_obj['north_east'][3])/gps_precision_to_mm)
            # field_sw_longitude_gps = float(corner_gps_obj['south_west'][1]) + (sw_pixel_y_diff*longitude_to_pixel_sign*float(corner_gps_obj['south_west'][3])/gps_precision_to_mm)
            # field_sw_latitude_gps = float(corner_gps_obj['south_west'][0]) + (sw_pixel_x_diff*latitude_to_pixel_sign*float(corner_gps_obj['south_west'][3])/gps_precision_to_mm)
            # field_se_longitude_gps = float(corner_gps_obj['south_east'][1]) + (se_pixel_y_diff*longitude_to_pixel_sign*float(corner_gps_obj['south_east'][3])/gps_precision_to_mm)
            # field_se_latitude_gps = float(corner_gps_obj['south_east'][0]) + (se_pixel_x_diff*latitude_to_pixel_sign*float(corner_gps_obj['south_east'][3])/gps_precision_to_mm)

            field_nw_longitude_gps = float(corner_gps_obj['north_west'][1]) + (
                nw_pixel_y_diff * longitude_to_pixel_sign /
                plot_pixels_per_gps_length)
            field_nw_latitude_gps = float(corner_gps_obj['north_west'][0]) + (
                nw_pixel_x_diff * latitude_to_pixel_sign /
                plot_pixels_per_gps_width)
            field_ne_longitude_gps = float(corner_gps_obj['north_east'][1]) + (
                ne_pixel_y_diff * longitude_to_pixel_sign /
                plot_pixels_per_gps_length)
            field_ne_latitude_gps = float(corner_gps_obj['north_east'][0]) + (
                ne_pixel_x_diff * latitude_to_pixel_sign /
                plot_pixels_per_gps_width)
            field_sw_longitude_gps = float(corner_gps_obj['south_west'][1]) + (
                sw_pixel_y_diff * longitude_to_pixel_sign /
                plot_pixels_per_gps_length)
            field_sw_latitude_gps = float(corner_gps_obj['south_west'][0]) + (
                sw_pixel_x_diff * latitude_to_pixel_sign /
                plot_pixels_per_gps_width)
            field_se_longitude_gps = float(corner_gps_obj['south_east'][1]) + (
                se_pixel_y_diff * longitude_to_pixel_sign /
                plot_pixels_per_gps_length)
            field_se_latitude_gps = float(corner_gps_obj['south_east'][0]) + (
                se_pixel_x_diff * latitude_to_pixel_sign /
                plot_pixels_per_gps_width)

            plot_width_top_gps = (field_nw_latitude_gps -
                                  field_sw_latitude_gps) / num_columns
            plot_width_bottom_gps = (field_ne_latitude_gps -
                                     field_se_latitude_gps) / num_columns
            plot_width_gps_avg = (plot_width_top_gps +
                                  plot_width_bottom_gps) / 2
            print(plot_width_gps_avg)

            plot_length_left_gps = (field_ne_longitude_gps -
                                    field_nw_longitude_gps) / num_rows
            plot_length_right_gps = (field_se_longitude_gps -
                                     field_sw_longitude_gps) / num_rows
            plot_length_gps_avg = (plot_length_left_gps +
                                   plot_length_right_gps) / 2
            print(plot_length_gps_avg)

            plot_total_vertical_shift_gps = (
                (field_nw_longitude_gps - field_sw_longitude_gps) +
                (field_ne_longitude_gps - field_se_longitude_gps)) / 2
            plot_vertical_shift_avg_gps = plot_total_vertical_shift_gps / num_columns
            print(plot_vertical_shift_avg_gps)

            plot_horizontal_shift_left_gps = (field_sw_latitude_gps -
                                              field_se_latitude_gps) / num_rows
            plot_horizontal_shift_right_gps = (
                field_nw_latitude_gps - field_ne_latitude_gps) / num_rows
            plot_horizontal_shift_avg_gps = (
                plot_horizontal_shift_left_gps +
                plot_horizontal_shift_right_gps) / 2
            print(plot_horizontal_shift_avg_gps)

            if first_plot_corner == 'north_west' and second_plot_direction == 'north_to_south' and plot_orientation == 'zigzag':
                x_pos = field_nw_latitude_gps - plot_width_gps_avg
                y_pos = field_nw_longitude_gps

                plot_width_fix = 10
                plot_length_fix = 10

                plot_counter = 1
                row_num = 1
                #Visualize the GPS on http://www.copypastemap.com/index.html
                dumper_str = ''
                colors = ['red', 'blue', 'green', 'yellow', 'white']
                for i in range(0, num_rows, 1):
                    for j in range(0, num_columns, 1):
                        x_pos_val = x_pos
                        y_pos_val = y_pos
                        plot_polygons_gps.append([{
                            'lat': x_pos_val,
                            'lon': y_pos_val
                        }, {
                            'lat': x_pos_val + plot_width_gps_avg,
                            'lon': y_pos_val
                        }, {
                            'lat':
                            x_pos_val + plot_width_gps_avg,
                            'lon':
                            y_pos_val + plot_length_gps_avg
                        }, {
                            'lat':
                            x_pos_val,
                            'lon':
                            y_pos_val + plot_length_gps_avg
                        }])

                        color = random.choice(colors)
                        dumper_str = dumper_str + str(x_pos_val) + '\t' + str(
                            y_pos_val) + '\tnumbered\t' + color + '\t' + str(
                                plot_counter) + '\n'
                        dumper_str = dumper_str + str(
                            x_pos_val + plot_width_gps_avg) + '\t' + str(
                                y_pos_val
                            ) + '\tnumbered\t' + color + '\t' + str(
                                plot_counter) + '\n'
                        dumper_str = dumper_str + str(
                            x_pos_val + plot_width_gps_avg) + '\t' + str(
                                y_pos_val + plot_length_gps_avg
                            ) + '\tnumbered\t' + color + '\t' + str(
                                plot_counter) + '\n'
                        dumper_str = dumper_str + str(x_pos_val) + '\t' + str(
                            y_pos_val + plot_length_gps_avg
                        ) + '\tnumbered\t' + color + '\t' + str(
                            plot_counter) + '\n'

                        x_pos = x_pos - plot_width_gps_avg
                        y_pos = y_pos + plot_vertical_shift_avg_gps
                        plot_counter += 1
                    x_pos = field_nw_latitude_gps - plot_width_gps_avg + (
                        row_num * plot_horizontal_shift_avg_gps)
                    # y_pos = y_pos + plot_length_gps_avg + plot_total_vertical_shift_gps
                    y_pos = y_pos + plot_length_gps_avg - plot_total_vertical_shift_gps
                    row_num = row_num + 1
                print(dumper_str)

                x_offset_pixels = 0
                y_offset_pixels = 0

                counter = 0
                for p in plot_polygons_gps:
                    #Find image closest to plot GPS
                    img_distance, img_index = min_distance(
                        img_gps_locations, p[0])
                    img = rotated_imgs[img_index]
                    img_gps = img_gps_locations[img_index]

                    # polygon = [{
                    #     'x':img_rows_pixels_half + (p[0]['lat'] - img_gps[0])*gps_precision_to_mm/img_gps[3],
                    #     'y':img_columns_pixels_half - abs(p[0]['lon'] - img_gps[1])*gps_precision_to_mm/img_gps[3]
                    # },
                    # {
                    #     'x':img_rows_pixels_half + abs(p[1]['lat'] - img_gps[0])*gps_precision_to_mm/img_gps[3],
                    #     'y':img_columns_pixels_half - abs(p[1]['lon'] - img_gps[1])*gps_precision_to_mm/img_gps[3]
                    # },
                    # {
                    #     'x':img_rows_pixels_half + abs(p[2]['lat'] - img_gps[0])*gps_precision_to_mm/img_gps[3],
                    #     'y':img_columns_pixels_half - abs(p[2]['lon'] - img_gps[1])*gps_precision_to_mm/img_gps[3]
                    # },
                    # {
                    #     'x':img_rows_pixels_half + abs(p[3]['lat'] - img_gps[0])*gps_precision_to_mm/img_gps[3],
                    #     'y':img_columns_pixels_half - abs(p[3]['lon'] - img_gps[1])*gps_precision_to_mm/img_gps[3]
                    # }]

                    polygon = [{
                        'x':
                        img_rows_pixels_half + x_offset_pixels +
                        abs(p[0]['lat'] - img_gps[0]) *
                        plot_pixels_per_gps_width,
                        'y':
                        img_columns_pixels_half + y_offset_pixels -
                        abs(p[0]['lon'] - img_gps[1]) *
                        plot_pixels_per_gps_length
                    }, {
                        'x':
                        img_rows_pixels_half + x_offset_pixels +
                        abs(p[1]['lat'] - img_gps[0]) *
                        plot_pixels_per_gps_width,
                        'y':
                        img_columns_pixels_half + y_offset_pixels -
                        abs(p[1]['lon'] - img_gps[1]) *
                        plot_pixels_per_gps_length
                    }, {
                        'x':
                        img_rows_pixels_half + x_offset_pixels +
                        abs(p[2]['lat'] - img_gps[0]) *
                        plot_pixels_per_gps_width,
                        'y':
                        img_columns_pixels_half + y_offset_pixels -
                        abs(p[2]['lon'] - img_gps[1]) *
                        plot_pixels_per_gps_length
                    }, {
                        'x':
                        img_rows_pixels_half + x_offset_pixels +
                        abs(p[3]['lat'] - img_gps[0]) *
                        plot_pixels_per_gps_width,
                        'y':
                        img_columns_pixels_half + y_offset_pixels -
                        abs(p[3]['lon'] - img_gps[1]) *
                        plot_pixels_per_gps_length
                    }]

                    plot_polygons_pixels.append({
                        'img_index': img_index,
                        'p': polygon
                    })

                    plot_stack = crop_polygon(img, polygon)

                    blue_img = cv2.rectangle(
                        img[:, :, 0] * 255,
                        (int(polygon[0]['x']), int(polygon[0]['y'])),
                        (int(polygon[2]['x']), int(polygon[2]['y'])),
                        (0, 0, 0), 1)
                    green_img = cv2.rectangle(
                        img[:, :, 1] * 255,
                        (int(polygon[0]['x']), int(polygon[0]['y'])),
                        (int(polygon[2]['x']), int(polygon[2]['y'])),
                        (0, 0, 0), 1)
                    red_img = cv2.rectangle(
                        img[:, :, 2] * 255,
                        (int(polygon[0]['x']), int(polygon[0]['y'])),
                        (int(polygon[2]['x']), int(polygon[2]['y'])),
                        (0, 0, 0), 1)
                    nir_img = cv2.rectangle(
                        img[:, :, 3] * 255,
                        (int(polygon[0]['x']), int(polygon[0]['y'])),
                        (int(polygon[2]['x']), int(polygon[2]['y'])),
                        (0, 0, 0), 1)
                    red_edge_img = cv2.rectangle(
                        img[:, :, 4] * 255,
                        (int(polygon[0]['x']), int(polygon[0]['y'])),
                        (int(polygon[2]['x']), int(polygon[2]['y'])),
                        (0, 0, 0), 1)

                    blue_img_file = imageTempNamesBlue[counter]
                    green_img_file = imageTempNamesGreen[counter]
                    red_img_file = imageTempNamesRed[counter]
                    nir_img_file = imageTempNamesNIR[counter]
                    red_edge_img_file = imageTempNamesRedEdge[counter]

                    cv2.imwrite(basePath + blue_img_file, blue_img)
                    cv2.imwrite(basePath + green_img_file, green_img)
                    cv2.imwrite(basePath + red_img_file, red_img)
                    cv2.imwrite(basePath + nir_img_file, nir_img)
                    cv2.imwrite(basePath + red_edge_img_file, red_edge_img)

                    # plt.imsave(basePath+blue_img_file, plot_stack[:,:,0], cmap='gray')
                    # plt.imsave(basePath+green_img_file, plot_stack[:,:,1], cmap='gray')
                    # plt.imsave(basePath+red_img_file, plot_stack[:,:,2], cmap='gray')
                    # plt.imsave(basePath+nir_img_file, plot_stack[:,:,3], cmap='gray')
                    # plt.imsave(basePath+red_edge_img_file, plot_stack[:,:,4], cmap='gray')

                    if counter in range(-len(field_layout), len(field_layout)):
                        plot_database = field_layout[counter]
                        output_lines.append([
                            plot_database[0], plot_database[1],
                            plot_database[2], blue_img_file, green_img_file,
                            red_img_file, nir_img_file, red_edge_img_file,
                            json.dumps(polygon)
                        ])

                    counter += 1

    print(plot_width_gps_avg)
    print(plot_length_gps_avg)
    print(plot_polygons_gps)
    print(len(plot_polygons_gps))
    print(plot_polygons_pixels)
    print(len(plot_polygons_pixels))
    print(output_lines)

    with open(output_path, 'w') as writeFile:
        writer = csv.writer(writeFile)
        writer.writerows(output_lines)

    writeFile.close()
示例#5
0
def micasense_alignment_matrix(
        image_path,
        ground_level=None,
        max_alignment_iterations=50,
        match_index=1,  # Index of the band used as the alignment reference
        warp_mode=cv2.MOTION_HOMOGRAPHY,
        pyramid_levels_1=0):
    """Compute per-altitude band-alignment warp matrices for a MicaSense set.

    Each airborne capture in ``image_path`` is aligned with
    ``imageutils.align_capture`` and its per-band warp matrices are stored
    keyed by the capture's integer altitude above ground level.

    Parameters
    ----------
    image_path : str
        Directory containing the MicaSense captures.
    ground_level : float, optional
        Ground altitude above sea level. When ``None``, the minimum altitude
        found in the image set is used (assumes the lowest capture was taken
        on the ground).
    max_alignment_iterations : int
        Maximum iterations for the ECC alignment. More iterations can give
        better results at the cost of runtime.
    match_index : int
        Index of the reference band.
    warp_mode : int
        OpenCV motion model (MOTION_HOMOGRAPHY or MOTION_AFFINE; for Altum
        images only HOMOGRAPHY is appropriate).
    pyramid_levels_1 : int
        Pyramid levels for alignment; 0 or 1 may improve alignment for
        images with RigRelatives.

    Returns
    -------
    dict
        Maps integer altitude-above-ground -> dict of warp matrices keyed by
        band name ("Blue", "Green", "Red", "NIR", "RedEdge"), plus the keys
        ``"altitude"`` (rounded AGL altitudes of all captures) and
        ``"alignment_param"`` (alignment settings shared by all bands,
        without the per-image ``ref_image``).

    Raises
    ------
    ValueError
        If no airborne capture was found (every capture sits at ground
        level), so no alignment could be performed.
    """
    imset = imageset.ImageSet.from_directory(image_path)

    data, columns = imset.as_nested_lists()
    df = pd.DataFrame.from_records(data, index='timestamp', columns=columns)

    # Altitude above sea level for every capture.
    alt_asl = np.array(df['altitude'])

    if ground_level is None:
        ground_level = np.min(alt_asl)
    # or enter manually, e.g.:
    # ground_level = 106.48  # altitude above sea level

    # Convert to altitude above ground level.
    alt_agl = alt_asl - ground_level
    df['altitude'] = alt_agl

    a_increment = 0
    alignment_matrix = {"altitude": np.round(alt_agl)}
    wrap_matrix = {}
    alignment_pairs = None  # set by the first aligned (airborne) capture

    for cap in imset.captures:

        if cap.location()[2] == ground_level:
            continue  # this capture was taken on the ground; skip it

        # Can potentially increase max_iterations for better results, but longer runtimes
        warp_matrices, alignment_pairs = imageutils.align_capture(
            cap,
            ref_index=match_index,
            max_iterations=max_alignment_iterations,
            warp_mode=warp_mode,
            pyramid_levels=pyramid_levels_1,
            multithreaded=True)
        # NOTE: if findTransformECC() raises
        #   TypeError: findTransformECC() missing required argument 'inputMask' (pos 6)
        # call it with an explicit None mask:
        #   (cc, warp_matrix) = cv2.findTransformECC(
        #       im1_gray, im2_gray, warp_matrix, warp_mode, criteria, None, 1)

        wrap_matrix["Blue"] = warp_matrices[0]
        wrap_matrix["Green"] = warp_matrices[1]
        wrap_matrix["Red"] = warp_matrices[2]
        wrap_matrix["NIR"] = warp_matrices[3]
        wrap_matrix["RedEdge"] = warp_matrices[4]

        alt = int(cap.location()[2] - ground_level)
        alignment_matrix[alt] = wrap_matrix
        a_increment += 1
        print("iteration {} out of {}".format(a_increment, len(df)))

        print("Finished Aligning, warp matrices={}".format(warp_matrices))

        wrap_matrix = {}

    if alignment_pairs is None:
        # Original code raised NameError here; fail with a clearer message.
        raise ValueError(
            "No airborne captures found; cannot extract alignment parameters")

    # The alignment parameters are the same for all the bands and all
    # altitudes, so save a copy of one of them ...
    alignment_param = alignment_pairs[0].copy()
    # ... but ref_image is different for each image and band, so remove it.
    alignment_param.pop("ref_image")
    alignment_matrix["alignment_param"] = alignment_param

    return alignment_matrix
示例#6
0
def align_template(imAl, mx, reflFolder, ref_ind=rf):
    """Align the bands of a capture and save composite preview images.

    Parameters
    ----------
    imAl : Capture
        MicaSense capture to align.
    mx : int
        Maximum ECC iterations for ``imageutils.align_capture``.
    reflFolder : str
        Folder where the preview JPEGs (rgb/cir/grRE) are written.
    ref_ind : int
        Index of the reference band (defaults to module-level ``rf``).

    Returns
    -------
    tuple
        ``(warp_matrices, alignment_pairs)`` from the alignment step.
    """
    warp_matrices, alignment_pairs = imageutils.align_capture(
        imAl,
        ref_index=ref_ind,
        warp_mode=cv2.MOTION_HOMOGRAPHY,
        max_iterations=mx)
    for band, mat in enumerate(warp_matrices):
        print("Band {}:\n{}".format(band, mat))

    # cropped_dimensions is of the form:
    # (first column with overlapping pixels present in all images,
    #  first row with overlapping pixels present in all images,
    #  number of columns with overlapping pixels in all images,
    #  number of rows with overlapping pixels in all images   )
    # Distortion coefficients and camera matrices are collected here in case
    # a caller wants them (see the commented-out return values below).
    dist_coeffs = []
    cam_mats = []
    for i, img in enumerate(imAl.images):
        dist_coeffs.append(img.cv2_distortion_coeff())
        cam_mats.append(img.cv2_camera_matrix())

    warp_mode = cv2.MOTION_HOMOGRAPHY  # alignment_pairs[0]['warp_mode']
    match_index = alignment_pairs[0]['ref_index']

    cropped_dimensions, edges = imageutils.find_crop_bounds(
        imAl, warp_matrices, warp_mode=cv2.MOTION_HOMOGRAPHY)
    im_aligned = imageutils.aligned_capture(imAl, warp_matrices, warp_mode,
                                            cropped_dimensions, match_index,
                                            img_type="reflectance")

    # Normalize every band into [0, 1] for display/export.
    im_display = np.zeros((im_aligned.shape[0], im_aligned.shape[1], 5),
                          dtype=np.float32)
    for iM in range(0, im_aligned.shape[2]):
        im_display[:, :, iM] = imageutils.normalize(im_aligned[:, :, iM])

    rgb = im_display[:, :, [2, 1, 0]]
    cir = im_display[:, :, [3, 2, 1]]
    grRE = im_display[:, :, [4, 2, 1]]

    # Truthiness test instead of "== True" (args.plts is a boolean flag).
    if args.plts:
        fig, axes = plt.subplots(1, 3, figsize=(16, 16))
        plt.title("Red-Green-Blue Composite")
        axes[0].imshow(rgb)
        plt.title("Color Infrared (CIR) Composite")
        axes[1].imshow(cir)
        plt.title("Red edge-Green-Red (ReGR) Composite")
        axes[2].imshow(grRE)
        plt.show()

    prevList = [rgb, cir, grRE]
    nmList = ['rgb.jpg', 'cir.jpg', 'grRE.jpg']
    names = [os.path.join(reflFolder, pv) for pv in nmList]

    # Convert each float composite to 8-bit and write it out.
    for ind, p in enumerate(prevList):
        img8 = bytescale(p)
        imageio.imwrite(names[ind], img8)

    return warp_matrices, alignment_pairs  # , dist_coeffs, cam_mats, cropped_dimensions
示例#7
0
def main(img_dir, out_dir, alt_thresh, ncores, start_count, scaling,
         irradiance, subset, layer, resolution):
    """Align, calibrate and export a directory of MicaSense captures.

    Workflow: load the image set, optionally restrict it to a spatial subset
    and to captures above an altitude threshold, optionally retrieve
    irradiance values (reflectance panel / DLS / 6S clear-sky model),
    interactively validate band alignment on a randomly chosen capture, then
    export every capture in parallel with ``capture_to_files``.

    Parameters mirror the CLI options: ``alt_thresh`` is a float or the
    string ``'interactive'``; ``subset`` is ``'interactive'``, the path to
    an OGR file, or ``None``; ``irradiance`` is ``'panel'``, ``'dls'``,
    ``'sixs'`` or ``None``.
    """
    # Create output dir it doesn't exist yet
    if not os.path.exists(out_dir):
        os.makedirs(out_dir)
    # Load all images as imageset
    imgset = imageset.ImageSet.from_directory(img_dir)
    meta_list = imgset.as_nested_lists()
    # Make feature collection of image centers and write it to tmp file
    point_list = [capture_to_point(c) for c in imgset.captures]
    feature_list = [{
        'type': 'Feature',
        'properties': {},
        'geometry': mapping(x)
    } for x in point_list]
    fc = {'type': 'FeatureCollection', 'features': feature_list}

    ###########################
    #### Optionally cut a spatial subset of the images
    ##########################
    if subset == 'interactive':
        # Write feature collection to tmp file, to make it accessible to the flask app
        # without messing up with the session context
        fc_tmp_file = os.path.join(tempfile.gettempdir(), 'micamac_fc.geojson')
        with open(fc_tmp_file, 'w') as dst:
            json.dump(fc, dst)
        # Select spatial subset interactively (available as feature in POLYGONS[0])
        app.run(debug=False, host='0.0.0.0')
        # Check which images intersect with the user defined polygon (list of booleans)
        poly_shape = shape(POLYGONS[0]['geometry'])
        in_polygon = [x.intersects(poly_shape) for x in point_list]
        print('Centroid of drawn polygon: %s' % poly_shape.centroid.wkt)
    elif subset is None:
        in_polygon = [True for x in point_list]
    elif os.path.exists(subset):
        with fiona.open(subset, layer) as src:
            poly_shape = shape(src[0]['geometry'])
        in_polygon = [x.intersects(poly_shape) for x in point_list]
        print('Centroid of supplied polygon: %s' % poly_shape.centroid.wkt)
    else:
        raise ValueError(
            '--subset must be interactive, the path to an OGR file or left empty'
        )

    ##################################
    ### Threshold on altitude
    ##################################
    if alt_thresh == 'interactive':
        # Show an altitude histogram so the user can pick a sensible cutoff.
        alt_arr = np.array([x[3] for x in meta_list[0]])
        n, bins, patches = plt.hist(alt_arr, 100)
        plt.xlabel('Altitude')
        plt.ylabel('Freq')
        plt.show()
        # Ask user for alt threshold
        alt_thresh = input('Enter altitude threshold:')
        alt_thresh = float(alt_thresh)
        above_alt = [x[3] > alt_thresh for x in meta_list[0]]
    elif isinstance(alt_thresh, float):
        # NOTE(review): an int threshold is rejected here — callers must
        # pass a float; confirm this matches the CLI parsing.
        above_alt = [x[3] > alt_thresh for x in meta_list[0]]
    else:
        raise ValueError(
            '--alt_thresh argument must be a float or interactive')

    # Combine both boolean lists (altitude and in_polygon)
    is_valid = [x and y for x, y in zip(above_alt, in_polygon)]

    #########################
    ### Optionally retrieve irradiance values
    #########################
    if irradiance == 'panel':
        # Trying first capture, then last if doesn't work
        try:
            panel_cap = imgset.captures[0]
            # Auto-detect panel, perform visual check, retrieve corresponding irradiance values
            if panel_cap.detect_panels() != 5:
                raise AssertionError('Panels could not be detected')
            panel_cap.plot_panels()
            # Visual check and ask for user confirmation
            panel_check = input("Are panels properly detected ? (y/n):")
            if panel_check != 'y':
                raise AssertionError(
                    'User input, unsuitable detected panels !')
        except Exception:
            print(
                "Failed to use pre flight panels; trying post flight panel capture"
            )
            panel_cap = imgset.captures[-1]
            # Auto-detect panel, perform visual check, retrieve corresponding irradiance values
            if panel_cap.detect_panels() != 5:
                raise AssertionError('Panels could not be detected')
            panel_cap.plot_panels()
            # Visual check and ask for user confirmation
            panel_check = input("Are panels properly detected ? (y/n):")
            if panel_check != 'y':
                raise AssertionError(
                    'User input, unsuitable detected panels !')
        # Retrieve irradiance values from panels reflectance
        img_type = 'reflectance'
        irradiance_list = panel_cap.panel_irradiance()
    elif irradiance == 'dls':
        img_type = 'reflectance'
        irradiance_list = None
    elif irradiance == 'sixs':
        # Pick the middle cature, and use it to model clear sky irradiance using 6s
        middle_c = imgset.captures[round(len(imgset.captures) / 2)]
        img_type = 'reflectance'
        irradiance_list = modeled_irradiance_from_capture(middle_c)
    elif irradiance is None:
        img_type = None
        irradiance_list = None
    else:
        raise ValueError(
            'Incorrect value for --reflectance, must be panel, dls or left empty'
        )

    #########################
    ### Alignment parameters
    #########################
    # Select an arbitrary image, find warping and croping parameters, apply to image,
    # assemble a rgb composite to perform visual check
    alignment_confirmed = False
    while not alignment_confirmed:
        # NOTE(review): index 0 is deliberately(?) excluded — presumably the
        # first capture may be a ground/panel capture; confirm.
        warp_cap_ind = random.randint(1, len(imgset.captures) - 1)
        warp_cap = imgset.captures[warp_cap_ind]
        warp_matrices, alignment_pairs = imageutils.align_capture(
            warp_cap, max_iterations=100, multithreaded=True)
        print("Finished Aligning")
        # Retrieve cropping dimensions
        cropped_dimensions, edges = imageutils.find_crop_bounds(
            warp_cap, warp_matrices)
        warp_mode = alignment_pairs[0]['warp_mode']
        match_index = alignment_pairs[0]['ref_index']
        # Apply warping and cropping to the Capture used for finding the parameters to
        # later perform a visual check
        im_aligned = imageutils.aligned_capture(warp_cap,
                                                warp_matrices,
                                                warp_mode,
                                                cropped_dimensions,
                                                match_index,
                                                img_type='radiance')
        # RGB composite for the visual check.
        rgb_list = [
            imageutils.normalize(im_aligned[:, :, i]) for i in [0, 1, 2]
        ]
        plt.imshow(np.stack(rgb_list, axis=-1))
        plt.show()

        # Color-infrared composite for the visual check.
        cir_list = [
            imageutils.normalize(im_aligned[:, :, i]) for i in [1, 3, 4]
        ]
        plt.imshow(np.stack(cir_list, axis=-1))
        plt.show()

        alignment_check = input("""
Are all bands properly aligned? (y/n)
    y: Bands are properly aligned, begin processing
    n: Bands are not properly aliged or image is not representative of the whole set, try another image
""")
        if alignment_check.lower() == 'y':
            alignment_confirmed = True
        else:
            print('Trying another image')

    ##################
    ### Processing
    #################
    # Build iterator of captures
    cap_tuple_iterator = zip(imgset.captures, is_valid,
                             range(start_count,
                                   len(is_valid) + start_count))
    process_kwargs = {
        'warp_matrices': warp_matrices,
        'warp_mode': warp_mode,
        'cropped_dimensions': cropped_dimensions,
        'match_index': match_index,
        'out_dir': out_dir,
        'irradiance_list': irradiance_list,
        'img_type': img_type,
        'resolution': resolution,
        'scaling': scaling
    }
    # Run process function with multiprocessing. Close and join the pool so
    # all workers finish before returning (the original leaked the pool).
    pool = mp.Pool(ncores)
    try:
        pool.map(functools.partial(capture_to_files, **process_kwargs),
                 cap_tuple_iterator)
    finally:
        pool.close()
        pool.join()
def run():
    """Align MicaSense captures and export per-band grayscale images + GPS CSV.

    Reads two input files (image paths with per-band temp names, and panel
    image paths), computes panel irradiance, aligns every capture (reusing
    warp matrices across captures when possible), writes one grayscale image
    per band, and writes a CSV mapping each written image to its GPS
    location. Intended to be driven from a web interface or the command
    line via argparse.
    """
    import sys
    from micasense.capture import Capture
    import cv2
    import numpy as np
    import matplotlib.pyplot as plt
    import micasense.imageutils as imageutils
    import micasense.plotutils as plotutils
    import argparse
    import os, glob
    from multiprocessing import Process, freeze_support
    import imutils
    import statistics
    from micasense.image import Image
    from micasense.panel import Panel
    import micasense.utils as msutils
    import csv
    import pickle

    freeze_support()

    ap = argparse.ArgumentParser()
    ap.add_argument("-l", "--log_file_path", required=False, help="file path to write log to. useful for using from the web interface")
    ap.add_argument("-b", "--file_with_image_paths", required=False, help="file path to file that has all image file names and temporary file names for each image in it, comma separated and separated by a newline. useful for using from the web interface. e.g. /home/nmorales/myfilewithnames.txt")
    ap.add_argument("-d", "--file_with_panel_image_paths", required=False, help="file path to file that has all image file names in it, separated by a newline. useful for using from the web interface. e.g. /home/nmorales/myfilewithnames.txt")
    ap.add_argument("-o", "--output_path", required=True, help="output path to directory in which all resulting files will be placed. useful for using from the command line")
    ap.add_argument("-y", "--temporary_development_path", required=False, help="output file path for stitched RGB image")
    ap.add_argument("-a", "--outfile_alignment_file", required=False, help="output file path for alignment matrices")
    ap.add_argument("-i", "--infile_alignment_file", required=False, help="input file path for alignment matrices")
    args = vars(ap.parse_args())

    log_file_path = args["log_file_path"]
    file_with_image_paths = args["file_with_image_paths"]
    file_with_panel_image_paths = args["file_with_panel_image_paths"]
    output_path = args["output_path"]
    temporary_development_path = args["temporary_development_path"]
    infile_alignment_file = args["infile_alignment_file"]
    outfile_alignment_file = args["outfile_alignment_file"]

    if sys.version_info[0] < 3:
        raise Exception("Must use Python3. Use python3 in your command line.")

    # Redirect stderr to the log file when driven from the web interface.
    if log_file_path is not None and log_file_path != '':
        sys.stderr = open(log_file_path, 'a')

    def eprint(*args, **kwargs):
        # Print to stderr (which may be the log file, see above).
        print(*args, file=sys.stderr, **kwargs)

    # Parse the per-image line format:
    # imageName,basePath,tempBlue,tempGreen,tempRed,tempNIR,tempRedEdge
    basePath = ''
    imageNamesAll = []
    imageTempNamesBlue = []
    imageTempNamesGreen = []
    imageTempNamesRed = []
    imageTempNamesNIR = []
    imageTempNamesRedEdge = []
    with open(file_with_image_paths) as fp:
        for line in fp:
            imageName, basePath, tempImageNameBlue, tempImageNameGreen, tempImageNameRed, tempImageNameNIR, tempImageNameRedEdge = line.strip().split(",")
            imageNamesAll.append(imageName)
            imageTempNamesBlue.append(tempImageNameBlue)
            imageTempNamesGreen.append(tempImageNameGreen)
            imageTempNamesRed.append(tempImageNameRed)
            imageTempNamesNIR.append(tempImageNameNIR)
            imageTempNamesRedEdge.append(tempImageNameRedEdge)

    panelNames = []
    with open(file_with_panel_image_paths) as fp:
        for line in fp:
            imageName = line.strip()
            panelNames.append(imageName)

    # Compute panel irradiance; fall back to nominal albedos when the panel
    # capture does not report its own (RedEdge band_index order).
    panelCap = Capture.from_filelist(panelNames)
    panel_albedo = panelCap.panel_albedo()
    if panel_albedo is not None:
        panel_reflectance_by_band = panel_albedo
    else:
        panel_reflectance_by_band = [0.58, 0.59, 0.59, 0.54, 0.58] #RedEdge band_index order
    panel_irradiance = panelCap.panel_irradiance(panel_reflectance_by_band)

    # Group image files by capture id: names look like ..._<captureid>_<band>.<ext>
    imageNamesDict = {}
    for i in imageNamesAll:
        s = i.split("_")
        k = s[-1].split(".")
        if s[-2] not in imageNamesDict:
            imageNamesDict[s[-2]] = {}
        imageNamesDict[s[-2]][k[0]] = i

    match_index = 3 # Index of the band. NIR band
    imageNameCaptures = []
    imageNameMatchIndexImages = []
    for i in sorted(imageNamesDict):
        im = []
        for j in sorted(imageNamesDict[i]):
            imageName = imageNamesDict[i][j]
            img = Image(imageName)
            im.append(img)
        if len(im) > 0:
            imageNameMatchIndexImages.append(im[match_index])
            imageNameCaptures.append(im)

    captures = []
    for i in imageNameCaptures:
        im = Capture(i)
        captures.append(im)

    max_alignment_iterations = 1000
    warp_mode = cv2.MOTION_HOMOGRAPHY # MOTION_HOMOGRAPHY or MOTION_AFFINE. For Altum images only use HOMOGRAPHY
    pyramid_levels = None # for images with RigRelatives, setting this to 0 or 1 may improve alignment

    if log_file_path is not None:
        eprint("Aligning images. Depending on settings this can take from a few seconds to many minutes")
    else:
        print("Aligning images. Depending on settings this can take from a few seconds to many minutes")

    # Reuse previously computed warp matrices when available (development
    # cache or an explicitly supplied alignment file).
    warp_matrices = None
    if temporary_development_path is not None:
        if os.path.exists(os.path.join(temporary_development_path,'capturealignment.pkl')):
            with open(os.path.join(temporary_development_path,'capturealignment.pkl'), 'rb') as f:
                warp_matrices, alignment_pairs = pickle.load(f)
    if infile_alignment_file is not None:
        with open(infile_alignment_file, 'rb') as f:
            warp_matrices, alignment_pairs = pickle.load(f)

    if warp_matrices is None:
        warp_matrices, alignment_pairs = imageutils.align_capture(
            captures[0],
            ref_index = match_index,
            max_iterations = max_alignment_iterations,
            warp_mode = warp_mode,
            pyramid_levels = pyramid_levels,
            multithreaded = True
        )

    # Persist the warp matrices for later reuse.
    if temporary_development_path is not None:
        with open(os.path.join(temporary_development_path,'capturealignment.pkl'), 'wb') as f:
            pickle.dump([warp_matrices, alignment_pairs], f)
    if outfile_alignment_file is not None:
        with open(outfile_alignment_file, 'wb') as f:
            pickle.dump([warp_matrices, alignment_pairs], f)

    if log_file_path is not None:
        eprint("Finished Aligning, warp matrices={}".format(warp_matrices))
    else:
        print("Finished Aligning, warp matrices={}".format(warp_matrices))

    rotated_imgs = []
    output_lines = []
    counter = 0
    warp_matrices2 = []  # fallback matrices for captures with a different band count

    for x in captures:
        print(counter)
        if len(warp_matrices) == len(x.images):
            im_aligned = x.create_aligned_capture(
                irradiance_list = panel_irradiance,
                warp_matrices = warp_matrices,
                match_index = match_index,
                warp_mode = warp_mode
            )
        elif len(warp_matrices2) == len(x.images):
            im_aligned = x.create_aligned_capture(
                irradiance_list = panel_irradiance,
                warp_matrices = warp_matrices2,
                match_index = match_index,
                warp_mode = warp_mode
            )
        else:
            # Band count differs from both sets of matrices: realign for
            # this capture's geometry and cache the result in warp_matrices2.
            warp_matrices2, alignment_pairs2 = imageutils.align_capture(
                x,
                ref_index = match_index,
                max_iterations = max_alignment_iterations,
                warp_mode = warp_mode,
                pyramid_levels = pyramid_levels,
                multithreaded = True
            )
            im_aligned = x.create_aligned_capture(
                irradiance_list = panel_irradiance,
                warp_matrices = warp_matrices2,
                match_index = match_index,
                warp_mode = warp_mode
            )
        im_aligned_bands = im_aligned.shape[2]

        blue_img_file = imageTempNamesBlue[counter]
        green_img_file = imageTempNamesGreen[counter]
        red_img_file = imageTempNamesRed[counter]
        nir_img_file = imageTempNamesNIR[counter]

        plt.imsave(basePath+blue_img_file, im_aligned[:,:,0], cmap='gray')
        plt.imsave(basePath+green_img_file, im_aligned[:,:,1], cmap='gray')
        plt.imsave(basePath+red_img_file, im_aligned[:,:,2], cmap='gray')
        plt.imsave(basePath+nir_img_file, im_aligned[:,:,3], cmap='gray')

        gps_location_blue = x.images[0].location
        gps_location_green = x.images[1].location
        gps_location_red = x.images[2].location
        gps_location_nir = x.images[3].location

        output_lines.append([basePath+blue_img_file, gps_location_blue[0], gps_location_blue[1], gps_location_blue[2]])
        output_lines.append([basePath+green_img_file, gps_location_green[0], gps_location_green[1], gps_location_green[2]])
        output_lines.append([basePath+red_img_file, gps_location_red[0], gps_location_red[1], gps_location_red[2]])
        output_lines.append([basePath+nir_img_file, gps_location_nir[0], gps_location_nir[1], gps_location_nir[2]])

        # The RedEdge band only exists on 5-band captures.
        if im_aligned_bands == 5 and len(x.images) == 5:
            red_edge_img_file = imageTempNamesRedEdge[counter]
            plt.imsave(basePath+red_edge_img_file, im_aligned[:,:,4], cmap='gray')
            gps_location_red_edge = x.images[4].location
            output_lines.append([basePath+red_edge_img_file, gps_location_red_edge[0], gps_location_red_edge[1], gps_location_red_edge[2]])
        else:
            output_lines.append(['NA', 'NA', 'NA', 'NA'])

        counter += 1

    # The with-statement closes the file; the original also called
    # writeFile.close() afterwards, which was redundant.
    with open(output_path, 'w') as writeFile:
        writer = csv.writer(writeFile)
        writer.writerows(output_lines)
def run():
    """CLI entry point: calibrate, align, and stitch MicaSense multispectral
    captures into an RGB and an RNRe (Red/NIR/RedEdge) orthomosaic.

    Steps:
      1. Parse command-line options (image sources, panel images, output paths).
      2. Optionally compute radiance-to-reflectance factors from panel images.
      3. Group band images into captures and align each capture's bands (warp
         matrices are cached in <image_path>/temp/capturealignment.pkl).
      4. Write enhanced RGB (bands 0,1,2) and RNRe (bands 2,3,4) composites per
         capture and stitch them with the external ``stitching_multi`` command.
      5. Save the stitched mosaics and the five individual band images.
    """
    import sys
    from micasense.capture import Capture
    import cv2
    import numpy as np
    import matplotlib.pyplot as plt
    import micasense.imageutils as imageutils
    import micasense.plotutils as plotutils
    import argparse
    import os, glob
    from multiprocessing import Process, freeze_support
    import imutils
    import statistics
    from micasense.image import Image
    from micasense.panel import Panel
    import micasense.utils as msutils
    import csv
    import pickle

    freeze_support()

    ap = argparse.ArgumentParser()
    ap.add_argument(
        "-l",
        "--log_file_path",
        required=False,
        help=
        "file path to write log to. useful for using from the web interface")
    ap.add_argument(
        "-a",
        "--image_path",
        required=False,
        help=
        "image path to directory with all images inside of it. useful for using from command line. e.g. /home/nmorales/MicasenseTest/000. NOTE: a temp folder will be created within this directory"
    )
    ap.add_argument(
        "-b",
        "--file_with_image_paths",
        required=False,
        help=
        "file path to file that has all image file names and temporary file names for each image in it, comma separated and separated by a newline. useful for using from the web interface. e.g. /home/nmorales/myfilewithnames.txt"
    )
    ap.add_argument(
        "-c",
        "--panel_image_path",
        required=False,
        help=
        "image path to directory with all 5 panel images inside of it. useful for using from command line. e.g. /home/nmorales/MicasenseTest/000"
    )
    ap.add_argument(
        "-d",
        "--file_with_panel_image_paths",
        required=False,
        help=
        "file path to file that has all image file names in it, separated by a newline. useful for using from the web interface. e.g. /home/nmorales/myfilewithnames.txt"
    )
    ap.add_argument(
        "-o",
        "--output_path",
        required=True,
        help=
        "output path to directory in which all resulting files will be placed. useful for using from the command line"
    )
    ap.add_argument("-y",
                    "--final_rgb_output_path",
                    required=True,
                    help="output file path for stitched RGB image")
    ap.add_argument("-z",
                    "--final_rnre_output_path",
                    required=True,
                    help="output file path for stitched RNRe image")
    ap.add_argument(
        "-p",
        "--output_path_band1",
        required=True,
        help=
        "output file path in which resulting band 1 will be placed. useful for using from the web interface"
    )
    ap.add_argument(
        "-q",
        "--output_path_band2",
        required=True,
        help=
        "output file path in which resulting band 2 will be placed. useful for using from the web interface"
    )
    ap.add_argument(
        "-r",
        "--output_path_band3",
        required=True,
        help=
        "output file path in which resulting band 3 will be placed. useful for using from the web interface"
    )
    ap.add_argument(
        "-s",
        "--output_path_band4",
        required=True,
        help=
        "output file path in which resulting band 4 will be placed. useful for using from the web interface"
    )
    ap.add_argument(
        "-u",
        "--output_path_band5",
        required=True,
        help=
        "output file path in which resulting band 5 will be placed. useful for using from the web interface"
    )
    ap.add_argument(
        "-n",
        "--number_captures",
        required=False,
        help="When you want to test using only a subset of images.")
    ap.add_argument(
        "-k",
        "--thin_images",
        required=False,
        help=
        "When you have too many images, specify a number of images to skip. e.g. 1 will only use every other image, 2 will use every third image, 3 will use every fourth image."
    )
    ap.add_argument(
        "-w",
        "--work_megapix",
        required=False,
        help="Resolution for image registration step. The default is 0.6 Mpx")
    ap.add_argument(
        "-x",
        "--ba_refine_mask",
        required=False,
        default='xxxxx',
        help=
        "Set refinement mask for bundle adjustment. It looks like 'x_xxx' where 'x' means refine respective parameter and '_' means don't refine one, and has the following format: <fx><skew><ppx><aspect><ppy>. The default mask is 'xxxxx'. If bundle adjustment doesn't support estimation of selected parameter then the respective flag is ignored."
    )
    args = vars(ap.parse_args())

    log_file_path = args["log_file_path"]
    image_path = args["image_path"]
    file_with_image_paths = args["file_with_image_paths"]
    panel_image_path = args["panel_image_path"]
    file_with_panel_image_paths = args["file_with_panel_image_paths"]
    output_path = args["output_path"]  # accepted for CLI compatibility; unused here
    final_rgb_output_path = args["final_rgb_output_path"]
    final_rnre_output_path = args["final_rnre_output_path"]
    output_path_band1 = args["output_path_band1"]
    output_path_band2 = args["output_path_band2"]
    output_path_band3 = args["output_path_band3"]
    output_path_band4 = args["output_path_band4"]
    output_path_band5 = args["output_path_band5"]
    thin_images = args["thin_images"]
    if thin_images is not None:
        thin_images = int(thin_images)
    number_captures = args["number_captures"]
    if number_captures is not None:
        number_captures = int(number_captures)
    # Accepted for CLI compatibility; only used by older stitch command variants.
    work_megapix = args["work_megapix"]
    ba_refine_mask = args["ba_refine_mask"]

    if sys.version_info[0] < 3:
        raise Exception("Must use Python3. Use python3 in your command line.")

    # Redirect stderr to the log file so eprint() below (and any tracebacks)
    # land in the log when one is requested.
    if log_file_path is not None:
        sys.stderr = open(log_file_path, 'a')

    def eprint(*args, **kwargs):
        """Print to stderr (the log file when --log_file_path was given)."""
        print(*args, file=sys.stderr, **kwargs)

    #Must supply either image_path or file_with_image_paths as a source of images
    imageNamesAll = []
    imageTempNames = []
    tempImagePath = None
    if image_path is not None:
        # Directory mode: take every *.tif and generate temp names in ./temp.
        tempImagePath = os.path.join(image_path, 'temp')
        if not os.path.exists(tempImagePath):
            os.makedirs(tempImagePath)

        imageNamesAll = glob.glob(os.path.join(image_path, '*.tif'))
        for idx, val in enumerate(imageNamesAll):
            imageTempNames.append(
                os.path.join(tempImagePath, 'temp' + str(idx) + '.tif'))

    elif file_with_image_paths is not None:
        # File mode: each line is "<image path>,<temp image path>".
        with open(file_with_image_paths) as fp:
            for line in fp:
                imageName, tempImageName = line.strip().split(",")
                imageNamesAll.append(imageName)
                imageTempNames.append(tempImageName)
    else:
        if log_file_path is not None:
            eprint(
                "No input images given. use image_path OR file_with_image_paths args"
            )
        else:
            print(
                "No input images given. use image_path OR file_with_image_paths args"
            )
        # BUGFIX: the original line was a bare `os._exit` attribute access,
        # which never exited; with no images the pipeline would crash later.
        sys.exit(1)

    # Panel images are optional; when present, compute a per-band
    # radiance-to-reflectance factor from the calibration panel.
    panelBandCorrection = {}
    panelNames = []
    if panel_image_path is not None:
        panelNames = glob.glob(os.path.join(panel_image_path, '*.tif'))
    elif file_with_panel_image_paths is not None:
        with open(file_with_panel_image_paths) as fp:
            for line in fp:
                imageName = line.strip()
                panelNames.append(imageName)
    else:
        if log_file_path is not None:
            eprint(
                "No panel input images given. use panel_image_path OR file_with_panel_image_paths args"
            )
        else:
            print(
                "No panel input images given. use panel_image_path OR file_with_panel_image_paths args"
            )
        # Deliberately continue: panel calibration is optional.

    for imageName in panelNames:
        img = Image(imageName)
        band_name = img.band_name
        if img.auto_calibration_image:
            if log_file_path is not None:
                eprint("Found automatic calibration image")
            else:
                print("Found automatic calibration image")
        panel = Panel(img)

        if not panel.panel_detected():
            raise IOError("Panel Not Detected!")

        mean, std, num, sat_count = panel.raw()
        micasense_panel_calibration = panel.reflectance_from_panel_serial()
        radianceToReflectance = micasense_panel_calibration / mean
        # NOTE(review): panelBandCorrection is computed but not consumed below;
        # kept for parity with earlier versions of this pipeline.
        panelBandCorrection[band_name] = radianceToReflectance
        if log_file_path is not None:
            eprint("Detected panel serial: {}".format(panel.serial))
            eprint("Extracted Panel Statistics:")
            eprint("Mean: {}".format(mean))
            eprint("Standard Deviation: {}".format(std))
            eprint("Panel Pixel Count: {}".format(num))
            eprint("Saturated Pixel Count: {}".format(sat_count))
            eprint('Panel Calibration: {:1.3f}'.format(
                micasense_panel_calibration))
            eprint('Radiance to reflectance conversion factor: {:1.3f}'.format(
                radianceToReflectance))
        else:
            print("Detected panel serial: {}".format(panel.serial))
            print("Extracted Panel Statistics:")
            print("Mean: {}".format(mean))
            print("Standard Deviation: {}".format(std))
            print("Panel Pixel Count: {}".format(num))
            print("Saturated Pixel Count: {}".format(sat_count))
            print('Panel Calibration: {:1.3f}'.format(
                micasense_panel_calibration))
            print('Radiance to reflectance conversion factor: {:1.3f}'.format(
                radianceToReflectance))

    # Group band images by capture id. Assumes names like <...>_<capture>_<band>.tif
    # so s[-2] is the capture id and k[0] the band index — verify for new rigs.
    imageNamesDict = {}
    for i in imageNamesAll:
        s = i.split("_")
        k = s[-1].split(".")
        if s[-2] not in imageNamesDict:
            imageNamesDict[s[-2]] = {}
        imageNamesDict[s[-2]][k[0]] = i

    # Walk captures in sorted order, optionally thinning (keep one capture out
    # of every thin_images+1) and stopping after number_captures captures.
    imageNameCaptures = []
    capture_count = 0
    skip_count = 0
    image_count = 0
    skip_proceed = 1
    num_captures_proceed = 1
    for i in sorted(imageNamesDict.keys()):
        im = []
        if thin_images is not None:
            if image_count > 0 and skip_count < thin_images:
                skip_count = skip_count + 1
                skip_proceed = 0
            else:
                skip_count = 0
                skip_proceed = 1
            image_count = image_count + 1

        if skip_proceed == 1:
            if number_captures is not None:
                if capture_count < number_captures:
                    num_captures_proceed = 1
                else:
                    num_captures_proceed = 0
            if num_captures_proceed == 1:
                for j in sorted(imageNamesDict[i].keys()):
                    imageName = imageNamesDict[i][j]
                    img = Image(imageName)
                    im.append(img)
                if len(im) > 0:
                    imageNameCaptures.append(im)
                    capture_count = capture_count + 1

    def enhance_image(rgb):
        """Unsharp-mask and gamma-correct an image whose values lie in [0, 1]."""
        gaussian_rgb = cv2.GaussianBlur(rgb, (9, 9), 10.0)
        gaussian_rgb[gaussian_rgb < 0] = 0
        gaussian_rgb[gaussian_rgb > 1] = 1
        unsharp_rgb = cv2.addWeighted(rgb, 1.5, gaussian_rgb, -0.5, 0)
        unsharp_rgb[unsharp_rgb < 0] = 0
        unsharp_rgb[unsharp_rgb > 1] = 1

        # Apply a gamma correction to make the render appear closer to what our eyes would see
        gamma = 1.4
        gamma_corr_rgb = unsharp_rgb**(1.0 / gamma)
        return (gamma_corr_rgb)

    captures = []
    for i in imageNameCaptures:
        im = Capture(i)
        captures.append(im)

    imageCaptureSets = captures

    ## Alignment settings
    img_type = "reflectance"
    match_index = 0  # Index of the band
    max_alignment_iterations = 1000
    warp_mode = cv2.MOTION_HOMOGRAPHY  # MOTION_HOMOGRAPHY or MOTION_AFFINE. For Altum images only use HOMOGRAPHY
    pyramid_levels = None  # for images with RigRelatives, setting this to 0 or 1 may improve alignment

    if log_file_path is not None:
        eprint(img_type)
        eprint(
            "Alinging images. Depending on settings this can take from a few seconds to many minutes"
        )
    else:
        print(img_type)
        print(
            "Alinging images. Depending on settings this can take from a few seconds to many minutes"
        )

    # Reuse cached warp matrices from a previous run when available; alignment
    # is by far the slowest step.
    warp_matrices = None
    if tempImagePath is not None:
        if os.path.exists(os.path.join(tempImagePath, 'capturealignment.pkl')):
            with open(os.path.join(tempImagePath, 'capturealignment.pkl'),
                      'rb') as f:
                warp_matrices, alignment_pairs = pickle.load(f)

    if warp_matrices is None:
        warp_matrices, alignment_pairs = imageutils.align_capture(
            captures[0],
            ref_index=match_index,
            max_iterations=max_alignment_iterations,
            warp_mode=warp_mode,
            pyramid_levels=pyramid_levels,
            multithreaded=True)

    if log_file_path is not None:
        eprint("Finished Aligning, warp matrices={}".format(warp_matrices))
    else:
        print("Finished Aligning, warp matrices={}".format(warp_matrices))

    if tempImagePath is not None:
        with open(os.path.join(tempImagePath, 'capturealignment.pkl'),
                  'wb') as f:
            pickle.dump([warp_matrices, alignment_pairs], f)

    # Per capture: align, then write an RGB (bands 0,1,2) and an RNRe
    # (bands 2,3,4) composite to the temp image files for stitching.
    images_to_stitch1 = []
    images_to_stitch2 = []
    count = 0
    for x in imageCaptureSets:
        cropped_dimensions, edges = imageutils.find_crop_bounds(
            x, warp_matrices, warp_mode=warp_mode)
        im_aligned = imageutils.aligned_capture(x,
                                                warp_matrices,
                                                warp_mode,
                                                cropped_dimensions,
                                                match_index,
                                                img_type=img_type)
        if log_file_path is not None:
            eprint(im_aligned.shape)
        else:
            print(im_aligned.shape)

        i1 = im_aligned[:, :, [0, 1, 2]]
        i1 = enhance_image(i1)
        image1 = np.uint8(i1 * 255)
        cv2.imwrite(imageTempNames[count], image1)
        images_to_stitch1.append(imageTempNames[count])
        count = count + 1

        i2 = im_aligned[:, :, [2, 3, 4]]
        i2 = enhance_image(i2)
        image2 = np.uint8(i2 * 255)
        cv2.imwrite(imageTempNames[count], image2)
        images_to_stitch2.append(imageTempNames[count])
        count = count + 1

        # Drop per-capture arrays promptly to bound peak memory use.
        del cropped_dimensions
        del edges
        del im_aligned
        del i1
        del i2
        del image1
        del image2

    sep = " "
    images_string1 = sep.join(images_to_stitch1)
    images_string2 = sep.join(images_to_stitch2)
    num_images = len(images_to_stitch1)

    # Free the large bookkeeping structures before spawning the stitcher.
    del imageNamesAll
    del imageTempNames
    del imageNamesDict
    del panelNames
    del imageNameCaptures
    del imageCaptureSets
    del images_to_stitch1
    del images_to_stitch2

    # NOTE(review): paths are interpolated into a shell command line; they must
    # not contain single quotes. Consider subprocess.run([...], shell=False).
    log_file_path_string = ''
    if log_file_path is not None:
        log_file_path_string = " --log_file '" + log_file_path + "'"
    stitchCmd = "stitching_multi " + images_string1 + " " + images_string2 + " --num_images " + str(
        num_images
    ) + " --result1 '" + final_rgb_output_path + "' --result2 '" + final_rnre_output_path + "' " + log_file_path_string
    if log_file_path is not None:
        eprint(stitchCmd)
        eprint(len(stitchCmd))
    else:
        print(stitchCmd)
        print(len(stitchCmd))
    os.system(stitchCmd)

    # Re-read the stitched mosaics, enhance them, and write the final outputs.
    final_result_img1 = cv2.imread(final_rgb_output_path, cv2.IMREAD_UNCHANGED)
    final_result_img2 = cv2.imread(final_rnre_output_path,
                                   cv2.IMREAD_UNCHANGED)
    final_result_img1 = enhance_image(final_result_img1 / 255)
    final_result_img2 = enhance_image(final_result_img2 / 255)

    plt.imsave(final_rgb_output_path, final_result_img1)
    plt.imsave(final_rnre_output_path, final_result_img2)

    # Individual bands: 1-3 from the RGB mosaic, 4-5 from the RNRe mosaic.
    plt.imsave(output_path_band1, final_result_img1[:, :, 0], cmap='gray')
    plt.imsave(output_path_band2, final_result_img1[:, :, 1], cmap='gray')
    plt.imsave(output_path_band3, final_result_img1[:, :, 2], cmap='gray')
    plt.imsave(output_path_band4, final_result_img2[:, :, 1], cmap='gray')
    plt.imsave(output_path_band5, final_result_img2[:, :, 2], cmap='gray')
示例#10
0
#
# It's also good to use an image for alignment which is taken near the same level above ground as the rest of the flights. Above approximately 35m AGL, the alignment will be consistent. However, if images taken closer to the ground are used, such as panel images, the same alignment transformation will not work for the flight data.

# In[ ]:

import cv2
import numpy as np
import matplotlib.pyplot as plt
import multiprocessing
import micasense.imageutils as imageutils

# Register every band of the capture against the reference band. Raising
# max_iterations to 1000+ improves alignment at the cost of much longer runtimes.
print(
    "Alinging images. Depending on settings this can take from a few seconds to many minutes"
)
warp_matrices, alignment_pairs = imageutils.align_capture(
    capture, max_iterations=100)

print("Finished Aligning, warp matrices:")
for band_index, warp_matrix in enumerate(warp_matrices):
    print("Band {}:\n{}".format(band_index, warp_matrix))

# ## Crop Aligned Images
# After finding image alignments we may need to remove pixels around the edges which aren't present in every image in the capture.  To do this we use the affine transforms found above and the image distortions from the image metadata.  OpenCV provides a couple of handy helpers for this task in the  `cv2.undistortPoints()` and `cv2.transform()` methods.  These methods take a set of pixel coordinates and apply our undistortion matrix and our affine transform, respectively.  So, just as we did when registering the images, we first apply the undistortion process to the coordinates of the image borders, then we apply the affine transformation to that result. The resulting pixel coordinates tell us where the image borders end up after this pair of transformations, and we can then crop the resultant image to these coordinates.

# In[ ]:

# Gather per-band lens parameters needed for the crop-bounds computation.
dist_coeffs = []
cam_mats = []
# create lists of the distortion coefficients and camera matricies
# NOTE(review): only dist_coeffs is filled here; cam_mats stays empty.
# Presumably a cam_mats.append(img.cv2_camera_matrix()) line was truncated
# from this example — confirm against the original notebook.
for i, img in enumerate(capture.images):
    dist_coeffs.append(img.cv2_distortion_coeff())
def stackImages(FILE, imageNames, panelNames=None):
    """Align one multispectral capture and write it as a multi-band GeoTIFF stack.

    Parameters:
        FILE: path/name prefix for the output stack ("<FILE>stacked.tiff").
        imageNames: list of per-band image file paths forming one capture.
        panelNames: optional list of calibration-panel image paths; when given,
            panel irradiance is used to produce reflectance imagery.

    Returns:
        1 on success, -1 when band alignment failed.
    """
    import os, glob
    # Alias the module import so the local `capture` variable below does not
    # shadow it (the original rebound the module name, which is fragile).
    import micasense.capture as mscapture
    import cv2
    import numpy as np
    import matplotlib.pyplot as plt
    import micasense.imageutils as imageutils
    import micasense.plotutils as plotutils
    # Allow this code to align both radiance and reflectance images; by excluding
    # a definition for panelNames above, radiance images will be used.
    # For panel images, efforts will be made to automatically extract the panel
    # information, but if the panel/firmware is before Altum 1.3.5, RedEdge 5.1.7
    # the panel reflectance will need to be set in panel_reflectance_by_band.
    # Note: radiance images will not be used to properly create NDVI/NDRE images below.
    if panelNames is not None:
        panelCap = mscapture.Capture.from_filelist(panelNames)
    else:
        panelCap = None

    capture = mscapture.Capture.from_filelist(imageNames)

    if panelCap is not None:
        # Prefer the albedo stored on the panel itself; fall back to defaults.
        if panelCap.panel_albedo() is not None:
            panel_reflectance_by_band = panelCap.panel_albedo()
        else:
            panel_reflectance_by_band = [0.67, 0.69, 0.68, 0.61,
                                         0.67]  #RedEdge band_index order
        panel_irradiance = panelCap.panel_irradiance(panel_reflectance_by_band)
        img_type = "reflectance"
        capture.plot_undistorted_reflectance(panel_irradiance)
    else:
        # Without a panel, use DLS-based reflectance when possible.
        if capture.dls_present():
            img_type = 'reflectance'
        else:
            img_type = "radiance"

    ## Alignment settings
    match_index = 1  # Index of the band
    max_alignment_iterations = 10
    warp_mode = cv2.MOTION_HOMOGRAPHY  # MOTION_HOMOGRAPHY or MOTION_AFFINE. For Altum images only use HOMOGRAPHY
    pyramid_levels = 0  # for images with RigRelatives, setting this to 0 or 1 may improve alignment

    print(
        "Aligning images. Depending on settings this can take from a few seconds to many minutes"
    )
    # Can potentially increase max_iterations for better results, but longer runtimes
    warp_matrices, alignment_pairs = imageutils.align_capture(
        capture,
        ref_index=match_index,
        max_iterations=max_alignment_iterations,
        warp_mode=warp_mode,
        pyramid_levels=pyramid_levels)
    if warp_matrices == -1:
        # Alignment failed (sentinel presumably returned by a patched
        # align_capture — the stock micasense version returns a list).
        return -1
    print("Finished Aligning, warp matrices={}".format(warp_matrices))

    cropped_dimensions, edges = imageutils.find_crop_bounds(
        capture, warp_matrices, warp_mode=warp_mode)
    im_aligned = imageutils.aligned_capture(capture,
                                            warp_matrices,
                                            warp_mode,
                                            cropped_dimensions,
                                            match_index,
                                            img_type=img_type)

    # BUGFIX: the original allocated an unused np.zeros array just to read its
    # shape; take the dimensions from the aligned stack directly.
    rows, cols, bands = im_aligned.shape

    # gdal_array import kept deliberately — it enables numpy array I/O in
    # some GDAL builds.
    from osgeo import gdal, gdal_array
    driver = gdal.GetDriverByName('GTiff')
    # BUGFIX: the original joined the name with an undefined global `directory`
    # (guaranteed NameError); the stack is now written next to FILE instead.
    filename = FILE + "stacked"  #blue,green,red,nir,redEdge
    outRaster = driver.Create(filename + ".tiff", cols, rows,
                              im_aligned.shape[2], gdal.GDT_UInt16)

    normalize = (img_type == 'radiance'
                 )  # normalize radiance images to fit with in UInt16

    # Output a 'stack' in the same band order as RedEdge/Altum
    # Blue,Green,Red,NIR,RedEdge[,Thermal]
    # reflectance stacks are output with 32768=100% reflectance to provide some overhead for specular reflections
    # radiance stacks are output with 65535=100% radiance to provide some overhead for specular reflections

    # NOTE: NIR and RedEdge are not in wavelength order!

    multispec_min = np.min(im_aligned[:, :, 1:5])
    multispec_max = np.max(im_aligned[:, :, 1:5])

    for i in range(0, 5):
        outband = outRaster.GetRasterBand(i + 1)
        if normalize:
            outdata = imageutils.normalize(im_aligned[:, :, i], multispec_min,
                                           multispec_max)
        else:
            # Clamp reflectance to [0, 2] before scaling (specular headroom).
            outdata = im_aligned[:, :, i]
            outdata[outdata < 0] = 0
            outdata[outdata > 2] = 2

        outdata = outdata * 32767
        outdata[outdata < 0] = 0
        outdata[outdata > 65535] = 65535
        outband.WriteArray(outdata)
        outband.FlushCache()

    if im_aligned.shape[2] == 6:
        # Sixth band (thermal): scale to centi-C to fit into uint16.
        outband = outRaster.GetRasterBand(6)
        outdata = im_aligned[:, :, 5] * 100
        outdata[outdata < 0] = 0
        outdata[outdata > 65535] = 65535
        outband.WriteArray(outdata)
        outband.FlushCache()
    outRaster = None  # releasing the dataset flushes and closes the file
    return 1