Code Example #1
def align_capture(capture, warp_mode, img_type):
    ## Alignment settings
    match_index = 1  # Index of the reference band to which the others are aligned
    max_alignment_iterations = 50
    #    warp_mode = cv2.MOTION_HOMOGRAPHY # MOTION_HOMOGRAPHY or MOTION_AFFINE. For Altum images only use HOMOGRAPHY
    #    warp_mode = cv2.MOTION_TRANSLATION
    pyramid_levels = 0  # for images with RigRelatives, setting this to 0 or 1 may improve alignment
    epsilon_threshold = 1e-10
    print(
        "Aligning images. Depending on settings this can take from a few seconds to many minutes"
    )
    # Can potentially increase max_iterations for better results, but longer runtimes
    warp_matrices, alignment_pairs = imageutils.align_capture(
        capture,
        ref_index=match_index,
        max_iterations=max_alignment_iterations,
        #                                                              multithreaded=False,
        #                                                              debug = True,
        epsilon_threshold=epsilon_threshold,
        warp_mode=warp_mode,
        pyramid_levels=pyramid_levels)

    print("Finished Aligning, warp matrices={}".format(warp_matrices))

    cropped_dimensions, edges = imageutils.find_crop_bounds(
        capture, warp_matrices, warp_mode=warp_mode)
    im_aligned = imageutils.aligned_capture(capture,
                                            warp_matrices,
                                            warp_mode,
                                            cropped_dimensions,
                                            match_index,
                                            img_type=img_type)

    return im_aligned, warp_matrices
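
A minimal usage sketch for the function above (assumptions: the micasense library is importable and 'data/IMG_0001_*.tif' is an illustrative file pattern for one multi-band capture; adapt both to your data):

import glob
import cv2
import micasense.capture as capture

# build a Capture from the per-band TIFFs of a single shot (illustrative path)
image_names = glob.glob('data/IMG_0001_*.tif')
cap = capture.Capture.from_filelist(image_names)
# align with a full homography, producing radiance imagery
im_aligned, warp_matrices = align_capture(cap, cv2.MOTION_HOMOGRAPHY, 'radiance')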
Code Example #2
File: capture.py  Project: micasense/imageprocessing
 def create_aligned_capture(self, irradiance_list=None, warp_matrices=None, normalize=False, img_type=None,
                            motion_type=cv2.MOTION_HOMOGRAPHY):
     """
     Creates aligned Capture. Computes undistorted radiance or reflectance images if necessary.
     :param irradiance_list: List of mean panel region irradiance.
     :param warp_matrices: 2d List of warp matrices derived from Capture.get_warp_matrices()
     :param normalize: FIXME: This parameter isn't used?
     :param img_type: str 'radiance' or 'reflectance' depending on image metadata.
     :param motion_type: OpenCV import. Also known as warp_mode. MOTION_HOMOGRAPHY or MOTION_AFFINE.
                         For Altum images only use HOMOGRAPHY.
     :return: ndarray with alignment changes
     """
     if img_type is None and irradiance_list is None and self.dls_irradiance() is None:
         self.compute_undistorted_radiance()
         img_type = 'radiance'
     elif img_type is None:
         if irradiance_list is None:
             irradiance_list = self.dls_irradiance() + [0]
         self.compute_undistorted_reflectance(irradiance_list)
         img_type = 'reflectance'
     if warp_matrices is None:
         warp_matrices = self.get_warp_matrices()
     cropped_dimensions, _ = imageutils.find_crop_bounds(self, warp_matrices, warp_mode=motion_type)
     self.__aligned_capture = imageutils.aligned_capture(self,
                                                         warp_matrices,
                                                         motion_type,
                                                         cropped_dimensions,
                                                         None,
                                                         img_type=img_type)
     return self.__aligned_capture
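
A usage sketch for this method (assumptions: cap is a micasense.capture.Capture and panel_irradiance is a per-band panel irradiance list as computed in the panel-handling examples further down):

warp_matrices = cap.get_warp_matrices()
stack = cap.create_aligned_capture(irradiance_list=panel_irradiance,
                                   warp_matrices=warp_matrices,
                                   img_type='reflectance')
print(stack.shape)  # rows x cols x bands ndarray of aligned reflectance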
Code Example #3
def proc_imgs(i, warp_matrices, bndFolders, panel_irradiance, normalize=None):
    
    
#    for i in imgset.captures: 
    
    i.compute_reflectance(panel_irradiance) 
    #i.plot_undistorted_reflectance(panel_irradiance)  


    cropped_dimensions, edges = imageutils.find_crop_bounds(i, warp_matrices)
    
    im_aligned = imageutils.aligned_capture(i, warp_matrices,
                                            cv2.MOTION_HOMOGRAPHY,
                                            cropped_dimensions,
                                            None, img_type="reflectance")
    
    im_display = np.zeros((im_aligned.shape[0], im_aligned.shape[1], 5), dtype=np.float32)

    # normalized display stack (computed but not used by the export below)
    for iM in range(0, im_aligned.shape[2]):
        im_display[:,:,iM] = imageutils.normalize(im_aligned[:,:,iM]) * 65535

    for k in range(0, im_display.shape[2]):
        im = i.images[k]
        hd, nm = os.path.split(im.path)
        # index bands with the loop variable k (the parameter i is the capture object)
        outdata = im_aligned[:,:,k]
        outdata[outdata < 0] = 0
        outdata[outdata > 1] = 1

        outfile = os.path.join(bndFolders[k], nm)
        imageio.imwrite(outfile, outdata)

        # copy EXIF/IPTC/XMP metadata from the source band image onto the output
        # (requires the exiftool binary on PATH)
        cmd = ["exiftool", "-tagsFromFile", im.path, "-file:all", "-iptc:all",
               "-exif:all", "-xmp", "-Composite:all", outfile,
               "-overwrite_original"]
        call(cmd)
Code Example #4
def proc_stack(i, warp_matrices, bndFolders, panel_irradiance):
    
    i.compute_reflectance(panel_irradiance) 
        #i.plot_undistorted_reflectance(panel_irradiance)  
    
    
    cropped_dimensions, edges = imageutils.find_crop_bounds(i, warp_matrices)
    
    im_aligned = imageutils.aligned_capture(i, warp_matrices,
                                            cv2.MOTION_HOMOGRAPHY,
                                            cropped_dimensions,
                                            None, img_type="reflectance")
    
    im_display = np.zeros((im_aligned.shape[0],im_aligned.shape[1],5), 
                          dtype=np.float32)
    
    rows, cols, bands = im_display.shape
    driver = gdal.GetDriverByName('GTiff')
    
    im = i.images[1].path
    hd, nm = os.path.split(im[:-6])

    # reflFolder is expected to be defined at module scope
    filename = os.path.join(reflFolder, nm+'.tif')  # blue,green,red,nir,redEdge
    
    outRaster = driver.Create(filename, cols, rows, 5, gdal.GDT_Byte)
    normalize = False
    
    # Output a 'stack' in the same band order as RedEdge/Altum
    # Blue,Green,Red,NIR,RedEdge[,Thermal]
    
    # NOTE: NIR and RedEdge are not in wavelength order!
    
    i.compute_reflectance(panel_irradiance+[0])
    
    for band in range(0, 5):
        outband = outRaster.GetRasterBand(band + 1)
        if normalize:
            outband.WriteArray(imageutils.normalize(im_aligned[:,:,band]) * 65535)
        else:
            outdata = im_aligned[:,:,band]
            outdata[outdata < 0] = 0
            outdata[outdata > 1] = 1

            outband.WriteArray(outdata * 65535)
        outband.FlushCache()
    
    if im_aligned.shape[2] == 6:
        outband = outRaster.GetRasterBand(6)
        outdata = im_aligned[:,:,5] * 100 # scale to centi-C to fit into uint16
        outdata[outdata<0] = 0
        outdata[outdata>65535] = 65535
        outband.WriteArray(outdata)
        outband.FlushCache()
    outRaster = None
    
    cmd = ["exiftool", "-tagsFromFile", im,  "-file:all", "-iptc:all",
               "-exif:all",  "-xmp", "-Composite:all", filename, 
               "-overwrite_original"]
    call(cmd)
Code Example #5
File: capture.py  Project: gdslab/p4m
 def create_aligned_capture(self,
                            irradiance_list=None,
                            warp_matrices=None,
                            normalize=False,
                            img_type=None,
                            motion_type=cv2.MOTION_HOMOGRAPHY):
     if img_type is None and irradiance_list is None and self.dls_irradiance(
     ) is None:
         self.compute_undistorted_radiance()
         img_type = 'radiance'
     elif img_type is None:
         if irradiance_list is None:
             irradiance_list = self.dls_irradiance() + [0]
         self.compute_undistorted_reflectance(irradiance_list)
         img_type = 'reflectance'
     if warp_matrices is None:
         warp_matrices = self.get_warp_matrices()
     cropped_dimensions, _ = imageutils.find_crop_bounds(
         self, warp_matrices, warp_mode=motion_type)
     self.__aligned_capture = imageutils.aligned_capture(self,
                                                         warp_matrices,
                                                         motion_type,
                                                         cropped_dimensions,
                                                         None,
                                                         img_type=img_type)
     return self.__aligned_capture
Code Example #6
def main(imagePath, warp_matrices, alignment_pairs, panel_irradiance):
    import micasense.capture as capture
    
    for i in range(0,201):
            imageRoot = "IMG_%04i" % i
            outputRoot = imagePath.replace('Imagery\\','').replace('\\','_') + imageRoot
            imageSet = os.path.join(imagePath, imageRoot + '_*.tif')
            imageNames = glob.glob(imageSet)
            if not len(imageNames): continue #skip
        # try:
            imgCap = capture.Capture.from_filelist(imageNames)
            imgCap.compute_reflectance(panel_irradiance)
            
            dist_coeffs = []
            cam_mats = []
            # gather per-band distortion coefficients and camera matrices
            for img in imgCap.images:
                dist_coeffs.append(img.cv2_distortion_coeff())
                cam_mats.append(img.cv2_camera_matrix())

            cropped_dimensions = imageutils.find_crop_bounds(imgCap.images[0].size(), warp_matrices, dist_coeffs, cam_mats)
            im_aligned = imageutils.aligned_capture(warp_matrices, alignment_pairs, cropped_dimensions)
            im_display = np.zeros((im_aligned.shape[0],im_aligned.shape[1],5), dtype=np.float32 )

            for b in range(0, im_aligned.shape[2]):
                im_display[:,:,b] = imageutils.normalize(im_aligned[:,:,b])

            rgb = im_display[:,:,[2,1,0]]
            cir = im_display[:,:,[3,2,1]]

            gaussian_rgb = cv2.GaussianBlur(rgb, (9,9), 10.0)
            gaussian_rgb[gaussian_rgb<0] = 0
            gaussian_rgb[gaussian_rgb>1] = 1
            unsharp_rgb = cv2.addWeighted(rgb, 1.5, gaussian_rgb, -0.5, 0)
            unsharp_rgb[unsharp_rgb<0] = 0
            unsharp_rgb[unsharp_rgb>1] = 1
            gamma = 1.4
            gamma_corr_rgb = unsharp_rgb**(1.0/gamma)

            # Output
            imtype = '.jpg'  # file extension for the quick-look exports
            imageio.imwrite(os.path.join('.','Output','rgb',outputRoot+imtype), (255*gamma_corr_rgb).astype('uint8'))
            imageio.imwrite(os.path.join('.','Output','cir',outputRoot+imtype), (255*cir).astype('uint8'))

            from osgeo import gdal, gdal_array
            rows, cols, bands = im_display.shape
            driver = gdal.GetDriverByName('GTiff')
            outRaster = driver.Create(os.path.join('.','Output','5band',outputRoot+".tiff"), cols, rows, bands, gdal.GDT_Float32)

            for b in range(0, bands):
                outband = outRaster.GetRasterBand(b + 1)
                outband.WriteArray(im_aligned[:,:,b])
                outband.FlushCache()

            outRaster = None
            im_aligned = None
            print ("processed " + imageSet)
Code Example #7
def AllignImage(mat, images):
    if images.dls_present():
        img_type='reflectance'
    else:
        img_type = "radiance"
    warp_mode = cv2.MOTION_HOMOGRAPHY
    match_index = 4
    cropped_dimensions, _ = imageutils.find_crop_bounds(images, mat, warp_mode=warp_mode)
    im_aligned = imageutils.aligned_capture(images, mat, warp_mode, cropped_dimensions, match_index, img_type=img_type)
    return im_aligned
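
A sketch of driving this helper, assuming the warp matrices come from Capture.get_warp_matrices() as in the surrounding examples (variable names are illustrative):

# images is a micasense.capture.Capture loaded elsewhere
mat = images.get_warp_matrices()
im_aligned = AllignImage(mat, images)  # function name spelling follows the source
print(im_aligned.shape)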
Code Example #8
File: capture.py  Project: petebunting/Sfm
    def save_capture_as_reflectance_stack(self,
                                          outfilename,
                                          irradiance_list=None,
                                          warp_matrices=None,
                                          normalize=False):
        from osgeo.gdal import GetDriverByName, GDT_UInt16
        self.compute_reflectance(irradiance_list)
        if warp_matrices is None:
            warp_matrices = self.get_warp_matrices()
        cropped_dimensions, edges = imageutils.find_crop_bounds(
            self, warp_matrices)
        im_aligned = imageutils.aligned_capture(self,
                                                warp_matrices,
                                                cv2.MOTION_HOMOGRAPHY,
                                                cropped_dimensions,
                                                None,
                                                img_type="reflectance")

        rows, cols, bands = im_aligned.shape
        driver = GetDriverByName('GTiff')
        outRaster = driver.Create(outfilename, cols, rows, bands, GDT_UInt16)
        if outRaster is None:
            raise IOError("could not load gdal GeoTiff driver")
        for i in range(0, 5):
            outband = outRaster.GetRasterBand(i + 1)
            outdata = im_aligned[:, :, i]
            outdata[outdata < 0] = 0
            outdata[outdata > 1] = 1
            outband.WriteArray(outdata * 32768)
            outband.FlushCache()

        if bands == 6:
            outband = outRaster.GetRasterBand(6)
            outdata = (
                im_aligned[:, :, 5] + 273.15
            ) * 100  # scale from float degC back to centi-Kelvin to fit into uint16
            outdata[outdata < 0] = 0
            outdata[outdata > 65535] = 65535
            outband.WriteArray(outdata)
            outband.FlushCache()
        outRaster = None
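
Reading such a stack back requires inverting the scaling above; a minimal sketch, assuming 'stack.tif' is a file written by this method:

from osgeo import gdal
import numpy as np

ds = gdal.Open('stack.tif')  # illustrative path
# reflectance bands were scaled by 32768 on write
refl_band1 = ds.GetRasterBand(1).ReadAsArray().astype(np.float32) / 32768.0
if ds.RasterCount == 6:
    # the thermal band was stored as centi-Kelvin; convert back to degrees C
    temp_c = ds.GetRasterBand(6).ReadAsArray().astype(np.float32) / 100.0 - 273.15
ds = None  # close the dataset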
Code Example #9
def proc_imgs_comp(i, warp_matrices, bndFolders, panel_irradiance):
    
    
    
    i.compute_reflectance(panel_irradiance) 
    #i.plot_undistorted_reflectance(panel_irradiance)  


    cropped_dimensions, edges = imageutils.find_crop_bounds(i, warp_matrices)
    
    im_aligned = imageutils.aligned_capture(i, warp_matrices,
                                            cv2.MOTION_HOMOGRAPHY,
                                            cropped_dimensions,
                                            None, img_type="reflectance")
    
    im_display = np.zeros((im_aligned.shape[0],im_aligned.shape[1],5), dtype=np.float32 )
    
    for iM in range(0,im_aligned.shape[2]):
        im_display[:,:,iM] =  imageutils.normalize(im_aligned[:,:,iM])
    
    rgb = im_display[:,:,[2,1,0]] 
    #cir = im_display[:,:,[3,2,1]] 
    RRENir = im_display[:,:,[4,3,2]] 
    
    imoot = [rgb, RRENir]
    imtags = ["RGB.tif", "RRENir.tif"]
    im = i.images[1]
    hd, nm = os.path.split(im.path[:-5])
    
    for ind, k in enumerate(bndFolders):
         
         img8 = bytescale(imoot[ind])
         
         outfile = os.path.join(k, nm+imtags[ind])
         
         imageio.imwrite(outfile, img8)
        
         cmd = ["exiftool", "-tagsFromFile", im.path,  "-file:all", "-iptc:all",
               "-exif:all",  "-xmp", "-Composite:all", outfile, 
               "-overwrite_original"]
         call(cmd)
Code Example #10
def test_cropping(non_panel_altum_capture):
    warp_matrices = non_panel_altum_capture.get_warp_matrices()
    cropped_dimensions, _ = imageutils.find_crop_bounds(
        non_panel_altum_capture, warp_matrices)
    assert (cropped_dimensions == pytest.approx(expected_dimensions, abs=1))
Code Example #11
File: test_function.py  Project: robert827/data
def correction(image_path, image_name, panel_path, panel_name):
    import micasense.capture as capture
    get_ipython().run_line_magic('load_ext', 'autoreload')
    get_ipython().run_line_magic('autoreload', '2')
    # input photo location
    get_ipython().run_line_magic('matplotlib', 'inline')

    panelNames = None

    # This is an altum image with RigRelatives and a thermal band
    imageNames = glob.glob(os.path.join(image_path, image_name + '_*.tif'))
    panelNames = glob.glob(os.path.join(panel_path, panel_name + '_*.tif'))

    if panelNames:  # glob returns a list, so test for emptiness rather than None
        panelCap = capture.Capture.from_filelist(panelNames)
    else:
        panelCap = None

    capture = capture.Capture.from_filelist(imageNames)  # note: rebinds the module name 'capture'

    for img in capture.images:
        if img.rig_relatives is None:
            raise ValueError(
                "Images must have RigRelatives tags set which requires updated firmware and calibration. See the links in text above"
            )

    if panelCap is not None:
        if panelCap.panel_albedo() is not None:
            panel_reflectance_by_band = panelCap.panel_albedo()
        else:
            # RedEdge band_index order
            panel_reflectance_by_band = [0.67, 0.69, 0.68, 0.61, 0.67]
        panel_irradiance = panelCap.panel_irradiance(panel_reflectance_by_band)
        img_type = "reflectance"
        capture.plot_undistorted_reflectance(panel_irradiance)
    else:
        if False:  # capture.dls_present():
            img_type = 'reflectance'
            capture.plot_undistorted_reflectance(capture.dls_irradiance())
        else:
            img_type = "radiance"
            capture.plot_undistorted_radiance()

    # image alignment
    warp_mode = cv2.MOTION_HOMOGRAPHY
    warp_matrices = capture.get_warp_matrices()

    cropped_dimensions, edges = imageutils.find_crop_bounds(
        capture, warp_matrices)
    im_aligned = imageutils.aligned_capture(capture,
                                            warp_matrices,
                                            warp_mode,
                                            cropped_dimensions,
                                            None,
                                            img_type=img_type)

    print("warp_matrices={}".format(warp_matrices))

    rgb_band_indices = [2, 1, 0]

    # Create an empty normalized stack for viewing
    im_display = np.zeros(
        (im_aligned.shape[0], im_aligned.shape[1], capture.num_bands + 1),
        dtype=np.float32)

    # modify these percentiles to adjust contrast
    im_min = np.percentile(im_aligned[:, :, 0:2].flatten(), 0.1)
    # for many images, 0.5 and 99.5 are good values
    im_max = np.percentile(im_aligned[:, :, 0:2].flatten(), 99.9)

    for i in range(0, im_aligned.shape[2]):
        if img_type == 'reflectance':
            # for reflectance images we maintain white-balance by applying the same display scaling to all bands
            im_display[:, :, i] = imageutils.normalize(im_aligned[:, :, i],
                                                       im_min, im_max)
        elif img_type == 'radiance':
            # for radiance images we do an auto white balance, since we don't know the input light
            # spectrum, by stretching each display band histogram to its own min and max
            im_display[:, :, i] = imageutils.normalize(im_aligned[:, :, i])

    rgb = im_display[:, :, rgb_band_indices]

    # composite the RGB image
    # Create an enhanced version of the RGB render using an unsharp mask
    gaussian_rgb = cv2.GaussianBlur(rgb, (9, 9), 10.0)
    gaussian_rgb[gaussian_rgb < 0] = 0
    gaussian_rgb[gaussian_rgb > 1] = 1
    unsharp_rgb = cv2.addWeighted(rgb, 1.5, gaussian_rgb, -0.5, 0)
    unsharp_rgb[unsharp_rgb < 0] = 0
    unsharp_rgb[unsharp_rgb > 1] = 1

    # Apply a gamma correction to make the render appear closer to what our eyes would see
    gamma = 1.2
    gamma_corr_rgb = unsharp_rgb**(1.0 / gamma)
    plt.imshow(gamma_corr_rgb, aspect='equal')
    plt.axis('off')
    plt.show()
    # export the RGB image
    imtype = 'png'  # or 'jpg'
    imageio.imwrite(image_name + '_rgb.' + imtype,
                    (255 * gamma_corr_rgb).astype('uint8'))

    blue_band = capture.band_names_lower().index('blue')
    green_band = capture.band_names_lower().index('green')
    red_band = capture.band_names_lower().index('red')
    nir_band = capture.band_names_lower().index('nir')
    rededge_band = capture.band_names_lower().index('red edge')

    blue = im_aligned[:, :, blue_band]
    blue = blue.astype('float64')
    np.save(image_name + '_blue', blue)

    green = im_aligned[:, :, green_band]
    green = green.astype('float64')
    np.save(image_name + '_green', green)

    red = im_aligned[:, :, red_band]
    red = red.astype('float64')
    np.save(image_name + '_red', red)

    rededge = im_aligned[:, :, rededge_band]
    rededge = rededge.astype('float64')
    np.save(image_name + '_rededge', rededge)

    nir = im_aligned[:, :, nir_band]
    nir = nir.astype('float64')
    np.save(image_name + '_nir', nir)
    print("all band npy files saved")
Code Example #12
    raise IOError(
        'Error: could not find a proper alignment matrix for this altitude!')

# In[]
''' ================= convert to REFLECTANCE and STACK together=================  '''

imlist = imageset.ImageSet.from_directory(image_path)

# -------------- convert the imagelist to Panda data frame --------------------
data, columns = imlist.as_nested_lists()
df = pd.DataFrame.from_records(data, index='capture_id', columns=columns)

for cap in imlist.captures:

    cropped_dimensions, _ = imageutils.find_crop_bounds(cap,
                                                        warp_matrices,
                                                        warp_mode=warp_mode)

    stacked_reflectance = imageutils.my_alignment(
        cap,
        warp_matrices,
        warp_mode,
        cropped_dimensions,
        irradiance,
        interpolation_mode=cv2.INTER_LANCZOS4)
    # top_l and bottom_r (panel corner pixel coordinates) are defined earlier in the source script
    panel_cropped_bands = stacked_reflectance[top_l[1]:bottom_r[1],
                                              top_l[0]:bottom_r[0], :]
    np.mean(panel_cropped_bands, (0, 1))  # per-band panel means (result unused here)
    '''save the stacked'''
    image_name_blue = cap.images[0].meta.get_item("File:FileName")
    image_name = image_name_blue[0:-6]  # remove '_1.tif'
Code Example #13
def proc_imgs_comp(i, warp_matrices, bndFolders, panel_irradiance, rf,
                   warp_md):

    i.compute_reflectance(panel_irradiance)
    #i.plot_undistorted_reflectance(panel_irradiance)

    cropped_dimensions, edges = imageutils.find_crop_bounds(i, warp_matrices)

    im_aligned = imageutils.aligned_capture(i,
                                            warp_matrices,
                                            warp_md,
                                            cropped_dimensions,
                                            match_index=rf,
                                            img_type="reflectance")

    im_display = np.zeros((im_aligned.shape[0], im_aligned.shape[1], 5),
                          dtype=np.float32)

    for iM in range(0, im_aligned.shape[2]):
        im_display[:, :, iM] = imageutils.normalize(im_aligned[:, :, iM]) * 32768

    rgb = im_display[:, :, [2, 1, 0]]
    #cir = im_display[:,:,[3,2,1]]
    RRENir = im_display[:, :, [4, 3, 2]]

    #    cir = im_display[:,:,[3,2,1]]
    #
    #    grRE = im_display[:,:,[4,2,1]]
    #
    #    imoot = [rgb, RRENir]

    del im_display

    imtags = ["RGB.tif", "RRENir.tif"]  #, "GRNir.tif", "GRRE.tif"]
    im = i.images[1]
    hd, nm = os.path.split(im.path[:-5])

    #cmdList = []

    def _writeim(image, folder, nametag, im):

        #for ind, k in enumerate(bndFolders):
        #img8 = bytescale(imoot[ind])
        #imgre = exposure.rescale_intensity(image,  out_range='uint16')

        #with warnings.catch_warnings():
        #warnings.simplefilter("ignore")
        img16 = np.uint16(np.round(image, decimals=0))
        del image
        outFile = os.path.join(folder, nm + nametag)
        #imageio.imwrite(outfile, img8)

        #imOut = Image.fromarray(img16)

        #imOut.save(outFile)
        imageio.imwrite(outFile, img16)

        del img16
        cmd = [
            "exiftool", "-tagsFromFile", im.path, "-file:all", "-iptc:all",
            "-exif:all", "-xmp", "-Composite:all", outFile,
            "-overwrite_original"
        ]
        call(cmd)

    _writeim(rgb, bndFolders[0], imtags[0], im)
    del rgb
    _writeim(RRENir, bndFolders[1], imtags[1], im)
    del RRENir  #,
Code Example #14
def align_template(imAl, mx, reflFolder, ref_ind=rf):  # rf is a module-level default defined elsewhere

    
    warp_matrices, alignment_pairs = imageutils.align_capture(imAl,
                                                              ref_index=ref_ind, 
                                                              warp_mode=cv2.MOTION_HOMOGRAPHY,
                                                              max_iterations=mx)
    for x,mat in enumerate(warp_matrices):
        print("Band {}:\n{}".format(x,mat))

    # cropped_dimensions is of the form:
    # (first column with overlapping pixels present in all images, 
    #  first row with overlapping pixels present in all images, 
    #  number of columns with overlapping pixels in all images, 
    #  number of rows with overlapping pixels in all images   )
    dist_coeffs = []
    cam_mats = []
# create lists of the distortion coefficients and camera matrices
    for i,img in enumerate(imAl.images):
        dist_coeffs.append(img.cv2_distortion_coeff())
        cam_mats.append(img.cv2_camera_matrix())
        
    warp_mode = cv2.MOTION_HOMOGRAPHY #alignment_pairs[0]['warp_mode']
    match_index = alignment_pairs[0]['ref_index']
    
    cropped_dimensions, edges = imageutils.find_crop_bounds(imAl, 
                                                            warp_matrices,
                                                            warp_mode=cv2.MOTION_HOMOGRAPHY)
   # capture, warp_matrices, cv2.MOTION_HOMOGRAPHY, cropped_dimensions, None, img_type="reflectance",
    im_aligned = imageutils.aligned_capture(imAl, warp_matrices, warp_mode,
                                            cropped_dimensions, match_index,
                                            img_type="reflectance")
    
    im_display = np.zeros((im_aligned.shape[0],im_aligned.shape[1],5), dtype=np.float32 )
    
    for iM in range(0,im_aligned.shape[2]):
        im_display[:,:,iM] =  imageutils.normalize(im_aligned[:,:,iM])
        
    rgb = im_display[:,:,[2,1,0]] 
    cir = im_display[:,:,[3,2,1]] 
    grRE = im_display[:,:,[4,2,1]] 
    
    
    if args.plts:

        fig, axes = plt.subplots(1, 3, figsize=(16, 16))
        # set each title on its own axes; plt.title would only affect the current axes
        axes[0].set_title("Red-Green-Blue Composite")
        axes[0].imshow(rgb)
        axes[1].set_title("Color Infrared (CIR) Composite")
        axes[1].imshow(cir)
        axes[2].set_title("Red edge-Green-Red (ReGR) Composite")
        axes[2].imshow(grRE)
        plt.show()
    
    prevList = [rgb, cir, grRE]
    nmList = ['rgb.jpg', 'cir.jpg', 'grRE.jpg']
    names = [os.path.join(reflFolder, pv) for pv in nmList]
    
    for ind, p in enumerate(prevList):
        img8 = bytescale(p)
        imageio.imwrite(names[ind], img8)
    
    return warp_matrices, alignment_pairs#, dist_coeffs, cam_mats, cropped_dimensions
Code Example #15
# In[ ]:

dist_coeffs = []
cam_mats = []
# create lists of the distortion coefficients and camera matrices
for i, img in enumerate(capture.images):
    dist_coeffs.append(img.cv2_distortion_coeff())
    cam_mats.append(img.cv2_camera_matrix())
# cropped_dimensions is of the form:
# (first column with overlapping pixels present in all images,
#  first row with overlapping pixels present in all images,
#  number of columns with overlapping pixels in all images,
#  number of rows with overlapping pixels in all images   )
cropped_dimensions = imageutils.find_crop_bounds(capture.images[0].size(),
                                                 warp_matrices, dist_coeffs,
                                                 cam_mats)
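# cropped_dimensions has the form (left, top, width, height) per the comment above,
# e.g.: left, top, w, h = cropped_dimensions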

# ## Visualize Aligned Images
#
# Once the transformation has been found, it can be verified by compositing the aligned images to check alignment. The image 'stack' containing all bands can also be exported to a multi-band TIFF file for viewing in external software such as QGIS.  Useful composites are a naturally colored RGB as well as color infrared, or CIR.

# In[ ]:

im_aligned = imageutils.aligned_capture(warp_matrices, alignment_pairs,
                                        cropped_dimensions)
# Create a normalized stack for viewing
im_display = np.zeros((im_aligned.shape[0], im_aligned.shape[1], 5),
                      dtype=np.float32)

for i in range(0, im_aligned.shape[2]):
    # normalize each band for display (as in the examples above)
    im_display[:, :, i] = imageutils.normalize(im_aligned[:, :, i])
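
The multi-band export that the comment above describes is not shown in this fragment; a sketch based on the GDAL pattern used in the other examples here (the output name is illustrative):

from osgeo import gdal

rows, cols, bands = im_display.shape
driver = gdal.GetDriverByName('GTiff')
outRaster = driver.Create('stack.tiff', cols, rows, bands, gdal.GDT_Float32)
for b in range(bands):
    outband = outRaster.GetRasterBand(b + 1)
    outband.WriteArray(im_aligned[:, :, b])
    outband.FlushCache()
outRaster = None  # close and flush the file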
Code Example #16
def run():
    import sys
    from micasense.capture import Capture
    import cv2
    import numpy as np
    import matplotlib.pyplot as plt
    import micasense.imageutils as imageutils
    import micasense.plotutils as plotutils
    import argparse
    import os, glob
    from multiprocessing import Process, freeze_support
    import imutils
    import statistics
    from micasense.image import Image
    from micasense.panel import Panel
    import micasense.utils as msutils
    import csv
    import pickle

    freeze_support()

    ap = argparse.ArgumentParser()
    ap.add_argument(
        "-l",
        "--log_file_path",
        required=False,
        help=
        "file path to write log to. useful for using from the web interface")
    ap.add_argument(
        "-a",
        "--image_path",
        required=False,
        help=
        "image path to directory with all images inside of it. useful for using from command line. e.g. /home/nmorales/MicasenseTest/000. NOTE: a temp folder will be created within this directory"
    )
    ap.add_argument(
        "-b",
        "--file_with_image_paths",
        required=False,
        help=
        "file path to file that has all image file names and temporary file names for each image in it, comma separated and separated by a newline. useful for using from the web interface. e.g. /home/nmorales/myfilewithnames.txt"
    )
    ap.add_argument(
        "-c",
        "--panel_image_path",
        required=False,
        help=
        "image path to directory with all 5 panel images inside of it. useful for using from command line. e.g. /home/nmorales/MicasenseTest/000"
    )
    ap.add_argument(
        "-d",
        "--file_with_panel_image_paths",
        required=False,
        help=
        "file path to file that has all image file names in it, separated by a newline. useful for using from the web interface. e.g. /home/nmorales/myfilewithnames.txt"
    )
    ap.add_argument(
        "-o",
        "--output_path",
        required=True,
        help=
        "output path to directory in which all resulting files will be placed. useful for using from the command line"
    )
    ap.add_argument("-y",
                    "--final_rgb_output_path",
                    required=True,
                    help="output file path for stitched RGB image")
    ap.add_argument("-z",
                    "--final_rnre_output_path",
                    required=True,
                    help="output file path for stitched RNRe image")
    ap.add_argument(
        "-p",
        "--output_path_band1",
        required=True,
        help=
        "output file path in which resulting band 1 will be placed. useful for using from the web interface"
    )
    ap.add_argument(
        "-q",
        "--output_path_band2",
        required=True,
        help=
        "output file path in which resulting band 2 will be placed. useful for using from the web interface"
    )
    ap.add_argument(
        "-r",
        "--output_path_band3",
        required=True,
        help=
        "output file path in which resulting band 3 will be placed. useful for using from the web interface"
    )
    ap.add_argument(
        "-s",
        "--output_path_band4",
        required=True,
        help=
        "output file path in which resulting band 4 will be placed. useful for using from the web interface"
    )
    ap.add_argument(
        "-u",
        "--output_path_band5",
        required=True,
        help=
        "output file path in which resulting band 5 will be placed. useful for using from the web interface"
    )
    ap.add_argument(
        "-n",
        "--number_captures",
        required=False,
        help="When you want to test using only a subset of images.")
    ap.add_argument(
        "-k",
        "--thin_images",
        required=False,
        help=
        "When you have too many images, specify a number of images to skip. e.g. 1 will only use every other image, 2 will use every third image, 3 will use every fourth image."
    )
    ap.add_argument(
        "-w",
        "--work_megapix",
        required=False,
        help="Resolution for image registration step. The default is 0.6 Mpx")
    ap.add_argument(
        "-x",
        "--ba_refine_mask",
        required=False,
        default='xxxxx',
        help=
        "Set refinement mask for bundle adjustment. It looks like 'x_xxx' where 'x' means refine respective parameter and '_' means don't refine one, and has the following format: <fx><skew><ppx><aspect><ppy>. The default mask is 'xxxxx'. If bundle adjustment doesn't support estimation of selected parameter then the respective flag is ignored."
    )
    args = vars(ap.parse_args())

    log_file_path = args["log_file_path"]
    image_path = args["image_path"]
    file_with_image_paths = args["file_with_image_paths"]
    panel_image_path = args["panel_image_path"]
    file_with_panel_image_paths = args["file_with_panel_image_paths"]
    output_path = args["output_path"]
    final_rgb_output_path = args["final_rgb_output_path"]
    final_rnre_output_path = args["final_rnre_output_path"]
    output_path_band1 = args["output_path_band1"]
    output_path_band2 = args["output_path_band2"]
    output_path_band3 = args["output_path_band3"]
    output_path_band4 = args["output_path_band4"]
    output_path_band5 = args["output_path_band5"]
    thin_images = args["thin_images"]
    if thin_images is not None:
        thin_images = int(thin_images)
    number_captures = args["number_captures"]
    if number_captures is not None:
        number_captures = int(number_captures)
    work_megapix = args["work_megapix"]
    ba_refine_mask = args["ba_refine_mask"]

    if sys.version_info[0] < 3:
        raise Exception("Must use Python3. Use python3 in your command line.")

    if log_file_path is not None:
        sys.stderr = open(log_file_path, 'a')

    def eprint(*args, **kwargs):
        print(*args, file=sys.stderr, **kwargs)

    #Must supply either image_path or file_with_image_paths as a source of images
    imageNamesAll = []
    imageTempNames = []
    tempImagePath = None
    if image_path is not None:

        tempImagePath = os.path.join(image_path, 'temp')
        if not os.path.exists(tempImagePath):
            os.makedirs(tempImagePath)

        imageNamesAll = glob.glob(os.path.join(image_path, '*.tif'))
        for idx, val in enumerate(imageNamesAll):
            imageTempNames.append(
                os.path.join(tempImagePath, 'temp' + str(idx) + '.tif'))

    elif file_with_image_paths is not None:
        with open(file_with_image_paths) as fp:
            for line in fp:
                imageName, tempImageName = line.strip().split(",")
                imageNamesAll.append(imageName)
                imageTempNames.append(tempImageName)
    else:
        if log_file_path is not None:
            eprint(
                "No input images given. use image_path OR file_with_image_paths args"
            )
        else:
            print(
                "No input images given. use image_path OR file_with_image_paths args"
            )
        os._exit(1)  # exit with an error status; os._exit must be called with a status code

    panelBandCorrection = {}
    panelNames = []
    if panel_image_path is not None:
        panelNames = glob.glob(os.path.join(panel_image_path, '*.tif'))
    elif file_with_panel_image_paths is not None:
        with open(file_with_panel_image_paths) as fp:
            for line in fp:
                imageName = line.strip()
                panelNames.append(imageName)
    else:
        if log_file_path is not None:
            eprint(
                "No panel input images given. use panel_image_path OR file_with_panel_image_paths args"
            )
        else:
            print(
                "No panel input images given. use panel_image_path OR file_with_panel_image_paths args"
            )
        #os._exit

    for imageName in panelNames:
        img = Image(imageName)
        band_name = img.band_name
        if img.auto_calibration_image:
            if log_file_path is not None:
                eprint("Found automatic calibration image")
            else:
                print("Found automatic calibration image")
        panel = Panel(img)

        if not panel.panel_detected():
            raise IOError("Panel Not Detected!")

        mean, std, num, sat_count = panel.raw()
        micasense_panel_calibration = panel.reflectance_from_panel_serial()
        radianceToReflectance = micasense_panel_calibration / mean
        panelBandCorrection[band_name] = radianceToReflectance
        if log_file_path is not None:
            eprint("Detected panel serial: {}".format(panel.serial))
            eprint("Extracted Panel Statistics:")
            eprint("Mean: {}".format(mean))
            eprint("Standard Deviation: {}".format(std))
            eprint("Panel Pixel Count: {}".format(num))
            eprint("Saturated Pixel Count: {}".format(sat_count))
            eprint('Panel Calibration: {:1.3f}'.format(
                micasense_panel_calibration))
            eprint('Radiance to reflectance conversion factor: {:1.3f}'.format(
                radianceToReflectance))
        else:
            print("Detected panel serial: {}".format(panel.serial))
            print("Extracted Panel Statistics:")
            print("Mean: {}".format(mean))
            print("Standard Deviation: {}".format(std))
            print("Panel Pixel Count: {}".format(num))
            print("Saturated Pixel Count: {}".format(sat_count))
            print('Panel Calibration: {:1.3f}'.format(
                micasense_panel_calibration))
            print('Radiance to reflectance conversion factor: {:1.3f}'.format(
                radianceToReflectance))

    imageNamesDict = {}
    for i in imageNamesAll:
        s = i.split("_")
        k = s[-1].split(".")
        if s[-2] not in imageNamesDict:
            imageNamesDict[s[-2]] = {}
        imageNamesDict[s[-2]][k[0]] = i

    imageNameCaptures = []
    capture_count = 0
    skip_count = 0
    image_count = 0
    skip_proceed = 1
    num_captures_proceed = 1
    for i in sorted(imageNamesDict.keys()):
        im = []
        if thin_images is not None:
            if image_count > 0 and skip_count < thin_images:
                skip_count = skip_count + 1
                skip_proceed = 0
            else:
                skip_count = 0
                skip_proceed = 1
            image_count = image_count + 1

        if skip_proceed == 1:
            if number_captures is not None:
                if capture_count < number_captures:
                    num_captures_proceed = 1
                else:
                    num_captures_proceed = 0
            if num_captures_proceed == 1:
                for j in sorted(imageNamesDict[i].keys()):
                    imageName = imageNamesDict[i][j]
                    img = Image(imageName)
                    # meta = img.meta
                    # flightImageRaw=plt.imread(imageName)
                    # flightRadianceImage, _, _, _ = msutils.raw_image_to_radiance(meta, flightImageRaw)
                    # flightReflectanceImage = flightRadianceImage * panelBandCorrection[img.band_name]
                    # flightUndistortedReflectance = msutils.correct_lens_distortion(meta, flightReflectanceImage)
                    # calibratedImage = imageNameToCalibratedImageName[imageName]
                    # print(flightUndistortedReflectance.shape)
                    # plt.imsave(calibratedImage, flightUndistortedReflectance, cmap='gray')
                    # calIm = Image(calibratedImage, meta = meta)
                    im.append(img)
                if len(im) > 0:
                    imageNameCaptures.append(im)
                    capture_count = capture_count + 1

    def enhance_image(rgb):
        gaussian_rgb = cv2.GaussianBlur(rgb, (9, 9), 10.0)
        gaussian_rgb[gaussian_rgb < 0] = 0
        gaussian_rgb[gaussian_rgb > 1] = 1
        unsharp_rgb = cv2.addWeighted(rgb, 1.5, gaussian_rgb, -0.5, 0)
        unsharp_rgb[unsharp_rgb < 0] = 0
        unsharp_rgb[unsharp_rgb > 1] = 1

        # Apply a gamma correction to make the render appear closer to what our eyes would see
        gamma = 1.4
        gamma_corr_rgb = unsharp_rgb**(1.0 / gamma)
        return (gamma_corr_rgb)

    captures = []
    # captureGPSDict = {}
    # counter = 0
    for i in imageNameCaptures:
        im = Capture(i)
        captures.append(im)
        # latitudes = []
        # longitudes = []
        # altitudes = []
        # for i,img in enumerate(im.images):
        #     latitudes.append(img.latitude)
        #     longitudes.append(img.longitude)
        #     altitudes.append(img.altitude)
        # captureGPSDict[counter] = [round(statistics.mean(latitudes), 4), round(statistics.mean(longitudes), 4), statistics.mean(altitudes)]
        # counter = counter + 1

    # GPSsorter = {}
    # for counter, loc in captureGPSDict.items():
    #     if loc[0] not in GPSsorter:
    #         GPSsorter[loc[0]] = {}
    #     GPSsorter[loc[0]][loc[1]] = counter

    imageCaptureSets = captures

    img_type = "reflectance"
    match_index = 0  # Index of the band
    max_alignment_iterations = 1000
    warp_mode = cv2.MOTION_HOMOGRAPHY  # MOTION_HOMOGRAPHY or MOTION_AFFINE. For Altum images only use HOMOGRAPHY
    pyramid_levels = None  # for images with RigRelatives, setting this to 0 or 1 may improve alignment

    if log_file_path is not None:
        eprint(img_type)
        eprint(
            "Aligning images. Depending on settings this can take from a few seconds to many minutes"
        )
    else:
        print(img_type)
        print(
            "Aligning images. Depending on settings this can take from a few seconds to many minutes"
        )

    warp_matrices = None
    if tempImagePath is not None:
        if os.path.exists(os.path.join(tempImagePath, 'capturealignment.pkl')):
            with open(os.path.join(tempImagePath, 'capturealignment.pkl'),
                      'rb') as f:
                warp_matrices, alignment_pairs = pickle.load(f)

    if warp_matrices is None:
        warp_matrices, alignment_pairs = imageutils.align_capture(
            captures[0],
            ref_index=match_index,
            max_iterations=max_alignment_iterations,
            warp_mode=warp_mode,
            pyramid_levels=pyramid_levels,
            multithreaded=True)

    if log_file_path is not None:
        eprint("Finished Aligning, warp matrices={}".format(warp_matrices))
    else:
        print("Finished Aligning, warp matrices={}".format(warp_matrices))

    if tempImagePath is not None:
        with open(os.path.join(tempImagePath, 'capturealignment.pkl'),
                  'wb') as f:
            pickle.dump([warp_matrices, alignment_pairs], f)

    images_to_stitch1 = []
    images_to_stitch2 = []
    count = 0
    for x in imageCaptureSets:
        cropped_dimensions, edges = imageutils.find_crop_bounds(
            x, warp_matrices, warp_mode=warp_mode)
        im_aligned = imageutils.aligned_capture(x,
                                                warp_matrices,
                                                warp_mode,
                                                cropped_dimensions,
                                                match_index,
                                                img_type=img_type)
        if log_file_path is not None:
            eprint(im_aligned.shape)
        else:
            print(im_aligned.shape)

        i1 = im_aligned[:, :, [0, 1, 2]]
        i1 = enhance_image(i1)
        image1 = np.uint8(i1 * 255)
        cv2.imwrite(imageTempNames[count], image1)
        images_to_stitch1.append(imageTempNames[count])
        count = count + 1

        i2 = im_aligned[:, :, [2, 3, 4]]
        i2 = enhance_image(i2)
        image2 = np.uint8(i2 * 255)
        cv2.imwrite(imageTempNames[count], image2)
        images_to_stitch2.append(imageTempNames[count])
        count = count + 1

        del cropped_dimensions
        del edges
        del im_aligned
        del i1
        del i2
        del image1
        del image2

    sep = " "
    images_string1 = sep.join(images_to_stitch1)
    images_string2 = sep.join(images_to_stitch2)
    num_images = len(images_to_stitch1)

    del imageNamesAll
    del imageTempNames
    del imageNamesDict
    del panelNames
    del imageNameCaptures
    del imageCaptureSets
    del images_to_stitch1
    del images_to_stitch2

    log_file_path_string = ''
    if log_file_path is not None:
        log_file_path_string = " --log_file '" + log_file_path + "'"
    stitchCmd = "stitching_multi " + images_string1 + " " + images_string2 + " --num_images " + str(
        num_images
    ) + " --result1 '" + final_rgb_output_path + "' --result2 '" + final_rnre_output_path + "' " + log_file_path_string
    # stitchCmd = "stitching_multi "+images_string1+" "+images_string2+" --num_images "+str(num_images)+" --result1 '"+final_rgb_output_path+"' --result2 '"+final_rnre_output_path+"' --log_file "+log_file_path+" --work_megapix "+work_megapix+" --ba_refine_mask "+ba_refine_mask
    # stitchCmd = "stitching_multi "+images_string1+" "+images_string2+" --num_images "+str(len(images_to_stitch1))+" --result1 '"+final_rgb_output_path+"' --result2 '"+final_rnre_output_path+"' --try_cuda yes --log_file "+log_file_path+" --work_megapix "+work_megapix
    if log_file_path is not None:
        eprint(stitchCmd)
        eprint(len(stitchCmd))
    else:
        print(stitchCmd)
        print(len(stitchCmd))
    os.system(stitchCmd)

    final_result_img1 = cv2.imread(final_rgb_output_path, cv2.IMREAD_UNCHANGED)
    final_result_img2 = cv2.imread(final_rnre_output_path,
                                   cv2.IMREAD_UNCHANGED)
    final_result_img1 = enhance_image(final_result_img1 / 255)
    final_result_img2 = enhance_image(final_result_img2 / 255)

    plt.imsave(final_rgb_output_path, final_result_img1)
    plt.imsave(final_rnre_output_path, final_result_img2)

    plt.imsave(output_path_band1, final_result_img1[:, :, 0], cmap='gray')
    plt.imsave(output_path_band2, final_result_img1[:, :, 1], cmap='gray')
    plt.imsave(output_path_band3, final_result_img1[:, :, 2], cmap='gray')
    plt.imsave(output_path_band4, final_result_img2[:, :, 1], cmap='gray')
    plt.imsave(output_path_band5, final_result_img2[:, :, 2], cmap='gray')
Code Example #17
def test_cropping(non_panel_altum_capture):
    warp_mode = cv2.MOTION_HOMOGRAPHY
    warp_matrices = non_panel_altum_capture.get_warp_matrices()
    cropped_dimensions,edges = imageutils.find_crop_bounds(non_panel_altum_capture,warp_matrices)
    assert(cropped_dimensions == expected_dimensions)
Code Example #18
def main(img_dir, out_dir, alt_thresh, ncores, start_count, scaling,
         irradiance, subset, layer, resolution):
    # Create output dir if it doesn't exist yet
    if not os.path.exists(out_dir):
        os.makedirs(out_dir)
    # Load all images as imageset
    imgset = imageset.ImageSet.from_directory(img_dir)
    meta_list = imgset.as_nested_lists()
    # Make feature collection of image centers and write it to tmp file
    point_list = [capture_to_point(c) for c in imgset.captures]
    feature_list = [{
        'type': 'Feature',
        'properties': {},
        'geometry': mapping(x)
    } for x in point_list]
    fc = {'type': 'FeatureCollection', 'features': feature_list}

    ###########################
    #### Optionally cut a spatial subset of the images
    ##########################
    if subset == 'interactive':
        # Write feature collection to tmp file, to make it accessible to the flask app
        # without messing up with the session context
        fc_tmp_file = os.path.join(tempfile.gettempdir(), 'micamac_fc.geojson')
        with open(fc_tmp_file, 'w') as dst:
            json.dump(fc, dst)
        # Select spatial subset interactively (available as feature in POLYGONS[0])
        app.run(debug=False, host='0.0.0.0')
        # Check which images intersect with the user defined polygon (list of booleans)
        poly_shape = shape(POLYGONS[0]['geometry'])
        in_polygon = [x.intersects(poly_shape) for x in point_list]
        print('Centroid of drawn polygon: %s' % poly_shape.centroid.wkt)
    elif subset is None:
        in_polygon = [True for x in point_list]
    elif os.path.exists(subset):
        with fiona.open(subset, layer) as src:
            poly_shape = shape(src[0]['geometry'])
        in_polygon = [x.intersects(poly_shape) for x in point_list]
        print('Centroid of supplied polygon: %s' % poly_shape.centroid.wkt)
    else:
        raise ValueError(
            '--subset must be interactive, the path to an OGR file or left empty'
        )

    ##################################
    ### Threshold on altitude
    ##################################
    if alt_thresh == 'interactive':
        alt_arr = np.array([x[3] for x in meta_list[0]])
        n, bins, patches = plt.hist(alt_arr, 100)
        plt.xlabel('Altitude')
        plt.ylabel('Freq')
        plt.show()
        # Ask user for alt threshold
        alt_thresh = input('Enter altitude threshold:')
        alt_thresh = float(alt_thresh)
        above_alt = [x[3] > alt_thresh for x in meta_list[0]]
    elif isinstance(alt_thresh, float):
        above_alt = [x[3] > alt_thresh for x in meta_list[0]]
    else:
        raise ValueError(
            '--alt_thresh argument must be a float or interactive')

    # Combine both boolean lists (altitude and in_polygon)
    is_valid = [x and y for x, y in zip(above_alt, in_polygon)]

    #########################
    ### Optionally retrieve irradiance values
    #########################
    if irradiance == 'panel':
        # Trying first capture, then last if doesn't work
        try:
            panel_cap = imgset.captures[0]
            # Auto-detect panel, perform visual check, retrieve corresponding irradiance values
            if panel_cap.detect_panels() != 5:
                raise AssertionError('Panels could not be detected')
            panel_cap.plot_panels()
            # Visual check and ask for user confirmation
            panel_check = input("Are panels properly detected ? (y/n):")
            if panel_check != 'y':
                raise AssertionError(
                    'User input, unsuitable detected panels !')
        except Exception as e:
            print(
                "Failed to use pre flight panels; trying post flight panel capture"
            )
            panel_cap = imgset.captures[-1]
            # Auto-detect panel, perform visual check, retrieve corresponding irradiance values
            if panel_cap.detect_panels() != 5:
                raise AssertionError('Panels could not be detected')
            panel_cap.plot_panels()
            # Visual check and ask for user confirmation
            panel_check = input("Are panels properly detected ? (y/n):")
            if panel_check != 'y':
                raise AssertionError(
                    'User input, unsuitable detected panels !')
        # Retrieve irradiance values from panels reflectance
        img_type = 'reflectance'
        irradiance_list = panel_cap.panel_irradiance()
    elif irradiance == 'dls':
        img_type = 'reflectance'
        irradiance_list = None
    elif irradiance == 'sixs':
        # Pick the middle capture, and use it to model clear sky irradiance using 6s
        middle_c = imgset.captures[round(len(imgset.captures) / 2)]
        img_type = 'reflectance'
        irradiance_list = modeled_irradiance_from_capture(middle_c)
    elif irradiance is None:
        img_type = None
        irradiance_list = None
    else:
        raise ValueError(
            'Incorrect value for --irradiance, must be panel, dls, sixs or left empty'
        )

    #########################
    ### Alignment parameters
    #########################
    # Select an arbitrary image, find warping and cropping parameters, apply them to the image,
    # assemble an RGB composite to perform a visual check
    alignment_confirmed = False
    while not alignment_confirmed:
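        # randint is inclusive on both ends: index 0 (often a panel capture) is skipped, but the last capture can still be drawn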
        warp_cap_ind = random.randint(1, len(imgset.captures) - 1)
        warp_cap = imgset.captures[warp_cap_ind]
        warp_matrices, alignment_pairs = imageutils.align_capture(
            warp_cap, max_iterations=100, multithreaded=True)
        print("Finished Aligning")
        # Retrieve cropping dimensions
        cropped_dimensions, edges = imageutils.find_crop_bounds(
            warp_cap, warp_matrices)
        warp_mode = alignment_pairs[0]['warp_mode']
        match_index = alignment_pairs[0]['ref_index']
        # Apply warping and cropping to the Capture used for finding the parameters to
        # later perform a visual check
        im_aligned = imageutils.aligned_capture(warp_cap,
                                                warp_matrices,
                                                warp_mode,
                                                cropped_dimensions,
                                                match_index,
                                                img_type='radiance')
        # Band order is Blue,Green,Red,NIR,RedEdge; stack Red,Green,Blue for a true-color display
        rgb_list = [
            imageutils.normalize(im_aligned[:, :, i]) for i in [2, 1, 0]
        ]
        plt.imshow(np.stack(rgb_list, axis=-1))
        plt.show()

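        # False-color composite from Green, NIR and Red edge, so the two bands missing from the RGB check are covered too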
        cir_list = [
            imageutils.normalize(im_aligned[:, :, i]) for i in [1, 3, 4]
        ]
        plt.imshow(np.stack(cir_list, axis=-1))
        plt.show()

        alignment_check = input("""
Are all bands properly aligned? (y/n)
    y: Bands are properly aligned, begin processing
    n: Bands are not properly aligned or the image is not representative of the whole set, try another image
""")
        if alignment_check.lower() == 'y':
            alignment_confirmed = True
        else:
            print('Trying another image')

    ##################
    ### Processing
    #################
    # Build iterator of captures
    cap_tuple_iterator = zip(imgset.captures, is_valid,
                             range(start_count,
                                   len(is_valid) + start_count))
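    # Each tuple is (capture, validity flag, output index); capture_to_files is assumed to unpack it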
    process_kwargs = {
        'warp_matrices': warp_matrices,
        'warp_mode': warp_mode,
        'cropped_dimensions': cropped_dimensions,
        'match_index': match_index,
        'out_dir': out_dir,
        'irradiance_list': irradiance_list,
        'img_type': img_type,
        'resolution': resolution,
        'scaling': scaling
    }
    # Run process function with multiprocessing
    pool = mp.Pool(ncores)
    pool.map(functools.partial(capture_to_files, **process_kwargs),
             cap_tuple_iterator)
    pool.close()
    pool.join()
Code example #19
0
def stackImages(FILE, imageNames, panelNames=None):
    import os
    import cv2
    import numpy as np
    import micasense.capture as capture
    import micasense.imageutils as imageutils
    # Allow this code to align both radiance and reflectance images; by passing no
    # value for panelNames above, radiance images will be used.
    # For panel images, efforts will be made to automatically extract the panel information,
    # but if the panel/firmware predates Altum 1.3.5 or RedEdge 5.1.7, the panel reflectance
    # will need to be set in the panel_reflectance_by_band variable.
    # Note: radiance images cannot be used to properly create the NDVI/NDRE images below.
    if panelNames is not None:
        panelCap = capture.Capture.from_filelist(panelNames)
    else:
        panelCap = None

    # Bind the instance to a new name so the micasense.capture module is not shadowed
    cap = capture.Capture.from_filelist(imageNames)

    if panelCap is not None:
        if panelCap.panel_albedo() is not None:
            panel_reflectance_by_band = panelCap.panel_albedo()
        else:
            panel_reflectance_by_band = [0.67, 0.69, 0.68, 0.61,
                                         0.67]  #RedEdge band_index order
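        # panel_irradiance() is assumed to convert mean panel radiance plus the known
        # panel reflectance into a per-band irradiance estimate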
        panel_irradiance = panelCap.panel_irradiance(panel_reflectance_by_band)
        img_type = "reflectance"
        cap.plot_undistorted_reflectance(panel_irradiance)
    else:
        if cap.dls_present():
            img_type = 'reflectance'
            #cap.plot_undistorted_reflectance(cap.dls_irradiance())
        else:
            img_type = "radiance"
            #cap.plot_undistorted_radiance()

    ## Alignment settings
    match_index = 1  # Index of the band
    max_alignment_iterations = 10
    warp_mode = cv2.MOTION_HOMOGRAPHY  # MOTION_HOMOGRAPHY or MOTION_AFFINE. For Altum images only use HOMOGRAPHY
    pyramid_levels = 0  # for images with RigRelatives, setting this to 0 or 1 may improve alignment

    print(
        "Aligning images. Depending on settings this can take from a few seconds to many minutes"
    )
    # Can potentially increase max_iterations for better results, but longer runtimes
    warp_matrices, alignment_pairs = imageutils.align_capture(
        cap,
        ref_index=match_index,
        max_iterations=max_alignment_iterations,
        warp_mode=warp_mode,
        pyramid_levels=pyramid_levels)
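    # The align_capture variant assumed here signals failure by returning -1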
    if warp_matrices == -1:
        return -1
    print("Finished Aligning, warp matrices={}".format(warp_matrices))

    cropped_dimensions, edges = imageutils.find_crop_bounds(
        cap, warp_matrices, warp_mode=warp_mode)
    im_aligned = imageutils.aligned_capture(cap,
                                            warp_matrices,
                                            warp_mode,
                                            cropped_dimensions,
                                            match_index,
                                            img_type=img_type)

    from osgeo import gdal
    rows, cols, bands = im_aligned.shape
    driver = gdal.GetDriverByName('GTiff')
    filename = FILE + "stacked"  # blue,green,red,nir,redEdge
    # `directory` was undefined in the original; assume outputs go next to the input images
    directory = os.path.dirname(imageNames[0])
    filename = os.path.join(directory, filename)
    outRaster = driver.Create(filename + ".tiff", cols, rows,
                              im_aligned.shape[2], gdal.GDT_UInt16)

    normalize = (img_type == 'radiance')  # normalize radiance images to fit within UInt16

    # Output a 'stack' in the same band order as RedEdge/Altum:
    # Blue,Green,Red,NIR,RedEdge[,Thermal]
    # Reflectance stacks are scaled so that 32767 = 100% reflectance, leaving headroom for specular reflections
    # Radiance stacks are normalized so that the scene maximum maps to 32767

    # NOTE: NIR and RedEdge are not in wavelength order!

    multispec_min = np.min(im_aligned[:, :, 1:5])
    multispec_max = np.max(im_aligned[:, :, 1:5])

    for i in range(0, 5):
        outband = outRaster.GetRasterBand(i + 1)
        if normalize:
            outdata = imageutils.normalize(im_aligned[:, :, i], multispec_min,
                                           multispec_max)
        else:
            outdata = im_aligned[:, :, i]
            outdata[outdata < 0] = 0
            outdata[outdata > 2] = 2

        outdata = outdata * 32767
        outdata[outdata < 0] = 0
        outdata[outdata > 65535] = 65535
        outband.WriteArray(outdata)
        outband.FlushCache()

    if im_aligned.shape[2] == 6:
        outband = outRaster.GetRasterBand(6)
        outdata = im_aligned[:, :,
                             5] * 100  # scale to centi-C to fit into uint16
        outdata[outdata < 0] = 0
        outdata[outdata > 65535] = 65535
        outband.WriteArray(outdata)
        outband.FlushCache()
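    # Dereferencing the dataset below flushes and closes the GTiff (standard GDAL idiom)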
    outRaster = None
    return 1
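
# Hypothetical usage sketch (paths and the filename prefix are placeholders):
#   import glob
#   imageNames = sorted(glob.glob('/data/0000SET/000/IMG_0001_*.tif'))
#   panelNames = sorted(glob.glob('/data/0000SET/000/IMG_0000_*.tif'))
#   stackImages('IMG_0001_', imageNames, panelNames)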