def test_Allignment(i):
    imagePath = os.path.join('.', 'img')
    outrgbPath = os.path.join('.', 'data', 'rgb', '{:04d}.png'.format(i))
    outcirPath = os.path.join('.', 'data', 'cir', '{:04d}.png'.format(i))
    imageNames = glob.glob(
        os.path.join(imagePath, 'IMG_{:04d}_*.tif'.format(i)))
    panelNames = glob.glob(os.path.join(imagePath, 'IMG_0000_*.tif'))
    print(imageNames)
    if len(panelNames) == 0:
        panelCap = None
    else:
        panelCap = cap.Capture.from_filelist(panelNames)
    capture = cap.Capture.from_filelist(imageNames)

    allignmat, havePrev = ReadAllignmentMatrix(".")
    if not havePrev:
        allignmat = GetAllignmentMatrix(capture)
        # include the capture number in the saved file name
        SaveAllignmentMatrix("a_mat_{}.txt".format(i), allignmat)
    im_aligned = AllignImage(allignmat, capture)
    print(capture.band_names())
    rgb_band_indices = [
        capture.band_names().index('Red'),
        capture.band_names().index('Green'),
        capture.band_names().index('Blue')
    ]
    cir_band_indices = [
        capture.band_names().index('NIR'),
        capture.band_names().index('Red'),
        capture.band_names().index('Green')
    ]
    im_display = np.zeros(
        (im_aligned.shape[0], im_aligned.shape[1], im_aligned.shape[2]),
        dtype=np.float32)

    im_min = np.percentile(im_aligned[:, :, rgb_band_indices].flatten(),
                           0.5)  # modify these percentiles to adjust contrast
    im_max = np.percentile(
        im_aligned[:, :, rgb_band_indices].flatten(),
        99.5)  # for many images, 0.5 and 99.5 are good values

    # use a separate loop variable so the capture-number argument i is not overwritten
    for band in rgb_band_indices:
        im_display[:, :, band] = imageutils.normalize(im_aligned[:, :, band],
                                                      im_min, im_max)

    rgb = im_display[:, :, rgb_band_indices]
    for band in cir_band_indices:
        im_display[:, :, band] = imageutils.normalize(im_aligned[:, :, band])
    cir = im_display[:, :, cir_band_indices]
    cv2.imshow("rgb", rgb)
    cv2.imshow("cir", cir)
    print(np.max(rgb))
    print(np.min(rgb))

    rgb = cv2.normalize(rgb, None, 0, 255, cv2.NORM_MINMAX, cv2.CV_8UC3)
    cir = cv2.normalize(cir, None, 0, 255, cv2.NORM_MINMAX, cv2.CV_8UC3)
    cv2.imwrite(outrgbPath, rgb)
    cv2.imwrite(outcirPath, cir)
    cv2.waitKey(10)
    return
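# Minimal driver sketch (an assumption, not from the original source): run the
# alignment for a range of capture numbers and close the preview windows afterwards.
if __name__ == "__main__":
    for capture_number in range(1, 11):  # hypothetical range of capture numbers
        test_Allignment(capture_number)
    cv2.destroyAllWindows()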
Example #2
    def save_capture_as_rgb(self,
                            outfilename,
                            gamma=1.4,
                            downsample=1,
                            white_balance='norm',
                            hist_min_percent=0.5,
                            hist_max_percent=99.5,
                            sharpen=True):
        rgb_band_indices = [2, 1, 0]

        if self.__aligned_capture is None:
            raise RuntimeError(
                "call Capture.create_aligned_capture prior to saving as RGB")
        im_display = np.zeros(
            (self.__aligned_capture.shape[0], self.__aligned_capture.shape[1],
             self.__aligned_capture.shape[2]),
            dtype=np.float32)

        im_min = np.percentile(
            self.__aligned_capture[:, :, rgb_band_indices].flatten(),
            hist_min_percent)  # modify these percentiles to adjust contrast
        im_max = np.percentile(
            self.__aligned_capture[:, :, rgb_band_indices].flatten(),
            hist_max_percent)  # for many images, 0.5 and 99.5 are good values

        for i in rgb_band_indices:
            # for rgb true color, we usually want to use the same min and max scaling across the 3 bands to
            # maintain the "white balance" of the calibrated image
            if white_balance == 'norm':
                im_display[:, :, i] = imageutils.normalize(
                    self.__aligned_capture[:, :, i], im_min, im_max)
            else:
                im_display[:, :, i] = imageutils.normalize(
                    self.__aligned_capture[:, :, i])

        rgb = im_display[:, :, rgb_band_indices]
        rgb = cv2.resize(rgb,
                         None,
                         fx=1 / downsample,
                         fy=1 / downsample,
                         interpolation=cv2.INTER_AREA)

        if sharpen:
            gaussian_rgb = cv2.GaussianBlur(rgb, (9, 9), 10.0)
            gaussian_rgb[gaussian_rgb < 0] = 0
            gaussian_rgb[gaussian_rgb > 1] = 1
            unsharp_rgb = cv2.addWeighted(rgb, 1.5, gaussian_rgb, -0.5, 0)
            unsharp_rgb[unsharp_rgb < 0] = 0
            unsharp_rgb[unsharp_rgb > 1] = 1
        else:
            unsharp_rgb = rgb

        # Apply a gamma correction to make the render appear closer to what our eyes would see
        if gamma != 0:
            gamma_corr_rgb = unsharp_rgb**(1.0 / gamma)
            imageio.imwrite(outfilename,
                            (255 * gamma_corr_rgb).astype('uint8'))
        else:
            imageio.imwrite(outfilename, (255 * unsharp_rgb).astype('uint8'))
Example #3
    def save_capture_as_rgb(self, out_file_name, gamma=1.4, downsample=1, white_balance='norm', hist_min_percent=0.5,
                            hist_max_percent=99.5, sharpen=True, rgb_band_indices=(2, 1, 0)):
        """
        Output the Images in the Capture object as RGB.
        :param out_file_name: str system file path
        :param gamma: float gamma correction
        :param downsample: int downsample for cv2.resize()
        :param white_balance: str 'norm' to normalize across bands using hist_min_percent and hist_max_percent.
            Else this parameter is ignored.
        :param hist_min_percent: float for min histogram stretch
        :param hist_max_percent: float for max histogram stretch
        :param sharpen: boolean, whether to apply an unsharp mask before writing
        :param rgb_band_indices: tuple of band indices giving the (red, green, blue) order
        """
        if self.__aligned_capture is None:
            raise RuntimeError("Call Capture.create_aligned_capture() prior to saving as RGB.")
        im_display = np.zeros(
            (self.__aligned_capture.shape[0], self.__aligned_capture.shape[1], self.__aligned_capture.shape[2]),
            dtype=np.float32)

        # modify these percentiles to adjust contrast. for many images, 0.5 and 99.5 are good values
        im_min = np.percentile(self.__aligned_capture[:, :, rgb_band_indices].flatten(), hist_min_percent)
        im_max = np.percentile(self.__aligned_capture[:, :, rgb_band_indices].flatten(), hist_max_percent)

        for i in rgb_band_indices:
            # for rgb true color, we usually want to use the same min and max scaling across the 3 bands to
            # maintain the "white balance" of the calibrated image
            if white_balance == 'norm':
                im_display[:, :, i] = imageutils.normalize(self.__aligned_capture[:, :, i], im_min, im_max)
            else:
                im_display[:, :, i] = imageutils.normalize(self.__aligned_capture[:, :, i])

        rgb = im_display[:, :, rgb_band_indices]
        rgb = cv2.resize(rgb, None, fx=1 / downsample, fy=1 / downsample, interpolation=cv2.INTER_AREA)

        if sharpen:
            gaussian_rgb = cv2.GaussianBlur(rgb, (9, 9), 10.0)
            gaussian_rgb[gaussian_rgb < 0] = 0
            gaussian_rgb[gaussian_rgb > 1] = 1
            unsharp_rgb = cv2.addWeighted(rgb, 1.5, gaussian_rgb, -0.5, 0)
            unsharp_rgb[unsharp_rgb < 0] = 0
            unsharp_rgb[unsharp_rgb > 1] = 1
        else:
            unsharp_rgb = rgb

        # Apply a gamma correction to make the render appear closer to what our eyes would see
        if gamma != 0:
            gamma_corr_rgb = unsharp_rgb ** (1.0 / gamma)
            imageio.imwrite(out_file_name, (255 * gamma_corr_rgb).astype('uint8'))
        else:
            imageio.imwrite(out_file_name, (255 * unsharp_rgb).astype('uint8'))
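# Usage sketch (an assumption, not part of the original class): load a capture,
# build the aligned stack that the method requires, then save the RGB render.
# create_aligned_capture() is the prerequisite named in the RuntimeError above;
# its arguments may differ between micasense versions, and the paths are hypothetical.
import glob
import os

import micasense.capture as capture

image_names = glob.glob(os.path.join('.', 'data', 'IMG_0001_*.tif'))
cap = capture.Capture.from_filelist(image_names)
cap.create_aligned_capture()
cap.save_capture_as_rgb('IMG_0001_rgb.png', gamma=1.4, downsample=2)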
Example #4
def proc_stack(i, warp_matrices, bndFolders, panel_irradiance):
    
    i.compute_reflectance(panel_irradiance) 
        #i.plot_undistorted_reflectance(panel_irradiance)  
    
    
    cropped_dimensions, edges = imageutils.find_crop_bounds(i, warp_matrices)
    
    im_aligned = imageutils.aligned_capture(i, warp_matrices,
                                            cv2.MOTION_HOMOGRAPHY,
                                            cropped_dimensions,
                                            None, img_type="reflectance")
    
    im_display = np.zeros((im_aligned.shape[0],im_aligned.shape[1],5), 
                          dtype=np.float32)
    
    rows, cols, bands = im_display.shape
    driver = gdal.GetDriverByName('GTiff')
    
    im = i.images[1].path
    hd, nm = os.path.split(im[:-6])

    filename = os.path.join(reflFolder, nm+'.tif') #blue,green,red,nir,redEdge
    #
    
    # use UInt16 since the band data below is scaled to the 0-65535 range
    outRaster = driver.Create(filename, cols, rows, 5, gdal.GDT_UInt16)
    normalize = False
    
    # Output a 'stack' in the same band order as RedEdge/Altum
    # Blue,Green,Red,NIR,RedEdge[,Thermal]
    
    # NOTE: NIR and RedEdge are not in wavelength order!
    
    i.compute_reflectance(panel_irradiance+[0])
    
    # use a separate loop variable so the capture object i is not overwritten
    for band in range(0,5):
        outband = outRaster.GetRasterBand(band+1)
        if normalize:
            outband.WriteArray(imageutils.normalize(im_aligned[:,:,band])*65535)
        else:
            outdata = im_aligned[:,:,band]
            outdata[outdata<0] = 0
            outdata[outdata>1] = 1
            
            outband.WriteArray(outdata*65535)
        outband.FlushCache()
    
    if im_aligned.shape[2] == 6:
        outband = outRaster.GetRasterBand(6)
        outdata = im_aligned[:,:,5] * 100 # scale to centi-C to fit into uint16
        outdata[outdata<0] = 0
        outdata[outdata>65535] = 65535
        outband.WriteArray(outdata)
        outband.FlushCache()
    outRaster = None
    
    cmd = ["exiftool", "-tagsFromFile", im,  "-file:all", "-iptc:all",
               "-exif:all",  "-xmp", "-Composite:all", filename, 
               "-overwrite_original"]
    call(cmd)
Example #5
def proc_imgs(i, warp_matrices, bndFolders, panel_irradiance, normalize=None):
    
    
#    for i in imgset.captures: 
    
    i.compute_reflectance(panel_irradiance) 
    #i.plot_undistorted_reflectance(panel_irradiance)  


    cropped_dimensions, edges = imageutils.find_crop_bounds(i, warp_matrices)
    
    im_aligned = imageutils.aligned_capture(i, warp_matrices,
                                            cv2.MOTION_HOMOGRAPHY,
                                            cropped_dimensions,
                                            None, img_type="reflectance")
    
    im_display = np.zeros((im_aligned.shape[0],im_aligned.shape[1],5), dtype=np.float32 )
    
    for iM in range(0,im_aligned.shape[2]):
        im_display[:,:,iM] =  imageutils.normalize(im_aligned[:,:,iM])*65535
    
    for k in range(0,im_display.shape[2]):
         im = i.images[k]
         hd, nm = os.path.split(im.path)
         # index with the loop variable k, not the capture object i
         outdata = im_aligned[:,:,k]
         outdata[outdata<0] = 0
         outdata[outdata>1] = 1
         
         outfile = os.path.join(bndFolders[k], nm)
         imageio.imwrite(outfile, outdata)
        
         cmd = ["exiftool", "-tagsFromFile", im.path,  "-file:all", "-iptc:all",
               "-exif:all",  "-xmp", "-Composite:all", outfile, 
               "-overwrite_original"]
         call(cmd)
Example #6
File: capture.py  Project: gdslab/p4m
    def save_thermal_over_rgb(self,
                              outfilename,
                              figsize=(30, 23),
                              lw_index=None,
                              hist_min_percent=0.2,
                              hist_max_percent=99.8):
        if self.__aligned_capture is None:
            raise RuntimeError(
                "call Capture.create_aligned_capture prior to saving as RGB")

        # by default we don't mask the thermal, since its native resolution is much lower than the MS bands
        if lw_index is None:
            lw_index = self.lw_indices()[0]
        masked_thermal = self.__aligned_capture[:, :, lw_index]

        im_display = np.zeros((self.__aligned_capture.shape[0],
                               self.__aligned_capture.shape[1], 3),
                              dtype=np.float32)
        rgb_band_indices = [
            self.band_names_lower().index('red'),
            self.band_names_lower().index('green'),
            self.band_names_lower().index('blue')
        ]

        # for rgb true color, we usually want to use the same min and max scaling across the 3 bands to
        # maintain the "white balance" of the calibrated image
        im_min = np.percentile(
            self.__aligned_capture[:, :, rgb_band_indices].flatten(),
            hist_min_percent)  # modify these percentiles to adjust contrast
        im_max = np.percentile(
            self.__aligned_capture[:, :, rgb_band_indices].flatten(),
            hist_max_percent)  # for many images, 0.5 and 99.5 are good values
        for dst_band, src_band in enumerate(rgb_band_indices):
            im_display[:, :, dst_band] = imageutils.normalize(
                self.__aligned_capture[:, :, src_band], im_min, im_max)

        # Compute a histogram
        min_display_therm = np.percentile(masked_thermal, hist_min_percent)
        max_display_therm = np.percentile(masked_thermal, hist_max_percent)

        fig, _ = plotutils.plot_overlay_withcolorbar(
            im_display,
            masked_thermal,
            figsize=figsize,
            title='Temperature over True Color',
            vmin=min_display_therm,
            vmax=max_display_therm,
            overlay_alpha=0.25,
            overlay_colormap='jet',
            overlay_steps=16,
            display_contours=True,
            contour_steps=16,
            contour_alpha=.4,
            contour_fmt="%.0fC",
            show=False)
        fig.savefig(outfilename)
Example #7
def main(imagePath, warp_matrices, alignment_pairs, panel_irradiance):
    import micasense.capture as capture
    
    for i in range(0,201):
            imageRoot = "IMG_%04i" % i
            outputRoot = imagePath.replace('Imagery\\','').replace('\\','_') + imageRoot
            imageSet = os.path.join(imagePath,imageRoot+'_*.tif')
            imageNames = glob.glob(imageSet)
            if not len(imageNames): continue #skip
        # try:
            imgCap = capture.Capture.from_filelist(imageNames)
            imgCap.compute_reflectance(panel_irradiance)
            
            dist_coeffs = []
            cam_mats = []
            # avoid reusing the outer loop variable i for the inner loops
            for img in imgCap.images:
                dist_coeffs.append(img.cv2_distortion_coeff())
                cam_mats.append(img.cv2_camera_matrix())

            cropped_dimensions = imageutils.find_crop_bounds(imgCap.images[0].size(), warp_matrices, dist_coeffs, cam_mats)
            im_aligned = imageutils.aligned_capture(warp_matrices, alignment_pairs, cropped_dimensions)
            im_display = np.zeros((im_aligned.shape[0],im_aligned.shape[1],5), dtype=np.float32 )

            for band in range(0,im_aligned.shape[2]):
                im_display[:,:,band] =  imageutils.normalize(im_aligned[:,:,band])

            rgb = im_display[:,:,[2,1,0]]
            cir = im_display[:,:,[3,2,1]]

            gaussian_rgb = cv2.GaussianBlur(rgb, (9,9), 10.0)
            gaussian_rgb[gaussian_rgb<0] = 0
            gaussian_rgb[gaussian_rgb>1] = 1
            unsharp_rgb = cv2.addWeighted(rgb, 1.5, gaussian_rgb, -0.5, 0)
            unsharp_rgb[unsharp_rgb<0] = 0
            unsharp_rgb[unsharp_rgb>1] = 1
            gamma = 1.4
            gamma_corr_rgb = unsharp_rgb**(1.0/gamma)

            # Output
            imtype = '.jpg' # or '.png'
            imageio.imwrite(os.path.join('.','Output','rgb',outputRoot+imtype), (255*gamma_corr_rgb).astype('uint8'))
            imageio.imwrite(os.path.join('.','Output','cir',outputRoot+imtype), (255*cir).astype('uint8'))

            from osgeo import gdal, gdal_array
            rows, cols, bands = im_display.shape
            driver = gdal.GetDriverByName('GTiff')
            outRaster = driver.Create(os.path.join('.','Output','5band',outputRoot+".tiff"), cols, rows, bands, gdal.GDT_Float32)

            for band in range(0,bands):
                outband = outRaster.GetRasterBand(band+1)
                outband.WriteArray(im_aligned[:,:,band])
                outband.FlushCache()

            outRaster = None
            im_aligned = None
            print ("processed " + imageSet)
Example #8
def proc_imgs_comp(i, warp_matrices, bndFolders, panel_irradiance):
    
    
    
    i.compute_reflectance(panel_irradiance) 
    #i.plot_undistorted_reflectance(panel_irradiance)  


    cropped_dimensions, edges = imageutils.find_crop_bounds(i, warp_matrices)
    
    im_aligned = imageutils.aligned_capture(i, warp_matrices,
                                            cv2.MOTION_HOMOGRAPHY,
                                            cropped_dimensions,
                                            None, img_type="reflectance")
    
    im_display = np.zeros((im_aligned.shape[0],im_aligned.shape[1],5), dtype=np.float32 )
    
    for iM in range(0,im_aligned.shape[2]):
        im_display[:,:,iM] =  imageutils.normalize(im_aligned[:,:,iM])
    
    rgb = im_display[:,:,[2,1,0]] 
    #cir = im_display[:,:,[3,2,1]] 
    RRENir = im_display[:,:,[4,3,2]] 
    
    imoot = [rgb, RRENir]
    imtags = ["RGB.tif", "RRENir.tif"]
    im = i.images[1]
    hd, nm = os.path.split(im.path[:-5])
    
    for ind, k in enumerate(bndFolders):
         
         img8 = bytescale(imoot[ind])
         
         outfile = os.path.join(k, nm+imtags[ind])
         
         imageio.imwrite(outfile, img8)
        
         cmd = ["exiftool", "-tagsFromFile", im.path,  "-file:all", "-iptc:all",
               "-exif:all",  "-xmp", "-Composite:all", outfile, 
               "-overwrite_original"]
         call(cmd)
Example #9
def correction(image_path, image_name, panel_path, panel_name):
    import micasense.capture as capture
    get_ipython().run_line_magic('load_ext', 'autoreload')
    get_ipython().run_line_magic('autoreload', '2')
    # input image location
    get_ipython().run_line_magic('matplotlib', 'inline')

    panelNames = None

    # This is an altum image with RigRelatives and a thermal band
    imageNames = glob.glob(os.path.join(image_path, image_name + '_*.tif'))
    panelNames = glob.glob(os.path.join(panel_path, panel_name + '_*.tif'))

    if panelNames:  # glob returns a list, which may be empty
        panelCap = capture.Capture.from_filelist(panelNames)
    else:
        panelCap = None

    capture = capture.Capture.from_filelist(imageNames)

    for img in capture.images:
        if img.rig_relatives is None:
            raise ValueError(
                "Images must have RigRelatives tags set which requires updated firmware and calibration. See the links in text above"
            )

    if panelCap is not None:
        if panelCap.panel_albedo() is not None:
            panel_reflectance_by_band = panelCap.panel_albedo()
        else:
            # RedEdge band_index order
            panel_reflectance_by_band = [0.67, 0.69, 0.68, 0.61, 0.67]
        panel_irradiance = panelCap.panel_irradiance(panel_reflectance_by_band)
        img_type = "reflectance"
        capture.plot_undistorted_reflectance(panel_irradiance)
    else:
        if False:  # capture.dls_present():
            img_type = 'reflectance'
            capture.plot_undistorted_reflectance(capture.dls_irradiance())
        else:
            img_type = "radiance"
            capture.plot_undistorted_radiance()

    # image alignment
    warp_mode = cv2.MOTION_HOMOGRAPHY
    warp_matrices = capture.get_warp_matrices()

    cropped_dimensions, edges = imageutils.find_crop_bounds(
        capture, warp_matrices)
    im_aligned = imageutils.aligned_capture(capture,
                                            warp_matrices,
                                            warp_mode,
                                            cropped_dimensions,
                                            None,
                                            img_type=img_type)

    print("warp_matrices={}".format(warp_matrices))

    rgb_band_indices = [2, 1, 0]

    # Create an empty normalized stack for viewing
    im_display = np.zeros(
        (im_aligned.shape[0], im_aligned.shape[1], capture.num_bands + 1),
        dtype=np.float32)

    # modify these percentiles to adjust contrast
    im_min = np.percentile(im_aligned[:, :, 0:2].flatten(), 0.1)
    # for many images, 0.5 and 99.5 are good values
    im_max = np.percentile(im_aligned[:, :, 0:2].flatten(), 99.9)

    for i in range(0, im_aligned.shape[2]):
        if img_type == 'reflectance':
            # for reflectance images we maintain white-balance by applying the same display scaling to all bands
            im_display[:, :, i] = imageutils.normalize(im_aligned[:, :, i],
                                                       im_min, im_max)
        elif img_type == 'radiance':
            # for radiance images we do an auto white balance since we don't know the input light spectrum,
            # stretching each display band histogram to its own min and max
            im_display[:, :, i] = imageutils.normalize(im_aligned[:, :, i])

    rgb = im_display[:, :, rgb_band_indices]

    # composite an RGB image
    # Create an enhanced version of the RGB render using an unsharp mask
    gaussian_rgb = cv2.GaussianBlur(rgb, (9, 9), 10.0)
    gaussian_rgb[gaussian_rgb < 0] = 0
    gaussian_rgb[gaussian_rgb > 1] = 1
    unsharp_rgb = cv2.addWeighted(rgb, 1.5, gaussian_rgb, -0.5, 0)
    unsharp_rgb[unsharp_rgb < 0] = 0
    unsharp_rgb[unsharp_rgb > 1] = 1

    # Apply a gamma correction to make the render appear closer to what our eyes would see
    gamma = 1.2
    gamma_corr_rgb = unsharp_rgb**(1.0 / gamma)
    plt.imshow(gamma_corr_rgb, aspect='equal')
    plt.axis('off')
    plt.show()
    # export the RGB image
    imtype = 'png'  # or 'jpg'
    imageio.imwrite(image_name + '_rgb.' + imtype,
                    (255 * gamma_corr_rgb).astype('uint8'))

    blue_band = capture.band_names_lower().index('blue')
    green_band = capture.band_names_lower().index('green')
    red_band = capture.band_names_lower().index('red')
    nir_band = capture.band_names_lower().index('nir')
    rededge_band = capture.band_names_lower().index('red edge')

    blue = im_aligned[:, :, blue_band]
    blue = blue.astype('float64')
    np.save(image_name + '_blue', blue)

    green = im_aligned[:, :, green_band]
    green = green.astype('float64')
    np.save(image_name + '_green', green)

    red = im_aligned[:, :, red_band]
    red = red.astype('float64')
    np.save(image_name + '_red', red)

    rededge = im_aligned[:, :, rededge_band]
    rededge = rededge.astype('float64')
    np.save(image_name + '_rededge', rededge)

    nir = im_aligned[:, :, nir_band]
    nir = nir.astype('float64')
    np.save(image_name + '_nir', nir)
    print("all band npy files saved")
    return
    get_ipython().run_line_magic('load_ext', 'autoreload')
    get_ipython().run_line_magic('autoreload', '2')
    # input image location
    get_ipython().run_line_magic('matplotlib', 'inline')

    panelNames = None

    # This is an altum image with RigRelatives and a thermal band
    imagePath = os.path.join('.', 'data')
    imageNames = glob.glob(os.path.join(imagePath, 'IMG_0134_*.tif'))
    panelNames = glob.glob(os.path.join(imagePath, 'IMG_0003_*.tif'))

    if panelNames:  # glob returns a list, which may be empty
        panelCap = capture.Capture.from_filelist(panelNames)
    else:
        panelCap = None

    capture = capture.Capture.from_filelist(imageNames)

    for img in capture.images:
        if img.rig_relatives is None:
            raise ValueError(
                "Images must have RigRelatives tags set which requires updated firmware and calibration. See the links in text above"
            )

    if panelCap is not None:
        if panelCap.panel_albedo() is not None:
            panel_reflectance_by_band = panelCap.panel_albedo()
        else:
            # RedEdge band_index order
            panel_reflectance_by_band = [0.67, 0.69, 0.68, 0.61, 0.67]
        panel_irradiance = panelCap.panel_irradiance(panel_reflectance_by_band)
        img_type = "reflectance"
        capture.plot_undistorted_reflectance(panel_irradiance)
    else:
        if False:  # capture.dls_present():
            img_type = 'reflectance'
            capture.plot_undistorted_reflectance(capture.dls_irradiance())
        else:
            img_type = "radiance"
            capture.plot_undistorted_radiance()

    # image alignment
    warp_mode = cv2.MOTION_HOMOGRAPHY
    warp_matrices = capture.get_warp_matrices()

    cropped_dimensions, edges = imageutils.find_crop_bounds(
        capture, warp_matrices)
    im_aligned = imageutils.aligned_capture(capture,
                                            warp_matrices,
                                            warp_mode,
                                            cropped_dimensions,
                                            None,
                                            img_type=img_type)

    print("warp_matrices={}".format(warp_matrices))

    rgb_band_indices = [2, 1, 0]

    # Create an empty normalized stack for viewing
    im_display = np.zeros(
        (im_aligned.shape[0], im_aligned.shape[1], capture.num_bands + 1),
        dtype=np.float32)

    # modify these percentiles to adjust contrast
    im_min = np.percentile(im_aligned[:, :, 0:2].flatten(), 0.1)
    # for many images, 0.5 and 99.5 are good values
    im_max = np.percentile(im_aligned[:, :, 0:2].flatten(), 99.9)

    for i in range(0, im_aligned.shape[2]):
        if img_type == 'reflectance':
            # for reflectance images we maintain white-balance by applying the same display scaling to all bands
            im_display[:, :, i] = imageutils.normalize(im_aligned[:, :, i],
                                                       im_min, im_max)
        elif img_type == 'radiance':
            # for radiance images we do an auto white balance since we don't know the input light spectrum,
            # stretching each display band histogram to its own min and max
            im_display[:, :, i] = imageutils.normalize(im_aligned[:, :, i])

    rgb = im_display[:, :, rgb_band_indices]

    # composite an RGB image
    # Create an enhanced version of the RGB render using an unsharp mask
    gaussian_rgb = cv2.GaussianBlur(rgb, (9, 9), 10.0)
    gaussian_rgb[gaussian_rgb < 0] = 0
    gaussian_rgb[gaussian_rgb > 1] = 1
    unsharp_rgb = cv2.addWeighted(rgb, 1.5, gaussian_rgb, -0.5, 0)
    unsharp_rgb[unsharp_rgb < 0] = 0
    unsharp_rgb[unsharp_rgb > 1] = 1

    # Apply a gamma correction to make the render appear closer to what our eyes would see
    gamma = 1.2
    gamma_corr_rgb = unsharp_rgb**(1.0 / gamma)
    plt.imshow(gamma_corr_rgb, aspect='equal')
    plt.axis('off')
    plt.show()
    # export the RGB image
    imtype = 'png'  # or 'jpg'
    imageio.imwrite('rgb.' + imtype, (255 * gamma_corr_rgb).astype('uint8'))

    blue_band = capture.band_names_lower().index('blue')
    green_band = capture.band_names_lower().index('green')
    red_band = capture.band_names_lower().index('red')
    nir_band = capture.band_names_lower().index('nir')
    rededge_band = capture.band_names_lower().index('red edge')

    blue = im_aligned[:, :, blue_band]
    blue = blue.astype('float64')
    np.save('blue', blue)

    green = im_aligned[:, :, green_band]
    green = green.astype('float64')
    np.save('green', green)

    red = im_aligned[:, :, red_band]
    red = red.astype('float64')
    np.save('red', red)

    rededge = im_aligned[:, :, rededge_band]
    rededge = rededge.astype('float64')
    np.save('rededge', rededge)

    nir = im_aligned[:, :, nir_band]
    nir = nir.astype('float64')
    np.save('nir', nir)
Example #10
def align_template(imAl, mx, reflFolder, ref_ind=rf):

    
    warp_matrices, alignment_pairs = imageutils.align_capture(imAl,
                                                              ref_index=ref_ind, 
                                                              warp_mode=cv2.MOTION_HOMOGRAPHY,
                                                              max_iterations=mx)
    for x,mat in enumerate(warp_matrices):
        print("Band {}:\n{}".format(x,mat))

    # cropped_dimensions is of the form:
    # (first column with overlapping pixels present in all images, 
    #  first row with overlapping pixels present in all images, 
    #  number of columns with overlapping pixels in all images, 
    #  number of rows with overlapping pixels in all images   )
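    # For illustration (a sketch, not original code), the tuple could be unpacked
    # and applied to an undistorted band as:
    #   left, top, w, h = cropped_dimensions
    #   cropped_band = undistorted_band[top:top + h, left:left + w]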
    dist_coeffs = []
    cam_mats = []
    # create lists of the distortion coefficients and camera matrices
    for img in imAl.images:
        dist_coeffs.append(img.cv2_distortion_coeff())
        cam_mats.append(img.cv2_camera_matrix())
        
    warp_mode = cv2.MOTION_HOMOGRAPHY #alignment_pairs[0]['warp_mode']
    match_index = alignment_pairs[0]['ref_index']
    
    cropped_dimensions, edges = imageutils.find_crop_bounds(imAl, 
                                                            warp_matrices,
                                                            warp_mode=cv2.MOTION_HOMOGRAPHY)
   # capture, warp_matrices, cv2.MOTION_HOMOGRAPHY, cropped_dimensions, None, img_type="reflectance",
    im_aligned = imageutils.aligned_capture(imAl, warp_matrices, warp_mode,
                                            cropped_dimensions, match_index,
                                            img_type="reflectance")
    
    im_display = np.zeros((im_aligned.shape[0],im_aligned.shape[1],5), dtype=np.float32 )
    
    for iM in range(0,im_aligned.shape[2]):
        im_display[:,:,iM] =  imageutils.normalize(im_aligned[:,:,iM])
        
    rgb = im_display[:,:,[2,1,0]] 
    cir = im_display[:,:,[3,2,1]] 
    grRE = im_display[:,:,[4,2,1]] 
    
    
    if args.plts:
        
        fig, axes = plt.subplots(1, 3, figsize=(16,16)) 
        axes[0].set_title("Red-Green-Blue Composite")
        axes[0].imshow(rgb) 
        axes[1].set_title("Color Infrared (CIR) Composite")
        axes[1].imshow(cir) 
        axes[2].set_title("Red edge-Green-Red (ReGR) Composite")
        axes[2].imshow(grRE) 
        plt.show()
    
    prevList = [rgb, cir, grRE]
    nmList = ['rgb.jpg', 'cir.jpg', 'grRE.jpg']
    names = [os.path.join(reflFolder, pv) for pv in nmList]
    
    for ind, p in enumerate(prevList):
        img8 = bytescale(p)
        imageio.imwrite(names[ind], img8)
    
    return warp_matrices, alignment_pairs#, dist_coeffs, cam_mats, cropped_dimensions
Example #11
def main(img_dir, out_dir, alt_thresh, ncores, start_count, scaling,
         irradiance, subset, layer, resolution):
    # Create output dir if it doesn't exist yet
    if not os.path.exists(out_dir):
        os.makedirs(out_dir)
    # Load all images as imageset
    imgset = imageset.ImageSet.from_directory(img_dir)
    meta_list = imgset.as_nested_lists()
    # Make feature collection of image centers and write it to tmp file
    point_list = [capture_to_point(c) for c in imgset.captures]
    feature_list = [{
        'type': 'Feature',
        'properties': {},
        'geometry': mapping(x)
    } for x in point_list]
    fc = {'type': 'FeatureCollection', 'features': feature_list}

    ###########################
    #### Optionally cut a spatial subset of the images
    ##########################
    if subset == 'interactive':
        # Write feature collection to tmp file, to make it accessible to the flask app
        # without messing up with the session context
        fc_tmp_file = os.path.join(tempfile.gettempdir(), 'micamac_fc.geojson')
        with open(fc_tmp_file, 'w') as dst:
            json.dump(fc, dst)
        # Select spatial subset interactively (available as feature in POLYGONS[0])
        app.run(debug=False, host='0.0.0.0')
        # Check which images intersect with the user defined polygon (list of booleans)
        poly_shape = shape(POLYGONS[0]['geometry'])
        in_polygon = [x.intersects(poly_shape) for x in point_list]
        print('Centroid of drawn polygon: %s' % poly_shape.centroid.wkt)
    elif subset is None:
        in_polygon = [True for x in point_list]
    elif os.path.exists(subset):
        with fiona.open(subset, layer) as src:
            poly_shape = shape(src[0]['geometry'])
        in_polygon = [x.intersects(poly_shape) for x in point_list]
        print('Centroid of supplied polygon: %s' % poly_shape.centroid.wkt)
    else:
        raise ValueError(
            '--subset must be interactive, the path to an OGR file or left empty'
        )

    ##################################
    ### Threshold on altitude
    ##################################
    if alt_thresh == 'interactive':
        alt_arr = np.array([x[3] for x in meta_list[0]])
        n, bins, patches = plt.hist(alt_arr, 100)
        plt.xlabel('Altitude')
        plt.ylabel('Freq')
        plt.show()
        # Ask user for alt threshold
        alt_thresh = input('Enter altitude threshold:')
        alt_thresh = float(alt_thresh)
        above_alt = [x[3] > alt_thresh for x in meta_list[0]]
    elif isinstance(alt_thresh, float):
        above_alt = [x[3] > alt_thresh for x in meta_list[0]]
    else:
        raise ValueError(
            '--alt_thresh argument must be a float or interactive')

    # Combine both boolean lists (altitude and in_polygon)
    is_valid = [x and y for x, y in zip(above_alt, in_polygon)]

    #########################
    ### Optionally retrieve irradiance values
    #########################
    if irradiance == 'panel':
        # Trying first capture, then last if doesn't work
        try:
            panel_cap = imgset.captures[0]
            # Auto-detect panel, perform visual check, retrieve corresponding irradiance values
            if panel_cap.detect_panels() != 5:
                raise AssertionError('Panels could not be detected')
            panel_cap.plot_panels()
            # Visual check and ask for user confirmation
            panel_check = input("Are panels properly detected ? (y/n):")
            if panel_check != 'y':
                raise AssertionError(
                    'User input, unsuitable detected panels !')
        except Exception as e:
            print(
                "Failed to use pre flight panels; trying post flight panel capture"
            )
            panel_cap = imgset.captures[-1]
            # Auto-detect panel, perform visual check, retrieve corresponding irradiance values
            if panel_cap.detect_panels() != 5:
                raise AssertionError('Panels could not be detected')
            panel_cap.plot_panels()
            # Visual check and ask for user confirmation
            panel_check = input("Are panels properly detected ? (y/n):")
            if panel_check != 'y':
                raise AssertionError(
                    'User input, unsuitable detected panels !')
        # Retrieve irradiance values from panels reflectance
        img_type = 'reflectance'
        irradiance_list = panel_cap.panel_irradiance()
    elif irradiance == 'dls':
        img_type = 'reflectance'
        irradiance_list = None
    elif irradiance == 'sixs':
        # Pick the middle capture, and use it to model clear sky irradiance using 6s
        middle_c = imgset.captures[round(len(imgset.captures) / 2)]
        img_type = 'reflectance'
        irradiance_list = modeled_irradiance_from_capture(middle_c)
    elif irradiance is None:
        img_type = None
        irradiance_list = None
    else:
        raise ValueError(
            'Incorrect value for --irradiance, must be panel, dls, sixs or left empty'
        )

    #########################
    ### Alignment parameters
    #########################
    # Select an arbitrary image, find warping and croping parameters, apply to image,
    # assemble a rgb composite to perform visual check
    alignment_confirmed = False
    while not alignment_confirmed:
        warp_cap_ind = random.randint(1, len(imgset.captures) - 1)
        warp_cap = imgset.captures[warp_cap_ind]
        warp_matrices, alignment_pairs = imageutils.align_capture(
            warp_cap, max_iterations=100, multithreaded=True)
        print("Finished Aligning")
        # Retrieve cropping dimensions
        cropped_dimensions, edges = imageutils.find_crop_bounds(
            warp_cap, warp_matrices)
        warp_mode = alignment_pairs[0]['warp_mode']
        match_index = alignment_pairs[0]['ref_index']
        # Apply warping and cropping to the Capture used for finding the parameters to
        # later perform a visual check
        im_aligned = imageutils.aligned_capture(warp_cap,
                                                warp_matrices,
                                                warp_mode,
                                                cropped_dimensions,
                                                match_index,
                                                img_type='radiance')
        rgb_list = [
            imageutils.normalize(im_aligned[:, :, i]) for i in [0, 1, 2]
        ]
        plt.imshow(np.stack(rgb_list, axis=-1))
        plt.show()

        cir_list = [
            imageutils.normalize(im_aligned[:, :, i]) for i in [1, 3, 4]
        ]
        plt.imshow(np.stack(cir_list, axis=-1))
        plt.show()

        alignment_check = input("""
Are all bands properly aligned? (y/n)
    y: Bands are properly aligned, begin processing
    n: Bands are not properly aligned or image is not representative of the whole set, try another image
""")
        if alignment_check.lower() == 'y':
            alignment_confirmed = True
        else:
            print('Trying another image')

    ##################
    ### Processing
    #################
    # Build iterator of captures
    cap_tuple_iterator = zip(imgset.captures, is_valid,
                             range(start_count,
                                   len(is_valid) + start_count))
    process_kwargs = {
        'warp_matrices': warp_matrices,
        'warp_mode': warp_mode,
        'cropped_dimensions': cropped_dimensions,
        'match_index': match_index,
        'out_dir': out_dir,
        'irradiance_list': irradiance_list,
        'img_type': img_type,
        'resolution': resolution,
        'scaling': scaling
    }
    # Run process function with multiprocessing
    pool = mp.Pool(ncores)
    pool.map(functools.partial(capture_to_files, **process_kwargs),
             cap_tuple_iterator)
Example #12
                                                 cam_mats)

# ## Visualize Aligned Images
#
# Once the transformation has been found, it can be verified by compositing the aligned images to check alignment. The image 'stack' containing all bands can also be exported to a multi-band TIFF file for viewing in external software such as QGIS (a sketch of that export follows the composite preview below).  Useful composites are a naturally colored RGB as well as color infrared, or CIR.

# In[ ]:

im_aligned = imageutils.aligned_capture(warp_matrices, alignment_pairs,
                                        cropped_dimensions)
# Create a normalized stack for viewing
im_display = np.zeros((im_aligned.shape[0], im_aligned.shape[1], 5),
                      dtype=np.float32)

for i in range(0, im_aligned.shape[2]):
    im_display[:, :, i] = imageutils.normalize(im_aligned[:, :, i])

rgb = im_display[:, :, [2, 1, 0]]
cir = im_display[:, :, [3, 2, 1]]
fig, axes = plt.subplots(1, 2, figsize=(16, 16))
axes[0].set_title("Red-Green-Blue Composite")
axes[0].imshow(rgb)
axes[1].set_title("Color Infrared (CIR) Composite")
axes[1].imshow(cir)
plt.show()
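# Sketch of the multi-band TIFF export mentioned above, modeled on the GDAL
# pattern used in the other examples on this page; the output file name is an assumption.
from osgeo import gdal

rows, cols, bands = im_aligned.shape
driver = gdal.GetDriverByName('GTiff')
outRaster = driver.Create('aligned_stack.tiff', cols, rows, bands, gdal.GDT_Float32)
for b in range(bands):
    outband = outRaster.GetRasterBand(b + 1)
    outband.WriteArray(im_aligned[:, :, b])
    outband.FlushCache()
outRaster = None  # closing the dataset flushes it to disk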

# ## Image Enhancement
#
# There are many techniques for image enhancement, but one which is commonly used to improve the visual sharpness of imagery is the unsharp mask.  Here we apply an unsharp mask to the RGB image to improve the visualization, and then apply a gamma curve to make the darkest areas brighter.

# In[ ]:
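# Sketch of the enhancement step described above (not the original cell contents):
# an unsharp mask followed by a gamma curve, following the pattern used in the
# other examples on this page; assumes cv2 and matplotlib.pyplot are imported as plt.
gaussian_rgb = cv2.GaussianBlur(rgb, (9, 9), 10.0)
gaussian_rgb[gaussian_rgb < 0] = 0
gaussian_rgb[gaussian_rgb > 1] = 1
unsharp_rgb = cv2.addWeighted(rgb, 1.5, gaussian_rgb, -0.5, 0)
unsharp_rgb[unsharp_rgb < 0] = 0
unsharp_rgb[unsharp_rgb > 1] = 1

gamma = 1.4  # values > 1 brighten the darker areas
gamma_corr_rgb = unsharp_rgb ** (1.0 / gamma)
plt.imshow(gamma_corr_rgb)
plt.show()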
Example #13
def proc_imgs_comp(i, warp_matrices, bndFolders, panel_irradiance, rf,
                   warp_md):

    i.compute_reflectance(panel_irradiance)
    #i.plot_undistorted_reflectance(panel_irradiance)

    cropped_dimensions, edges = imageutils.find_crop_bounds(i, warp_matrices)

    im_aligned = imageutils.aligned_capture(i,
                                            warp_matrices,
                                            warp_md,
                                            cropped_dimensions,
                                            match_index=rf,
                                            img_type="reflectance")

    im_display = np.zeros((im_aligned.shape[0], im_aligned.shape[1], 5),
                          dtype=np.float32)

    for iM in range(0, im_aligned.shape[2]):
        im_display[:, :,
                   iM] = imageutils.normalize(im_aligned[:, :, iM]) * 32768

    rgb = im_display[:, :, [2, 1, 0]]
    #cir = im_display[:,:,[3,2,1]]
    RRENir = im_display[:, :, [4, 3, 2]]

    #    cir = im_display[:,:,[3,2,1]]
    #
    #    grRE = im_display[:,:,[4,2,1]]
    #
    #    imoot = [rgb, RRENir]

    del im_display

    imtags = ["RGB.tif", "RRENir.tif"]  #, "GRNir.tif", "GRRE.tif"]
    im = i.images[1]
    hd, nm = os.path.split(im.path[:-5])

    #cmdList = []

    def _writeim(image, folder, nametag, im):

        #for ind, k in enumerate(bndFolders):
        #img8 = bytescale(imoot[ind])
        #imgre = exposure.rescale_intensity(image,  out_range='uint16')

        #with warnings.catch_warnings():
        #warnings.simplefilter("ignore")
        img16 = np.uint16(np.round(image, decimals=0))
        del image
        outFile = os.path.join(folder, nm + nametag)
        #imageio.imwrite(outfile, img8)

        #imOut = Image.fromarray(img16)

        #imOut.save(outFile)
        imageio.imwrite(outFile, img16)

        del img16
        cmd = [
            "exiftool", "-tagsFromFile", im.path, "-file:all", "-iptc:all",
            "-exif:all", "-xmp", "-Composite:all", outFile,
            "-overwrite_original"
        ]
        call(cmd)

    _writeim(rgb, bndFolders[0], imtags[0], im)
    del rgb
    _writeim(RRENir, bndFolders[1], imtags[1], im)
    del RRENir  #,
def stackImages(FILE, imageNames, panelNames=None):
    import os, glob
    import micasense.capture as capture
    import cv2
    import numpy as np
    import matplotlib.pyplot as plt
    import micasense.imageutils as imageutils
    import micasense.plotutils as plotutils
    # Allow this code to align both radiance and reflectance images; by excluding
    # a definition for panelNames above, radiance images will be used.
    # For panel images, efforts will be made to automatically extract the panel information,
    # but if the panel/firmware predates Altum 1.3.5 / RedEdge 5.1.7, the panel reflectance
    # will need to be set in the panel_reflectance_by_band variable.
    # Note: radiance images cannot be used to properly create NDVI/NDRE images below.
    if panelNames is not None:
        panelCap = capture.Capture.from_filelist(panelNames)
    else:
        panelCap = None

    capture = capture.Capture.from_filelist(imageNames)

    if panelCap is not None:
        if panelCap.panel_albedo() is not None:
            panel_reflectance_by_band = panelCap.panel_albedo()
        else:
            panel_reflectance_by_band = [0.67, 0.69, 0.68, 0.61,
                                         0.67]  #RedEdge band_index order
        panel_irradiance = panelCap.panel_irradiance(panel_reflectance_by_band)
        img_type = "reflectance"
        capture.plot_undistorted_reflectance(panel_irradiance)
    else:
        if capture.dls_present():
            img_type = 'reflectance'
            #capture.plot_undistorted_reflectance(capture.dls_irradiance())
        else:
            img_type = "radiance"
            #capture.plot_undistorted_radiance()

    ## Alignment settings
    match_index = 1  # Index of the band to which all other bands are matched
    max_alignment_iterations = 10
    warp_mode = cv2.MOTION_HOMOGRAPHY  # MOTION_HOMOGRAPHY or MOTION_AFFINE. For Altum images only use HOMOGRAPHY
    pyramid_levels = 0  # for images with RigRelatives, setting this to 0 or 1 may improve alignment

    print(
        "Aligning images. Depending on settings this can take from a few seconds to many minutes"
    )
    # Can potentially increase max_iterations for better results, but longer runtimes
    warp_matrices, alignment_pairs = imageutils.align_capture(
        capture,
        ref_index=match_index,
        max_iterations=max_alignment_iterations,
        warp_mode=warp_mode,
        pyramid_levels=pyramid_levels)
    if warp_matrices == -1:
        return -1
    print("Finished Aligning, warp matrices={}".format(warp_matrices))

    cropped_dimensions, edges = imageutils.find_crop_bounds(
        capture, warp_matrices, warp_mode=warp_mode)
    im_aligned = imageutils.aligned_capture(capture,
                                            warp_matrices,
                                            warp_mode,
                                            cropped_dimensions,
                                            match_index,
                                            img_type=img_type)

    # Create a normalized stack for viewing
    im_display = np.zeros(
        (im_aligned.shape[0], im_aligned.shape[1], im_aligned.shape[2]),
        dtype=np.float32)

    from osgeo import gdal, gdal_array
    rows, cols, bands = im_display.shape
    driver = gdal.GetDriverByName('GTiff')
    filename = FILE + "stacked"  #blue,green,red,nir,redEdge
    filename = os.path.join(directory, filename)
    outRaster = driver.Create(filename + ".tiff", cols, rows,
                              im_aligned.shape[2], gdal.GDT_UInt16)

    normalize = (img_type == 'radiance')  # normalize radiance images to fit within UInt16

    # Output a 'stack' in the same band order as RedEdge/Altum
    # Blue,Green,Red,NIR,RedEdge[,Thermal]
    # reflectance stacks are output with 32768=100% reflectance to provide some overhead for specular reflections
    # radiance stacks are output with 65535=100% radiance to provide some overhead for specular reflections

    # NOTE: NIR and RedEdge are not in wavelength order!

    multispec_min = np.min(im_aligned[:, :, 1:5])
    multispec_max = np.max(im_aligned[:, :, 1:5])

    for i in range(0, 5):
        outband = outRaster.GetRasterBand(i + 1)
        if normalize:
            outdata = imageutils.normalize(im_aligned[:, :, i], multispec_min,
                                           multispec_max)
        else:
            outdata = im_aligned[:, :, i]
            outdata[outdata < 0] = 0
            outdata[outdata > 2] = 2

        outdata = outdata * 32767
        outdata[outdata < 0] = 0
        outdata[outdata > 65535] = 65535
        outband.WriteArray(outdata)
        outband.FlushCache()

    if im_aligned.shape[2] == 6:
        outband = outRaster.GetRasterBand(6)
        outdata = im_aligned[:, :,
                             5] * 100  # scale to centi-C to fit into uint16
        outdata[outdata < 0] = 0
        outdata[outdata > 65535] = 65535
        outband.WriteArray(outdata)
        outband.FlushCache()
    outRaster = None
    return 1
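# Usage sketch (an assumption, not part of the original source): the function above
# reads the module-level 'directory' variable for its output location, so it is set
# here as well; the capture numbers and paths are hypothetical.
import glob
import os

directory = os.path.join('.', 'data')
imageNames = glob.glob(os.path.join(directory, 'IMG_0010_*.tif'))
panelNames = glob.glob(os.path.join(directory, 'IMG_0000_*.tif'))
# pass None rather than an empty list when no panel capture is found
if stackImages('IMG_0010_', imageNames, panelNames or None) == -1:
    print('Alignment failed for IMG_0010')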