Example #1
    def __init__(self, *args, **kwargs):
        super(LfpDevignetter, self).__init__(*args, **kwargs)

        self._wht_img = np.ones(
            self._lfp_img.shape) if self._wht_img is None else self._wht_img

        # decide whether to divide by the raw white image or by fitted values
        self.noise_lev = kwargs['noise_lev'] if 'noise_lev' in kwargs else None
        self.noise_th = 0.05
        self.patch_mode = False

        # add noise
        self.test = False
        if self.test:
            self._wht_img += np.random.normal(0, .15, self._wht_img.shape)

        # white balance
        if len(self._wht_img.shape) == 3:
            # balance RGB channels in white image
            self._wht_img = rgb2gry(
                self._wht_img
            ) if self._wht_img.shape[2] == 3 else self._wht_img

        # check for same dimensionality
        self._wht_img = self._wht_img if len(self._wht_img.shape) == len(
            self._lfp_img.shape) else rgb2gry(self._wht_img)
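
Since several of these examples hinge on rgb2gry reducing an RGB array to luminance, here is a minimal stand-in sketch of that reduction and of the dimensionality fallback used in the constructor above. The BT.601 weights and the trailing singleton channel axis are assumptions inferred from the `[..., 0]` indexing seen throughout this page, not plenopticam's actual implementation.

import numpy as np

def rgb2gry_stub(rgb):
    # weighted channel sum (assumed BT.601 luma), keeping a singleton channel axis
    return (rgb @ np.array([.299, .587, .114]))[..., np.newaxis]

lfp_img = np.zeros((8, 8))          # placeholder 2-D light-field image
wht_img = np.ones((8, 8, 3))        # placeholder RGB white image
if wht_img.ndim != lfp_img.ndim:
    wht_img = rgb2gry_stub(wht_img)[..., 0]   # reduce to H x W to match lfp_img
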
Example #2
    def export_vp_stack(self, type='png', downscale=None):
        """ write viewpoint images stitched to together in a single image """

        # print status
        self.sta.status_msg('Write viewpoint image stack',
                            self.cfg.params[self.cfg.opt_prnt])
        self.sta.progress(None, self.cfg.params[self.cfg.opt_prnt])

        # downscale image
        downscale = True if downscale is None else downscale
        views_stacked_img = misc.img_resize(self.views_stacked_img.copy(), 1 / self._M) \
            if downscale else self.views_stacked_img.copy()

        # normalization
        central_gry = rgb2gry(self.central_view)
        p_lo = np.percentile(central_gry, 0.05)
        p_hi = np.percentile(central_gry, 99.995)
        views_stacked_img = misc.Normalizer(views_stacked_img,
                                            min=p_lo,
                                            max=p_hi).uint8_norm()

        # export all viewpoints in single image
        views_stacked_path = os.path.join(
            self.cfg.exp_path, 'views_stacked_img_' + str(self._M) + 'px')
        misc.save_img_file(views_stacked_img,
                           file_path=views_stacked_path,
                           file_type=type)

        self.sta.progress(100, self.cfg.params[self.cfg.opt_prnt])

        return True
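
The export above relies on misc.Normalizer for the percentile stretch before writing to disk. A rough standalone sketch of that clipping-and-rescaling step follows; the clip-to-[p_lo, p_hi] behaviour is an assumption about what uint8_norm() does, not a documented guarantee.

import numpy as np

img = np.random.rand(64, 64)
p_lo, p_hi = np.percentile(img, 0.05), np.percentile(img, 99.995)
img_u8 = (np.clip((img - p_lo) / (p_hi - p_lo), 0, 1) * 255).astype(np.uint8)
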
Example #3
def blur_metric(img_tile):
    """ img_tile : cropped image """

    img = rgb2gry(img_tile)[..., 0] if len(img_tile.shape) == 3 else img_tile
    y, x = img.shape

    magnitude = np.abs(fftpack.fft2(img))
    magnitudeCrop = magnitude[:int(np.ceil(y / 2)), :int(np.ceil(x / 2))]
    #figure, imshow(magnitude,[0 1000]), colormap gray # magnitude

    F2 = fftpack.fftshift(magnitude)
    psd2D = np.abs(F2)**2

    #plt.figure(1)
    #plt.imshow(psd2D/np.percentile(psd2D, 95))
    #plt.show()

    # total energy
    TE = sum(sum(magnitudeCrop**2))

    # high frequency energy
    freq_bounds = (int(np.ceil(y / 500)), int(np.ceil(x / 500)))
    HE = TE - sum(sum(magnitudeCrop[:freq_bounds[0], :freq_bounds[1]]**2))

    # energy ratio (Sharpness)
    S = HE / TE

    return S
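
A quick sanity check is to feed the metric a sharp and a smoothed grayscale tile; the blurred tile should yield a noticeably smaller S. The call below assumes blur_metric as defined above together with numpy as np and the fftpack import from its module.

from scipy.ndimage import gaussian_filter

tile = np.random.rand(128, 128)                 # noise-like tile, rich in high frequencies
print(blur_metric(tile))                        # sharp input -> larger energy ratio S
print(blur_metric(gaussian_filter(tile, 3)))    # low-pass filtered input -> smaller S
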
Example #4
    def __init__(self, img, centroids, cfg=None, sta=None):

        # input variables
        self._img = Normalizer(rgb2gry(img.copy())).uint8_norm()
        self._centroids = np.asarray(centroids)
        self.cfg = cfg if cfg is not None else PlenopticamConfig()
        self.sta = sta if sta is not None else PlenopticamStatus()
Example #5
def michelson_contrast(img_tile):
    """ https://colorusage.arc.nasa.gov/luminance_cont.php """

    #lum_tile = misc.yuv_conv(img_tile)[..., 0]
    lum_tile = rgb2gry(img_tile)[..., 0]

    c_m = (lum_tile.max() - lum_tile.min()) / (lum_tile.max() + lum_tile.min())

    return c_m
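
The ratio above is the classic Michelson contrast, C_M = (L_max - L_min) / (L_max + L_min). A standalone restatement on a plain luminance tile (bypassing rgb2gry) is shown below for illustration.

import numpy as np

lum = np.linspace(0.2, 0.8, 64).reshape(8, 8)   # synthetic luminance tile
c_m = (lum.max() - lum.min()) / (lum.max() + lum.min())
print(c_m)                                      # (0.8 - 0.2) / (0.8 + 0.2) = 0.6
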
Example #6
def color_channel_adjustment(
        img_l: np.ndarray = None,
        img_r: np.ndarray = None) -> (np.ndarray, np.ndarray):
    """
    Validate that the channels of a stereo image pair match and reduce them to monochromatic channel information

    :param img_l: left image
    :param img_r: right image
    :return: img_l, img_r reduced to single-channel arrays of size H x W
    """

    if len(img_l.shape) == 3 and len(img_r.shape) == 3:
        img_l, img_r = rgb2gry(img_l)[..., 0], rgb2gry(img_r)[..., 0]
    elif len(img_l.shape) == 2 and len(img_r.shape) == 2:
        pass
    else:
        raise Exception('Image color channel mismatch')

    return img_l, img_r
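
A hedged call sketch for the validator above, assuming the function and its rgb2gry import are in scope: matched RGB inputs are reduced to single-channel arrays, while mixing an RGB input with a grayscale one raises the mismatch exception.

import numpy as np

img_l, img_r = np.random.rand(10, 12, 3), np.random.rand(10, 12, 3)
mono_l, mono_r = color_channel_adjustment(img_l, img_r)   # both become H x W
assert mono_l.shape == (10, 12)
# pairing a 3-channel left image with a 2-D right image raises the mismatch Exception
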
Example #7
    def auto_hist_align(img, ref_img, opt=None):

        if opt:
            p_lo, p_hi = (0.005, 99.9)  # (0.001, 99.999)
            min_perc = np.percentile(rgb2gry(ref_img), p_lo)
            max_perc = np.percentile(ref_img, p_hi)
        else:
            p_lo, p_hi = (0.5, 99.9)
            min_perc = np.percentile(ref_img, p_lo)
            max_perc = np.percentile(ref_img, p_hi)

        img = misc.Normalizer(img, min=min_perc, max=max_perc).type_norm()

        return img
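
The two branches differ mainly in the percentile pair used for the reference bounds. A compact standalone restatement is sketched below, with a plain clip-and-rescale standing in for misc.Normalizer.type_norm() (that equivalence is an assumption).

import numpy as np

def hist_align_stub(img, ref_img, opt=False):
    # tighter low percentile when opt is set, mirroring the branches above
    p_lo, p_hi = (0.005, 99.9) if opt else (0.5, 99.9)
    lo, hi = np.percentile(ref_img, p_lo), np.percentile(ref_img, p_hi)
    return np.clip((img - lo) / (hi - lo), 0, 1)

aligned = hist_align_stub(np.random.rand(64, 64), np.random.rand(64, 64), opt=True)
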
Example #8
    def estimate_gamma(self, img: np.ndarray = None) -> float:
        """ set gamma value"""

        img = self._img if img is None else np.asarray(img, dtype='float64')

        # extract luminance
        lum = rgb2gry(img)

        # normalize
        lum /= lum.max()

        self._gam = 1 / np.log(np.mean(lum / lum.max())) / np.log(.5)

        return self._gam
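
The estimator above derives gamma from the mean of the normalized luminance. For comparison, the textbook mid-gray rule picks gamma so that the mean maps exactly to 0.5; the sketch below shows that common alternative, which is not necessarily the constant the code above produces.

import numpy as np

lum = np.random.rand(32, 32) * 0.8 + 0.1
lum /= lum.max()
gamma = np.log(0.5) / np.log(lum.mean())     # mean(lum) ** gamma == 0.5 by construction
assert abs(lum.mean() ** gamma - 0.5) < 1e-9
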
Example #9
    def estimate_gamma(self, img: np.ndarray = None) -> float:

        img = self._img if img is None else np.asarray(img, dtype='float64')

        # extract luminance
        lum = rgb2gry(img)

        # normalize
        lum /= lum.max()

        #self._gam = 1/np.log(img.mean())/np.log((img.max()-img.min())/2)
        #self._gam = -.3/np.log10(np.mean(img/img.max()))
        self._gam = 1 / np.log(np.mean(lum / lum.max())) / np.log(.5)

        return self._gam
Example #10
    def _estimate_noise_level(self):
        """ estimate white image noise level """

        # print status
        self.sta.status_msg('Estimate white image noise level',
                            self.cfg.params[self.cfg.opt_prnt])
        self.sta.progress(None, self.cfg.params[self.cfg.opt_prnt])

        M = np.mean(self.cfg.calibs[self.cfg.ptc_mean])
        lp_kernel = misc.create_gauss_kernel(length=M)
        if len(self._wht_img.shape) == 3:
            bw_img = rgb2gry(self._wht_img)[..., 0] \
                if self._wht_img.shape[2] == 3 else self._wht_img[..., 0]
        else:
            bw_img = self._wht_img
        flt_img = convolve2d(bw_img, lp_kernel, 'same')

        self.sta.progress(100, self.cfg.params[self.cfg.opt_prnt])

        return np.std(bw_img - flt_img)
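
The idea is to treat whatever survives subtraction of a low-pass filtered copy as noise. A standalone sketch on a synthetic flat-field is given below, with scipy's gaussian_filter substituted for misc.create_gauss_kernel plus convolve2d (an assumed substitution).

import numpy as np
from scipy.ndimage import gaussian_filter

bw_img = np.random.normal(0.5, 0.02, (128, 128))   # flat field with sigma = 0.02 noise
flt_img = gaussian_filter(bw_img, sigma=2)
noise_lev = np.std(bw_img - flt_img)               # roughly tracks the injected 0.02
print(noise_lev)
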
Example #11
def blur_metric(img_tile):
    """ img_tile : cropped image """

    img = rgb2gry(img_tile)[..., 0] if len(img_tile.shape) == 3 else img_tile
    y, x = img.shape

    magnitude = abs(np.fft.fft2(img))
    magnitudeCrop = magnitude[:int(np.ceil(y/2)), :int(np.ceil(x/2))]
    #figure, imshow(magnitude,[0 1000]), colormap gray # magnitude

    # total energy
    TE = sum(sum(magnitudeCrop**2))

    # high frequency energy
    freq_bounds = (y//100, x//100)
    HE = TE - sum(sum(magnitudeCrop[:freq_bounds[0], :freq_bounds[1]]**2))

    # energy ratio (Sharpness)
    S = HE/TE

    return S
Example #12
    def fit_patch(self, patch):
        """ compute polynomial coefficients and intensity map of 2-D image via least-squares regression """

        x = np.linspace(0, 1, patch.shape[1])
        y = np.linspace(0, 1, patch.shape[0])
        X, Y = np.meshgrid(x, y, copy=False)

        X = X.flatten()
        Y = Y.flatten()
        b = rgb2gry(patch)[..., 0].flatten() if len(
            patch.shape) == 3 else patch.flatten()

        A = self.compose_vandermonde_2d(X, Y, deg=3)

        # solve for the least-squares coefficient estimate via the pseudo-inverse of A and observations b
        coeffs = np.dot(np.linalg.pinv(A), b)

        # create weighting window (reshape fitted values back to the patch's row x column layout)
        weight_map = np.dot(A, coeffs).reshape(patch.shape[0], patch.shape[1])
        weight_map /= weight_map.max()

        return coeffs, weight_map
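
A minimal, self-contained version of the same degree-3 surface fit is sketched below with a hand-rolled Vandermonde matrix and numpy's lstsq. The exact monomial ordering of compose_vandermonde_2d is internal to plenopticam, so the column set here is an assumption.

import numpy as np

patch = np.outer(np.hanning(11), np.hanning(15))      # synthetic vignetting-like patch
y, x = np.mgrid[0:1:patch.shape[0] * 1j, 0:1:patch.shape[1] * 1j]
X, Y = x.ravel(), y.ravel()
A = np.stack([X**i * Y**j for i in range(4) for j in range(4 - i)], axis=1)
coeffs, *_ = np.linalg.lstsq(A, patch.ravel(), rcond=None)
weight_map = (A @ coeffs).reshape(patch.shape)        # rows x columns, as sampled
weight_map /= weight_map.max()
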
Example #13
fname = './c'
cfg.params[cfg.cal_path] = fname + '.png'
cfg.params[cfg.cal_meth] = constants.CALI_METH[2]  # 'peak'
wht_img = load_img_file(cfg.params[cfg.cal_path])
crop = True

# load ground truth (remove outlying centers)
spots_grnd_trth = np.loadtxt(fname + '.txt')
spots_grnd_trth = spots_grnd_trth[spots_grnd_trth[:, 1] > 0]
spots_grnd_trth = spots_grnd_trth[spots_grnd_trth[:, 0] > 0]
spots_grnd_trth = spots_grnd_trth[spots_grnd_trth[:, 1] < wht_img.shape[1]]
spots_grnd_trth = spots_grnd_trth[spots_grnd_trth[:, 0] < wht_img.shape[0]]

# ensure white image is monochromatic
if len(wht_img.shape) == 3:
    wht_img = rgb2gry(wht_img)[..., 0] if wht_img.shape[-1] == 3 else wht_img

# estimate micro image diameter
obj = PitchEstimator(wht_img, cfg, sta)
obj.main()
M = obj.M
del obj
print("Estimated pitch size: %s " % M)

# compute all centroids of micro images
obj = CentroidExtractor(wht_img, cfg, sta, M)
obj.main()
centroids = obj.centroids
peak_img = obj.peak_img
del obj
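
The snippet stops before the detected centroids are actually compared against spots_grnd_trth. One hedged way to score them is a nearest-neighbour distance query, sketched below with scipy's cKDTree and the variables from the snippet above; the (y, x) column layout of the centroid list is an assumption, and this is not necessarily the evaluation the author used.

import numpy as np
from scipy.spatial import cKDTree

detected = np.asarray(centroids)[:, :2]            # assumes (y, x) in the first two columns
dists, _ = cKDTree(spots_grnd_trth[:, :2]).query(detected)
print("Mean centroid error: %.3f px" % dists.mean())
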
Example #14
def semi_global_matching(img_l: np.ndarray = None,
                         img_r: np.ndarray = None,
                         disp_max: int = 64,
                         disp_min: int = 0,
                         p1: float = 10,
                         p2: float = 120,
                         feat_method: str = 'census',
                         dsim_method: str = 'xor',
                         size_k: int = 3,
                         blur_opt: bool = False,
                         medi_opt: bool = False,
                         *args,
                         **kwargs) -> (np.ndarray, np.ndarray):
    """
    Semi-global matching variant covering feature extraction, dissimilarity measure and cost aggregation.

    :param img_l: left image
    :param img_r: right image
    :param disp_max: maximum disparity
    :param disp_min: minimum disparity
    :param p1: minor penalty for cost aggregation
    :param p2: major penalty for cost aggregation
    :param feat_method: feature extraction method (only supports 'census' or None)
    :param dsim_method: dissimilarity measure (only supports 'xor' or 'abs_diff')
    :param size_k: kernel width for filter operations
    :param blur_opt: flag for Gaussian blur usage
    :param medi_opt: flag for Median filter usage
    :return: tuple of two numpy arrays for left and right disparity maps
    """

    # gray scale conversion
    gray_l, gray_r = rgb2gry(img_l)[..., 0], rgb2gry(img_r)[..., 0]
    gray_l, gray_r = Normalizer(gray_l).uint16_norm(), Normalizer(gray_r).uint16_norm()

    # remove high frequency noise
    if blur_opt and size_k > 0:
        print('\nBlur computation...')
        gray_l, gray_r = gaussian_filter(gray_l, size_k), gaussian_filter(
            gray_r, size_k)

    print('\nFeature computation...')
    gray_l, gray_r = compute_census(gray_l, gray_r, size_k) \
        if feat_method == 'census' else (gray_l, gray_r)

    print('\nCost computation...')
    cost_l, cost_r = compute_costs(gray_l,
                                   gray_r,
                                   disp_max,
                                   disp_min,
                                   offset=size_k,
                                   method=dsim_method)

    print('\nLeft aggregation computation...')
    cost_l = aggregate_costs(cost_l, p1, p2)
    print('\nRight aggregation computation...')
    cost_r = aggregate_costs(cost_r, p1, p2)

    disp_l = Normalizer(np.argmin(cost_l, axis=2)).uint8_norm()
    disp_r = Normalizer(np.argmin(cost_r, axis=2)).uint8_norm()

    if medi_opt:
        print('\nMedian filter...')
        disp_l = median_filter(disp_l, (size_k, size_k))
        disp_r = median_filter(disp_r, (size_k, size_k))

    print('\nFinished')

    return disp_l, disp_r
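
A call sketch on a tiny synthetic pair, assuming the function above together with its compute_census, compute_costs and aggregate_costs helpers is importable; the rolled copy only mimics a crude horizontal disparity.

import numpy as np

img_l = np.random.rand(48, 64, 3)
img_r = np.roll(img_l, shift=-4, axis=1)        # crude 4-pixel horizontal shift
disp_l, disp_r = semi_global_matching(img_l, img_r, disp_max=16, disp_min=0,
                                      size_k=3, blur_opt=True, medi_opt=True)
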
Example #15
    def main(self):

        if self._wht_img is None:
            self.sta.status_msg(msg='White image file not present',
                                opt=self.cfg.params[self.cfg.opt_prnt])
            self.sta.error = True
            return False

        # convert Bayer to RGB representation
        if len(self._wht_img.shape) == 2 and 'bay' in self.cfg.lfpimg:
            # perform color filter array management and obtain rgb image
            cfa_obj = CfaProcessor(bay_img=self._wht_img,
                                   cfg=self.cfg,
                                   sta=self.sta)
            cfa_obj.bay2rgb()
            self._wht_img = cfa_obj.rgb_img
            del cfa_obj

        # ensure white image is monochromatic
        if len(self._wht_img.shape) == 3:
            self._wht_img = rgb2gry(self._wht_img)[..., 0] \
                if self._wht_img.shape[-1] == 3 else self._wht_img

        # estimate micro image diameter
        obj = PitchEstimator(self._wht_img, self.cfg, self.sta)
        obj.main()
        self._M = obj.M if not self._M else self._M
        del obj

        # compute all centroids of micro images
        obj = CentroidExtractor(self._wht_img,
                                self.cfg,
                                self.sta,
                                self._M,
                                method='area')
        obj.main()
        centroids = obj.centroids
        del obj

        # write micro image center image to hard drive if debug option is set
        if self.cfg.params[self.cfg.opt_dbug]:
            draw_obj = CentroidDrawer(self._wht_img, centroids, self.cfg,
                                      self.sta)
            draw_obj.write_centroids_img(fn='wht_img+mics_unsorted.png')
            del draw_obj

        # reorder MICs and assign indices based on the detected MLA pattern
        obj = CentroidSorter(centroids, self.cfg, self.sta)
        obj.main()
        mic_list, pattern, pitch = obj.mic_list, obj.pattern, obj.pitch
        del obj

        # fit grid of MICs using least-squares method to obtain accurate MICs from line intersections
        # (note: the trailing `and None` keeps this grid-fitting step disabled)
        if not self.sta.interrupt and None:
            from plenopticam.lfp_calibrator import GridFitter
            self.cfg.calibs[self.cfg.pat_type] = pattern
            obj = GridFitter(coords_list=mic_list, cfg=self.cfg, sta=self.sta)
            obj.main()
            mic_list = obj.grid_fit
            del obj

        # save calibration metadata
        self.sta.status_msg('Save calibration data',
                            opt=self.cfg.params[self.cfg.opt_prnt])
        self.sta.progress(None, opt=self.cfg.params[self.cfg.opt_prnt])
        try:
            self.cfg.save_cal_data(mic_list=mic_list,
                                   pat_type=pattern,
                                   ptc_mean=pitch)
            self.sta.progress(100, opt=self.cfg.params[self.cfg.opt_prnt])
        except PermissionError:
            self.sta.status_msg('Could not save calibration data',
                                opt=self.cfg.params[self.cfg.opt_prnt])

        # write image to hard drive (only if debug option is set)
        if self.cfg.params[self.cfg.opt_dbug]:
            draw_obj = CentroidDrawer(self._wht_img, mic_list, self.cfg,
                                      self.sta)
            draw_obj.write_centroids_img(fn='wht_img+mics_sorted.png')
            del draw_obj

        return True
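
The method above is the core of plenopticam's calibration flow. A minimal driver sketch from the outside is given below; the class and config names follow the other examples on this page (PlenopticamConfig, cfg.cal_path, misc.load_img_file, LfpCalibrator), but treat the file path as a placeholder and the exact import locations as assumptions.

import plenopticam.lfp_calibrator as lfp_calibrator
from plenopticam import misc
from plenopticam.cfg import PlenopticamConfig

cfg = PlenopticamConfig()
cfg.params[cfg.cal_path] = '/path/to/white_image.png'   # placeholder path
wht_img = misc.load_img_file(cfg.params[cfg.cal_path])
cal_obj = lfp_calibrator.LfpCalibrator(wht_img, cfg)
cal_obj.main()                                          # runs the steps shown above
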
Example #16
        # perform centroid calibration
        cal_obj = lfp_calibrator.LfpCalibrator(wht_img, cfg)
        cal_obj.main()
        cfg = cal_obj.cfg
        del cal_obj
    else:
        # convert Bayer to RGB representation
        if len(wht_img.shape) == 2 and 'bay' in cfg.lfpimg:
            # perform color filter array management and obtain rgb image
            cfa_obj = CfaProcessor(bay_img=wht_img, cfg=cfg)
            cfa_obj.bay2rgb()
            wht_img = cfa_obj.rgb_img
            del cfa_obj

    # ensure white image is monochromatic
    wht_img = rgb2gry(wht_img)[..., 0] if len(wht_img.shape) == 3 else wht_img

    # load calibration data
    cfg.load_cal_data()

    if cfg.params[cfg.opt_rota]:
        # de-rotate centroids
        obj = LfpRotator(wht_img, cfg.calibs[cfg.mic_list], rad=None, cfg=cfg)
        obj.main()
        wht_rot, centroids_rot = obj.lfp_img, obj.centroids
        del obj

    plot_centroids(wht_img,
                   centroids=cfg.calibs[cfg.mic_list],
                   fn='_',
                   marker_color='red')
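
plot_centroids is project-specific; a minimal matplotlib stand-in that just overlays the centroid list on the white image is sketched below. The (y, x) column order and the real helper's file naming are assumptions.

import numpy as np
import matplotlib.pyplot as plt

def plot_centroids_stub(img, centroids, fn='_', marker_color='red'):
    # overlay centroid positions on the image and write the figure to disk
    centroids = np.asarray(centroids)
    plt.figure()
    plt.imshow(img, cmap='gray')
    plt.plot(centroids[:, 1], centroids[:, 0], '.', color=marker_color, markersize=1)
    plt.savefig(fn + 'centroids.png', dpi=300)
    plt.close()
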