Example #1
    def lum_eq(self):

        # RGB/YUV color conversion
        self._ref_img = misc.yuv_conv(self._ref_img)

        # create cumulative distribution function of reference image
        self.set_histeq_params()

        # histogram mapping using cumulative distribution function
        self.correct_histeq()

        # YUV/RGB color conversion
        return misc.yuv_conv(self._ref_img, inverse=True)
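All of these listings lean on misc.yuv_conv from PlenoptiCam's misc module, which is not reproduced here. A minimal sketch of such a converter, assuming full-range BT.601 weights and the inverse flag used throughout the examples (the library's actual coefficients may differ), might look like this:

import numpy as np

# assumed BT.601 full-range RGB -> YUV weights (not taken from the library)
RGB2YUV = np.array([[ 0.299,  0.587,  0.114],
                    [-0.147, -0.289,  0.436],
                    [ 0.615, -0.515, -0.100]])

def yuv_conv(img, inverse=False):
    # apply the 3x3 conversion along the channel axis; invert it for YUV -> RGB
    mat = np.linalg.inv(RGB2YUV) if inverse else RGB2YUV
    return np.dot(img, mat.T)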
Example #2
    def apply_stretch_lum(self):
        ''' apply contrast and brightness rectification to the luminance channel of the provided RGB image '''

        # color model conversion
        self.vp_img_arr = (misc.yuv_conv(self.vp_img_arr)
                           if len(self.vp_img_arr.shape) > 4 else self.vp_img_arr)

        # apply histogram stretching to luminance channel only
        self.apply_stretch(ch=0)

        # color model conversion
        self.vp_img_arr = misc.yuv_conv(self.vp_img_arr, inverse=True)

        return True
Example #3
    def uv_eq(self):

        # RGB/YUV color conversion
        self._ref_img = misc.yuv_conv(self._ref_img)

        for i in range(1, self._ref_img.shape[-1]):

            # create cumulative distribution function of reference image
            self.set_histeq_params(ch=i)

            # histogram mapping using cumulative distribution function
            self.correct_histeq(ch=i)

        # YUV/RGB color conversion
        return misc.yuv_conv(self._ref_img, inverse=True)
Example #4
    def correct_luma_outliers(self, img, n=2, perc=.2):

        # luma channel conversion
        luma = misc.yuv_conv(img.copy())[..., 0]

        for j in range(n, luma.shape[0] - n):
            for i in range(n, luma.shape[1] - n):
                win = luma[j - n:j + n + 1, i - n:i + n + 1]

                # hot pixel detection
                num_hi = len(win[win > luma[j, i] * (1 - perc)])

                # dead pixel detection
                num_lo = len(win[win < luma[j, i] * (1 + perc)])

                if num_hi < win.size / 5 or num_lo < win.size / 5:
                    # replace outlier by average of all directly adjacent pixels
                    img[j, i, :] = (sum(sum(img[j - 1:j + 2, i - 1:i + 2, :])) -
                                    img[j, i, :]) / 8.

                # check interrupt status
                if self.sta.interrupt:
                    return False

        return img
Example #5
    def fit_patch(self, patch, th=None):

        self._th = th if th is not None else self._th

        x = np.linspace(0, 1, patch.shape[1])
        y = np.linspace(0, 1, patch.shape[0])
        X, Y = np.meshgrid(x, y, copy=False)

        X = X.flatten()
        Y = Y.flatten()
        b = misc.yuv_conv(patch)[..., 0].flatten()

        A = self.compose_vandermonde(X, Y, deg=3)

        # solve for a least-squares estimate of the polynomial coefficients via the pseudo-inverse
        coeffs = np.dot(np.linalg.pinv(A), b)

        # create weighting window
        weight_win = np.dot(A, coeffs).reshape(patch.shape[0], patch.shape[1])[..., np.newaxis]
        weight_win /= weight_win.max()

        # thresholding (to prevent too large numbers in corrected image)
        weight_win[weight_win < self._th] = self._th

        return coeffs, weight_win
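Example #5 relies on compose_vandermonde, which is not listed. A plausible sketch, assuming a full bivariate polynomial basis up to the requested degree (the library's actual column layout may differ):

import numpy as np

def compose_vandermonde(x, y, deg=3):
    # hypothetical design matrix: columns 1, x, y, x^2, x*y, y^2, ... up to the given degree
    cols = [np.ones_like(x)]
    for d in range(1, deg + 1):
        for k in range(d + 1):
            cols.append(x ** (d - k) * y ** k)
    return np.stack(cols, axis=-1)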
Example #6
def correct_luma_outliers(img, n=2, perc=.2, sta=None):

    # status init
    sta = sta if sta is not None else misc.PlenopticamStatus()
    sta.status_msg('Hot pixel removal', True)

    # luma channel conversion
    luma = misc.yuv_conv(img.copy())[..., 0]

    for i in range(n, luma.shape[0] - n):
        for j in range(n, luma.shape[1] - n):
            win = luma[i - n:i + n + 1, j - n:j + n + 1]

            # hot pixel detection
            num_hi = len(win[win > luma[i, j] * (1 - perc)])

            # dead pixel detection
            num_lo = len(win[win < luma[i, j] * (1 + perc)])

            if num_hi < win.size / 5 or num_lo < win.size / 5:
                # replace outlier by average of all directly adjacent pixels
                img[i, j, :] = (sum(sum(img[i - 1:i + 2, j - 1:j + 2, :])) -
                                img[i, j, :]) / 8.

            # progress update
            sta.progress((i * luma.shape[1] + (j + 1)) / luma.size * 100, True)

    return img
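A quick way to exercise Example #6 on synthetic data (assuming plenopticam's misc module is imported as misc so that yuv_conv and PlenopticamStatus resolve):

import numpy as np

# uniform tile with one artificially bright ("hot") pixel
img = np.full((32, 32, 3), 1000, dtype='float64')
img[10, 10, :] = 60000

img = correct_luma_outliers(img, n=2, perc=.2)
print(img[10, 10, :])  # hot pixel replaced by the mean of its eight direct neighbours, i.e. ~1000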
Example #7
def michelson_contrast(img_tile):
    ''' https://colorusage.arc.nasa.gov/luminance_cont.php '''

    lum_tile = misc.yuv_conv(img_tile)[..., 0]

    c_m = (lum_tile.max() - lum_tile.min()) / (lum_tile.max() + lum_tile.min())

    return c_m
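A short usage sketch for Example #7, assuming misc.yuv_conv accepts float RGB arrays:

import numpy as np

# vertical gray ramp from 0.1 to 0.9 replicated over three channels
tile = np.tile(np.linspace(0.1, 0.9, 64)[:, np.newaxis, np.newaxis], (1, 64, 3))

print(michelson_contrast(tile))  # approximately (0.9 - 0.1) / (0.9 + 0.1) = 0.8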
Example #8
def correct_contrast(img_arr, contrast=1, brightness=0, ch=0):

    # color model conversion
    img_yuv = misc.yuv_conv(img_arr)

    # convert to float
    f = img_yuv[..., ch].astype(np.float32)

    # apply contrast and brightness (by default: achromatic luma channel only)
    img_yuv[..., ch] = contrast * f + brightness

    # clip to input extrema to remove contrast outliers
    img_yuv[..., ch][img_yuv[..., ch] < img_arr.min()] = img_arr.min()
    img_yuv[..., ch][img_yuv[..., ch] > img_arr.max()] = img_arr.max()

    # color model conversion
    img = misc.yuv_conv(img_yuv, inverse=True)

    return img
Example #9
    def con_bal(self):

        # estimate contrast and brightness via least-squares method
        self.set_stretch(ref_ch=misc.yuv_conv(self.central_view)[..., 0])

        self.proc_vp_arr(misc.yuv_conv)
        self.proc_vp_arr(self.apply_stretch, ch=0, msg='Contrast balance')
        self.proc_vp_arr(misc.yuv_conv, inverse=True)

        # status update
        self.sta.progress(100, opt=self.cfg.params[self.cfg.opt_prnt])
Example #10
def robust_awb(img, t=0.3, max_iter=1000):
    ''' inspired by Jun-yan Huo et al. and http://web.stanford.edu/~sujason/ColorBalancing/Code/robustAWB.m '''

    img = misc.type_norm(img, dtype='float16', lim_min=0, lim_max=1.0)
    ref_pixel = img[0, 0, :].copy()

    u = .01  # gain step size
    a = .8  # double step threshold
    b = .001  # convergence threshold

    gains_adj = np.array([1., 1., 1.])

    #sRGBtoXYZ = [[0.4124564, 0.3575761, 0.1804375], [0.2126729, 0.7151522, 0.0721750], [0.0193339, 0.1191920, 0.9503041]]
    sRGBtoXYZ = [[0.4124564, 0.2126729, 0.0193339],
                 [0.3575761, 0.7151522, 0.1191920],
                 [0.1804375, 0.0721750, 0.9503041]]

    for i in range(max_iter):
        img_yuv = misc.yuv_conv(img)
        f = (abs(img_yuv[..., 1]) + abs(img_yuv[..., 2])) / img_yuv[..., 0]
        grays = np.zeros(img_yuv.shape)
        grays[f < t] = img_yuv[f < t]
        if np.sum(f < t) == 0:
            print('No valid gray pixels found.')
            break

        u_bar = np.mean(grays[..., 1])  #estimate
        v_bar = np.mean(grays[..., 2])  #estimate

        #rgb_est = misc.yuv_conv(np.array([100, u_bar, v_bar]), inverse=True)    # convert average gray from YUV to RGB

        # if |U| > |V|: adjust the blue gain, otherwise adjust the red gain
        err, ch = (u_bar, 2) if abs(u_bar) > abs(v_bar) else (v_bar, 0)

        if abs(err) >= a:
            delta = 2 * np.sign(err) * u  # accelerate gain adjustment if far off
        elif abs(err) < b:  #converged
            delta = 0
            print('Converged. U_bar and V_bar < {0} in magnitude.'.format(b))
            break
        else:
            delta = err * u

        gains_adj[ch] -= delta  # negative feedback loop

        img = np.dot(img, np.diag(gains_adj))

    gains = img[0, 0, :] / ref_pixel

    return gains
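A hypothetical way to exercise Example #10: the returned gains describe the per-channel scaling found by the gray-world iteration and can be applied to the original image (assuming plenopticam's misc module is in scope):

import numpy as np

# mostly gray scene with a simulated blue cast
rng = np.random.default_rng(0)
img = np.clip(rng.normal(0.5, 0.05, (64, 64, 3)), 0, 1)
img[..., 2] *= 1.3

gains = robust_awb(img.copy(), t=0.3)
balanced = np.dot(img, np.diag(gains))  # rescale the RGB channels with the estimated gains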
Example #11
    def robust_awb(self, t=0.3, max_iter=1000):
        ''' inspired by Jun-yan Huo et al. and http://web.stanford.edu/~sujason/ColorBalancing/Code/robustAWB.m '''

        img = Normalizer(self.central_view).type_norm(dtype='float16',
                                                      lim_min=0,
                                                      lim_max=1.0)
        ref_pixel = img[0, 0, :].copy()

        u = .01  # gain step size
        a = .8  # double step threshold
        b = .001  # convergence threshold

        gains_adj = np.array([1., 1., 1.])

        for i in range(max_iter):
            img_yuv = misc.yuv_conv(img)
            f = (abs(img_yuv[..., 1]) + abs(img_yuv[..., 2])) / img_yuv[..., 0]
            grays = np.zeros(img_yuv.shape)
            grays[f < t] = img_yuv[f < t]
            if np.sum(f < t) == 0:
                self.sta.status_msg('No valid gray pixels found.',
                                    self.cfg.params[self.cfg.opt_prnt])
                break

            u_bar = np.mean(grays[..., 1])  # estimate
            v_bar = np.mean(grays[..., 2])  # estimate

            # rgb_est = misc.yuv_conv(np.array([100, u_bar, v_bar]), inverse=True)    # convert average gray from YUV to RGB

            # if |U| > |V|: adjust the blue gain, otherwise adjust the red gain
            err, ch = (u_bar, 2) if abs(u_bar) > abs(v_bar) else (v_bar, 0)

            if abs(err) >= a:
                delta = 2 * np.sign(err) * u  # accelerate gain adjustment if far off
            elif abs(err) < b:  # converged when u_bar and v_bar < b
                # delta = 0
                #self.sta.status_msg('AWB convergence reached', self.cfg.params[self.cfg.opt_prnt])
                break
            else:
                delta = err * u

            # negative feedback loop
            gains_adj[ch] -= delta

            img = np.dot(img, np.diag(gains_adj))

        # take gains only if result is obtained by convergence
        if i != max_iter - 1:
            self._gains = img[0, 0, :] / ref_pixel

        return True
Example #12
def auto_contrast(img_arr, p_lo=0.001, p_hi=0.999, ch=0):
    ''' according to Adi Shavit on https://stackoverflow.com/questions/9744255/instagram-lux-effect/9761841#9761841 '''

    # estimate contrast and brightness parameters (by default: achromatic "luma" channel only)
    val_lim = 2**16 - 1
    img_yuv = misc.yuv_conv(img_arr)
    # one bin per intensity level so that H matches the length of np.arange(val_lim) below
    h = np.histogram(img_yuv[..., ch], bins=np.arange(val_lim + 1))[0]
    H = np.cumsum(h) / float(np.sum(h))
    try:
        px_lo = find_x_given_y(p_lo, np.arange(val_lim), H)
        px_hi = find_x_given_y(p_hi, np.arange(val_lim), H)
    except:
        px_lo = 0
        px_hi = 1
    A = np.array([[px_lo, 1], [px_hi, 1]])
    b = np.array([0, val_lim])
    contrast, brightness = np.dot(np.linalg.inv(A), b)

    return contrast, brightness
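Examples #8 and #12 complement each other: auto_contrast estimates the gain and offset from the luma histogram and correct_contrast applies them. A hedged usage sketch, assuming 16-bit input data and that misc and find_x_given_y from the surrounding module are importable:

import numpy as np

# low-contrast image occupying only a narrow band of the 16-bit range
img = np.random.default_rng(1).integers(20000, 30000, (64, 64, 3)).astype('float64')

contrast, brightness = auto_contrast(img, p_lo=0.001, p_hi=0.999, ch=0)
img_out = correct_contrast(img, contrast=contrast, brightness=brightness, ch=0)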
Example #13
    def set_stretch_lum(self):

        # use luminance channel for parameter analysis
        ref_img = misc.yuv_conv(self.central_view)
        self.set_stretch(ref_ch=ref_img[..., 0])