def refined_t(normI, w=15):
    tmin = 0.2
    r = 40
    eps = 1e-3
    t = transmission_map(normI, w)
    # Clamp the per-channel transmission, then refine it with the guided filter
    # using the normalized image as guidance (channels 0/1 = blue/green in BGR).
    refinedt_blue = np.maximum(t[:, :, 0], tmin)
    refinedt_green = np.maximum(t[:, :, 1], tmin)
    refinedt_blue = guided_filter(normI, refinedt_blue, r, eps)
    refinedt_green = guided_filter(normI, refinedt_green, r, eps)
    return refinedt_blue, refinedt_green
Example 2
def usemodel(dehazenet, hazy_image):

    patch_size = 16
    p = 0.001
    L = 256

    height = hazy_image.shape[0]
    width = hazy_image.shape[1]
    channel = hazy_image.shape[2]

    # Crop the dimensions down to a multiple of the patch size.
    if height % patch_size != 0:
        height = height // patch_size * patch_size
    if width % patch_size != 0:
        width = width // patch_size * patch_size

    hazy_image = cv2.resize(hazy_image, (width, height), interpolation=cv2.INTER_AREA)
    trans_map = np.zeros((height, width))

    # Predict one transmission value per patch with the trained DehazeNet model.
    for i in range(height // patch_size):
        for j in range(width // patch_size):
            hazy_patch = hazy_image[i * patch_size : (i + 1) * patch_size,
                                    j * patch_size : (j + 1) * patch_size, :]
            hazy_input = np.reshape(hazy_patch, (1, patch_size, patch_size, channel)) / 255.0
            trans = dehazenet.predict(hazy_input)
            trans_map[i * patch_size : (i + 1) * patch_size,
                      j * patch_size : (j + 1) * patch_size] = trans

    # Refine the blocky transmission map with a guided filter, then invert the haze model.
    norm_hazy_image = (hazy_image - hazy_image.min()) / (hazy_image.max() - hazy_image.min())
    refined_trans_map = guided_filter(norm_hazy_image, trans_map)

    Airlight = get_airlight(hazy_image, refined_trans_map, p)
    clear_image = get_radiance(hazy_image, Airlight, refined_trans_map, L)

    return clear_image
Example 3
def smooth_tmap(img, tmap, constants):
    epsilon = constants.EPSILON
    radius = constants.GUIDED_RADIUS
    normI = (img - img.min()) / (img.max() - img.min())
    smooth_tmap = guided_filter(normI.astype(np.float32),
                                tmap.astype(np.float32), radius, epsilon)
    smooth_tmap = cv2.bilateralFilter(smooth_tmap.astype(np.float32), 0, 0.1, 5)
    return smooth_tmap
def dehaze_raw(I,
               tmin=0.2,
               Amax=220,
               w=15,
               p=0.0001,
               omega=0.95,
               guided=False,
               r=40,
               eps=1e-3):
    """
    Get the dark channel prior, atmosphere light, transmission rate and refined transmission rate for raw RGB image data.

    Parameters:

    I: M * N * 3 data as numpy array for the hazy image
    tmin: threshold of transmission rate
    Amax: threshold of atmosphere light
    w: window size of the dark channel prior
    p: percentage of pixels for estimating the atmosphere light
    omega: bias for the transmission estimate

    guided: whether to use the guided filter to refine the image
    r: the radius of the guidance
    eps: epsilon for the guided filter

    Return:

    (Idark, A, rawt, refinedt); if guided=False, then rawt == refinedt
    """

    m, n, _ = I.shape
    Idark = get_dark_channel(I, w)

    A = get_atmosphere(I, Idark, p)
    A = np.minimum(A, Amax)
    print('Atmosphere:', A)

    rawt = get_transmission(I, A, Idark, omega, w)
    print('raw transmission rate between [%.4f, %.4f]' % (rawt.min(), rawt.max()))

    rawt = refinedt = np.maximum(rawt, tmin)

    if guided:
        normI = (I - I.min()) / (I.max() - I.min())
        refinedt = guided_filter(normI, refinedt, r, eps)

    print('refined transmission rate between [%.4f, %.4f]' % (refinedt.min(), refinedt.max()))

    return Idark, A, rawt, refinedt
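A minimal usage sketch for this variant (not part of the original listing), assuming the helpers it calls (get_dark_channel, get_atmosphere, get_transmission, guided_filter) are in scope and that the file names are illustrative:

import cv2
import numpy as np

# Hypothetical driver code: load a hazy BGR image and inspect the DCP intermediates.
hazy = cv2.imread('hazy.png').astype(np.float64)
Idark, A, rawt, refinedt = dehaze_raw(hazy, guided=True)

# Save the refined transmission map for inspection, scaled to 8-bit.
cv2.imwrite('transmission_refined.png', (refinedt * 255).astype(np.uint8))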
Example 5
def transmission(img, A, blocksize, bol):
    omega = 0.95
    imageGray = np.empty(img.shape, img.dtype)
    # Normalize each channel by its estimated atmospheric light.
    for i in range(3):
        imageGray[:, :, i] = img[:, :, i] / A[0, i]
    # Transmission estimate from the dark channel prior: t = 1 - omega * dark(I / A).
    t = 1 - omega * getDarkChannel(imageGray, blocksize)
    t[t < 0.1] = 0.1

    # Optionally refine the transmission map with a guided filter.
    if bol:
        normI = (img - img.min()) / (img.max() - img.min())
        t = guided_filter(normI, t, 40, 0.0001)
    return t
def adaptiveExp_map(normI, w=15):
    r = 40
    eps = 1e-3
    restored = RC_correction(normI, w)
    R = (restored * 255).astype(np.uint8)
    I = (normI * 255).astype(np.uint8)
    # Compare the luminance of the restored and original images in YCrCb space.
    YjCrCb = cv2.cvtColor(R, cv2.COLOR_BGR2YCrCb)
    YiCrCb = cv2.cvtColor(I, cv2.COLOR_BGR2YCrCb)
    normYjCrCb = (YjCrCb - YjCrCb.min()) / (YjCrCb.max() - YjCrCb.min())
    normYiCrCb = (YiCrCb - YiCrCb.min()) / (YiCrCb.max() - YiCrCb.min())
    Yi = normYiCrCb[:, :, 0]
    Yj = normYjCrCb[:, :, 0]
    # Adaptive exposure weight map, refined with the guided filter.
    S = (Yj * Yi + 0.3 * Yi**2) / (Yj**2 + 0.3 * Yi**2)
    refinedS = guided_filter(normYiCrCb, S, r, eps)
    M, N = S.shape
    rs = np.zeros((M, N, 3))
    rs[:, :, 0] = rs[:, :, 1] = rs[:, :, 2] = refinedS
    OutputExp = restored * rs
    return (OutputExp - OutputExp.min()) / (OutputExp.max() - OutputExp.min())
def dehaze_raw(I, tmin=0.2, Amax=220, w=15, p=0.0001,
               omega=0.95, guided=True, r=40, eps=1e-3):
    """Get the dark channel prior, atmosphere light, transmission rate
       and refined transmission rate for raw RGB image data.

    Parameters
    -----------
    I:      M * N * 3 data as numpy array for the hazy image
    tmin:   threshold of transmission rate
    Amax:   threshold of atmosphere light
    w:      window size of the dark channel prior
    p:      percentage of pixels for estimating the atmosphere light
    omega:  bias for the transmission estimate

    guided: whether to use the guided filter to refine the image
    r:      the radius of the guidance
    eps:    epsilon for the guided filter

    Return
    -----------
    (Idark, A, rawt, refinedt) if guided=False, then rawt == refinedt
    """
    m, n, _ = I.shape
    Idark = get_dark_channel(I, w)

    A = get_atmosphere(I, Idark, p)
    A = np.minimum(A, Amax)  # threshold A
    print('atmosphere', A)

    rawt = get_transmission(I, A, Idark, omega, w)
    print('raw transmission rate between [%.4f, %.4f]' % (rawt.min(), rawt.max()))

    rawt = refinedt = np.maximum(rawt, tmin)  # threshold t
    if guided:
        normI = (I - I.min()) / (I.max() - I.min())  # normalize I
        refinedt = guided_filter(normI, refinedt, r, eps)

    print('refined transmission rate between [%.4f, %.4f]' % (refinedt.min(), refinedt.max()))

    return Idark, A, rawt, refinedt
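The refined transmission returned here is normally fed into the scene-radiance model J = (I - A) / max(t, tmin) + A. A short illustrative helper for that last step (the repositories above wrap it in their own get_radiance / get_final_image functions, so this is a sketch, not their code):

import numpy as np

def recover_radiance(I, A, t, L=256):
    # Invert the haze model per channel: J = (I - A) / t + A.
    # 't' is already clamped upstream via np.maximum(rawt, tmin), so no extra guard here.
    t3 = t[:, :, np.newaxis]  # broadcast the 2-D transmission over the color channels
    J = (I - A) / t3 + A
    return np.clip(J, 0, L - 1).astype(np.uint8)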
Example 8
File: DCP.py Project: yyfyan/DeHaze
def dehaze_1(im, tmin=0.1, w=15, p=0.001,
             omega=0.95, r=40, eps=1e-3, L=256):
    '''
    tmin   threshold of transmission rate
    w      window size of the dark channel prior
    p      percentage of pixels for estimating the atmosphere light
    omega  bias for the transmission estimate
    r      radius of the guided filter
    eps    epsilon for the guided filter
    L      highest pixel value
    '''
    I = np.asarray(im, dtype=np.float64)
    
    m, n, _ = I.shape
    Idark = get_dark_channel(I, w)
    A = get_atmosphere(I, Idark, p)
    rawt = get_transmission(I, A, Idark, omega, w)
    normI = (I - I.min()) / (I.max() - I.min())  # normalize I
    refinedt = guidedfilter.guided_filter(normI, rawt, r, eps)
    refinedt = np.maximum(refinedt, tmin)
    clear_image = get_radiance(I, A, refinedt)
    
    return np.maximum(np.minimum(clear_image, L - 1), 0).astype(np.uint8) 
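A possible invocation of dehaze_1 (illustrative only; the file names are placeholders), assuming the image is read with OpenCV:

import cv2

# Hypothetical usage: dehaze_1 takes an array-like image and returns an
# 8-bit result clipped to [0, L-1].
im = cv2.imread('foggy_street.jpg')
clear = dehaze_1(im)
cv2.imwrite('foggy_street_dehazed.jpg', clear)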
Example 9
def dehaze(I, tmin, w, alpha, omega, p, eps, reduce=False):
    m, n, _ = I.shape
    Idark, Ibright = get_illumination_channel(I, w)
    A = get_atmosphere(I, Ibright, p)

    init_t = get_initial_transmission(A, Ibright)
    if reduce:
        init_t = reduce_init_t(init_t)
    corrected_t = get_corrected_transmission(I, A, Idark, Ibright, init_t,
                                             alpha, omega, w)

    normI = (I - I.min()) / (I.max() - I.min())
    refined_t = guided_filter(normI, corrected_t, w, eps)
    J_refined = get_final_image(I, A, refined_t, tmin)

    enhanced = (J_refined * 255).astype(np.uint8)
    # NOTE: a detail-enhanced, edge-preserving-filtered version is computed
    # below but not returned; the function returns the unfiltered result.
    f_enhanced = cv2.detailEnhance(enhanced, sigma_s=10, sigma_r=0.15)
    f_enhanced = cv2.edgePreservingFilter(f_enhanced,
                                          flags=1,
                                          sigma_s=64,
                                          sigma_r=0.2)
    return enhanced
def dehaze_raw(I,
               t_min=0.2,
               atm_max=220,
               w=15,
               p=0.0001,
               omega=0.95,
               guided=True,
               r=40,
               eps=1e-3,
               flag_uw=False,
               depth_img=None):
    """Get the dark channel prior, atmosphere light, transmission rate
       and refined transmission rate for raw RGB image data.

    Parameters
    -----------
    I:      M * N * 3 data as numpy array for the hazy image
    t_min:  threshold of transmission rate
    atm_max: threshold of atmosphere light
    w:      window size of the dark channel prior
    p:      percentage of pixels for estimating the atmosphere light
    omega:  bias for the transmission estimate
    flag_uw: enable DCP for underwater images

    guided: whether to use the guided filter to refine the image
    r:      the radius of the guidance
    eps:    epsilon for the guided filter

    Return
    -----------
    (Idark, A, rawt, refinedt) if guided=False, then rawt == refinedt
    """

    # NOTE:Check if depth image was provided
    flag_use_depth = depth_img is not None

    # NOTE:First, get dark channel image
    # NOTE:Change color space if dealing with underwater images
    Iprime = np.zeros(I.shape, dtype=np.float64)
    if flag_uw:
        Iprime[:, :, 0] = 255. - I[:, :, 0]
        Iprime[:, :, 1] = 255. - I[:, :, 1]
        Iprime[:, :, 2] = I[:, :, 2]
    else:
        Iprime = I

    Idark = [] if flag_use_depth else get_dark_channel(Iprime, w)

    # NOTE:Estimate the atmospheric light, this is done always with the original image
    atm_light = get_atmosphere(I, Idark, p, depth_img)
    atm_light = np.minimum(atm_light, atm_max)
    print('atmosphere', atm_light)

    # NOTE:Estimate transmission image which correlates to depth
    if flag_use_depth:
        M, N, _ = I.shape
        # 'max_color_val' is assumed to be a module-level constant (e.g. 256).
        white = np.full_like(np.zeros((M, N), dtype=np.float64),
                             max_color_val - 1)
        return white, atm_light, white, white
    else:
        rawt = get_transmission(Iprime, atm_light, omega, w)
        print('raw transmission rate between [%.4f, %.4f]' %
              (rawt.min(), rawt.max()))
        # NOTE: Refine transmission rate through guided filter (edge-preserving filter)
        rawt = refinedt = np.maximum(rawt, t_min)
        if guided:
            normI = (I - I.min()) / (I.max() - I.min())
            refinedt = guided_filter(normI, refinedt, r, eps)

        print('refined transmission rate between [%.4f, %.4f]' %
              (refinedt.min(), refinedt.max()))

        return Idark, atm_light, rawt, refinedt
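A hedged usage sketch for the underwater branch (file name and call site are assumptions): with flag_uw=True the first two channels are inverted before the dark channel is computed, and depth_img is left as None so the regular transmission path is taken.

import cv2
import numpy as np

# Hypothetical call: underwater image, no depth map, guided-filter refinement on.
uw = cv2.imread('underwater.png').astype(np.float64)
Idark, atm_light, rawt, refinedt = dehaze_raw(uw, flag_uw=True, guided=True)
print('refined transmission in [%.4f, %.4f]' % (refinedt.min(), refinedt.max()))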
def dehaze(I, tmin, w, alpha, omega, p, eps):
    # 'folder' (output directory) and 'L' (number of gray levels) are assumed
    # to be module-level globals in the original project.
    m, n, _ = I.shape
    Idark, Ibright = get_illumination_channel(I, w)

    print("dark max:{} min:{}".format(
        np.max(Idark), np.min(Idark)))  # dark max:0.776470588235 min:0.0
    print("bright max:{} min:{}".format(
        np.max(Ibright),
        np.min(Ibright)))  # bright max:0.988235294118 min:0.00392156862745

    cv2.imwrite(folder + '/init_bright_channel_prior.png', Ibright * 255)
    cv2.imwrite(folder + '/init_dark_channel_prior.png', Idark * 255)

    white = np.full_like(Idark, L - 1)

    A = get_atmosphere(I, Ibright, p)
    print('atmosphere:{}'.format(A))

    #################################################################

    init_t = get_initial_transmission(A, Ibright)
    cv2.imwrite(folder + '/transmission_init.png', init_t * white)
    print('initial (bright) transmission rate between [%.4f, %.4f]' %
          (init_t.min(), init_t.max()))

    J_init = get_final_image(I, A, init_t, tmin)
    cv2.imwrite(folder + '/J_init.png', J_init * 255)

    #################################################################

    corrected_t, dark_t = get_corrected_transmission(I, A, Idark, Ibright,
                                                     init_t, alpha, omega, w)
    cv2.imwrite(folder + '/transmission_corrected.png', corrected_t * white)
    print('corrected transmission rate between [%.4f, %.4f]' %
          (corrected_t.min(), corrected_t.max()))

    cv2.imwrite(folder + '/transmission_a_dark.png', dark_t * white)
    print('dark transmission rate between [%.4f, %.4f]' %
          (dark_t.min(), dark_t.max()))

    J_corrected = get_final_image(I, A, corrected_t, tmin)
    cv2.imwrite(folder + '/J_corrected.png', J_corrected * 255)

    J_dark = get_final_image(I, A, dark_t, tmin)
    cv2.imwrite(folder + '/J_dark.png', J_dark * 255)

    #################################################################

    # guided filter
    normI = (I - I.min()) / (I.max() - I.min())  # min-max normalize I
    refined_t = guided_filter(normI, corrected_t, w, eps)
    refined_dark_t = guided_filter(normI, dark_t, w, eps)

    #refined_t = (refined_t - np.min(refined_t))/(np.max(refined_t) - np.min(refined_t)) # min-max normalization.
    #refined_dark_t = (refined_dark_t - np.min(refined_dark_t))/(np.max(refined_dark_t) - np.min(refined_dark_t)) # min-max normalization.

    cv2.imwrite(folder + '/refined.png', refined_t * white)
    print('refined transmission rate between [%.4f, %.4f]' %
          (refined_t.min(), refined_t.max()))

    J_refined = get_final_image(I, A, refined_t, tmin)
    cv2.imwrite(folder + '/J_refined.png', J_refined * 255)

    cv2.imwrite(folder + '/transmission_a_refined_dark.png',
                refined_dark_t * white)
    print('refined dark transmission rate between [%.4f, %.4f]' %
          (refined_dark_t.min(), refined_dark_t.max()))

    J_refined_dark = get_final_image(I, A, refined_dark_t, tmin)
    cv2.imwrite(folder + '/J_refined_dark.png', J_refined_dark * 255)