Example #1
def hisEqulColor(img):
    if len(img.shape) == 2:
        return hisEqul(img)

    ycrcb = cv2.cvtColor(img, cv2.COLOR_BGR2YCR_CB)
    channels = cv2.split(ycrcb)
    cv2.equalizeHist(channels[0], channels[0])
    cv2.merge(channels, ycrcb)
    cv2.cvtColor(ycrcb, cv2.COLOR_YCR_CB2BGR, img)
    return img
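The grayscale branch above calls a hisEqul helper that is not shown. A minimal sketch of what it presumably does (plain cv2.equalizeHist on a single-channel image); only the name hisEqul comes from the call above, the body is an assumption:

import cv2

def hisEqul(img):
    # assumed helper: plain histogram equalization for a single-channel (grayscale) image
    return cv2.equalizeHist(img)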
Example #2
    def refactor(self):
        for k in range(3):  # three channels
            # coarse coefficients between -0.5 ~ 0.5, detail coefficients between -1 ~ 1
            for i in range(len(self.coefficients[k]),
                           self.num):  # complete coefficient
                height = 2 * len(self.coefficients[k][i - 1][0])
                width = 2 * len(self.coefficients[k][i - 1][0][0])
                self.coefficients[k].append([
                    array([[0.0] * width for _ in range(height)])
                    for _ in range(3)  # "_" avoids shadowing the outer loop variables i and k
                ])
            for i in range(self.num):
                self.coarse_coef[k] = self.__lifting_scheme_reverse(
                    self.coarse_coef[k], self.coefficients[k][i][0], 0)
                fine_coef = self.__lifting_scheme_reverse(
                    self.coefficients[k][i][1], self.coefficients[k][i][2], 0)
                self.coarse_coef[k] = self.__lifting_scheme_reverse(
                    self.coarse_coef[k], fine_coef, 1)
            self.coarse_coef[k] = self.coarse_coef[k].astype(float) * iinfo(
                uint8).max  # rescale from [0, 1] back to the 0-255 range
            self.coarse_coef[k] = self.coarse_coef[k].astype(uint8)  # convert back to integer

        self.img = cv2.merge(
            [self.coarse_coef[0], self.coarse_coef[1], self.coarse_coef[2]])
        self.img = cv2.cvtColor(self.img,
                                cv2.COLOR_YCrCb2BGR)  # convert YCrCb back to BGR
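The reconstruction above relies on a private __lifting_scheme_reverse(coarse, detail, axis) helper that is not shown. Purely as an illustration, an inverse Haar lifting step along a chosen axis could look like the hypothetical sketch below; the real project may use a different wavelet, so treat every detail here as an assumption:

import numpy as np

def lifting_scheme_reverse(coarse, detail, axis):
    # hypothetical inverse Haar lifting step: undo the update, then undo the predict
    even = coarse - detail / 2.0
    odd = detail + even
    shape = list(coarse.shape)
    shape[axis] *= 2
    out = np.empty(shape, dtype=float)
    if axis == 0:       # interleave rows
        out[0::2, :] = even
        out[1::2, :] = odd
    else:               # interleave columns
        out[:, 0::2] = even
        out[:, 1::2] = odd
    return out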
Example #3
def transfertColor(src, ref, output, gamma):
    img_src = read(src)
    tonemap = reinhard(img_src)
    write("in1.jpg",tonemap)
    img_src = adjust_gamma(img_src,gamma)
    img_src = BGRtoLalphabeta(img_src)

    img_ref = read(ref)
    tonemap = reinhard(img_ref)
    write("in2.jpg",tonemap)
    img_ref = adjust_gamma(img_ref,gamma)
    img_ref = BGRtoLalphabeta(img_ref)

    mean_src, stddev_src = cv2.meanStdDev(img_src)
    mean_ref, stddev_ref = cv2.meanStdDev(img_ref)

    split_src = cv2.split(img_src)
    img_out = cv2.merge((computeColor(split_src[0], mean_src[0], stddev_src[0], mean_ref[0], stddev_ref[0]),
                         computeColor(split_src[1], mean_src[1], stddev_src[1], mean_ref[1], stddev_ref[1]),
                         computeColor(split_src[2], mean_src[2], stddev_src[2], mean_ref[2], stddev_ref[2])))
    img_out = LalphabetatoBGR(img_out)
    img_out = adjust_gamma(img_out,1./gamma)

    write(output, img_out)
    tonemap = logarithme(img_out)
    write("out.jpg",tonemap)
Example #4
    def addAlpha(self, img):

        b_chan, g_chan, r_chan = cv2.split(img)
        alpha_chan = np.ones(b_chan.shape, dtype=b_chan.dtype) * 50
        img = cv2.merge((b_chan, g_chan, r_chan, alpha_chan))

        return img
Example #5
def Histogram_EQ(processed_imgs,eq_direc):
    
    if not os.path.exists(eq_direc):
        os.mkdir(eq_direc)
        
    i = 0
    for file in os.listdir(processed_imgs):
        print('Equalizing photo: ',i)
        filename = f"{processed_imgs}/{i}.jpg"
        img_eq = cv2.imread(filename, 1)
        img_eq = cv2.cvtColor(img_eq, cv2.COLOR_HSV2RGB)
        R, G, B = cv2.split(img_eq)

        #apply CLAHE to each RGB channel
        clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8,8))
        output2_R = clahe.apply(R)
        output2_G = clahe.apply(G)
        output2_B = clahe.apply(B)
        
        #Merge the channels again
        img_eq = cv2.merge((output2_R, output2_G, output2_B))
        nameEq = f"{eq_direc}/{i}.jpg"
        cv2.imwrite(nameEq, img_eq)  # save the CLAHE-equalized result, not the untouched original
        i +=1
    
    shutil.rmtree(processed_imgs)
Example #6
 def gray_world(img: np.ndarray):
     b, g, r = cv2.split(img)[:3]
     b_mean, g_mean, r_mean = np.mean(b), np.mean(g), np.mean(r)
     k = (b_mean + g_mean + r_mean) / 3
     kb, kg, kr = k / b_mean, k / g_mean, k / r_mean
     return cv2.merge(
         (cv2.addWeighted(b, kb, 0, 0, 0), cv2.addWeighted(g, kg, 0, 0, 0), cv2.addWeighted(r, kr, 0, 0, 0)))
Example #7
def color():
    img = cv2.imread('Lenna.jpg')
    b_img,g_img,r_img = cv2.split(img)
    
    b_bin = histogram(b_img)
    g_bin = histogram(g_img)
    r_bin = histogram(r_img)

    b_img_eq = equalize(b_img,b_bin)
    g_img_eq = equalize(g_img,g_bin)
    r_img_eq = equalize(r_img,r_bin)

    b_bin_eq = histogram(b_img_eq)
    g_bin_eq = histogram(g_img_eq)
    r_bin_eq = histogram(r_img_eq)
    
    plt.subplot(2,3,1)
    plt.bar(np.arange(b_bin.shape[0]),b_bin,align = 'center',color = 'b')
    plt.subplot(2,3,4)
    plt.bar(np.arange(b_bin_eq.shape[0]),b_bin_eq,align = 'center',color = 'b')
    plt.subplot(2,3,2)

    plt.bar(np.arange(g_bin.shape[0]),g_bin,align = 'center',color = 'g')
    plt.subplot(2,3,5)
    plt.bar(np.arange(g_bin_eq.shape[0]),g_bin_eq,align = 'center',color = 'g')
    plt.subplot(2,3,3)
    
    plt.bar(np.arange(r_bin.shape[0]),r_bin,align = 'center',color = 'r')
    plt.subplot(2,3,6)
    plt.bar(np.arange(r_bin_eq.shape[0]),r_bin_eq,align = 'center',color = 'r')
    plt.show()
    # plt.savefig('color_hist_eq.png')
    plt.close()

    tmp = np.zeros_like(img)
    tmp[...,0] = b_img
    cv2.imwrite('b_img.jpg',tmp)
    tmp = np.zeros_like(img)
    tmp[...,1] = g_img
    cv2.imwrite('g_img.jpg',tmp)
    tmp = np.zeros_like(img)
    tmp[...,2] = r_img
    cv2.imwrite('r_img.jpg',tmp)

    tmp = np.zeros_like(img)
    tmp[...,0] = b_img_eq
    cv2.imwrite('b_img_eq.jpg',tmp)
    tmp = np.zeros_like(img)
    tmp[...,1] = g_img_eq
    cv2.imwrite('g_img_eq.jpg',tmp)
    tmp = np.zeros_like(img)
    tmp[...,2] = r_img_eq
    cv2.imwrite('r_img_eq.jpg',tmp)

    canvas = cv2.merge([b_img_eq,g_img_eq,r_img_eq])
    cv2.imwrite("bgr.jpg",canvas)
    ttt = cv2.imread("bgr.jpg")
    cv2.imshow("bgr", ttt)

    cv2.waitKey(0)
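histogram and equalize above are custom helpers rather than OpenCV calls. A plausible sketch (256-bin count plus CDF-based remapping); the names match the calls above, the implementation itself is an assumption:

import numpy as np

def histogram(channel):
    # 256-bin histogram of a uint8 channel
    return np.bincount(channel.ravel(), minlength=256)

def equalize(channel, bins):
    # classic histogram equalization: map intensities through the normalized CDF
    cdf = np.cumsum(bins).astype(np.float64)
    cdf = (cdf - cdf.min()) / (cdf.max() - cdf.min()) * 255.0
    return cdf[channel].astype(np.uint8)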
Example #8
def colorTransfer(src: str, ref: str, output: str, gamma: float):
    img_src = cv2.imread(src, cv2.IMREAD_ANYCOLOR | cv2.IMREAD_ANYDEPTH)
    tonemap = reinhard(img_src)
    cv2.imwrite("in1.jpg", tonemap)
    img_src = adjust_gamma(img_src, gamma)
    img_src = BGRtoLab(img_src)

    img_ref = cv2.imread(ref, cv2.IMREAD_ANYCOLOR | cv2.IMREAD_ANYDEPTH)
    tonemap = reinhard(img_ref)
    cv2.imwrite("in2.jpg", tonemap)
    img_ref = adjust_gamma(img_ref, gamma)
    img_ref = BGRtoLab(img_ref)

    mean_src, stddev_src = cv2.meanStdDev(img_src)
    mean_ref, stddev_ref = cv2.meanStdDev(img_ref)

    split_src = cv2.split(img_src)
    img_out = cv2.merge((computeColor(split_src[0], mean_src[0], stddev_src[0],
                                      mean_ref[0], stddev_ref[0]),
                         computeColor(split_src[1], mean_src[1], stddev_src[1],
                                      mean_ref[1], stddev_ref[1]),
                         computeColor(split_src[2], mean_src[2], stddev_src[2],
                                      mean_ref[2], stddev_ref[2])))
    img_out = LabtoRGB(img_out)
    img_out = adjust_gamma(img_out, 1. / gamma)

    cv2.imwrite(output, img_out)
    tonemap = logarithme(img_out)
    cv2.imwrite("out.jpg", tonemap)
Example #9
    def random_bright_image(self, image, brightness_range):
        """
            Randomly brighten the given image.
            The intent is to allow a model to generalize across images trained on different lighting levels.
            Parameters
            ----------
                image : ndim np.array
                    image to be brightened
                brightness_range : tuple of ints
                    specifies the range from within the brightness value (in pixels)
                    should be chosen 
            Returns
            -------
                brightened image as np.array

        """
        hsv = cv2.cvtColor(image, cv2.COLOR_RGB2HSV)
        h, s, v = cv2.split(hsv)

        start_range, end_range = brightness_range
        rand_val = random.randint(start_range, end_range)

        v = cv2.add(v, rand_val)  # cv2.add saturates uint8 values, so the clipping below is only a safeguard
        v[v > 255] = 255
        v[v < 0] = 0
        final_hsv = cv2.merge((h, s, v))

        image = cv2.cvtColor(final_hsv, cv2.COLOR_HSV2RGB)
        return np.copy(image)
Example #10
def img_fusion(img1, img2):
    print(img1.shape, img2.shape)
    # img1 = cv.resize(img1, (640, 480))
    # img2 = cv.resize(img2, (640, 480))
    # cv.imshow("img1", img1)
    print(img1.shape)
    # split the YCrCb (YUV-like) channels
    yuv1 = cv.cvtColor(img1, cv.COLOR_BGR2YCrCb)
    y1, u1, v1 = cv.split(yuv1)
    yuv2 = cv.cvtColor(img2, cv.COLOR_BGR2YCrCb)
    y2, u2, v2 = cv.split(yuv2)
    # cv.imshow("y1", y1)
    # cv.imshow("y2", y2)
    # equalize the Y channel
    clahe = cv.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8))
    y2 = clahe.apply(y2)
    # compress the Y pixel range
    img_pixels_range(y1, 128)
    img_pixels_range(y2, 128)
    cv.imshow("y1", y1)
    cv.imshow("y2", y2)
    # y3 = cv.add(y1, y2)
    y3 = cv.addWeighted(y1, 0.6, y2, 0.4, 0)
    cv.imshow("y3", y3)
    img3 = cv.merge([y3, u1, v1])
    img3 = cv.cvtColor(img3, cv.COLOR_YCrCb2BGR)
    cv.imshow("img3", img3)
    cv.imwrite(r".04-picture-fusion_images/opencv.jpg", img3)
Example #11
File: ssr.py  Project: EoralMilk/Learning
def singleScaleRetinex(img, size):
    # cv2.imshow('G', cv2.GaussianBlur(img, (size, size), 0))
    b_gray, g_gray, r_gray = cv2.split(img)
    Rb_ssr = ssr_c(b_gray, size)
    Rg_ssr = ssr_c(g_gray, size)
    Rr_ssr = ssr_c(r_gray, size)
    R = cv2.merge([Rb_ssr, Rg_ssr, Rr_ssr])
    return R
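ssr_c is the per-channel single-scale Retinex step and is not shown above. The usual formulation is log(I) minus log of a Gaussian-blurred I, rescaled to 0-255, so a hedged sketch might be:

import cv2
import numpy as np

def ssr_c(channel, size):
    # assumed single-scale Retinex on one channel: log image minus log of its blurred version
    channel = channel.astype(np.float64) + 1.0             # avoid log(0)
    blur = cv2.GaussianBlur(channel, (size, size), 0)
    retinex = np.log(channel) - np.log(blur)
    retinex = cv2.normalize(retinex, None, 0, 255, cv2.NORM_MINMAX)  # stretch to 0-255
    return retinex.astype(np.uint8)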
Example #12
 def display(self):
     if self.destroyedImg is not None:
         b, g, r = cv2.split(self.destroyedImg)
         img = Image.fromarray(cv2.merge((r, g, b)))
         tkImage = ImageTk.PhotoImage(image=img)
         canvas1.configure(image=tkImage)
         canvas1.image = tkImage
     if self.mask is not None:
         tkImage = ImageTk.PhotoImage(image=Image.fromarray(self.mask))
         canvas2.configure(image=tkImage)
         canvas2.image = tkImage
     if self.repaired is not None:
         b, g, r = cv2.split(self.repaired)
         img = Image.fromarray(cv2.merge((r, g, b)))
         tkImage = ImageTk.PhotoImage(image=img)
         canvas3.configure(image=tkImage)
         canvas3.image = tkImage
Example #13
 def whiten(img: np.ndarray):
     # whitening: zero mean, unit variance per channel
     channels = cv2.split(img)
     new_channels = []
     for channel in channels:
         mean_ = np.mean(channel.ravel())
         std_ = np.std(channel.ravel())
         new_channels.append((channel - mean_) / std_)
     return cv2.merge(new_channels)
Example #14
def increase_sat(img):
    img = cv2.cvtColor(img, cv2.COLOR_RGB2HSV).astype('float32')
    h, s, v = cv2.split(img)
    s = s + 200
    s = np.clip(s, 0, 255)
    imghsv = cv2.merge((h, s, v))
    imghsv = cv2.cvtColor(imghsv.astype('uint8'), cv2.COLOR_HSV2RGB)

    return imghsv
Example #15
def increase_brightness(img, value=30):
    hsv = cv2.cvtColor(img, cv2.COLOR_RGB2HSV)
    h, s, v = cv2.split(hsv)
    lim = 255 - value
    v[v > lim] = 255
    v[v <= lim] += value
    final_hsv = cv2.merge((h, s, v))
    img = cv2.cvtColor(final_hsv, cv2.COLOR_HSV2BGR)  # note: the input was treated as RGB above, so this returns BGR ordering
    return img
Example #16
 def spin(self, img, angle=1):
     if angle % 4 == 1:
         b, g, r = cv2.split(img)
         return cv2.merge((b.T, g.T, r.T))
     elif angle % 4 == 2:
         return self.mirror(img, 2)
     elif angle % 4 == 3:
         return self.mirror(self.spin(img, 1), 1)
     else:
         return img
Example #17
def img_ch_split(img):
    r, g, b = cv.split(img)  # note: for a BGR image cv.split returns (b, g, r), so these names are swapped
    # cv.imshow("r", r)
    # cv.imshow("g", g)
    # cv.imshow("b", b)
    img[:, :, 1] = 0
    img[:, :, 2] = 0
    cv.imshow("ch0", img)
    bgr = cv.merge([b, g, r])
    cv.imshow("bgr", bgr)
Example #18
    def preprocess(self, image):
        # split the image into channels
        B, G, R = cv2.split(image.astype('float32'))

        # subtract the means for each channel
        R -= self.rmean
        G -= self.gmean
        B -= self.bmean

        # merge the channels back together and return the image
        return cv2.merge([B, G, R])
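This method apparently lives on a small preprocessor object holding per-channel means. A hedged usage sketch follows; the class name MeanPreprocessor, the constructor, and the ImageNet-style mean values are assumptions, only preprocess() itself comes from the example above:

import cv2

class MeanPreprocessor:
    # hypothetical wrapper; only preprocess() is taken from the example above
    def __init__(self, rmean, gmean, bmean):
        self.rmean, self.gmean, self.bmean = rmean, gmean, bmean

    def preprocess(self, image):
        B, G, R = cv2.split(image.astype('float32'))
        R -= self.rmean
        G -= self.gmean
        B -= self.bmean
        return cv2.merge([B, G, R])

mp = MeanPreprocessor(123.68, 116.779, 103.939)   # ImageNet-style means, purely illustrative
normalized = mp.preprocess(cv2.imread("example.jpg"))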
Example #19
def base():
    if request.method == 'GET':
        return "<h1>Crop AI</h1>"
    if request.method == 'POST':
        if 'InputImg' not in request.files:
            print("No file part")
            return redirect(request.url)
        file = request.files['InputImg']
        if file.filename == '':
            print('No selected file')
            return redirect(request.url)
        if file and allowed_file(file.filename):
            filestr = request.files['InputImg'].read()
            img = cv2.imdecode(np.frombuffer(filestr, np.uint8),
                               cv2.IMREAD_COLOR)  # np.frombuffer replaces the deprecated np.fromstring

            img = cv2.resize(img, (96, 96), interpolation=cv2.INTER_AREA)

            hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)

            # find the green color
            mask_green = cv2.inRange(hsv, (36, 0, 0), (86, 255, 255))
            # find the brown color
            mask_brown = cv2.inRange(hsv, (8, 60, 20), (145, 255, 255))
            # find the yellow color in the leaf
            mask_yellow = cv2.inRange(hsv, (5, 42, 143), (145, 255, 255))
            # find the black color in the leaf
            mask_black = cv2.inRange(hsv, (100, 100, 100), (127, 127, 127))

            # find any of the four colors(green or brown or yellow or black) in the image
            mask = cv2.bitwise_or(mask_green, mask_brown)
            mask = cv2.bitwise_or(mask, mask_yellow)
            mask = cv2.bitwise_or(mask, mask_black)

            # Bitwise-AND mask and original image
            res = cv2.bitwise_and(img, img, mask=mask)

            # Gaussian blur with 3x3 kernel
            blur_img = cv2.GaussianBlur(res, (3, 3), 0)

            # Histogram equalization
            B, G, R = cv2.split(blur_img)
            output_R = cv2.equalizeHist(R)
            output_G = cv2.equalizeHist(G)
            output_B = cv2.equalizeHist(B)
            img = cv2.merge((output_R, output_G, output_B))

            img = img / 255

            img_array = np.expand_dims(img, axis=0)

            output = label_dictionary[model.predict(img_array)[0].argmax()]

        return output
Example #20
def identify_rivers(image):
    image_hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
    lower_blue = np.array([90, 20, 38])
    upper_blue = np.array([110, 255, 255])
    rivers_mask = cv2.inRange(image_hsv, lower_blue, upper_blue)
    rivers_bgr = cv2.bitwise_and(image, image, mask=rivers_mask)
    rivers_gray = cv2.cvtColor(rivers_bgr, cv2.COLOR_BGR2GRAY)
    _, rivers_alpha = cv2.threshold(rivers_gray, 0, 255, cv2.THRESH_BINARY)
    rivers_b, rivers_g, rivers_r = cv2.split(rivers_bgr)
    rivers_bgra = [rivers_b, rivers_g, rivers_r, rivers_alpha]
    return cv2.merge(rivers_bgra)  # the channel count comes from the list, so no extra argument is needed
Example #21
 def reshape(self,blocks):
     _,gImage,rImage = cv2.split(self.image)
     bImage = []
     for chunk in self.chunkRows(blocks, self.col // 8):  # integer division; "/" would give a float in Python 3
         for numRow in range(8):
             for block in chunk:
                 bImage.extend(block[numRow])
     bImage = np.array(bImage).reshape(self.row,self.col)
     bImage = np.uint8(bImage)
     img = cv2.merge((bImage,gImage,rImage))
     return img
Example #22
def remove_background(image):
    """
    Removes background from image
    """
    # Parameters.
    BLUR = 21
    CANNY_THRESH_1 = 10
    CANNY_THRESH_2 = 30
    MASK_DILATE_ITER = 10
    MASK_ERODE_ITER = 10
    MASK_COLOR = (0.0, 0.0, 1.0)

    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

    # Edge detection.
    edges = cv2.Canny(gray, CANNY_THRESH_1, CANNY_THRESH_2)
    edges = cv2.dilate(edges, None)
    edges = cv2.erode(edges, None)

    # Find contours in edges, sort by area
    contour_info = []
    contours, _ = cv2.findContours(edges, cv2.RETR_LIST, cv2.CHAIN_APPROX_NONE)

    for c in contours:
        contour_info.append((
            c,
            cv2.isContourConvex(c),
            cv2.contourArea(c),
        ))
    contour_info = sorted(contour_info, key=lambda c: c[2], reverse=True)
    max_contour = contour_info[0]

    # Create empty mask.
    mask = np.zeros(edges.shape)
    cv2.fillConvexPoly(mask, max_contour[0], (255))

    # Smooth mask and blur it.
    mask = cv2.dilate(mask, None, iterations=MASK_DILATE_ITER)
    mask = cv2.erode(mask, None, iterations=MASK_ERODE_ITER)
    mask = cv2.GaussianBlur(mask, (BLUR, BLUR), 0)
    mask_stack = np.dstack([mask] * 3)

    # Blend masked img into MASK_COLOR background
    mask_stack = mask_stack.astype('float32') / 255.0
    image = image.astype('float32') / 255.0

    masked = (mask_stack * image) + ((1 - mask_stack) * MASK_COLOR)
    masked = (masked * 255).astype('uint8')

    c_red, c_green, c_blue = cv2.split(image)
    img_a = cv2.merge((c_red, c_green, c_blue, mask.astype('float32') / 255.0))

    return img_a * 255
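Because the function returns a float BGRA array rescaled to 0-255, a typical way to persist the result is to cast it and write a PNG so the alpha channel survives (the file names are illustrative):

import cv2
import numpy as np

image = cv2.imread("input.jpg")
result = remove_background(image)
cv2.imwrite("output.png", result.astype(np.uint8))  # PNG keeps the 4th (alpha) channel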
Example #23
def augment_hsv(img, hgain=0.5, sgain=0.5, vgain=0.5):
    r = np.random.uniform(-1, 1, 3) * [hgain, sgain, vgain] + 1  # random gains
    hue, sat, val = cv2.split(cv2.cvtColor(img, cv2.COLOR_BGR2HSV))
    dtype = img.dtype  # uint8

    x = np.arange(0, 256, dtype=np.int16)
    lut_hue = ((x * r[0]) % 180).astype(dtype)
    lut_sat = np.clip(x * r[1], 0, 255).astype(dtype)
    lut_val = np.clip(x * r[2], 0, 255).astype(dtype)

    img_hsv = cv2.merge((cv2.LUT(hue, lut_hue), cv2.LUT(sat, lut_sat), cv2.LUT(val, lut_val))).astype(dtype)
    cv2.cvtColor(img_hsv, cv2.COLOR_HSV2BGR, dst=img)  # no return needed
Example #24
def adapt_hist_equilization(img):

    #img = cv2.imread("test_001.jpg")
    R, G, B = cv2.split(img)

    output1_R = cv2.equalizeHist(R)
    output1_G = cv2.equalizeHist(G)
    output1_B = cv2.equalizeHist(B)

    equ = cv2.merge((output1_R, output1_G, output1_B))
    res = np.hstack((img, equ))  #stacking images side-by-side
    return res
Example #25
 def _clahe_rgb(rgb_array, clip_limit=2.0, tile_grid_size=(8, 8)):
     # convert RGB to LAB
     lab = cv2.cvtColor(rgb_array, cv2.COLOR_RGB2LAB)
     # apply clahe on LAB's L component.
     lab_planes = list(cv2.split(lab))  # list() so the L plane can be replaced (newer OpenCV returns a tuple)
     clahe = cv2.createCLAHE(clipLimit=clip_limit,
                             tileGridSize=tile_grid_size)
     lab_planes[0] = clahe.apply(lab_planes[0])
     lab = cv2.merge(lab_planes)
     # map LAB back to RGB.
     rgb = cv2.cvtColor(lab, cv2.COLOR_LAB2RGB)
     return rgb
Example #26
 def pre_dispose(self):  # returns the average color of one face (HSV/RGB)
         img = cv2.GaussianBlur(self.frame,(7,7),0)
         b,g,r = cv2.split(img)
         avgb = cv2.mean(b)[0]
         avgg = cv2.mean(g)[0]
         avgr = cv2.mean(r)[0]
         k = (avgb+avgg+avgr)/3
         kb = k/avgb
         kg = k/avgg
         kr = k/avgr
         b = cv2.addWeighted(src1=b, alpha=kb, src2=0, beta=0, gamma=0)
         g = cv2.addWeighted(src1=g, alpha=kg, src2=0, beta=0, gamma=0)
         r = cv2.addWeighted(src1=r, alpha=kr, src2=0, beta=0, gamma=0)
         img = cv2.merge([b,g,r])
         img_hsv = cv2.cvtColor(img,cv2.COLOR_BGR2HSV)
         h,s,v = cv2.split(img_hsv)
         v = cv2.equalizeHist(v)
         img_hsv =  cv2.merge([h,s,v])
         self.img_hsv = img_hsv
         img =  cv2.cvtColor(img_hsv,cv2.COLOR_HSV2BGR)
         self.img_bgr = img
         self.frame = img
Example #27
def export(img, output_path: str, size: int = 0, grayscale: bool = False):
    if size > 0:
        img = cv2.resize(img, (size, size), interpolation=cv2.INTER_AREA)

    if grayscale:
        img_gray = cv2.split(cv2.cvtColor(img, cv2.COLOR_BGR2GRAY))
        _, _, _, alpha = cv2.split(img)  # assumes a 4-channel (BGRA) input image
        luminescence = img_gray[0]
        img = cv2.merge((luminescence, luminescence, luminescence, alpha))

    os.makedirs(os.path.dirname(output_path), exist_ok=True)

    cv2.imwrite(output_path, img)
Example #28
    def changeBrightness(self, img, value):
        """ This function will take an image (img) and the brightness
            value. It will perform the brightness change using OpenCv
            and after split, will merge the img and return it.
        """

        hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
        h, s, v = cv2.split(hsv)
        lim = 255 - value
        v[v > lim] = 255
        v[v <= lim] += value
        final_hsv = cv2.merge((h, s, v))
        img = cv2.cvtColor(final_hsv, cv2.COLOR_HSV2BGR)
        return img
Example #29
    def __init__(self, file, mode=cv2.IMREAD_COLOR):
        self.file = file
        try:
            self.image = self.loadImg(file,mode)
        except:
            print("Load image error!")

        print(self.image.shape)
        #assert self.image != None #"Load image error!"
        
        """#change b g r channel order avoid color not correct when plot"""
        if mode == cv2.IMREAD_COLOR:  
            b,g,r = cv2.split(self.image)       # get b,g,r
            self.image = cv2.merge([r,g,b])     # switch it to rgb
Example #30
def equalized(image: np.ndarray) -> np.ndarray:
    """Equalizes the image using CLAHE."""
    clahe = cv2.createCLAHE(clipLimit=4)

    if is_colored(image):
        image = cv2.cvtColor(image, cv2.COLOR_BGR2LAB)
        lab_planes = list(cv2.split(image))  # list() so the L plane can be replaced (newer OpenCV returns a tuple)
        lab_planes[0] = clahe.apply(lab_planes[0])
        image = cv2.merge(lab_planes)
        image = cv2.cvtColor(image, cv2.COLOR_LAB2BGR)
    else:
        image = clahe.apply(image)

    return image
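is_colored is referenced but not defined here; a minimal sketch of the assumed check (a three-channel array counts as a color image) is:

import numpy as np

def is_colored(image: np.ndarray) -> bool:
    # assumed helper: treat 3-channel arrays as color images
    return image.ndim == 3 and image.shape[2] == 3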