Example No. 1
import os
import shutil

import cv2


def Histogram_EQ(processed_imgs, eq_direc):

    if not os.path.exists(eq_direc):
        os.mkdir(eq_direc)

    i = 0
    for file in os.listdir(processed_imgs):
        print('Equalizing photo:', i)
        filename = f"{processed_imgs}/{i}.jpg"
        img_eq = cv2.imread(filename, 1)  # read as BGR
        B, G, R = cv2.split(img_eq)

        # apply CLAHE to each colour channel
        clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8))
        output2_R = clahe.apply(R)
        output2_G = clahe.apply(G)
        output2_B = clahe.apply(B)

        # merge the channels again, keeping BGR order for cv2.imwrite
        img_eq = cv2.merge((output2_B, output2_G, output2_R))
        nameEq = f"{eq_direc}/{i}.jpg"

        cv2.imwrite(nameEq, img_eq)  # save the equalized image, not the original
        i += 1

    shutil.rmtree(processed_imgs)
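Note: applying CLAHE to each colour channel independently, as above, can shift hues, since the channels get stretched by different amounts. Several of the later examples (Nos. 8, 10, 14 and 17) instead convert to LAB and equalize only the lightness channel to avoid this.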
Example No. 2
def img_fusion(img1, img2):
    print(img1.shape, img2.shape)
    # img1 = cv.resize(img1, (640, 480))
    # img2 = cv.resize(img2, (640, 480))
    # cv.imshow("img1", img1)
    print(img1.shape)
    # split into luma/chroma (YCrCb) channels
    yuv1 = cv.cvtColor(img1, cv.COLOR_BGR2YCrCb)
    y1, u1, v1 = cv.split(yuv1)
    yuv2 = cv.cvtColor(img2, cv.COLOR_BGR2YCrCb)
    y2, u2, v2 = cv.split(yuv2)
    # cv.imshow("y1", y1)
    # cv.imshow("y2", y2)
    # equalize the Y (luma) channel
    clahe = cv.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8))
    y2 = clahe.apply(y2)
    # compress the Y pixel range
    img_pixels_range(y1, 128)
    img_pixels_range(y2, 128)
    cv.imshow("y1", y1)
    cv.imshow("y2", y2)
    # y3 = cv.add(y1, y2)
    y3 = cv.addWeighted(y1, 0.6, y2, 0.4, 0)
    cv.imshow("y3", y3)
    img3 = cv.merge([y3, u1, v1])
    img3 = cv.cvtColor(img3, cv.COLOR_YCrCb2BGR)
    cv.imshow("img3", img3)
    cv.imwrite(r".04-picture-fusion_images/opencv.jpg", img3)
class RealTimeEmotionDetector:
    CLAHE = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8))

    vidCapture = None

    def __init__(self, classifier_model: ImageClassifier):
        self.__init_video_capture(camera_idx=0,
                                  frame_w=FRAME_WIDTH,
                                  frame_h=FRAME_HEIGHT)
        self.classifier = classifier_model

    def __init_video_capture(self, camera_idx: int, frame_w: int,
                             frame_h: int):
        self.vidCapture = cv2.VideoCapture(camera_idx)
        self.vidCapture.set(cv2.CAP_PROP_FRAME_WIDTH, frame_w)
        self.vidCapture.set(cv2.CAP_PROP_FRAME_HEIGHT, frame_h)

    def read_frame(self) -> np.ndarray:
        ret, frame = self.vidCapture.read()  # ret flags whether the grab succeeded
        return frame

    def transform_img(self, img: np.ndarray) -> np.ndarray:
        # convert the input image to gray-scale, then apply CLAHE
        gray_img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        enhanced_img = self.CLAHE.apply(gray_img)  # contrast enhancement, not a resize
        return enhanced_img

    def execute(self, wait_key_delay=33, quit_key='q', frame_period_s=0.75):
        frame_cnt = 0
        predicted_labels = ''
        old_txt = None
        rectangles = [(0, 0, 0, 0)]
        landmark_points_list = [[(0, 0)]]
        while cv2.waitKey(delay=wait_key_delay) != ord(quit_key):
            frame_cnt += 1

            frame = self.read_frame()
            if frame_cnt % int(frame_period_s * 100) == 0:  # re-classify roughly every frame_period_s * 100 frames
                frame_cnt = 0
                predicted_labels = self.classifier.classify(
                    img=self.transform_img(img=frame))
                rectangles = self.classifier.extract_face_rectangle(img=frame)
                landmark_points_list = self.classifier.extract_landmark_points(
                    img=frame)
            for lbl, rectangle, lm_points in zip(predicted_labels, rectangles,
                                                 landmark_points_list):
                draw_face_rectangle(BoundingBox(*rectangle), frame)
                draw_landmark_points(points=lm_points, img=frame)
                write_label(rectangle[0], rectangle[1], label=lbl, img=frame)

                if old_txt != predicted_labels:
                    print('[INFO] Predicted Labels:', predicted_labels)
                    old_txt = predicted_labels

            cv2.imshow('Emotion Detection - Mimics', frame)

        cv2.destroyAllWindows()
        self.vidCapture.release()
class CameraClassifier:
    CLAHE = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8))

    vidCapture = None

    def __init__(self, classifier_model: Classifier):
        self.__init_video_capture(camera_idx=0,
                                  frame_w=FRAME_WIDTH,
                                  frame_h=FRAME_HEIGHT)
        self.classifier = classifier_model

    def __init_video_capture(self, camera_idx: int, frame_w: int,
                             frame_h: int):
        self.vidCapture = cv2.VideoCapture(camera_idx)
        self.vidCapture.set(cv2.CAP_PROP_FRAME_WIDTH, frame_w)
        self.vidCapture.set(cv2.CAP_PROP_FRAME_HEIGHT, frame_h)

    def read_frame(self) -> np.ndarray:
        ret, frame = self.vidCapture.read()  # ret flags whether the grab succeeded
        return frame

    def transform_img(self, img: np.ndarray) -> np.ndarray:
        # convert the input image to gray-scale, then apply CLAHE
        gray_img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        enhanced_img = self.CLAHE.apply(gray_img)  # contrast enhancement, not a resize
        return enhanced_img

    def execute(self, wait_key_delay=33, quit_key='q'):
        frame_cnt = 0
        displayed_txt = ''
        old_txt = None
        x, y, w, h = 0, 0, 0, 0
        landmark_points = [(0, 0)]
        while cv2.waitKey(delay=wait_key_delay) != ord(quit_key):
            frame_cnt += 1

            frame = self.read_frame()
            if frame_cnt % 75 == 0:
                frame_cnt = 0
                displayed_txt = self.classifier.classify(
                    img=self.transform_img(img=frame))
                x, y, w, h = self.classifier.extract_face_rectangle(img=frame)
                landmark_points = self.classifier.extract_landmark_points(
                    img=frame)

            draw_face_rectangle(BoundingBox(x, y, w, h), frame)
            draw_landmark_points(points=landmark_points, img=frame)
            write_label(x, y, label=displayed_txt, img=frame)

            if old_txt != displayed_txt:
                print('>', displayed_txt)
                old_txt = displayed_txt

            cv2.imshow('Emotion Detection - Mimics', frame)

        cv2.destroyAllWindows()
        self.vidCapture.release()
Example No. 5
def data_preparation(path, image_size):
    normal_images = list()
    for img_name in glob.glob(pathname=path + '/normal/*'):
        img = load_img(path=img_name, color_mode='grayscale')
        img = img_to_array(img=img, data_format='channels_last')
        normal_images.append(img)

    normal_images = np.array(normal_images)
    print('number of normal chest xrays:', len(normal_images))

    covid_images = list()
    for img_name in glob.glob(pathname=path + '/covid19/*'):
        img = load_img(path=img_name, color_mode='grayscale')
        img = img_to_array(img=img, data_format='channels_last')
        covid_images.append(img)

    covid_images = np.array(covid_images)
    print('number of covid chest xrays:', len(covid_images))

    normal_labels = [0 for _ in range(len(normal_images))]
    covid_labels = [1 for _ in range(len(covid_images))]

    X = np.concatenate((covid_images, normal_images))
    y = np.array(covid_labels + normal_labels)

    X = np.array([
        cv2.resize(image, dsize=image_size, interpolation=cv2.INTER_CUBIC)
        for image in X
    ])
    X = np.array([np.expand_dims(a=image, axis=-1) for image in X])
    X = X.astype(dtype=np.uint8)

    # apply image enhancements and concat with the original image
    X_beasf = np.array([BEASF(image=image, gamma=1.5) for image in X])
    X_clahe = np.array([
        cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8)).apply(image)
        for image in X
    ])
    X_clahe = np.array([np.expand_dims(a=image, axis=-1) for image in X_clahe])
    X = np.concatenate((X, X_beasf, X_clahe), axis=-1)

    X = np.array([X[idx] / 255. for idx in range(len(X))])

    print('number of total dataset images:', len(X))
    print('number of total dataset labels:', len(y))
    print('dataset shape:', X.shape)
    rnd_idx = np.random.choice(a=len(X), size=None)
    plt.imshow(X=X[rnd_idx].squeeze(), cmap='gray')
    plt.axis('off')
    plt.title(label='a random image from the dataset')
    plt.show()

    X_train, X_test, y_train, y_test = train_test_split(X,
                                                        y,
                                                        test_size=0.2,
                                                        random_state=18)
    return X_train, X_test, y_train, y_test
Example No. 6
def clahe_equalized(images):
    assert (len(images.shape) == 4)
    assert (images.shape[3] == 1)

    clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8))
    images_equalized = np.empty(images.shape)
    for i in range(images.shape[0]):
        images_equalized[i, :, :, 0] = clahe.apply(
            np.array(images[i, :, :, 0], dtype=np.uint8))

    return images_equalized
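A quick usage sketch for the batch helper above; the data here is synthetic, just to show the expected 4-D layout:

import numpy as np

# hypothetical batch: ten 64x64 grayscale images with a trailing channel axis
batch = np.random.randint(0, 256, size=(10, 64, 64, 1), dtype=np.uint8)
equalized = clahe_equalized(batch)
print(equalized.shape)  # (10, 64, 64, 1)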
def CLAHE(path):
    """
    CV2 CLAHE function.
    :param path:
    :return:
    """
    img = cv.imread(path, 0)
    # create a CLAHE object (Arguments are optional).
    clahe = cv.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8))
    cl1 = clahe.apply(img)
    handler.plotFigs([img, cl1])
Example No. 8
 def _clahe_rgb(rgb_array, clip_limit=2.0, tile_grid_size=(8, 8)):
     # convert RGB to LAB
     lab = cv2.cvtColor(rgb_array, cv2.COLOR_RGB2LAB)
     # apply clahe on LAB's L component.
     lab_planes = list(cv2.split(lab))  # split() may return a tuple; make it mutable
     clahe = cv2.createCLAHE(clipLimit=clip_limit,
                             tileGridSize=tile_grid_size)
     lab_planes[0] = clahe.apply(lab_planes[0])
     lab = cv2.merge(lab_planes)
     # map LAB back to RGB.
     rgb = cv2.cvtColor(lab, cv2.COLOR_LAB2RGB)
     return rgb
Example No. 9
def pre_processing(image_path):
    image = cv2.imread(image_path)
    image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

    # remove normalization.

    clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8))
    image = clahe.apply(np.array(image, dtype=np.uint8))

    # remove gamma transform.

    return image
Example No. 10
def equalized(image: np.ndarray) -> np.ndarray:
    """Equalizes the image using CLAHE."""
    clahe = cv2.createCLAHE(clipLimit=4)

    if is_colored(image):
        image = cv2.cvtColor(image, cv2.COLOR_BGR2LAB)
        lab_planes = list(cv2.split(image))  # split() may return a tuple; make it mutable
        lab_planes[0] = clahe.apply(lab_planes[0])
        image = cv2.merge(lab_planes)
        image = cv2.cvtColor(image, cv2.COLOR_LAB2BGR)
    else:
        image = clahe.apply(image)

    return image
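is_colored is a project-local helper; a minimal sketch under the assumption that it simply tests for three channels:

import numpy as np

def is_colored(image: np.ndarray) -> bool:
    # treat 3-channel arrays as colour images
    return image.ndim == 3 and image.shape[2] == 3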
Example No. 11
def correct_brightness(img,
                       threshold=0.05,
                       min_white=150,
                       max_black=50,
                       _debug=True):
    blurred = cv2.medianBlur(img, 3)
    lab = cv2.cvtColor(blurred, cv2.COLOR_BGR2LAB)

    # adaptive histogram equalization
    lab[:, :, 0] = cv2.createCLAHE(clipLimit=0.7,
                                   tileGridSize=(8, 8)).apply(lab[:, :, 0])

    # calculate white and black point
    hist = calc_hist(lab[:, :, 0])
    cutoff = np.max(hist) * threshold
    white = np.max(np.arange(256)[hist > cutoff]) + 10
    white = max(white, min_white)
    black = np.min(np.arange(256)[hist > cutoff]) / 3
    black = min(black, max_black)

    # adjust lightness
    val_black = np.arange(0, 256) - black
    val_white = val_black / (white - black) * 255
    table = np.clip(val_white, 0, 255).astype(np.uint8)
    lab[:, :, 0] = cv2.LUT(lab[:, :, 0], table)

    # increase saturation
    sat = 127 * 0.15  # percent
    val_sat = (np.arange(0, 256) - sat) / (255 - 2 * sat) * 255
    table = np.clip(val_sat, 0, 255).astype(np.uint8)
    lab[:, :, 1] = cv2.LUT(lab[:, :, 1], table)
    lab[:, :, 2] = cv2.LUT(lab[:, :, 2], table)

    output = cv2.cvtColor(lab, cv2.COLOR_LAB2BGR)

    if _debug:
        import matplotlib.pyplot as plt
        plt.figure()
        plt.plot(hist)
        plt.plot(calc_hist(cv2.cvtColor(output, cv2.COLOR_BGR2LAB)[:, :, 0]))
        show_images([img, output], show=False)

    return output
Example No. 12
    def gotoimg(self, value):
        try:
            if not isinstance(value, int):
                raise TypeError("value is not of a type int")

            self.index = self.nextframe(imgnum=value)
            self.img = cv2.imread(self.flist[self.index], 0)
            self.clahe = cv2.createCLAHE(clipLimit=5.0, tileGridSize=(8, 8))
            self.cl = self.clahe.apply(self.img)
            self.axis.imshow(self.cl, 'gray')
            self.axis.axis('off')
            self.axis.set_title(os.path.basename(self.flist[self.index]),
                                color='white',
                                y=1.05,
                                fontsize=10)
            self.dynamic_canvas.draw()
        except BaseException as ex:
            QtWidgets.QMessageBox.critical(self, "Exception ocurred",
                                           "{0}".format(ex),
                                           QtWidgets.QMessageBox.Ok)
            raise ex
Example No. 13
def clahe(img, disp=False):
    """
    Apply CLACHE algorithm to image
    :param img: Input image of RGB format
    :param disp: Control display flag
    :output: filtered image
    """
    res = []
    clahe = cv2.createCLAHE(18, tileGridSize=(21, 21))
    for channel in np.dsplit(img, img.shape[-1]):
        res.append(clahe.apply(channel))
    res = np.dstack(res)

    k = 5
    t = 5
    res = cv2.GaussianBlur(res, (k, k), 0)  # smooth with a Gaussian kernel
    res = cv2.bilateralFilter(res, t, 75, 75)  # edge-preserving smoothing

    if disp:
        cv2.imshow("result", res)
    return res
Example No. 14
def random_brightness(image):
    """
    Randomly adjust brightness of the image.
    """

    # -----Converting image to LAB Color model-----------------------------------
    lab = cv2.cvtColor(image, cv2.COLOR_BGR2LAB)

    # -----Splitting the LAB image to different channels-------------------------
    l, a, b = cv2.split(lab)

    # -----Applying CLAHE to L-channel-------------------------------------------
    clahe = cv2.createCLAHE(clipLimit=random.uniform(1, 5),
                            tileGridSize=(8, 8))
    cl = clahe.apply(l)

    # -----Merge the CLAHE enhanced L-channel with the a and b channel-----------
    limg = cv2.merge((cl, a, b))

    # -----Converting image from LAB Color model back to BGR---------------------
    final = cv2.cvtColor(limg, cv2.COLOR_LAB2BGR)
    return final
Example No. 15
def clahe_tune(img):
    """
    Apply CLACHE algorithm to image with trackbars to tune
    :param img: Input image of RGB format
    :output: filtered image
    """
    res = []
    tg = cv2.getTrackbarPos("tg", "track") + 1
    clahe = cv2.createCLAHE(clipLimit=cv2.getTrackbarPos("clip", "track"),
                            tileGridSize=(tg, tg))
    for channel in np.dsplit(img, img.shape[-1]):
        res.append(clahe.apply(channel))
    res = np.dstack(res)

    k = max(1, 2 * cv2.getTrackbarPos("kernel", "track") - 1)  # kernel size must stay positive and odd
    t = cv2.getTrackbarPos("t", "track")

    res = cv2.GaussianBlur(res, (k, k), 0)  # smooth with a Gaussian kernel
    res = cv2.bilateralFilter(res, t, 75, 75)  # edge-preserving smoothing

    cv2.imshow("result", res)
    return res
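clahe_tune assumes a window named "track" whose four trackbars already exist. A minimal setup sketch (the initial values and ranges are assumptions):

cv2.namedWindow("track")
cv2.createTrackbar("clip", "track", 2, 40, lambda v: None)    # CLAHE clip limit
cv2.createTrackbar("tg", "track", 7, 32, lambda v: None)      # tile grid size minus one
cv2.createTrackbar("kernel", "track", 3, 15, lambda v: None)  # Gaussian kernel half-size
cv2.createTrackbar("t", "track", 5, 15, lambda v: None)       # bilateral filter diameter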
Example No. 16
def histogram():

    inPath = "DW_202006/Fail_0630_14/"
    outPath = "DW_202006/Fail_0630_14_Result/"

    fileList = os.listdir(inPath)

    for file in fileList:
        imgName = inPath + file
        img = cv2.imread(imgName, cv2.IMREAD_GRAYSCALE)
        #print(imgName)
        #cv2.imshow('gray', img)

        # apply CLAHE
        clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8))
        # create a CLAHE object with the factory function

        claheImg = clahe.apply(img)
        # pass the source image to the CLAHE object's apply() method to obtain the equalized image

        #cv2.imshow('CLAHE', claheImg)

        outputFilename = outPath + file
        cv2.imwrite(outputFilename, claheImg)
Example No. 17
def normlise_intensity(image):
    """normalise the intensity of the image.

    using an adaptive method convert the image to LAB and normalise the
    intensity channel before merging and converting back to BGR.

    :param image: image to be normalised
    :type image: cv2 image
    :return: normalised image
    :rtype: cv2 image
    """
    # convert to LAB
    lab = convert.bgr_to_lab(image)
    # split into individual channels
    l, a, b = cv2.split(lab)

    # normalise the intensity
    clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8))
    l = clahe.apply(l)

    # merge the image back together
    lab_img = cv2.merge([l, a, b])
    # convert back to BGR and return it
    return convert.lab_to_bgr(lab_img)
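convert is a project-local wrapper module; plausible minimal equivalents of the two calls used here:

import cv2

def bgr_to_lab(image):
    # thin wrapper around OpenCV's BGR -> LAB conversion
    return cv2.cvtColor(image, cv2.COLOR_BGR2LAB)

def lab_to_bgr(image):
    # inverse conversion back to BGR
    return cv2.cvtColor(image, cv2.COLOR_LAB2BGR)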
Example No. 18
def clahe_img(img):
    clahe = cv.createCLAHE(clipLimit=4, tileGridSize=(8, 8))  # create the CLAHE object
    imc = clahe.apply(img)
    return imc
Example No. 19
        image_data_array = cv2.warpAffine(
            image_data_array,
            rot,
            image_data_array.shape[1::-1],
            flags=cv2.INTER_LINEAR)  # rotate image
        mask = cv2.warpAffine(mask,
                              rot,
                              mask.shape[1::-1],
                              flags=cv2.INTER_LINEAR)  # rotate mask

        #----------------------------------------------------------------------------
        # Median filtering and CLAHE
        #----------------------------------------------------------------------------
        image_data_array = cv2.medianBlur(image_data_array,
                                          5)  # apply median blur

        clahe = cv2.createCLAHE(clipLimit=2,
                                tileGridSize=(8, 8))  # create CLAHE filter
        image_data_array = clahe.apply(image_data_array)  # apply CLAHE

        #----------------------------------------------------------------------------
        #Resizing
        #----------------------------------------------------------------------------
        image_resized = cv2.resize(image_data_array, (410, 308),
                                   interpolation=cv2.INTER_AREA)
        mask_resized = cv2.resize(
            mask, (410, 308),
            interpolation=cv2.INTER_AREA)  # resize mask to fit final image

        #----------------------------------------------------------------------------
        # Gaussian Blur
        #----------------------------------------------------------------------------
        image_resized = cv2.GaussianBlur(image_resized, (3, 3), 0)
Example No. 20
def proposed_sys():
    # grab a reference to the image panels
    global panelG

    # open a file chooser dialog and allow the user to select an input
    # image
    path = filedialog.askopenfilename()
    # Running Path
    path_out = '/home/anibe/Desktop/augustine/'

    bt_path_in = path
    bt_path_out = path_out + 'combined.png'

    # ensure a file path was selected; bail out early otherwise
    if len(path) == 0:
        return

    img = cv2.imread(bt_path_in)[:, :, 0]
    # create a CLAHE object (arguments are optional)
    clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8))
    cl1 = clahe.apply(img)
    homo_filter = HomomorphicFilter(a=0.75, b=1.25)
    img_filtered = homo_filter.filter(I=img, filter_params=[30, 2])
    both_calc = cv2.add(cl1, img_filtered.astype(np.uint8))  # saturating add avoids uint8 wrap-around
    cv2.imwrite(bt_path_out, both_calc)

    # convert the image to PIL format...
    edged = Image.fromarray(both_calc)

    # compute the entropy of the combined image
    def calcEntropy(both_calc):
        entropy = []
        hist = cv2.calcHist([both_calc], [0], None, [256], [0, 255])
        total_pixel = both_calc.shape[0] * both_calc.shape[1]
        for item in hist:
            probability = item / total_pixel
            if probability == 0:
                en = 0
            else:
                en = -1 * probability * (np.log(probability) / np.log(2))
            entropy.append(en)

        return np.sum(entropy)

    if __name__ == '__main__':
        img1 = cv2.imread(bt_path_out, cv2.IMREAD_GRAYSCALE)
        entropy1 = calcEntropy(img1)
        lbl = Label(root, fg="blue", text=(('Entropy:', entropy1)))
        lbl.pack(side="top", pady=6)
        print(entropy1)

    def psnr1(bt_path_in, bt_path_out):
        # compare the original against the combined result
        mse = np.mean((img / 1.0 - both_calc / 1.0) ** 2)
        if mse < 1.0e-10:
            return 100
        return 10 * math.log10(255.0 ** 2 / mse)

    if __name__ == '__main__':
        print(psnr1(bt_path_in, bt_path_out))
        print(
            skimage.metrics.peak_signal_noise_ratio(img,
                                                    both_calc,
                                                    data_range=255))
        lbl = Label(root,
                    fg="red",
                    text=(('PSNR:',
                           skimage.metrics.peak_signal_noise_ratio(
                               img, both_calc, data_range=255))))
        lbl.pack(side="top", pady=30)

    # ...and then to ImageTk format
    edged = ImageTk.PhotoImage(edged)
    # if the panels are None, initialize them
    if panelG is None:
        # the first panel will store our original image
        # while the second panel will store the edge map
        panelG = Label(image=edged)
        panelG.image = edged
        panelG.pack(side="right", padx=10, pady=10)

    # otherwise, update the image panels
    else:
        panelG.configure(image=edged)
        panelG.image = edged
Example No. 21
def histoEqual():
    # grab a reference to the image panels
    global panelY
    # open a file chooser dialog and allow the user to select an input
    # image
    path = openFile()

    # Running Path
    path_out = '/home/anibe/Desktop/augustine/'

    heq_path_out = path_out + 'equalized.png'
    heq_path_in = path

    # ensure a file path was selected; bail out early otherwise
    if len(path) == 0:
        return

    img = cv2.imread(heq_path_in)[:, :, 0]
    # create a CLAHE object (arguments are optional)
    clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8))
    cl1 = clahe.apply(img)
    cv2.imwrite(heq_path_out, cl1)  # save the equalized image, not the original

    # convert the image to PIL format...
    edged = Image.fromarray(cl1)

    # compute the entropy of the equalized image
    def calcEntropy2(img):
        entropy = []
        hist = cv2.calcHist([img], [0], None, [256], [0, 255])
        total_pixel = img.shape[0] * img.shape[1]
        for item in hist:
            probability = item / total_pixel
            if probability == 0:
                en = 0
            else:
                en = -1 * probability * (np.log(probability) / np.log(2))
            entropy.append(en)

        return np.sum(entropy)

    if __name__ == '__main__':
        img1 = cv2.imread(heq_path_out, cv2.IMREAD_GRAYSCALE)
        entropy = calcEntropy2(img1)
        lbl = Label(root, fg="blue", text=(('Entropy:', entropy)))
        lbl.pack(side="top", pady=6)
        print(entropy)

    def psnr2(heq_path_out):
        # compare the original against the equalized result
        mse = np.mean(((img / 255.0) - (cl1 / 255.0)) ** 2)
        if mse < 1.0e-10:
            return 100
        PIXEL_MAX = 1
        return 20 * math.log10(PIXEL_MAX / math.sqrt(mse))

    if __name__ == '__main__':
        print(psnr2(heq_path_out))
        print(skimage.metrics.peak_signal_noise_ratio(img, cl1,
                                                      data_range=255))
        lbl = Label(root, fg="red", text=(('PNSR:', psnr2(heq_path_out))))
        lbl.pack(side="top", pady=30)

    # ...and then to ImageTk format
    edged = ImageTk.PhotoImage(edged)

    # if the panels are None, initialize them
    if panelY is None:
        # the first panel will store our original image
        panelY = Label(image=edged)
        panelY.image = edged
        panelY.pack(side="right", padx=10, pady=10)

    # otherwise, update the image panels
    else:
        panelY.configure(image=edged)
        panelY.image = edged
Example No. 22
def fix_l(img):
    # expects an image already converted to LAB colour space
    l, a, b = cv2.split(img)
    clahe = cv2.createCLAHE()
    l = clahe.apply(l)

    return cv2.merge([l, a, b])
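Since fix_l splits its input as L/a/b, it expects an image already converted to LAB. A usage sketch on a BGR image (the file path is hypothetical):

import cv2

bgr = cv2.imread('photo.jpg')  # hypothetical input path
lab = cv2.cvtColor(bgr, cv2.COLOR_BGR2LAB)
out = cv2.cvtColor(fix_l(lab), cv2.COLOR_LAB2BGR)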
Example No. 23
    k = cv.waitKey(0)
    if k == 27:
        cv.destroyAllWindows()


def nothing(x):
    pass


cv.namedWindow('win', cv.WINDOW_NORMAL)
# create a trackbar
bar = cv.createTrackbar('equ', 'win', 0, 10, nothing)
img = cv.imread('image/hist.jpg', 0)

while True:
    x = cv.getTrackbarPos('equ', 'win')
    # apply CLAHE equalization with the clip limit from the trackbar
    clahe = cv.createCLAHE(clipLimit=x, tileGridSize=(8, 8))
    cl = clahe.apply(img)
    cv.imshow('win', cl)
    key = cv.waitKey(10)
    if (key == 27):
        break

# 2D histogram display
img = cv.imread('image/hist.jpg')
hsv = cv.cvtColor(img, cv.COLOR_BGR2HSV)
hist = cv.calcHist([hsv], [0, 1], None, [150, 256], [0, 180, 0, 256])
plt.imshow(hist, interpolation='nearest')
plt.show()
Example No. 24
def clahe_demo(image):  # contrast-limited adaptive histogram equalization
    gray = cv.cvtColor(image, cv.COLOR_BGR2GRAY)
    clahe = cv.createCLAHE(clipLimit=2.6, tileGridSize=(8, 8))
    dst = clahe.apply(gray)
    cv.imshow("image", gray)
    cv.imshow("result", dst)
Example No. 25
IMG_PATH = ['./chest_xray_images/covid19/?', '1']
IMG_SHAPE = (320, 320, 3)

test_img = load_img(path=IMG_PATH[0], color_mode='grayscale')
test_img = img_to_array(img=test_img, data_format='channels_last')
test_img = cv2.resize(test_img,
                      dsize=IMG_SHAPE[:2],
                      interpolation=cv2.INTER_NEAREST)
test_img = np.expand_dims(test_img, axis=-1)
test_img = test_img.astype(np.uint8)
ref_img = copy.deepcopy(x=test_img)
temp_img = np.concatenate((test_img, test_img, test_img), axis=-1)

if IMG_SHAPE[-1] == 3:
    test_img_clahe = cv2.createCLAHE(clipLimit=2.0,
                                     tileGridSize=(8, 8)).apply(test_img)
    test_img_clahe = np.expand_dims(a=test_img_clahe, axis=-1)
    test_img_beasf = BEASF(image=test_img, gamma=1.5)
    test_img = np.concatenate((test_img, test_img_beasf, test_img_clahe),
                              axis=-1)
else:
    pass

test_img = test_img / 255.
# test_img = np.expand_dims(test_img, axis=0)
print('external image(s) shape:', test_img.shape)

# load model as a json file and load weights from .hdf5 file
json_file = open(file='./checkpoints/COVID-CXNet/COVID-CXNet_model.json',
                 mode='r')
model_json = json_file.read()
Example No. 26
    def initializeMatplolibObjects(self):
        try:
            print('\n Reading images and automatically adjusting contrast \n')

            n = len(self.flist)
            if n == 0:
                QtWidgets.QMessageBox.critical(
                    None, "Path not found",
                    "%s does not contain .tif files. Please check path and rerun script"
                    % self.indir, QtWidgets.QMessageBox.Ok)
                sys.exit()

            self.index = int(n / 2.0)
            self.evar = self.index + 1
            self.ui.image_number_spinBox.setMinimum(1)
            self.ui.image_number_spinBox.setMaximum(n)
            self.ui.image_number_spinBox.setValue(self.evar)

            self.figureView = Figure(figsize=(8, 8),
                                     dpi=100,
                                     facecolor='royalblue',
                                     edgecolor='white',
                                     linewidth=2)
            self.axis = self.figureView.add_subplot(111)
            self.img = cv2.imread(self.flist[self.index], 0)
            self.clahe = cv2.createCLAHE(clipLimit=5.0, tileGridSize=(8, 8))
            self.cl = self.clahe.apply(self.img)
            self.axis.imshow(self.cl, 'gray')
            self.axis.axis('off')
            self.axis.set_title(os.path.basename(self.flist[self.index]),
                                color='white',
                                y=1.05,
                                fontsize=10)

            # creating canvas and toolbar
            self.dynamic_canvas = FigureCanvasQTAgg(self.figureView)
            self.dynamic_canvas.setFocusPolicy(QtCore.Qt.ClickFocus)
            self.dynamic_canvas.setFocus()
            self.ui.image_gridLayout.addWidget(self.dynamic_canvas, 1, 1, 1, 3)
            self.toolbar = NavigationToolbar2QT(self.dynamic_canvas, self)
            self.toolbar.setFixedWidth(400)

            # Setting tool tips for relevant buttons on the navigation tool bar
            #https://matplotlib.org/3.1.1/users/navigation_toolbar.html
            toolTipDict = {
                "Reset original view":
                "Reset original view (h or r or home)",
                "Back to previous view":
                "Back to previous view (c or left arrow or backspace)",
                "Forward to next view":
                "Forward to next view (v or right arrow)",
                "Left button pans, Right button zooms\nx/y fixes axis, CTRL fixes aspect":
                "Left button pans, Right button zooms\nx/y fixes axis, CTRL fixes aspect (p)",
                "Zoom to rectangle\nx/y fixes axis, CTRL fixes aspect":
                "Zoom to rectangle\nx/y fixes axis, CTRL fixes aspect (o)",
                "Save the figure":
                "Save the figure (ctrl + s)",
            }
            actionList = self.toolbar.actions()
            for action in actionList:
                ss = action.toolTip()
                item = toolTipDict.get(ss)
                if (item is not None):
                    action.setToolTip(item)

            self.ui.footer_gridLayout.addWidget(self.toolbar, 1, 6, 1, 2)
        except BaseException as ex:
            _runException(None, ex)
Example No. 27
'''
plt.subplot(223), plt.imshow(img2, 'gray')
plt.subplot(224)
plt.plot(cdf_m, color = 'b')
plt.hist(img2.flatten(),256,[0,256], color = 'r')
plt.xlim([0,256])
plt.legend(('cdf','histogram'), loc = 'upper left')

plt.show()
'''

# CLAHE (Contrast Limited Adaptive Histogram Equalization)

# The first histogram equalization we just saw considers the global contrast of the image. In many cases, that is not a good idea. For example, the image below shows an input image and its result after global histogram equalization.

# It is true that the background contrast improves after histogram equalization. But compare the face of the statue in both images: we lose most of the information there to over-brightness, because the histogram is not confined to a particular region as it was in the previous cases. (Try plotting the histogram of the input image for more intuition.)

# To solve this problem, adaptive histogram equalization is used. The image is divided into small blocks called "tiles" (8x8 by default in OpenCV), and each block is histogram-equalized as usual, so within a small area the histogram is confined to a small region. The drawback is that noise, where present, gets amplified. To avoid this, contrast limiting is applied: if any histogram bin exceeds the specified contrast limit (40 by default in OpenCV), those pixels are clipped and distributed uniformly over the other bins before equalization. Finally, bilinear interpolation is applied across tile borders to remove artifacts.

img = cv2.cvtColor(cv2.imread('resource/clahe_demo.jpg'), cv2.COLOR_BGR2GRAY)
cv2.imshow('orign', img)

equ = cv2.equalizeHist(img)
cv2.imshow('normal eq', equ)

clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8))
cl1 = clahe.apply(img)
cv2.imshow('clahe', cl1)

cv2.waitKey(0)
cv2.destroyAllWindows()
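As the comments above suggest, plotting the grey-level histograms makes the difference concrete. A minimal sketch reusing img and cl1 from the demo:

import matplotlib.pyplot as plt

hist_img = cv2.calcHist([img], [0], None, [256], [0, 256])
hist_cl1 = cv2.calcHist([cl1], [0], None, [256], [0, 256])
plt.plot(hist_img, label='original')
plt.plot(hist_cl1, label='CLAHE')
plt.legend(loc='upper left')
plt.show()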
Example No. 28
def clahe_demo(image):
    gray = cv.cvtColor(image, cv.COLOR_BGR2GRAY)
    clahe = cv.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8))
    dst = clahe.apply(gray)
    cv.imshow("clahe_demo", dst)