Example #1
    def show(self, saveto="1_results/output.png"):
        tns = np.array(self._BasicRetrieval__trainset)
        tts = np.array(self._BasicRetrieval__testset)

        y_tns = self.__y_train
        y_tts = self.__predicted_y

        half = super(SVMImageRetrieval, self)._half

        def concatenate(a_set, y_label, desired_label):
            pool = a_set[y_label == desired_label]
            pool = [half(img) for img in pool]
            return cv2.hconcat(pool)

        res = list()
        for ii, timg in enumerate(tts):
            add_note_on_the_picture(timg, self.__test_cnames[ii])
            # every training image whose label matches the predicted label
            pool = concatenate(tns, y_tns, y_tts[ii])
            tmp = cv2.hconcat([half(timg), pool])
            res.append(tmp)

        for i, item in enumerate(res):
            cv2.imshow("PRESS ANY KEY " + str(i), item)
        # cv2.imwrite(saveto, item)
        cv2.waitKey(0)
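Every example on this page calls add_note_on_the_picture, but the helper itself is not part of the listing. Below is a minimal sketch of what it might look like, assuming a cv2.putText-based overlay with the signature implied by the calls above; the project's real helper may differ.

import cv2

def add_note_on_the_picture(img, text="", label_center=None):
    # font, colours and the default placement are guesses based on how the
    # examples call this helper; treat it as an illustration only
    font, font_scale, thickness = cv2.FONT_HERSHEY_SIMPLEX, 0.7, 2
    (w, h), _ = cv2.getTextSize(text, font, font_scale, thickness)
    if label_center is None:
        label_center = ((img.shape[1] - w) // 2, 0)   # default: top centre
    x, y = label_center
    origin = (int(x), int(y) + h + 5)                 # putText expects the baseline
    cv2.rectangle(img, (origin[0], origin[1] - h - 5),
                  (origin[0] + w, origin[1] + 5), (0, 0, 0), cv2.FILLED)
    cv2.putText(img, text, origin, font, font_scale, (255, 255, 255), thickness)
    return img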
Example #2
def sift_detection(img: ndarray):
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    # in OpenCV >= 4.4 plain cv2.SIFT_create() works; xfeatures2d needs opencv-contrib
    sift = cv2.xfeatures2d.SIFT_create()
    kp = sift.detect(gray)
    cv2.drawKeypoints(gray, kp, img)
    add_note_on_the_picture(img, "SIFT Detection (key: 7)", label_center=(0, 0))
    return img
Example #3
def adaptive_gaussian_thresholding(img: ndarray):
    img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    img = cv2.medianBlur(img, 5)
    img = cv2.adaptiveThreshold(img, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
                                cv2.THRESH_BINARY, 11, 2)
    add_note_on_the_picture(img, "Adaptive Gaussian thresholding (key: 4)", label_center=(0, 0))
    return img
Example #4
def display_texts(img: ndarray, texts: list):
    from lib.common import FONT, FONT_SCALE, LINE_TYPE
    (label_width, label_height), _ = cv2.getTextSize(texts[0], FONT,
                                                     FONT_SCALE, LINE_TYPE + 2)
    cimg = img.copy()
    x = (cimg.shape[1] - label_width) // 2  # shape[1] is the image width
    for i, text in enumerate(texts):
        y = (label_height * 1.5) * i
        add_note_on_the_picture(cimg, text, label_center=(x, int(y)))

    return cimg
Example #5
    def my_sobel_edge_detection(img: ndarray):
        img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        img = Filter._normalize(img)

        gauss_filter = Helper.make_gaussian(5)
        sobelmask_x = np.array([[-1, 0, 1], [-2, 0, 2], [-1, 0, 1]])
        sobelmask_y = np.array([[1, 2, 1],  [0, 0, 0],  [-1, -2, -1]])

        img = Filter.convolve(img, gauss_filter)
        sobel_x = Filter.convolve(img, sobelmask_x)
        sobel_y = Filter.convolve(img, sobelmask_y)
        sobel = np.sqrt(sobel_x ** 2 + sobel_y ** 2)
        sobel = np.ascontiguousarray(sobel)

        add_note_on_the_picture(sobel, "My Smart Approach (Key 2)")

        return sobel
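Helper.make_gaussian is used above but not included in this listing. A plausible stand-in, assuming it simply returns a normalised 2-D Gaussian kernel of the requested odd size (the real helper's sigma convention is unknown):

import numpy as np

def make_gaussian(size: int, sigma: float = 1.0) -> np.ndarray:
    # grid of coordinates centred on the middle of the kernel
    ax = np.arange(size) - (size - 1) / 2.0
    xx, yy = np.meshgrid(ax, ax)
    kernel = np.exp(-(xx ** 2 + yy ** 2) / (2.0 * sigma ** 2))
    return kernel / kernel.sum()    # normalise so the filter preserves brightness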
Example #6
def show_task_1():
    """
    Load the image Lenna.png using OpenCV and display it side by side as both a
    grayscale image and a color image.
    :return: None
    """
    Lenna = cv2.imread('../Data/Lenna.png')
    LennaBW = cv2.cvtColor(Lenna, cv2.COLOR_BGR2GRAY)
    # back to three channels so the grayscale copy can sit next to the colour image
    LennaBW = cv2.cvtColor(LennaBW, cv2.COLOR_GRAY2BGR)

    doubled_lenna = hstack((LennaBW, Lenna))  # numpy.hstack
    add_note_on_the_picture(doubled_lenna)

    while cv2.waitKey(100) != KEYS.SPACE:
        cv2.imshow('Exercise 1.1: Lenna', doubled_lenna)

    cv2.destroyAllWindows()
Example #7
    def show(self, option=Render.BLACK_AND_WHITE):
        if option is Render.CLUSTERS_CENTERS:
            tmp = self.img.copy()
            for k in range(self.k):
                tmp[self.clustered_img == k] = self.cluster_centers[k]
        elif option is Render.BLACK_AND_WHITE:
            tmp = self.clustered_img.copy()
            # spread the cluster labels 0..k-1 over the 0..255 grayscale range
            tmp = tmp.astype("uint8") * (255 // self.k)
        else:
            tmp = self.img.copy()
            for k in range(self.k):
                tmp[self.clustered_img == k] = k_to_rgb(k, self.k)

        add_note_on_the_picture(tmp,
                                "Eps: " + str(round(self.eps, 5)),
                                label_center=(0, 0))
        return tmp
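k_to_rgb is not shown in this listing. One way such a helper could map a cluster index to a distinct colour is to spread the clusters evenly over the HSV hue circle; this is only an assumption about the real function:

import cv2
import numpy as np

def k_to_rgb(k: int, n_clusters: int):
    hue = int(180 * k / max(n_clusters, 1))    # OpenCV hue range is 0..179
    pixel = np.uint8([[[hue, 255, 255]]])      # one fully saturated HSV pixel
    bgr = cv2.cvtColor(pixel, cv2.COLOR_HSV2BGR)[0, 0]
    return [int(c) for c in bgr]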
Example #8
    def sobel_edge_detection(img: ndarray):

        img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        img = Filter._normalize(img)
        img = cv2.GaussianBlur(img, (5, 5), 0)

        sobelmask_x = np.array([[-1, 0, 1], [-2, 0, 2], [-1, 0, 1]])
        sobelmask_y = np.array([[1, 2, 1],  [0, 0, 0],  [-1, -2, -1]])

        # ddepth=-1 keeps the input depth; if the normalised image is unsigned,
        # negative responses get clipped, so a float input (or cv2.CV_32F) is safer
        sobel_x = cv2.filter2D(img, -1, sobelmask_x)
        sobel_y = cv2.filter2D(img, -1, sobelmask_y)

        sobel = np.sqrt(sobel_x ** 2 + sobel_y ** 2)
        sobel = np.ascontiguousarray(sobel)

        add_note_on_the_picture(sobel, "OpenCV approach (Key 1)")

        return sobel
Example #9
    def my_silly_sobel_edge_detection(img: ndarray):
        def silly_convolute(A: ndarray, B: ndarray):
            """Naive sliding-window cross-correlation, normalised by sum(|B|)."""
            output = np.zeros(A.shape)

            n, m = A.shape
            k, l = B.shape

            # crop to whole multiples of the kernel size; the border and the
            # cropped rows/columns simply stay zero
            n = n - n % k
            m = m - m % l

            k //= 2
            l //= 2

            divisor = np.sum(np.abs(B))

            for i in range(k, n - k):
                for j in range(l, m - l):
                    window = A[i - k:i + 1 + k, j - l:j + 1 + l]
                    output[i, j] = np.sum(window * B) / divisor

            return output

        img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        img = Filter._normalize(img)

        gauss_filter = Helper.make_gaussian(3)
        sobelmask_x = np.array([[-1, 0, 1], [-2, 0, 2], [-1, 0, 1]])
        sobelmask_y = np.array([[1, 2, 1],  [0, 0, 0],  [-1, -2, -1]])

        img = silly_convolute(img, gauss_filter)
        sobel_x = silly_convolute(img, sobelmask_x)
        sobel_y = silly_convolute(img, sobelmask_y)

        sobel = np.sqrt(sobel_x ** 2 + sobel_y ** 2)
        sobel = np.ascontiguousarray(sobel)

        add_note_on_the_picture(sobel, "FPS 0.01", label_center=(0, 0))
        add_note_on_the_picture(sobel, "Stupid approach (Key: 3)")

        return sobel
Example #10
def to_yuv(img: ndarray, label=True):
    img = cv2.cvtColor(img, cv2.COLOR_BGR2YUV)
    if label: add_note_on_the_picture(img, "YUV colour space (key: 3)", label_center=(0, 0))
    return img
Example #11
def to_lab(img: ndarray, label=True):
    img = cv2.cvtColor(img, cv2.COLOR_BGR2LAB)
    if label: add_note_on_the_picture(img, "LAB colour space (key: 2)", label_center=(0, 0))
    return img
Example #12
def gaussian_blur(img: ndarray, kernel_size=5):
    img = cv2.GaussianBlur(img, (kernel_size, kernel_size), 0)
    add_note_on_the_picture(img, "Gaussian Blur (key: 9)", label_center=(0, 0))
    return img
Example #13
def nothing(img: ndarray, label=True):
    if label: add_note_on_the_picture(img, "Nothing (key: 0)", label_center=(0, 0))
    return img
Example #14
def canny_edge_detection(img: ndarray):
    img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    img = cv2.GaussianBlur(img, (5, 5), 0)
    img = cv2.Canny(img, 100, 200)
    add_note_on_the_picture(img, "Canny Edge Detection (key: 6)", label_center=(0, 0))
    return img
Example #15
def otsu_thresholding(img: ndarray):
    img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    blur = cv2.GaussianBlur(img, (5, 5), 0)
    _, otsu = cv2.threshold(blur, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
    add_note_on_the_picture(otsu, "Otsu's thresholding (key: 5)", label_center=(0, 0))
    return otsu
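The "(key: N)" hints in Examples #2-#3 and #10-#15 suggest these filters are switched live from the keyboard (the Sobel examples #5, #8 and #9 appear to use a separate key set and are left out here). A minimal dispatch loop under that assumption; the webcam index, the Esc key for quitting, and having all the functions above in one module are assumptions, since the project's real event loop is not shown:

import cv2

FILTERS = {
    ord('0'): nothing,
    ord('2'): to_lab,
    ord('3'): to_yuv,
    ord('4'): adaptive_gaussian_thresholding,
    ord('5'): otsu_thresholding,
    ord('6'): canny_edge_detection,
    ord('7'): sift_detection,
    ord('9'): gaussian_blur,
}

def run_demo():
    cap = cv2.VideoCapture(0)                  # default webcam, an assumption
    current = nothing
    while True:
        ok, frame = cap.read()
        if not ok:
            break
        cv2.imshow("demo", current(frame))
        key = cv2.waitKey(1) & 0xFF
        if key == 27:                          # Esc quits
            break
        current = FILTERS.get(key, current)    # unknown keys keep the current filter
    cap.release()
    cv2.destroyAllWindows()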
Example #16
        gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
        gray = np.float32(gray)

        k = 0.04
        threshold = 0.01

        harris_cv = cv2.cornerHarris(gray, 3, 3, k)
        my_harris = myCornerHaris(gray, k, threshold)

        harris_cv_thres = np.zeros(harris_cv.shape)
        harris_cv_thres[harris_cv > threshold * harris_cv.max()] = 255

        img[my_harris == 255] = [0, 255, 0]
        diff = np.sum(np.absolute(my_harris - harris_cv_thres))

        add_note_on_the_picture(harris_cv_thres, text="OpenCV Corners")
        add_note_on_the_picture(my_harris,       text="My Corners")
        add_note_on_the_picture(img, text="diff: " + str(diff))

        res = cv2.hconcat((harris_cv_thres.astype(np.uint8), my_harris.astype(np.uint8)))
        res = cv2.hconcat([cv2.cvtColor(res, cv2.COLOR_GRAY2RGB), img])

        cv2.imwrite("3_results/" + path.split('/')[-1], res)
        while cv2.waitKey(10) != KEYS.SPACE:
            cv2.imshow('Harris Corner Detection (SPACE TO SHOW NEXT)', res)