import cv2
import numpy as np
# display, save, combine, annotate, modified_annotate, count_white_pixels and
# find_accuracy are assumed to come from the author's ip_functions helper module.


def eroding(i, j, open_i, rgb_img2, method):
    # Erode the binary mask with an i x i kernel for j iterations, count the
    # surviving white pixels and annotate the RGB frame.
    kernel = np.ones((i, i), np.uint8)
    eroded = cv2.erode(open_i, kernel, iterations=j)
    display(eroded, "Eroded-the-image")
    count_white_pixels(eroded)
    _, section_1, section_2 = modified_annotate(eroded, rgb_img2)
    return section_1, section_2
def eroding(i, j, open_i, rgb_img2, method):
    # Variant of eroding() that also writes the eroded mask and the annotated
    # frame to disk. Assumption: annotate() returns (annotated_image,
    # section_1, section_2), so the section counts are returned here as well,
    # matching how the callers below unpack two values.
    kernel = np.ones((i, i), np.uint8)
    eroded = cv2.erode(open_i, kernel, iterations=j)
    display(eroded, "Eroded-the-image" + method)
    rgb_img2, section_1, section_2 = annotate(eroded, rgb_img2)
    save(
        eroded, "Set10-After morphological operation_" + method + "_kernel_" +
        str(i) + "iteration_" + str(j))
    save(
        rgb_img2, "Set10-annotated_" + method + "_kernel_" + str(i) +
        "iteration_" + str(j))
    return section_1, section_2
def homo_morph(img, name, filter_type):
    # Homomorphic filtering: attenuate low-frequency illumination and boost
    # high-frequency reflectance. filter_type "g" = Gaussian, "b" = Butterworth.
    d_zero = 30   # cutoff distance D0
    c = 2         # sharpness constant of the Gaussian transfer function
    y_h = 1.5     # high-frequency gain (gamma_H)
    y_l = 0.75    # low-frequency gain (gamma_L)
    
    #Step 1: Take the log of the image
    I_log = np.log1p(np.array(img,dtype='float'))
    
    #Step 2: Take Fast Fourier Transform
    I_fft = np.fft.fft2(I_log)  # 2-D Fourier transform
    
    if(filter_type=="g"):
        #Step 3: Apply the Gaussian Filter
        I_shape = I_fft.shape
        P = I_shape[0]/2
        Q = I_shape[1]/2 
        U, V = np.meshgrid(range(I_shape[0]), range(I_shape[1]), sparse=False, indexing='ij')
        Duv = np.sqrt((((U-P)**2+(V-Q)**2))).astype(float)  #np.sqrt is redundant as we are going to square this quantity in the next step
        H = np.exp((-(c*(Duv**2))/(d_zero**2)))
        H_u = (y_h - y_l) * (1-H) + y_l
        
        H_u = np.fft.fftshift(H_u)
        I_fft_filt = H_u*I_fft
    
    #Butterworth Filter
    if(filter_type=='b'):
        n = 3
        d_zero = 120
        y_h = 1.002
        y_l = 1.115
        I_shape = I_fft.shape
        P = I_shape[0]/2
        Q = I_shape[1]/2 
        U, V = np.meshgrid(range(I_shape[0]), range(I_shape[1]), sparse=False, indexing='ij')
        Duv = np.sqrt((((U-P)**2+(V-Q)**2))).astype(float)
        H = 1/(1+(d_zero/Duv)**(2*n))  # Duv is 0 at the centre; NumPy warns but H evaluates to 0 there
        H_u = (y_h - y_l) * (1-H) + y_l
        H_u = np.fft.fftshift(H_u)
        I_fft_filt = H_u * I_fft
    
    
    #Step 4: Do the Inverse FFT
    I_filt = np.fft.ifft2(I_fft_filt)
            
    #Step 5: Take Exponential of the components (inverse of the log1p in Step 1)
    I = np.exp(np.real(I_filt))-1
    final_img = np.uint8(I)  # note: values outside [0, 255] wrap around when cast to uint8
    display(final_img, name)
    return final_img
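# A minimal usage sketch for homo_morph (hypothetical names, with a path reused
# from the later examples; "g" selects the Gaussian transfer function, "b" the
# Butterworth variant):
# =============================================================================
# frame = cv2.imread(r"E:\SmartIoTLab\Images\Contours\28.jpeg", 0)
# flattened = homo_morph(frame, "homomorphic-gaussian", "g")
# =============================================================================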
def function1():
    img1 = cv2.imread(r"E:\SmartIoTLab\Images\For_Study\Set10\1.jpeg", 0)
    img2 = cv2.imread(r"E:\SmartIoTLab\Images\For_Study\Set10\2.jpeg", 0)
    rgb_img1 = cv2.imread(r"E:\SmartIoTLab\Images\For_Study\Set10\1.jpeg", 1)
    rgb_img2 = cv2.imread(r"E:\SmartIoTLab\Images\For_Study\Set10\2.jpeg", 1)

    combine(rgb_img1, rgb_img2, "Set10-Combined-RGB")

    thresh2 = cv2.adaptiveThreshold(img1,255,cv2.ADAPTIVE_THRESH_GAUSSIAN_C,\
            cv2.THRESH_BINARY,11,2)
    thresh3 = cv2.adaptiveThreshold(img2,255,cv2.ADAPTIVE_THRESH_GAUSSIAN_C,\
            cv2.THRESH_BINARY,11,2)

    diff = cv2.absdiff(thresh2, thresh3)
    display(diff, "Displaying-Diff")
    rgb_img2 = cv2.imread(r"E:\SmartIoTLab\Images\For_Study\Set10\2.jpeg", 1)
    eroding(3, 1, diff, rgb_img2, "Method-1")
def function2():
    b = np.zeros((175, 2))
    # =============================================================================
    #     for cval in range(2,9):
    #             for ksize in range(7,24):
    #                 if(ksize%2==1):
    #                     print("Kernel Size "+str(ksize))
    #                     print("C Val "+str(cval))
    # =============================================================================
    #for tval in range(35,36):
    #print("Threshold Value "+str(tval))
    for i in range(0, 174):
        img1 = cv2.imread(
            "E:\\SmartIoTLab\\Images\\Dataset\\Dataset3\\" + str(i) + ".jpeg",
            0)
        img2 = cv2.imread(
            "E:\\SmartIoTLab\\Images\\Dataset\\Dataset3\\" + str(i + 1) +
            ".jpeg", 0)
        #display(img2,"displaying-images")
        rgb_img2 = cv2.imread(
            "E:\\SmartIoTLab\\Images\\Dataset\\Dataset2\\" + str(i + 1) +
            ".jpeg", 1)
        diff = cv2.absdiff(img1, img2)
        # =============================================================================
        #         plt.hist(diff.ravel(),256,[0,256])
        #         plt.show()
        # =============================================================================
        #display(diff,"Showing difference")
        #thresh1 = cv2.adaptiveThreshold(diff,255,cv2.ADAPTIVE_THRESH_GAUSSIAN_C,cv2.THRESH_BINARY,ksize,cval)
        #thresh1 = cv2.adaptiveThreshold(diff,255,cv2.ADAPTIVE_THRESH_MEAN_C,cv2.THRESH_BINARY,ksize,cval)
        _, thresh1 = cv2.threshold(diff, 25, 255, cv2.THRESH_BINARY)
        #display(thresh1,"Showing thresholded")
        inverted = thresh1  # inversion is disabled; this is just the thresholded mask
        #cv2.bitwise_not(thresh1,inverted)  #(src,dest)
        display(inverted, "Showing inverted")
        section_1, section_2 = eroding(3, 1, inverted, rgb_img2, "Method-2")
        #print("Output: Section1: "+str(section_1)+", Section2: "+str(section_2))
        b[i][0] = section_1
        b[i][1] = section_2
    #print("Printing B")
    #print(b)
    find_accuracy(ground_truth, b)  # ground_truth is assumed to be defined elsewhere in the original script
# Example #6
# -*- coding: utf-8 -*-
import cv2
from ip_functions import display, homo_morph
#import pymunk.matplotlib_util  # unused; the pixel-inspection call below is disabled
i = 18
img1 = cv2.imread("E:\SmartIoTLab\Images\Brightness_difference_getting_captured_3x3\in\\original"+str(i)+".jpeg",0)
matplotlib_utils.impixelinfo(None, img1)
imgColor = cv2.imread("E:\SmartIoTLab\Images\Brightness_difference_getting_captured_3x3\in\\original"+str(i)+".jpeg",1)
img2 = cv2.imread("E:\SmartIoTLab\Images\Brightness_difference_getting_captured_3x3\in\\original"+str(i+1)+".jpeg",0)
img3 = homo_morph(img1, "img1-gauss","g")
img4 = homo_morph(img2, "img2-gauss","g")
diff=cv2.absdiff(img1,img2)
display(diff,"Displaying-Difference")
_,thresh = cv2.threshold(diff,27,255,cv2.THRESH_BINARY)
display(thresh,"Showing thresholded")
# OpenCV 3.x returns (image, contours, hierarchy); OpenCV 4.x returns only (contours, hierarchy).
image, contours, hierarchy = cv2.findContours(thresh,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
for i in range(0, len(contours)):
    print("Printing Area "+str(cv2.contourArea(contours[i])))
    print("Printing Arc Length "+str(cv2.arcLength(contours[i],True)))
    if(cv2.contourArea(contours[i])>=10 and cv2.contourArea(contours[i])<=50000):
        imgColor = cv2.drawContours(imgColor, [contours[i]], 0, (0,255,0) ,1)
display(imgColor,"Thresholded_image")

import cv2
import numpy as np
# display, homo_morph and avg_diff are assumed to come from the author's
# ip_functions helper module used throughout these examples.
from ip_functions import display, homo_morph, avg_diff

#Step 0: Read the image
img1 = cv2.imread(r"E:\SmartIoTLab\Images\Contours\28.jpeg",0)
img2 = cv2.imread(r"E:\SmartIoTLab\Images\Contours\29.jpeg",0)
# =============================================================================
# =============================================================================
display(img1,"Gray-scale Image")
# save(img1,"gray-original8")
# =============================================================================
# =============================================================================
img3 = homo_morph(img1,"Image-8-Gaussian","g")
img4 = homo_morph(img2,"Image-8-Gaussian","g")

total = avg_diff(img1,img2)

X = np.zeros((720, 480), dtype=np.uint8)  # output buffer, filled pixel-by-pixel below
for m in range (0,720):
    for n in range(0,480):
        val1=img3[m][n]
        val2=img4[m][n]
        if(val1 == 0 or val2 == 0):
            if(val1 == 0):
                temp = val2
            else:
                temp = val1
        else:
            if(val1 >= 182 or val2 >= 182):
                temp = 0   # suppress near-saturated pixels
            else:
                temp = abs(int(val1) - int(val2))
        X[m][n] = temp
# Example #8
# (The start of this script is missing: img1/img2 are two consecutive 720x480
#  grayscale frames, X is a uint8 buffer of the same size and img4 is the frame
#  the contours are drawn on, as in the surrounding examples.)
import cv2
import numpy as np
from ip_functions import display
for i in range(0, 720):
    for j in range(0, 480):
        val1 = img1[i][j]
        val2 = img2[i][j]
        if (val1 == 0 or val2 == 0):
            if (val1 == 0):
                temp = val2
            else:
                temp = val1
        else:
            if (val1 >= 182 or val2 >= 182):
                temp = 0
            else:
                if (val1 >= val2):
                    temp = abs(val1 - val2)
                else:
                    temp = abs(val2 - val1)
        X[i][j] = temp
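# Vectorized sketch of the same per-pixel rule using NumPy (illustrative only;
# X_vec is a hypothetical name and is not used below). Assumption: img1/img2
# are the two grayscale frames iterated over above.
import numpy as np
i1 = img1.astype(np.int16)
i2 = img2.astype(np.int16)
X_vec = np.where(
    (i1 == 0) | (i2 == 0),
    np.where(i1 == 0, i2, i1),                # keep the non-zero pixel
    np.where((i1 >= 182) | (i2 >= 182), 0,    # suppress near-saturated pixels
             np.abs(i1 - i2))                 # plain absolute difference
).astype(np.uint8)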
ret, thresh1 = cv2.threshold(X, 27, 255, 0)
display(thresh1, "Thresholded-Manual")
img3 = cv2.absdiff(img1, img2)
ret, thresh1 = cv2.threshold(img3, 27, 255, 0)
display(thresh1, "Thresholded-Library")
image, contours, hierarchy = cv2.findContours(thresh1, cv2.RETR_TREE,
                                              cv2.CHAIN_APPROX_SIMPLE)

for i in range(0, len(contours)):
    print("Printing Area " + str(cv2.contourArea(contours[i])))
    print("Printing Arc Length " + str(cv2.arcLength(contours[i], True)))
img5 = cv2.drawContours(img4, contours, -1, (0, 255, 0), 1)
display(img5, "Image_with_contour")
import cv2
from ip_functions import display
img = cv2.imread(r"E:\SmartIoTLab\Images\Brightness_difference_getting_captured_3x3\in\original4.jpeg")[:,:,0]
img1 = cv2.imread(r"E:\SmartIoTLab\Images\Brightness_difference_getting_captured_3x3\in\original4.jpeg",0)
img2 = cv2.imread(r"E:\SmartIoTLab\Images\Brightness_difference_getting_captured_3x3\in\original4.jpeg")
print(img.shape)
display(img,"Image1")
display(img1,"Image2")
b = img2.copy()
# set green and red channels to 0
b[:, :, 1] = 0
b[:, :, 2] = 0
cv2.imshow("R-RGB",b)
cv2.waitKey(0)
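# Equivalent sketch using cv2.split / cv2.merge (an alternative to zeroing
# channels in place; OpenCV stores channels in B, G, R order, so this keeps
# only the blue channel, like the in-place version above).
import numpy as np
blue, green, red = cv2.split(img2)
zeros = np.zeros_like(blue)
blue_only = cv2.merge([blue, zeros, zeros])
cv2.imshow("Blue-only (split-merge)", blue_only)
cv2.waitKey(0)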
import cv2
import numpy as np
# display and homo_morph come from the author's ip_functions module; eroding()
# is the helper defined at the top of this listing.
from ip_functions import display, homo_morph


def function2():
    b = np.zeros((329, 2))
    # =============================================================================
    #     for cval in range(2,9):
    #             for ksize in range(7,24):
    #                 if(ksize%2==1):
    #                     print("Kernel Size "+str(ksize))
    #                     print("C Val "+str(cval))
    # =============================================================================
    #for tval in range(35,36):
    #print("Threshold Value "+str(tval))
    for i in range(0, 1):
        #"E:\\SmartIoTLab\\Images\\Dataset\\Dataset4\\woboundary\\"+str(i)+".jpeg"
        img1 = cv2.imread("E:\\SmartIoTLab\\Images\\Contours\\239.jpeg", 0)
        img2 = cv2.imread("E:\\SmartIoTLab\\Images\\Contours\\240.jpeg", 0)

        img1 = homo_morph(img1, "Image-8-Gaussian", "g")
        img2 = homo_morph(img2, "Image-8-Gaussian", "g")

        display(img2, "displaying-images")
        rgb_img2 = cv2.imread(
            "E:\\SmartIoTLab\\Images\\Dataset\\Dataset4\\woboundary\\" +
            str(i + 1) + ".jpeg", 1)
        X = np.zeros((720, 480), dtype=np.uint8)  # output buffer, filled pixel-by-pixel below
        for m in range(0, 720):
            for n in range(0, 480):
                val1 = img1[m][n]
                val2 = img2[m][n]
                if (val1 == 0 or val2 == 0):
                    if (val1 == 0):
                        temp = val2
                    else:
                        temp = val1
                else:
                    if (val1 >= 182 or val2 >= 182):
                        temp = 0
                    else:
                        if (val1 >= val2):
                            temp = abs(val1 - val2)
                        else:
                            temp = abs(val2 - val1)
                X[m][n] = temp
        #diff=cv2.absdiff(img1,img2)
# =============================================================================
#         plt.hist(diff.ravel(),256,[0,256])
#         plt.show()
# =============================================================================
        display(X, "Showing difference")
        #thresh1 = cv2.adaptiveThreshold(diff,255,cv2.ADAPTIVE_THRESH_GAUSSIAN_C,cv2.THRESH_BINARY,ksize,cval)
        #thresh1 = cv2.adaptiveThreshold(diff,255,cv2.ADAPTIVE_THRESH_MEAN_C,cv2.THRESH_BINARY,ksize,cval)
        _, thresh1 = cv2.threshold(X, 27, 255, cv2.THRESH_BINARY)
        #display(thresh1,"Showing thresholded")
        inverted = thresh1  # inversion is disabled; this is just the thresholded mask
        #cv2.bitwise_not(thresh1,inverted)  #(src,dest)
        display(inverted, "Showing inverted")
        section_1, section_2 = eroding(2, 1, inverted, rgb_img2, "Method-2")

        print(section_1)
        print(section_2)
        #print("Output: Section1: "+str(section_1)+", Section2: "+str(section_2))
        b[i][0] = section_1
        b[i][1] = section_2
# Example #11
# -*- coding: utf-8 -*-
import cv2
import numpy as np
from ip_functions import display

img1 = cv2.imread("E:\\SmartIoTLab\\Images\\Contours\\3.jpeg")
img1 = img1.copy()
img3 = cv2.cvtColor(img1, cv2.COLOR_BGR2GRAY)
img2 = cv2.imread("E:\\SmartIoTLab\\Images\\Contours\\4.jpeg", 0)
img2 = img2.copy()
diff = cv2.absdiff(img3, img2)
#_,thresh1 = cv2.threshold(diff,27,255,cv2.THRESH_BINARY)
ret, thresh = cv2.threshold(diff, 27, 255, 0)
display(thresh, "Thresholded")
image, contours, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE,
                                              cv2.CHAIN_APPROX_SIMPLE)
for i in range(0, len(contours)):
    print("Printing Area " + str(cv2.contourArea(contours[i])))
    print("Printing Arc Length " + str(cv2.arcLength(contours[i], True)))
img1 = cv2.drawContours(img2, contours, -1, (0, 255, 0), 1)  # img2 is single-channel, so only the first colour component (0) is used
display(img1, "Thresholded_image")
# Example #12
# -*- coding: utf-8 -*-
#Contrast Limited Adaptive Histogram Equalization
import numpy as np
from ip_functions import display
import cv2
img = cv2.imread(
    r'E:\SmartIoTLab\Images\Brightness_difference_getting_captured_3x3\in\original1.jpeg',
    0)
clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8))
cl1 = clahe.apply(img)
display(cl1, "CLAHE-Image")