import numpy
import cv2
import scipy.spatial.distance
from cv2 import HOGDescriptor

import ProcessImages  # project-local helper used below for resizing (import path assumed)


def testingMethod(img: numpy.ndarray):
    greyImg = cv2.cvtColor(src=img, code=cv2.COLOR_RGB2GRAY)
    # winSize is (width, height) -> an 8x16 window with 8x8 blocks, stride and cells, 9 bins
    hog = HOGDescriptor((8, 16), (8, 8), (8, 8), (8, 8), 9)
    hogDescriptors = []
    # stop before the window would run off the image (the original loop assumed a large image)
    limit = min(greyImg.shape[0] - 16, greyImg.shape[1] - 8, 1000)
    for i in range(0, limit, 4):
        roi = greyImg[i:i + 16, i:i + 8]
        hogDescriptors.append(hog.compute(img=roi))
    # create a brute-force matcher for kNN matching
    bf = cv2.BFMatcher()
    # convert to a 2D float32 numpy.ndarray (one descriptor per row) as the matcher expects
    description = numpy.array(hogDescriptors, dtype=numpy.float32).reshape(len(hogDescriptors), -1)
    matches = bf.knnMatch(description, description, k=2)
    minimumDistance = 10000
    for i, (m, n) in enumerate(matches):
        if minimumDistance > n.distance - m.distance:
            minimumDistance = n.distance - m.distance
        print(' m query index = ', m.queryIdx, '| m train index = ', m.trainIdx,
              '| n query index = ', n.queryIdx, '| n train index = ', n.trainIdx,
              '| n distance - ', n.distance, '| m distance - ', m.distance,
              ' | distance : ', n.distance - m.distance)
    print('total matches', len(matches))
    print('minimum distance :', minimumDistance)

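
# A minimal sanity-check sketch (not in the original code): a HOG descriptor's length is
# nbins * cells_per_block * block_positions. For the 8x16 window above with 8x8 blocks,
# stride and cells this gives 9 * 1 * (1 * 2) = 18 values per ROI, which should agree with
# HOGDescriptor((8, 16), (8, 8), (8, 8), (8, 8), 9).getDescriptorSize().
def expectedHOGLength(winSize, blockSize, blockStride, cellSize, nbins) -> int:
    blocks_x = (winSize[0] - blockSize[0]) // blockStride[0] + 1
    blocks_y = (winSize[1] - blockSize[1]) // blockStride[1] + 1
    cells_per_block = (blockSize[0] // cellSize[0]) * (blockSize[1] // cellSize[1])
    return nbins * cells_per_block * blocks_x * blocks_y
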
def flannBasedMatchHOG(image: numpy.uint8) -> None:
    """Compute HOG descriptors over two shifting regions of the image and match them
    using a FLANN-based matcher."""
    # winSize (width, height) = 8x8, with 8x8 blocks, stride and cells, 9 bins
    hog = HOGDescriptor((8, 8), (8, 8), (8, 8), (8, 8), 9)
    hogDescriptor1 = []
    hogDescriptor2 = []
    for i in range(1, 20):  # iterating to get 19 descriptors per region
        hogDescriptor1.append(hog.compute(img=image[90 - i:240 - i, 100:300]))
        hogDescriptor2.append(hog.compute(img=image[70 + i:220 + i, 300:500]))
    print('length of all descriptors - ', len(hogDescriptor2))
    print('length - ', len(hogDescriptor2[1]))
    print('length - ', len(hogDescriptor1[2]))
    print('type - ', type(numpy.float32(hogDescriptor1)))
    # convert the lists to 2D float32 arrays (one descriptor per row), as FLANN requires
    hogDescriptor1 = numpy.float32(hogDescriptor1).reshape(len(hogDescriptor1), -1)
    hogDescriptor2 = numpy.float32(hogDescriptor2).reshape(len(hogDescriptor2), -1)
    # FLANN parameters
    FLANN_INDEX_KDTREE = 0
    index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
    search_params = dict(checks=50)  # or pass an empty dictionary
    flann = cv2.FlannBasedMatcher(index_params, search_params)
    matches = flann.knnMatch(hogDescriptor1, hogDescriptor2, k=2)
    print('length of matches : ', len(matches), ' type of matches - ', type(matches))
    # filtering inspired by Lowe's ratio test, but thresholding the absolute distance gap
    count = 0
    for i, (m, n) in enumerate(matches):
        if abs(m.distance - n.distance) < 0.8:
            count += 1
            print('distance : ', (n.distance - m.distance), m.trainIdx, n.trainIdx)
            # n always had a larger distance than m in the runs observed
    print('total count of matches : ', count)

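
# Hedged alternative sketch (not in the original code): Lowe's ratio test as published keeps
# a match when the nearest distance is clearly smaller than the second-nearest, i.e.
# m.distance < ratio * n.distance, rather than thresholding their absolute difference.
# It assumes the same `matches` structure returned by flann.knnMatch above.
def countGoodMatchesByRatio(matches, ratio: float = 0.75) -> int:
    good = 0
    for m, n in matches:
        if m.distance < ratio * n.distance:  # nearest neighbour clearly beats the second-nearest
            good += 1
    return good
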
def calculateHOGofSuspectAreas():
    print('ok....')
    # winSize (width, height) = 8x8, with 8x8 blocks, stride and cells, 9 bins
    hog = HOGDescriptor((8, 8), (8, 8), (8, 8), (8, 8), 9)
    # earlier experiments used crops of /home/waasala/workspace/gimp/colorFlower.jpeg,
    # hog.detect instead of hog.compute, and the image at
    # /home/waasala/workspace/PycharmProjects/OpenCVBasic/cloningDetection/image.png
    image = ProcessImages.resizeImage(
        0, cv2.imread('/home/waasala/workspace/gimp/colorBird.jpeg'))
    hogDescriptor1 = hog.compute(img=image[90:240, 100:300])
    hogDescriptor2 = hog.compute(img=image[70:220, 300:500])
    print('type of hog descriptor_1 - ', type(hogDescriptor1),
          ' first value of descriptor_1 - ', hogDescriptor1[0])
    print('type of hog descriptor_2 - ', type(hogDescriptor2),
          ' first value of descriptor_2 - ', hogDescriptor2[0])
    print('distance : ', scipy.spatial.distance.euclidean(hogDescriptor1, hogDescriptor2))
    # mark the two compared regions for visual inspection by zeroing one colour channel each
    image[90:240, 100:300, 0] = 0
    image[70:220, 300:500, 1] = 0
    cv2.imshow('test', image)
    cv2.waitKey(0)
    cv2.destroyAllWindows()

def calculsteHOGofSuspectAreas(suspectAreas: dict):
    hog = HOGDescriptor()  # default 64x128 detection window
    returnVal = hog.compute(img=suspectAreas['area1'])  # suspectAreas is a dict, so index by key
    print(type(returnVal))

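
# Hedged helper sketch (not in the original code): the default cv2.HOGDescriptor() uses a
# 64x128 detection window, so compute() generally cannot handle suspect patches smaller than
# that. Assuming arbitrarily sized patches, one option is to resize to the default window
# first; the function name is hypothetical.
def computeHOGonResizedPatch(patch: numpy.ndarray) -> numpy.ndarray:
    hog = HOGDescriptor()  # default: 64x128 window, 16x16 blocks, 8x8 stride and cells, 9 bins
    resized = cv2.resize(patch, (64, 128))  # cv2.resize takes (width, height)
    return hog.compute(img=resized)
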
def matchWithHOGKNN(noSIFTkeySegValList: list, segments: numpy.ndarray,
                    imageColor: numpy.ndarray, threshold=0.004) -> dict:
    """Return a dictionary of matched areas: key -> segment value,
    value -> list of segment values whose HOG descriptors match it."""
    greyImage = cv2.cvtColor(src=imageColor, code=cv2.COLOR_RGB2GRAY)
    '''prepare the HOG descriptor'''
    # earlier trials used an 8x16 window with 8x8 blocks and a 16x32 window with 16x16 blocks
    detection_window_col = 32
    detection_window_row = 32
    # winSize is (width, height) = (cols, rows); 16x16 blocks, 8x8 stride and cells, 9 bins
    hog = HOGDescriptor((detection_window_col, detection_window_row), (16, 16), (8, 8), (8, 8), 9)
    '''dictionary holding one descriptor per ROI, keyed by a SegVal wrapper (a small project
    class, defined elsewhere, exposing .segmentValue) so several ROIs of the same segment
    can coexist as distinct keys'''
    dictionary = {}
    '''dictionary to hold the matched patches with key->segmentValue | value->array_of_matched_segments'''
    matchedSegments = {}
    min_height = 10000  # for testing purposes
    min_width = 10000  # for testing purposes
    '''assuming the cloned area is not confined to a single segment - iterating over the
    segment values that did not yield enough SIFT key points'''
    for segVal in noSIFTkeySegValList:
        rows, cols = numpy.where(segments == segVal)
        if max(rows) - min(rows) < min_height:  # for testing purposes
            min_height = max(rows) - min(rows)  # for testing purposes
        if max(cols) - min(cols) < min_width:  # for testing purposes
            min_width = max(cols) - min(cols)  # for testing purposes
        '''checking if the segment is large enough to run the window'''
        if ((max(rows) - min(rows)) > detection_window_row) and (
                (max(cols) - min(cols)) > detection_window_col):
            '''sliding the HOG window over the segment's bounding box'''
            for y in range(min(rows), max(rows) - detection_window_row, 4):
                for x in range(min(cols), max(cols) - detection_window_col, 4):
                    roi = greyImage[y:y + detection_window_row,
                                    x:x + detection_window_col]  # [y1:y2, x1:x2]
                    '''add the descriptor keyed by the segment value wrapper (a per-segment
                    list variant of this bookkeeping lives in matchWithHOGDescriptors below)'''
                    dictionary[SegVal(segVal)] = hog.compute(img=roi)
    print('segments with proper size for HOG : ', len(dictionary))
    print('minimum height of roi -', min_height, ' | minimum width of roi -', min_width)
    '''after iterating over all the segments without the required number of key points,
    compare the descriptors gathered in the dictionary: key->SegVal | value->HOG descriptor'''
    minimum_distance = 10000  # for testing purposes
    '''prepare a 2D float32 numpy array of HOG descriptors (one per row) for the matcher'''
    hog_descriptors = numpy.array(list(dictionary.values()), dtype=numpy.float32)
    hog_descriptors = hog_descriptors.reshape(len(hog_descriptors), -1)
    list_of_keys = numpy.array(list(dictionary.keys()))
    print('everything is fine since descriptors and keys are of the same length -',
          len(hog_descriptors), ',', len(list_of_keys))
    '''prepare the BF matcher and match the descriptor set against itself'''
    bf = cv2.BFMatcher()
    matches = bf.knnMatch(hog_descriptors, hog_descriptors, k=2)
    '''iterating over matches to keep the best ones
    (note: the hard-coded 0.4 below is used instead of the threshold parameter)'''
    for i, (m, n) in enumerate(matches):
        distance = abs(n.distance - m.distance)
        if distance < minimum_distance:  # for testing purposes
            minimum_distance = distance  # for testing purposes
        if distance < 0.4:
            print(' m query index = ', m.queryIdx, '| m train index = ', m.trainIdx,
                  '| n query index = ', n.queryIdx, '| n train index = ', n.trainIdx,
                  '| n distance - ', n.distance, '| m distance - ', m.distance,
                  ' | distance : ', distance)
            '''obtain the segment values for the matched descriptors'''
            keyValue1 = list_of_keys[m.queryIdx].segmentValue
            keyValue2 = list_of_keys[n.trainIdx].segmentValue
            # tested combinations:
            # (m.queryIdx, n.trainIdx): seems roughly fine
            # (m.queryIdx, m.trainIdx): I think this is not working
            '''make sure the two descriptors are not from the same segment'''
            if keyValue1 != keyValue2:
                print('above is taken')
                '''fill into the return dictionary of matches'''
                if keyValue1 in matchedSegments:
                    matchedSegments[keyValue1].append(keyValue2)
                elif keyValue2 in matchedSegments:
                    matchedSegments[keyValue2].append(keyValue1)
                else:
                    matchedSegments[keyValue1] = [keyValue2]
    print('the minimum distance recorded : ', minimum_distance)
    return matchedSegments

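
# A hypothetical usage sketch for matchWithHOGKNN (not in the original code). The segment
# label map below is fabricated with numpy purely for illustration; in the real pipeline it
# would come from the segmentation step, noSIFTkeySegValList from the SIFT key-point stage,
# and the SegVal wrapper from the project itself.
def demoMatchWithHOGKNN() -> None:
    demoImage = cv2.imread('/home/waasala/workspace/gimp/colorBird.jpeg')
    if demoImage is None:
        return
    # fake label map: segment 0 on the left half of the image, segment 1 on the right half
    demoSegments = numpy.zeros(demoImage.shape[:2], dtype=numpy.int32)
    demoSegments[:, demoImage.shape[1] // 2:] = 1
    matched = matchWithHOGKNN([0, 1], demoSegments, demoImage)
    print('matched segments :', matched)
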
def matchWithHOGDescriptors(noSIFTkeySegValList: list, segments: numpy.ndarray,
                            imageColor: numpy.ndarray, threshold=15) -> dict:
    """Return a dictionary of matched areas: key -> segment value,
    value -> list of segment values whose HOG descriptors are within the threshold."""
    greyImage = cv2.cvtColor(src=imageColor, code=cv2.COLOR_RGB2GRAY)
    '''prepare the HOG descriptor'''
    detection_window_col = 8
    detection_window_row = 8
    # winSize (width, height) = 8x8, with 8x8 blocks, stride and cells, 9 bins
    hog = HOGDescriptor((detection_window_col, detection_window_row), (8, 8), (8, 8), (8, 8), 9)
    '''dictionary to hold the descriptors of all segments'''
    dictionary = {}
    '''dictionary to hold the matched patches with key->segmentValue | value->array_of_matched_segments'''
    matchedSegments = {}
    min_height = 10000  # for testing purposes
    min_width = 10000  # for testing purposes
    '''assuming the cloned area is not confined to a single segment - iterating over the
    segment values that did not yield enough SIFT key points'''
    for segVal in noSIFTkeySegValList:
        rows, cols = numpy.where(segments == segVal)
        '''list to hold the HOG descriptors of this segment'''
        temp_h_o_g_descriptors = []
        if max(rows) - min(rows) < min_height:  # for testing purposes
            min_height = max(rows) - min(rows)  # for testing purposes
        if max(cols) - min(cols) < min_width:  # for testing purposes
            min_width = max(cols) - min(cols)  # for testing purposes
        '''checking if the segment is large enough to run the window'''
        if ((max(rows) - min(rows)) > detection_window_row) and (
                (max(cols) - min(cols)) > detection_window_col):
            '''sliding the HOG window over the segment's bounding box'''
            for y in range(min(rows), max(rows) - detection_window_row, 4):
                for x in range(min(cols), max(cols) - detection_window_col, 4):
                    roi = greyImage[y:y + detection_window_row,
                                    x:x + detection_window_col]  # [y1:y2, x1:x2]
                    temp_h_o_g_descriptors.append(hog.compute(img=roi))
            '''after sliding the HOG window over the segment, add its descriptors to the dictionary'''
            dictionary[segVal] = temp_h_o_g_descriptors
    print('segments with proper size for HOG : ', len(dictionary))
    print('minimum height of roi -', min_height, ' | minimum width of roi -', min_width)
    '''after iterating over all the segments without the required number of key points,
    compare the descriptors of every pair of segments in the dictionary:
    key->segment_value | value->list_of_descriptors'''
    minimum_distance = 10000  # for testing purposes
    for index1, keyValue1 in enumerate(dictionary):
        for index2, keyValue2 in enumerate(dictionary):
            if index2 > index1:
                '''iterating over each descriptor of the two segments'''
                for descriptor1 in dictionary[keyValue1]:
                    for descriptor2 in dictionary[keyValue2]:
                        distance = scipy.spatial.distance.euclidean(descriptor1, descriptor2)
                        if distance < minimum_distance:  # for testing purposes
                            minimum_distance = distance  # for testing purposes
                        if distance < threshold:
                            '''fill into the return dictionary of matches'''
                            if keyValue1 in matchedSegments:
                                matchedSegments[keyValue1].append(keyValue2)
                            elif keyValue2 in matchedSegments:
                                matchedSegments[keyValue2].append(keyValue1)
                            else:
                                matchedSegments[keyValue1] = [keyValue2]
    print('the minimum distance recorded : ', minimum_distance)
    return matchedSegments
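
# Hedged visualization sketch (not part of the original pipeline): given the matchedSegments
# dictionary returned by either matcher above and the segment label map, paint the matched
# segments so suspected clone pairs can be inspected visually. The function name and the
# red marking colour are illustrative choices.
def showMatchedSegments(matchedSegments: dict, segments: numpy.ndarray,
                        imageColor: numpy.ndarray) -> None:
    display = imageColor.copy()
    for keyValue1, partners in matchedSegments.items():
        for segVal in [keyValue1] + list(partners):
            display[segments == segVal] = (0, 0, 255)  # mark matched segments in red (BGR)
    cv2.imshow('matched segments', display)
    cv2.waitKey(0)
    cv2.destroyAllWindows()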