def similarity(img1, img2, w, h, nb_bins, metric):
    '''Computes similarity between image 1 and image 2 by applying the given
    metric to HOG representations of image 1 and 2. HOG representations are
    computed over cells of size w x h with nb_bins bins. Returns a float in [0, 1].'''
    # Compute HOG representations of img1 and img2
    hog_similar_1 = HOG(img1, w, h, nb_bins, False)
    hog_similar_2 = HOG(img2, w, h, nb_bins, False)
    I, J, k = np.shape(hog_similar_1)

    # Concatenate the per-cell histograms into one feature vector per image
    concatenated_hog1 = []
    concatenated_hog2 = []
    for i in range(I):
        for j in range(J):
            concatenated_hog1 = np.concatenate((concatenated_hog1, hog_similar_1[i][j]))
            concatenated_hog2 = np.concatenate((concatenated_hog2, hog_similar_2[i][j]))
    return metric(concatenated_hog1, concatenated_hog2)  # similarity value
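# A minimal usage sketch for similarity(), assuming the HOG() function with the
# signature used above; cosine_metric is an illustrative metric supplied by the
# caller, mapping two non-negative histogram vectors to a score in [0, 1].
import numpy as np

def cosine_metric(u, v):
    # Cosine similarity; HOG vectors are non-negative, so the result lies in [0, 1]
    denom = np.linalg.norm(u) * np.linalg.norm(v)
    return float(np.dot(u, v) / denom) if denom > 0 else 0.0

# score = similarity(img1, img2, w=8, h=8, nb_bins=9, metric=cosine_metric)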
def hog(img):
    global hog_object
    img = img.astype(np.uint8)
    if hog_object is None:
        # Build the HOG object just once
        hog_object = HOG(28, 28, 4, 8, 20)
    rv = hog_object.computeHOG(img)
    #hog_object.display_HOG()
    return rv
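# hog_object must exist at module scope before hog() is first called; a minimal
# setup sketch (the constructor arguments above suggest 28x28 input images):
hog_object = None  # built lazily on the first call to hog()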
def get_featuremap(self, frame, new_x1, new_y1, new_x2, new_y2, scale):
    """Important slice operation"""
    # pad_scale_roi = frame[max(new_y1, 0):new_y2+1, max(new_x1, 0):new_x2+1]
    pad_scale_roi = get_border_roi(new_x1, new_y1, new_x2, new_y2, frame)
    if scale != 1.0:
        half_w = (new_x2 - new_x1 + 1) / 2 * (scale - 1)
        half_h = (new_y2 - new_y1 + 1) / 2 * (scale - 1)
        new_x1 = int(np.ceil(new_x1 - half_w))
        new_y1 = int(np.ceil(new_y1 - half_h))
        new_x2 = int(np.floor(new_x2 + half_w))
        new_y2 = int(np.floor(new_y2 + half_h))
        """Important slice operation"""
        # pad_scale_roi = frame[max(new_y1, 0):new_y2+1, max(new_x1, 0):new_x2+1]  # frame follows H x W order
        pad_scale_roi = get_border_roi(new_x1, new_y1, new_x2, new_y2, frame)
    if self.pad != 0:
        half_w = (new_x2 - new_x1 + 1) / 2 * (self.pad - 1)
        half_h = (new_y2 - new_y1 + 1) / 2 * (self.pad - 1)
        tmp_x1 = int(np.ceil(new_x1 - half_w))
        tmp_y1 = int(np.ceil(new_y1 - half_h))
        tmp_x2 = int(np.floor(new_x2 + half_w))
        tmp_y2 = int(np.floor(new_y2 + half_h))
        """Important slice operation"""
        # pad_scale_roi = frame[max(tmp_y1, 0):tmp_y2+1, max(tmp_x1, 0):tmp_x2+1]  # frame follows H x W order
        pad_scale_roi = get_border_roi(tmp_x1, tmp_y1, tmp_x2, tmp_y2, frame, i=2)
    fix_roi = cv2.resize(pad_scale_roi, dsize=(self.fixed_size[1], self.fixed_size[1]))  # dsize follows W x H (64x64)
    fix_roi = np.asarray(fix_roi, dtype=np.float64)  # to float array
    fix_roi = fix_roi / 255.0 - 0.5                  # normalize to [-0.5, 0.5]
    fix_roi = np.power(fix_roi, self.gamma)          # gamma correction
    fix_roi = fix_roi * self.hann                    # Hanning window
    if self.hog:
        fix_roi = fix_roi + 0.5
        hog = HOG(window=fix_roi, cell_size=4, bin_size=8, gamma=1.0)
        self.adapt = 0.1
        hog.init_mag_angle()
        fix_roi = hog.get_window_grad()
    return fix_roi, [new_x1, new_y1, new_x2, new_y2]
def run():
    logging.getLogger().setLevel(logging.WARNING)

    d = Dataset()
    #d.use_images_in_folder("/home/simon/Datasets/ImageNet_Natural/images/")
    #d.use_images_in_folder("/home/simon/Datasets/ICAO_german/")
    d.use_images_in_folder("/home/simon/Datasets/desko_ids/images_unique/")
    #d.use_images_in_folder("/home/simon/Datasets/croatianFishDataset-final/")
    #d.use_images_in_folder("/home/jaeger/data/croatianFishDataset1-5Dir/")
    d.create_labels_from_path()
    d.fill_split_assignments(1)
    #d.read_from_file("/home/simon/Datasets/CUB_200_2011/cropped_scaled_alex.txt", "imagepaths", "string")
    #d.read_from_file("/home/simon/Datasets/CUB_200_2011/tr_ID.txt", "split_assignments", "int")
    #d.read_from_file("/home/simon/Datasets/CUB_200_2011/labels.txt", "labels", "int")

    c = Classification()
    c.add_algorithm(Resize(512, 320))
    #c.add_algorithm(Noise('saltpepper', 0.1))

    p = ParallelAlgorithm()

    p1 = AlgorithmPipeline()
    p1.add_algorithm(HOG())
    p1.add_algorithm(SpatialPyramid())
    #p1.add_algorithm(MinMaxNormalize())
    p1.add_algorithm(NormNormalize())
    p.add_pipeline(p1)

    p2 = AlgorithmPipeline()
    p2.add_algorithm(Resize(64, 32))
    p2.add_algorithm(Colorname())
    p2.add_algorithm(SpatialPyramid())
    p2.add_algorithm(NormNormalize())
    #p2.add_algorithm(MinMaxNormalize())
    p.add_pipeline(p2)

    c.add_algorithm(p)
    #c.add_algorithm(MinMaxNormalize())
    #c.add_algorithm(NormNormalize())
    c.add_algorithm(MeanCalculator())
    #c.add_algorithm(Resize(32, 24))
    c.add_algorithm(MulticlassSVM())

    #c.train(d)
    #for path, gt_label in zip(d.imagepaths, d.labels):
    #    logging.info("Predicted class for " + path + " is " + str(c.predict(path).data[0]) + " (GT: " + str(gt_label) + ")")

    ## Caffe features
    #c.add_algorithm(Caffe("", "", "fc7"))
    #c.add_algorithm(MulticlassSVM())

    #with open('run_evaluation.py', 'r') as fin:
    #    print(fin.read())

    mean_acc, mean_mAP = Evaluation.random_split_eval(
        d, c, absolute_train_per_class=1, runs=1)
    #mean_acc, mean_mAP = Evaluation.fixed_split_eval(d, c)
    logging.warning("Total accuracy is " + str(mean_acc))
    logging.warning("Total mAP is " + str(mean_mAP))
def get_instance_converter(algorithm, args):
    if algorithm == "color":
        return ColorHistogram(args.bits)
    elif algorithm == "bow":
        return BagOfWords(args.clusters)
    elif algorithm == "img":
        return OriginalImg()
    elif algorithm == "hog":
        return HOG()
    return None
def _get_feat(self, db, f_class):
    if f_class == 'color':
        f_c = Color()
    elif f_class == 'daisy':
        f_c = Daisy()
    elif f_class == 'edge':
        f_c = Edge()
    elif f_class == 'gabor':
        f_c = Gabor()
    elif f_class == 'hog':
        f_c = HOG()
    else:
        # Avoid an unbound f_c when an unknown feature class is requested
        raise ValueError("unknown feature class: {}".format(f_class))
    return f_c.make_samples(db, verbose=False)
def method_select(name):
    if name == "color":
        ret = Color()
    elif name == "edge":
        ret = Edge()
    elif name == "gabor":
        ret = Gabor()
    elif name == "daisy":
        ret = Daisy()
    elif name == "HOG":
        ret = HOG()
    else:
        # Avoid an unbound ret when an unknown method name is requested
        raise ValueError("unknown method: " + name)
    return ret
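# The dispatchers above share one pattern; a dictionary-based variant, sketched
# here under the assumption that the same feature classes are importable,
# avoids repeating an elif chain for every new method:
_METHODS = {"color": Color, "edge": Edge, "gabor": Gabor,
            "daisy": Daisy, "HOG": HOG}

def method_select_dict(name):
    try:
        return _METHODS[name]()  # instantiate on demand
    except KeyError:
        raise ValueError("unknown method: " + name)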
def Hog_and_Landmarks(train_x, val_x, test_x):
    Train_x1, Val_x1, Test_x1 = Landmarks(train_x, val_x, test_x)
    Train_x2, Val_x2, Test_x2 = HOG(train_x, val_x, test_x)
    print("Train_x1:")
    print(np.shape(Train_x1))
    print("Train_x2:")
    print(np.shape(Train_x2))
    # Concatenate landmark and HOG features along the feature axis
    Train_x3 = np.concatenate((Train_x1, Train_x2), axis=1)
    print("Train_x3:")
    print(np.shape(Train_x3))
    Val_x3 = np.concatenate((Val_x1, Val_x2), axis=1)
    Test_x3 = np.concatenate((Test_x1, Test_x2), axis=1)
    return Train_x3, Val_x3, Test_x3
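# A hedged sketch of how the fused features might be consumed downstream; the
# label arrays (train_y, val_y) and the classifier choice are assumptions, not
# the surrounding project's actual training code.
from sklearn.svm import LinearSVC

def train_on_combined(train_x, val_x, test_x, train_y, val_y):
    Train_x, Val_x, Test_x = Hog_and_Landmarks(train_x, val_x, test_x)
    clf = LinearSVC()
    clf.fit(Train_x, train_y)  # train on the concatenated feature matrix
    print("validation accuracy:", clf.score(Val_x, val_y))
    return clf, Test_x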
def __init__(self, db, f_class=None, d_type='L1'):
    self.NGT_dir = 'NGT_{}_{}'.format(f_class, d_type)
    self.NGT_path = b''
    self.fearure = f_class
    self.SQLdb = SQLite()
    if f_class == 'daisy':
        self.f_c = Daisy()
        self.NGT_path = b'NGT/NGT_daisy_' + d_type.encode()
    elif f_class == 'edge':
        self.f_c = Edge()
        self.NGT_path = b'NGT/NGT_edge_' + d_type.encode()
    elif f_class == 'hog':
        self.f_c = HOG()
        self.NGT_path = b'NGT/NGT_hog_' + d_type.encode()
    elif f_class == 'vgg':
        self.f_c = VGGNetFeat()
        self.NGT_path = b'NGT/NGT_vgg_' + d_type.encode()
    elif f_class == 'res':
        self.f_c = ResNetFeat()
        self.NGT_path = b'NGT/NGT_res_' + d_type.encode()
    if not os.path.exists(os.path.join(NGT_dir, self.NGT_dir)):
        # Build the NGT index from scratch
        samples = self.f_c.make_samples(db, verbose=False)
        dim = 0
        try:
            dim = samples[0]['hist'].shape[0]
        except Exception:
            pass
        images = []
        objects = []
        for i, row in enumerate(samples):
            vector = row['hist']
            link = row['img']
            lable = row['cls']
            data = {'index': i, 'link': link, 'lable': lable}
            images.append(data)
            objects.append(vector)
        self.SQLdb.updateMuti(f_class, images)
        # cPickle.dump(images, open(os.path.join(NGT_dir, sample_cache), "wb", True))
        ngtpy.create(path=self.NGT_path, dimension=dim, distance_type=d_type)
        self.index = ngtpy.Index(self.NGT_path)
        self.index.batch_insert(objects)
        self.index.save()
    self.index = ngtpy.Index(self.NGT_path)
        weight = int(weight)
        if name not in features.keys() or weight < 1:
            raise Exception
        return name, weight
    except:
        raise argparse.ArgumentTypeError(
            f"\nFeature must be 'name:weight'\n\tname in {features.keys()}\n\tweight >= 1"
        )


features = {
    "color": Color(),
    "daisy": Daisy(),
    "edge": Edge(),
    "gabor": Gabor(),
    "hog": HOG(),
    "vgg": VGGNetFeat(),
    "res": ResNetFeat(),
}

if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("-n", "--neighbor", help="neighbor by class",
                        type=int, default=3)
    parser.add_argument(
        "-c",
        help="Copy images in a result path (src/CBIR/result/retrieval/)",
        action="store_true")
from HOG import HOG
from DB import Database

import numpy as np
import matplotlib.pyplot as plt
from skimage import io
import os

db = Database()
a = HOG()

#root_dir = 'query_images'
#img_files = os.listdir(root_dir)
#print(img_files)
#
#results = []
#n = 3
#for img_name in img_files:
#    d = a.create_dis_list(os.path.join(root_dir, img_name))
#    results.append(d[:n])
#
#print(len(results))

size = len(db)
db_fv = np.load('hog_fv_data.npy')

dis_list = []
for i in range(size):
    first = db_fv[i]
    print('\rprocess: {}/{}'.format(i + 1, size), end='')
    min_dis = 9999999.9
    index = -1
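    # The loop above is truncated; a hedged completion sketch, assuming the
    # goal is the nearest database neighbor of each HOG vector under L2
    # distance (the inner-loop names are illustrative, not the original code):
    for j in range(size):
        if j == i:
            continue  # skip self-comparison
        dis = np.linalg.norm(first - db_fv[j])
        if dis < min_dis:
            min_dis = dis
            index = j
    dis_list.append((index, min_dis))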
# retrieve by edge
method = Edge()
samples = method.make_samples(db)
query = samples[query_idx]
_, result = infer(query, samples=samples, depth=depth, d_type=d_type)
print(result)

# retrieve by gabor
method = Gabor()
samples = method.make_samples(db)
query = samples[query_idx]
_, result = infer(query, samples=samples, depth=depth, d_type=d_type)
print(result)

# retrieve by HOG
method = HOG()
samples = method.make_samples(db)
query = samples[query_idx]
_, result = infer(query, samples=samples, depth=depth, d_type=d_type)
print(result)

# retrieve by VGG
method = VGGNetFeat()
samples = method.make_samples(db)
query = samples[query_idx]
_, result = infer(query, samples=samples, depth=depth, d_type=d_type)
print(result)

# retrieve by resnet
method = ResNetFeat()
samples = method.make_samples(db)
# # retrieve by edge
# method = Edge()
# samples = method.make_samples(db)
# query = method.histogram(linkInput)
# result = inferInput(query, samples=samples, depth=depth, d_type=d_type)
# print(result)

# # retrieve by gabor
# method = Gabor()
# samples = method.make_samples(db)
# query = samples[query_idx]
# _, result = infer(query, samples=samples, depth=depth, d_type=d_type)
# print(result)

# retrieve by HOG
method = HOG()
samples = method.make_samples(db)
query = method.get_featInput(img)
start_time = time.time()
result = inferInput(query, samples=samples, depth=depth, d_type=d_type)
end_time = time.time()
print('total run-time: %f ms' % ((end_time - start_time) * 1000))
print(result)

# retrieve by VGG
method = VGGNetFeat()
samples = method.make_samples(db)
query = method.get_featInput(img)
start_time = time.time()
def test(db, query_idx):
    results = {}

    # retrieve by color
    method = Color()
    samples = method.make_samples(db)
    query = samples[query_idx]
    # print(samples)
    img = scipy.misc.imread(query['img'])
    # print(query)
    _, result = infer(query, samples=samples, depth=depth, d_type=d_type)
    # results.append(result[0]['cls'])
    inc(results, result[0]['cls'])

    # # retrieve by daisy
    # method = Daisy()
    # samples = method.make_samples(db)
    # query = samples[query_idx]
    # _, result = infer(query, samples=samples, depth=depth, d_type=d_type)
    # # results.append(result[0]['cls'])
    # inc(results, result[0]['cls'])

    # retrieve by edge
    method = Edge()
    samples = method.make_samples(db)
    query = samples[query_idx]
    # print(samples)
    img = scipy.misc.imread(query['img'])
    _, result = infer(query, samples=samples, depth=depth, d_type=d_type)
    # results.append(result[0]['cls'])
    inc(results, result[0]['cls'])

    # # retrieve by gabor
    # method = Gabor()
    # samples = method.make_samples(db)
    # query = samples[query_idx]
    # _, result = infer(query, samples=samples, depth=depth, d_type=d_type)
    # print(result)
    # inc(results, result[0]['cls'])

    # retrieve by HOG
    method = HOG()
    samples = method.make_samples(db)
    query = samples[query_idx]
    _, result = infer(query, samples=samples, depth=depth, d_type=d_type)
    # results.append(result[0]['cls'])
    inc(results, result[0]['cls'])

    # retrieve by VGG
    method = VGGNetFeat()
    samples = method.make_samples(db)
    query = samples[query_idx]
    _, result = infer(query, samples=samples, depth=depth, d_type=d_type)
    # results.append(result[0]['cls'])
    inc(results, result[0]['cls'])

    # retrieve by resnet
    method = ResNetFeat()
    samples = method.make_samples(db)
    query = samples[query_idx]
    _, result = infer(query, samples=samples, depth=depth, d_type=d_type)
    # results.append(result[0]['cls'])
    inc(results, result[0]['cls'])

    import os
    from PIL import Image

    print(results)
    # Majority vote over the top-1 class returned by each feature
    finalresult = max(results.items(), key=operator.itemgetter(1))[0]
    #string = ".../database/" + finalresult + "/"
    string = "./database/" + finalresult + "/"
    print(string)
    a = 1
    for file in os.listdir(string):
        a += 1
        tempimg = Image.open(string + file)
        tempimg.show()
        print(string + file)
        if (a == 10):
            break
    print("Final result is: ", finalresult)
    scipy.misc.imshow(img)
# LinearSVC and joblib are required below; dataset and HOG are this
# project's local helper modules.
from sklearn.svm import LinearSVC
import joblib
import dataset
import argparse

'''Set up the argument parser which will get the CSV file and the location
where the model is to be stored'''
argparser = argparse.ArgumentParser()
argparser.add_argument("-d", "--dataset", required=True,
                       help="path to the dataset file")
argparser.add_argument("-m", "--model", required=True,
                       help="path to where the model will be stored")
args = vars(argparser.parse_args())

(digits, labels) = dataset.load_data(args["dataset"])

hog = HOG(orientations=18, pixelsPerCell=(10, 10),
          cellsPerBlock=(1, 1), normalise=True)

data = []
# Compute a HOG histogram for each deskewed digit
for digit in digits:
    digit = dataset.deskew(digit)
    hist = hog.describe(digit.reshape((28, 28)))
    data.append(hist)

# Set up and train the model
SVC_model = LinearSVC()
SVC_model.fit(data, labels)

# Save the model to file
joblib.dump(SVC_model, args["model"], compress=3)
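# Optional sanity check, a sketch not in the original script: reload the saved
# model and score it on its own training data to confirm the save/load round trip.
reloaded = joblib.load(args["model"])
print("train accuracy:", reloaded.score(data, labels))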
def faceDetector(img):
    # Boxes kept for NMS
    BoxesFace = []
    BoxesNose = []

    # Shrink overly large images to speed up detection
    while img.shape[0] > 360:
        img = cv2.resize(img, (int(np.ceil(0.8 * img.shape[1])), int(np.ceil(0.8 * img.shape[0]))))
    while img.shape[1] > 360:
        img = cv2.resize(img, (int(np.ceil(0.8 * img.shape[1])), int(np.ceil(0.8 * img.shape[0]))))

    # Face detection: the window size depends on the smaller image dimension;
    # minWinSizeFace is the minimum window size for face detection
    if img.shape[0] < img.shape[1]:
        minWinSizeFace = max((32, 32), (int(np.ceil(0.1 * img.shape[0])), int(np.ceil(0.1 * img.shape[0]))))
        winSizeFace = (img.shape[0], img.shape[0])
    else:
        minWinSizeFace = max((32, 32), (int(np.ceil(0.1 * img.shape[1])), int(np.ceil(0.1 * img.shape[1]))))
        winSizeFace = (img.shape[1], img.shape[1])
    #print("Minimum window size for face", minWinSizeFace)

    # Loop until the current window size falls below the limit
    while winSizeFace >= minWinSizeFace:
        #print("Current window size for face", winSizeFace)
        # Step size of the sliding window depends on the current window size
        StepSizeFace = max(1, int(np.ceil(0.2 * min(winSizeFace[1], winSizeFace[0]))))
        yf = 0
        while yf + winSizeFace[0] <= img.shape[0]:
            xf = 0
            while xf + winSizeFace[1] <= img.shape[1]:
                # Visualize the sliding window
                TempImageFace = np.copy(img)
                cv2.rectangle(TempImageFace, (xf, yf), (xf + winSizeFace[1], yf + winSizeFace[0]), (0, 0, 255), 1)
                cv2.imshow('window', TempImageFace)
                cv2.waitKey(1)
                time.sleep(0.0025)

                # HOG vector of the current window
                CroppedImage = img[yf:yf + winSizeFace[0], xf:xf + winSizeFace[1]]
                feature = HOG(CroppedImage)

                # Accept the window if it is a face and not inside another face
                if FaceModel.predict([feature]) == 1 and (len(BoxesFace) == 0 or validBox(BoxesFace, (xf, yf), (xf + winSizeFace[1], yf + winSizeFace[0]))):
                    BoxesFace.append([xf, yf, winSizeFace[1], winSizeFace[0]])
                    # cv2.rectangle(img, (xf, yf), (xf + winSizeFace[1], yf + winSizeFace[0]), (0, 255, 0), 2)

                    # Nose detection: window size limits depend on the face window size
                    maxWinSizeNose = (int(np.ceil(0.5 * winSizeFace[0])), int(np.ceil(0.3 * winSizeFace[1])))
                    winSizeNose = max((1, 1), (int(np.ceil(0.25 * winSizeFace[0])), int(np.ceil(0.15 * winSizeFace[1]))))
                    NoseFound = False

                    # Loop until a nose is found or the window exceeds the limit
                    while (not NoseFound) and winSizeNose < maxWinSizeNose:
                        # Step size of the sliding window depends on the current window size
                        StepSizeNose = max(1, int(np.ceil(0.5 * min(winSizeNose[0], winSizeNose[1]))))
                        yn = yf
                        while (not NoseFound) and yn + winSizeNose[0] <= yf + winSizeFace[0]:
                            xn = xf
                            while xn + winSizeNose[1] <= xf + winSizeFace[1]:
                                # Visualize the sliding window
                                TempImageNose = np.copy(TempImageFace)
                                cv2.rectangle(TempImageNose, (xn, yn), (xn + winSizeNose[1], yn + winSizeNose[0]), (255, 255, 0), 1)
                                cv2.imshow('window', TempImageNose)
                                cv2.waitKey(1)
                                time.sleep(0.0025)

                                # HOG vector of the current nose window
                                CroppedNose = img[yn:yn + winSizeNose[0], xn:xn + winSizeNose[1]]
                                featureNose = HOG(CroppedNose)

                                # Accept the window if it is a nose
                                if NoseModel.predict([featureNose]) == 1:
                                    BoxesNose.append([xn, yn, winSizeNose[1], winSizeNose[0], xf, yf])
                                    cv2.rectangle(img, (xn, yn), (xn + winSizeNose[1], yn + winSizeNose[0]), (255, 100, 0), 1)
                                    NoseFound = True
                                xn += StepSizeNose
                            yn += StepSizeNose
                        winSizeNose = (int(np.ceil(1.1 * winSizeNose[0])), int(np.ceil(1.1 * winSizeNose[1])))
                xf += StepSizeFace
            yf += StepSizeFace
        winSizeFace = (int(np.ceil(0.75 * winSizeFace[0])), int(np.ceil(0.75 * winSizeFace[1])))
    cv2.destroyAllWindows()
    return BoxesFace, BoxesNose, img
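# A minimal driver sketch for faceDetector(); the image path is hypothetical,
# and FaceModel/NoseModel are assumed to be loaded by the surrounding script.
if __name__ == "__main__":
    test_img = cv2.imread("test.jpg")
    faces, noses, annotated = faceDetector(test_img)
    print("faces:", len(faces), "noses:", len(noses))
    cv2.imshow("result", annotated)
    cv2.waitKey(0)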
class FeaturesFile(object):
    """Allows creating feature files"""
    detector = None
    extractor = None
    keywords = None
    root = None
    estension = None
    obj = None

    def __init__(self, detector, extractor, keywords,
                 root=os.path.dirname(__file__) + os.path.sep + "imm" + os.path.sep,
                 estension=".jpg"):
    #def __init__(self, detector, extractor, keywords, root="./imm2/", estension=".jpg"):
        super(FeaturesFile, self).__init__()
        self.detector = detector
        self.extractor = extractor
        self.keywords = keywords
        self.root = root
        self.estension = estension
        if detector == "HOG" and extractor == "HOG":
            self.obj = HOG()
        else:
            self.obj = detectAndExtract(detector, extractor)

    def creaFileFeatures(self, key=None):
        if key is None:
            ## IMAGE PATH PREPROCESSING
            fm = FileManager()
            file_positive = self.root + self.keywords + "/" + self.keywords + "_" + self.detector + "_" + self.extractor + ".csv"
            # Build a list of all positive (jpg) images (filtered by estension)
            listArrayPositive = fm.listNoHiddenFiles(self.root + self.keywords, self.estension)
            ## SAVE FEATURES IN FILES
            # If the file is not present yet, compute the features; otherwise read it and return the array
            if not os.path.isfile(file_positive):
                print("New positive features file " + self.detector + " + " + self.extractor + " created.")
                X_positive = []
                for k in range(0, len(listArrayPositive)):
                    # CSV and other non-image files must be skipped
                    base_name = self.root + self.keywords + "/" + self.keywords + fm.correggi(k) + self.estension
                    print("Image " + str(k) + " -> " + base_name)
                    ret = self.obj.elabora(base_name)  # [0:cut_features]
                    X_positive.append(ret)
                fm.arrayToCsv(X_positive, file_positive)
            else:
                print("Positive features file " + self.detector + " + " + self.extractor + " already exists.")
                X_positive = fm.csvToArray(file_positive)
        else:
            ## IMAGE PATH PREPROCESSING
            fm = FileManager()
            file_positive = self.root + key + "/" + key + "_" + self.detector + "_" + self.extractor + ".csv"
            # Build a list of all positive (jpg) images (filtered by estension)
            listArrayPositive = fm.listNoHiddenFiles(self.root + key, self.estension)
            ## SAVE FEATURES IN FILES
            # If the file is not present yet, compute the features; otherwise read it and return the array
            if not os.path.isfile(file_positive):
                print("New positive features file " + self.detector + " + " + self.extractor + " created.")
                X_positive = []
                for k in range(0, len(listArrayPositive)):
                    # CSV and other non-image files must be skipped
                    base_name = self.root + key + "/" + key + fm.correggi(k) + self.estension
                    print("Image " + str(k) + " -> " + base_name)
                    ret = self.obj.elabora(base_name)  # [0:cut_features]
                    X_positive.append(ret)
                fm.arrayToCsv(X_positive, file_positive)
            else:
                print("Positive features file " + self.detector + " + " + self.extractor + " already exists.")
                X_positive = fm.csvToArray(file_positive)
        return X_positive

    def negativeFeatures(self):
        ## IMAGE PATH PREPROCESSING
        fm = FileManager()
        #file_negative = self.root + self.keywords + "/" + self.keywords + "_" + self.detector + "_" + self.extractor + "_negative.csv"
        # listDir holds every folder under the image root
        listDir = fm.listNoHiddenDir(self.root)
        # Find the index of the keywords folder
        indexKeywords = listDir.index(self.keywords)
        # Remove the keywords folder from the folder list
        listDir.pop(indexKeywords)
        X_negative = []
        for i in range(0, len(listDir)):
            file_negative = self.root + listDir[i] + "/" + listDir[i] + "_" + self.detector + "_" + self.extractor + ".csv"
            if not os.path.isfile(file_negative):
                self.creaFileFeatures(listDir[i])
            else:
                X = fm.csvToArray(file_negative)
                X_negative = X_negative + X
        return X_negative

    def negativeFeaturesSingleCat(self, category):
        ## IMAGE PATH PREPROCESSING
        fm = FileManager()
        X_negative = []
        #for i in range(0, len(listDir)):
        file_negative = self.root + category + "/" + category + "_" + self.detector + "_" + self.extractor + ".csv"
        if not os.path.isfile(file_negative):
            self.creaFileFeatures(category)
        else:
            X = fm.csvToArray(file_negative)
            X_negative = X_negative + X
        return X_negative

    def getFeatures(self, category):
        X_positive = self.creaFileFeatures()
        X_negative = self.negativeFeatures()
        #X_negative = self.negativeFeaturesSingleCat(category)
        return X_positive, X_negative

    def getObj(self):
        return self.obj

    def getKeyword(self):
        return self.keywords
# argparse, joblib, and the local HOG descriptor module are required below
import argparse
import joblib
import cv2

'''Set up the argument parser to get the previously stored model's path
and the path of the image to be tested'''
argparser = argparse.ArgumentParser()
argparser.add_argument("-m", "--model", required=True,
                       help="path to where the model is stored")
argparser.add_argument("-i", "--image", required=True,
                       help="path to the image file")
args = vars(argparser.parse_args())

# Load the model from its path
SVC_model = joblib.load(args["model"])

hog = HOG(orientations=18, pixelsPerCell=(10, 10),
          cellsPerBlock=(1, 1), normalise=True)

# Load the image, convert it to grayscale, blur it, and obtain the edges
image = cv2.imread(args["image"])
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
blurred = cv2.GaussianBlur(gray, (5, 5), 0)
edged = cv2.Canny(blurred, 30, 150)

# Find all the contours in the image
# (OpenCV 3.x returns three values here; OpenCV 4.x drops the first one)
(_, contours, _) = cv2.findContours(edged.copy(), cv2.RETR_EXTERNAL,
                                    cv2.CHAIN_APPROX_SIMPLE)

# Find the bounding rectangle of each contour
rects = [cv2.boundingRect(contour) for contour in contours]

# Loop over all the rectangles
for rect in rects:
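    # The loop body is missing above; a hedged completion in the spirit of the
    # matching training script (resize, describe with HOG, predict, draw) --
    # the original steps are not shown, so treat the details as assumptions.
    (x, y, w, h) = rect
    if w >= 7 and h >= 20:                      # ignore very small contours
        roi = gray[y:y + h, x:x + w]
        roi = cv2.resize(roi, (28, 28))         # match the 28x28 training size
        hist = hog.describe(roi)
        digit = SVC_model.predict([hist])[0]
        cv2.rectangle(image, (x, y), (x + w, y + h), (0, 255, 0), 1)
        cv2.putText(image, str(digit), (x - 10, y - 10),
                    cv2.FONT_HERSHEY_SIMPLEX, 1.2, (0, 255, 0), 2)

cv2.imshow("image", image)
cv2.waitKey(0)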