def compare(file1, file2):
    """Load two image files and print the shape-context distance between
    their primary contours.

    Args:
        file1: path of the first image (read with cv2.imread).
        file2: path of the second image.

    Side effects: prints progress and the computed distance; returns None.
    """
    # Fix: original used Python 2 `print` statements, which are a syntax
    # error under Python 3 (the rest of this codebase is Python 3).
    print("Starting [compare]")
    c1 = get_contour(cv2.imread(file1))
    c2 = get_contour(cv2.imread(file2))
    # Warning: bug in OpenCV 3.1.0. Always return 0.
    print(cv2.createShapeContextDistanceExtractor().computeDistance(c1[0], c2[0]))
    print("Done")
def result_calculation(self, img_1, img_2):
    """Return the shape-context distance between two contour arrays.

    On any OpenCV failure the error is printed and the sentinel value
    100 is returned instead of raising.
    """
    extractor = cv2.createShapeContextDistanceExtractor()
    try:
        return extractor.computeDistance(img_1, img_2)
    except Exception as err:  # best-effort: map any failure to the sentinel
        print(err)
        return 100
def test_computeDistance(self):
    """Check Hausdorff and shape-context distances on the two shape samples."""
    a = self.get_sample('samples/data/shape_sample/1.png', cv.IMREAD_GRAYSCALE)
    b = self.get_sample('samples/data/shape_sample/2.png', cv.IMREAD_GRAYSCALE)
    # Fix: the original `_, ca, _ = cv.findContours(...)` assumes the
    # OpenCV 3.x 3-tuple return (image, contours, hierarchy) and raises
    # ValueError on OpenCV 4.x, which returns (contours, hierarchy).
    # Indexing [-2] selects the contour list on both API versions.
    ca = cv.findContours(a, cv.RETR_CCOMP, cv.CHAIN_APPROX_TC89_KCOS)[-2]
    cb = cv.findContours(b, cv.RETR_CCOMP, cv.CHAIN_APPROX_TC89_KCOS)[-2]
    hd = cv.createHausdorffDistanceExtractor()
    sd = cv.createShapeContextDistanceExtractor()
    d1 = hd.computeDistance(ca[0], cb[0])
    d2 = sd.computeDistance(ca[0], cb[0])
    self.assertAlmostEqual(d1, 26.4196891785, 3, "HausdorffDistanceExtractor")
    self.assertAlmostEqual(d2, 0.25804194808, 3, "ShapeContextDistanceExtractor")
def shape_context_distance(icon_contour: np.ndarray, image_contour: np.ndarray) -> float:
    """Calculates the shape context distance between two contours.

    Arguments:
        icon_contour: A list with shape (n, 1, 2). Represents the template icon contour.
        image_contour: A list with shape (n, 1, 2). Represents the image patch contour.
            (Note: function will fail unless the number of channels is 2.)

    Returns:
        float: the shape context distance between the two contours.
    """
    # A fresh extractor is created per call; all OpenCV defaults apply.
    extractor = cv2.createShapeContextDistanceExtractor()
    return extractor.computeDistance(icon_contour, image_contour)
def test_computeDistance(self):
    """Verify both distance extractors against known reference values."""
    img1 = cv.imread(os.path.join(MODULE_DIR, 'samples/data/shape_sample/1.png'), cv.IMREAD_GRAYSCALE)
    img2 = cv.imread(os.path.join(MODULE_DIR, 'samples/data/shape_sample/2.png'), cv.IMREAD_GRAYSCALE)
    # Skip (rather than fail) when the sample data is not checked out.
    if img1 is None or img2 is None:
        raise unittest.SkipTest("Missing files with test data")
    contours1, _ = cv.findContours(img1, cv.RETR_CCOMP, cv.CHAIN_APPROX_TC89_KCOS)
    contours2, _ = cv.findContours(img2, cv.RETR_CCOMP, cv.CHAIN_APPROX_TC89_KCOS)
    hausdorff = cv.createHausdorffDistanceExtractor()
    shape_context = cv.createShapeContextDistanceExtractor()
    hausdorff_dist = hausdorff.computeDistance(contours1[0], contours2[0])
    shape_dist = shape_context.computeDistance(contours1[0], contours2[0])
    self.assertAlmostEqual(hausdorff_dist, 26.4196891785, 3, "HausdorffDistanceExtractor")
    self.assertAlmostEqual(shape_dist, 0.25804194808, 3, "ShapeContextDistanceExtractor")
def test_computeDistance(self):
    """Compare the two reference shapes with Hausdorff and shape-context metrics."""
    sample_a = self.get_sample('samples/data/shape_sample/1.png', cv.IMREAD_GRAYSCALE)
    sample_b = self.get_sample('samples/data/shape_sample/2.png', cv.IMREAD_GRAYSCALE)
    cnts_a, _ = cv.findContours(sample_a, cv.RETR_CCOMP, cv.CHAIN_APPROX_TC89_KCOS)
    cnts_b, _ = cv.findContours(sample_b, cv.RETR_CCOMP, cv.CHAIN_APPROX_TC89_KCOS)
    extractor_hd = cv.createHausdorffDistanceExtractor()
    extractor_sc = cv.createShapeContextDistanceExtractor()
    dist_hd = extractor_hd.computeDistance(cnts_a[0], cnts_b[0])
    dist_sc = extractor_sc.computeDistance(cnts_a[0], cnts_b[0])
    self.assertAlmostEqual(dist_hd, 26.4196891785, 3, "HausdorffDistanceExtractor")
    self.assertAlmostEqual(dist_sc, 0.25804194808, 3, "ShapeContextDistanceExtractor")
# Script fragment: binarize two more images, grab their first contour and
# compare all three contours with the shape-context distance.
# (img2, cnt1 are defined earlier in the script, outside this view.)
cv2.imshow('original2', img2)
gray2 = cv2.cvtColor(img2, cv2.COLOR_BGR2GRAY)
t, binary2 = cv2.threshold(gray2, 127, 255, cv2.THRESH_BINARY)
contours2, hierarchy = cv2.findContours(binary2, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
cnt2 = contours2[0]
o3 = cv2.imread('img10-2.jpg')
img3 = o3
cv2.imshow('original3', img3)
gray3 = cv2.cvtColor(img3, cv2.COLOR_BGR2GRAY)
t, binary3 = cv2.threshold(gray3, 127, 255, cv2.THRESH_BINARY)
contours3, hierarchy = cv2.findContours(binary3, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
cnt3 = contours3[0]
sd = cv2.createShapeContextDistanceExtractor()
# Fix: original read `sd.computeDistance(cvt1, cnt1)` — `cvt1` appears
# nowhere else and the printout says "distance with itself", so both
# arguments should be cnt1 (likely a typo / NameError at runtime).
d1 = sd.computeDistance(cnt1, cnt1)
print('The distance with itself d1 = ', d1)
d2 = sd.computeDistance(cnt1, cnt2)
print('The distance between rotation item = ', d2)
d3 = sd.computeDistance(cnt1, cnt3)
print('The distance between different part = ', d3)
cv2.waitKey()
cv2.destroyAllWindows()
def extrai(path, identificador):
    """Extract the five largest signature regions from a scanned sheet and
    compare the first one against the others with the shape-context distance.

    Args:
        path: path of the input image (read with cv2.imread).
        identificador: run/session id forwarded to utils.save for output naming.

    Returns:
        bool: True when every compared signature passes the distance and
        size-compatibility checks, False otherwise.

    Side effects: writes many intermediate debug images via utils.save and a
    "calc.txt" report via utils.buildPath.
    """
    # --- preprocessing: shrink, remove shadows, grayscale -----------------
    color = cv2.imread(path, -1)
    color = cv2.resize(color, (0, 0), fx = 0.3, fy = 0.3)
    imgOriginal = color.copy()
    color = utils.removeSombras(color)  # shadow removal (project helper)
    utils.save('semSombra.jpg', color, id=identificador)
    #imgOriginal, color = recuperaAreaAssinada(color.copy(), imgOriginal, identificador)
    #utils.save('antesGray.jpg', color, id=identificador)
    imgGray = cv2.cvtColor(color, cv2.COLOR_BGR2GRAY)
    imgPbOriginal = imgGray.copy()  # untouched grayscale kept for ROI crops
    utils.save('pb1.jpg', imgGray, id=identificador)
    #imgGray = rotate_bound(imgGray, 90)
    #utils.save('pb2.jpg', imgGray)
    #imgGray = cv2.blur(imgGray, (blurI, blurI))
    #utils.save('blur.jpg', imgGray)
    utils.save('AntesThr.jpg', imgGray, id=identificador)
    # --- contour detection: keep the 5 largest, ordered top-to-bottom -----
    imgGray, contours, hierarchy = extraiContornos(imgGray, identificador)
    utils.save('thr.jpg', imgGray, id=identificador)
    cnts2 = sorted(contours, key=functionSort, reverse=True)[0:5]
    printaContornoEncontrado(imgOriginal, cnts2, identificador)
    cnts2 = sorted(cnts2, key=functionSortPrimeiroPapel)
    printaOrdem(imgOriginal, cnts2, identificador)
    originalEmGray = cv2.cvtColor(imgOriginal, cv2.COLOR_BGR2GRAY)
    #originalHisto = cv2.equalizeHist(originalEmGray)
    originalHisto = originalEmGray
    lista = dict()   # i -> largest cleaned contour of region i
    cntArr = dict()  # same content; kept for the commented matchShapes path
    #ratioDilatacao = recuperaRatioDilatacao(cnts2, imgPbOriginal, identificador)
    # --- per-region cleanup: threshold, denoise, dilate, pick main contour
    for i, c in enumerate(cnts2):
        x, y, w, h = cv2.boundingRect(c)
        b = 0  # border margin around the ROI (currently disabled)
        #print('{} x={} - y{}'.format(i,x,y))
        utils.save('imgPbSemSombra2-{}.jpg'.format(i), imgPbOriginal, id=identificador)
        roi = imgPbOriginal[y-b:y + h+b, x-b:x + w+b]
        utils.save('roi_{}.jpg'.format(i), roi, id=identificador)
        #utils.save('_1_hist_{}.jpg'.format(i), roi)
        #roi = utils.resize(roi, width=300, height=300)
        resized = roi.copy()
        #resized = cv2.blur(resized, (blurI,blurI))
        #utils.save('__{}_blur1.jpg'.format(i), resized)
        #resized = cv2.cvtColor(resized, cv2.COLOR_BGR2GRAY)
        #resized = cv2.blur(resized, (5,5))
        # Otsu picks the threshold; 120 is only the (ignored) fallback value.
        retval, resized = cv2.threshold(resized, 120, 255, type = cv2.THRESH_BINARY_INV | cv2.THRESH_OTSU)
        resized = utils.removeContornosPqnosImg(resized)  # drop tiny blobs
        utils.save('t_{}.jpg'.format(i), resized, id=identificador)
        # NOTE(review): cv2.waitKey(0) blocks until a key press — looks like
        # leftover debugging; confirm before running headless.
        cv2.waitKey(0)
        #print('ratioDilatacao ' + str(ratioDilatacao))
        resized = utils.dilatation(resized, ratio=0.3)
        utils.save('t1_{}.jpg'.format(i), resized, id=identificador)
        # 3-tuple unpack implies this code targets the OpenCV 3.x findContours API.
        im2, contours2, hierarchy = cv2.findContours(resized, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
        print('Ajustando espacos')
        contours2, resized = ajustaEspacosContorno(contours2, resized)
        print('espacos ajustados')
        cnts = sorted(contours2, key=functionSort, reverse=True)[0]  # largest contour
        #roi = cv2.cvtColor(roi, cv2.COLOR_BGR2GRAY)
        # Render the chosen contour filled on a black canvas for inspection.
        novaMat = np.zeros(roi.shape, dtype = "uint8")
        cv2.drawContours(novaMat, [cnts], -1, 255, -1)
        #novaMat = cv2.resize(novaMat, (200,200), interpolation = cv2.INTER_AREA)
        #lista[i] = mahotas.features.zernike_moments(novaMat, 21)
        lista[i] = cnts
        cntArr[i] = cnts
        utils.save('_img_{}.jpg'.format(i), novaMat, id=identificador)
    #utils.show(color)
    # --- comparison: reference region 0 vs. all five regions --------------
    hd = cv2.createHausdorffDistanceExtractor()
    sd = cv2.createShapeContextDistanceExtractor()
    out = ""       # per-pair distance report
    sizeOut = ""   # per-region dimension report
    resultadoApi = True
    imgResultado = imgOriginal.copy()
    for idx1 in range(0,1): # take only the first signature and compare it with the others
        item1 = lista[idx1]
        altura1, largura1 = calculaAlturaLargura(item1)
        soma = 0
        for idx2 in range(0,5):
            item2 = lista[idx2]
            altura2, largura2 = calculaAlturaLargura(item2)
            #sizeOut += 'Altura {} - {} = {} / {}\n'.format(altura1, altura2, abs(altura1 - altura2), calcPercentual(largura1, largura2))
            #sizeOut += 'Largura {} - {} = {} / {}\n'.format(largura1, largura2, abs(largura1 - largura2), calcPercentual(largura1, largura2))
            sizeOut += 'Dimensao {} x {} \n'.format(largura2, altura2)
            tamanhoCompativel = alturaLarguraCompativel(altura1, largura1, altura2, largura2)
            #match = hd.computeDistance(item1, item2)
            #match = cv2.matchShapes(cntArr[idx1], cntArr[idx2], 1, 0.0)
            # Shape-context distance is not symmetric, so measure both ways.
            ida = sd.computeDistance(item1, item2)
            volta = sd.computeDistance(item2, item1)
            #ida = dist.euclidean(item1, item2)
            #volta = dist.euclidean(item2, item1)
            ida = round(ida, 5)
            volta = round(volta, 5)
            out += '{} vs {} ({}) == {} - {}\n'.format(idx1, idx2, tamanhoCompativel, ida, volta)
            #BGR
            if ( idx2 == 0 ):
                # Reference region always marked green.
                imgResultado = contorna(imgResultado, cnts2[idx2], (0,255,0)) # success
            elif ( ida < 10 and volta < 10 and tamanhoCompativel == True):
                imgResultado = contorna(imgResultado, cnts2[idx2], (0,255,0)) # success
            else:
                imgResultado = contorna(imgResultado, cnts2[idx2], (0,0,255)) # failure
                resultadoApi = False
    # --- report ----------------------------------------------------------
    pathTxt = utils.buildPath(identificador, path="calc.txt")
    with open(pathTxt, "w") as text_file:
        text_file.write(sizeOut)
        text_file.write('\n')
        text_file.write(out)
    utils.save(names.RESULTADO, imgResultado, id=identificador)
    return resultadoApi
def extrai(path, pathCnh, identificador):
    """Locate five signature bands on a scanned sheet, compare the first band
    against the others (shape-context, Hausdorff, moments, density), and
    optionally compare each band against a CNH (driver's licence) signature.

    Args:
        path: path of the signature-sheet image.
        pathCnh: path of the CNH image; may point to a missing file, in which
            case the CNH comparison is skipped.
        identificador: run/session id forwarded to utils.save / utils.buildPath.

    Returns:
        dict with keys 'folhaAssinatura' (bool pass/fail for the sheet),
        'resultadoCnh' (always False here) and 'percentCnh' (similarity
        percentages against the CNH, empty when no CNH image exists).

    Raises:
        QtdeAssinaturasException: when the number of detected bands is not 5.

    Side effects: writes many debug images and a "calc.txt" report.
    """
    # --- tunable parameters loaded from the database ----------------------
    paramsDb = db.select()
    valorAceitavel = paramsDb[1]
    # NOTE(review): CNH tolerance reuses paramsDb[1]; if a separate CNH value
    # exists in the table this may be an off-by-one index — confirm.
    valorAceitavelCnh = paramsDb[1]
    whTolerancia = paramsDb[2]        # allowed width/height variation (%)
    pxWhiteTolerancia = paramsDb[3]   # allowed pixel-density variation (%)
    paramsOut = """
    PARAMETROS
    Tolerancia Pontos: {0}
    Tolerancia Pontos CNH: {1}
    Variacao no tamanho: {2}%
    Tolerancia densidade: {3}%\n
    """.format(valorAceitavel, valorAceitavelCnh, whTolerancia,
               pxWhiteTolerancia)
    densidadeOut = ""
    cnhColor = cv2.imread(pathCnh, -1)
    existeCnh = (cnhColor is not None)  # imread returns None for missing files
    if (existeCnh == True):
        print("Existe")
    # --- preprocessing: shrink, remove shadows, grayscale -----------------
    color = cv2.imread(path, -1)
    color = cv2.resize(color, (0, 0), fx=0.3, fy=0.3)
    imgOriginal = color.copy()
    color = utils.removeSombras(color)
    utils.save('semSombra.jpg', color, id=identificador)
    imgGray = cv2.cvtColor(color, cv2.COLOR_BGR2GRAY)
    imgPbOriginal = imgGray.copy()  # clean grayscale kept for band crops
    utils.save('pb1.jpg', imgGray, id=identificador)
    imgGray, contours, hierarchy = extraiContornos(imgGray, identificador)
    utils.save('thr.jpg', imgGray, id=identificador)
    cnts2 = sorted(contours, key=sortAltura, reverse=True)
    # --- group contours into horizontal signature bands -------------------
    assinaturas = list()  # list of (y_top, y_bottom) bands, 5px padding
    for i, c in enumerate(cnts2):
        x, y, w, h = cv2.boundingRect(c)
        existeEntre = existeEntreAlgumaFaixa(assinaturas, y, h)
        if existeEntre == False:
            assinaturas.append((y - 5, y + h + 5))
    imgCopy = imgOriginal.copy()
    larguraImg = imgOriginal.shape[1]
    for ass in assinaturas:
        cv2.rectangle(imgCopy, (50, ass[0]), (larguraImg - 50, ass[1]),
                      (255, 0, 0), 2)
    utils.save('identificadas_ass.jpg', imgCopy, id=identificador)
    # Exactly 5 signatures are expected on the sheet.
    if len(assinaturas) != 5:
        msgEx = "Numero de assinaturas encontradas ({}) é diferente do esperado (5)".format(
            len(assinaturas))
        raise QtdeAssinaturasException(msgEx, identificador)
    assinaturas = sorted(assinaturas)  # top-to-bottom order
    lista = dict()  # i -> (contour, band, cropped square, density)
    #ratioDilatacao = recuperaRatioDilatacao(cnts2, imgPbOriginal, identificador)
    # --- per-band cleanup and feature extraction --------------------------
    for i, ass in enumerate(assinaturas):
        roi = imgPbOriginal[ass[0]:ass[1], 0:larguraImg]
        utils.save('roi_{}.jpg'.format(i), roi, id=identificador)
        #roi = utils.resize(roi, width=300, height=300)
        resized = roi.copy()
        #resized = cv2.blur(resized, (blurI,blurI))
        #utils.save('__{}_blur1.jpg'.format(i), resized)
        #resized = cv2.cvtColor(resized, cv2.COLOR_BGR2GRAY)
        #resized = cv2.blur(resized, (5,5))
        # Otsu picks the threshold; 120 is only the (ignored) fallback value.
        retval, resized = cv2.threshold(
            resized, 120, 255, type=cv2.THRESH_BINARY_INV | cv2.THRESH_OTSU)
        utils.save('th_roi_{}.jpg'.format(i), resized, id=identificador)
        resized, densidade = utils.removeContornosPqnosImg(resized)
        utils.save('t_{}.jpg'.format(i), resized, id=identificador)
        #cv2.waitKey(0)
        #print('ratioDilatacao ' + str(ratioDilatacao))
        #resized = utils.dilatation(resized, ratio=0.4)
        utils.save('t1_{}.jpg'.format(i), resized, id=identificador)
        # 3-tuple unpack implies the OpenCV 3.x findContours API.
        im2, contours2, hierarchy = cv2.findContours(
            resized, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
        contours2, resized = utils.ajustaEspacosContorno(contours2, resized)
        cnts = sorted(contours2, key=functionSort, reverse=True)[0]  # largest
        novaMat = np.zeros(roi.shape, dtype="uint8")
        cv2.drawContours(novaMat, [cnts], -1, 255, -1)
        # Tight crop of the filled contour ("square") used for moments/CNH.
        xA, yA, wA, hA = cv2.boundingRect(cnts)
        square = novaMat[yA:yA + hA, xA:xA + wA]
        utils.save('square_{}.jpg'.format(i), square, id=identificador)
        #moment = mahotas.features.zernike_moments(square, 21)
        densidadeOut += "Densidade {} = {}\n".format(i, densidade)
        lista[i] = cnts, ass, square, densidade
    #utils.show(color)
    # --- comparison: band 0 vs. all bands, plus optional CNH check --------
    hd = cv2.createHausdorffDistanceExtractor()
    sd = cv2.createShapeContextDistanceExtractor()
    out = ""        # per-pair distance report
    outCnh = ""     # CNH similarity report
    sizeOut = ""    # per-band dimension report
    resultadoApi = True
    imgResultado = imgOriginal.copy()
    percentCnh = []
    for idx1 in range(
            0, 1):  # take only the first signature and compare it with the others
        item1 = lista[idx1][0]
        square1 = lista[idx1][2]
        dens1 = lista[idx1][3]
        altura1, largura1 = calculaAlturaLargura(item1)
        soma = 0
        item1 = transformaItem(square1, altura1, largura1, identificador, idx1)
        itemCnh = None
        if (existeCnh == True):
            itemCnh, squareCnh = cnh.validaAssinaturaCnh(
                cnhColor, square1, identificador)
            # idx 6 tags the CNH in the debug output (bands use 0..4).
            itemCnh = transformaItem(squareCnh, altura1, largura1,
                                     identificador, 6)
            print("Contornos img_6 = " + str(len(itemCnh)))
        for idx2 in range(0, 5):
            print("Processando imagem " + str(idx2))
            item2 = lista[idx2][0]
            ass = lista[idx2][1]
            square2 = lista[idx2][2]
            dens2 = lista[idx2][3]
            altura2, largura2 = calculaAlturaLargura(item2)
            sizeOut += 'Dimensao {} x {} \n'.format(largura2, altura2)
            tamanhoCompativel = alturaLarguraCompativel(
                altura1, largura1, altura2, largura2, whTolerancia)
            densidadeCompativel = calcDensidadeCompativel(
                dens1, dens2, pxWhiteTolerancia)
            item2 = transformaItem(square2, altura1, largura1, identificador,
                                   idx2)
            print("Contornos img_" + str(idx2) + " = " + str(len(item2)))
            #match = hd.computeDistance(item1, item2)
            if (idx1 != idx2):
                # Both distances are asymmetric, so compute each direction.
                idaSD = round(sd.computeDistance(item1, item2), 5)
                voltaSD = round(sd.computeDistance(item2, item1), 5)
                idaHD = round(hd.computeDistance(item1, item2), 5)
                voltaHD = round(hd.computeDistance(item2, item1), 5)
                idaMM = calculaMoment(item1, idx1, item2, idx2, identificador)
                voltaMM = idaMM
            else:
                # Self-comparison: all distances are zero by definition.
                idaSD = 0
                voltaSD = 0
                idaHD = 0
                voltaHD = 0
                idaMM = 0
                voltaMM = 0
            if (existeCnh == True):
                idaCnh = round(sd.computeDistance(item2, itemCnh), 5)
                voltaCnh = round(sd.computeDistance(itemCnh, item2), 5)
                percentSimCnh = calculaSimilaridade(idaCnh, voltaCnh,
                                                    valorAceitavelCnh)
                outCnh += '{} == {} - {} = {}%\n'.format(
                    idx2, idaCnh, voltaCnh, percentSimCnh)
                percentCnh.append(percentSimCnh)
            #ida = dist.euclidean(item1, item2)
            #volta = dist.euclidean(item2, item1)
            out += '{} vs {} (T{}, D{}) \n'.format(idx1, idx2,
                                                   tamanhoCompativel,
                                                   densidadeCompativel)
            out += '----SD: {} - {} \n'.format(idaSD, voltaSD)
            out += '----HD: {} - {} \n'.format(idaHD, voltaHD)
            out += '----MH: {} - {} \n'.format(idaMM, voltaMM)
            #BGR
            # Only the shape-context distances gate the pass/fail decision;
            # HD and moment values are report-only.
            if (idaSD < valorAceitavel and voltaSD < valorAceitavel
                    and tamanhoCompativel == True
                    and densidadeCompativel == True):
                imgResultado = contorna(imgResultado, larguraImg, ass,
                                        (0, 255, 0))  # success
            else:
                imgResultado = contorna(imgResultado, larguraImg, ass,
                                        (0, 0, 255))  # failure
                resultadoApi = False
    # --- report ----------------------------------------------------------
    pathTxt = utils.buildPath(identificador, path="calc.txt")
    with open(pathTxt, "w") as text_file:
        text_file.write(paramsOut)
        text_file.write('\n')
        text_file.write(sizeOut)
        text_file.write('\n')
        text_file.write(densidadeOut)
        text_file.write('\n')
        text_file.write(out)
        text_file.write('\n')
        text_file.write(outCnh)
    utils.save(names.RESULTADO, imgResultado, id=identificador)
    return {
        'folhaAssinatura': resultadoApi,
        'resultadoCnh': False,
        'percentCnh': percentCnh
    }
def create_model(features, ncomp=1):
    """Fit a diagonal-covariance Gaussian mixture to the given feature rows.

    Args:
        features: 2-D array-like of samples (rows) used to fit the GMM.
        ncomp: number of mixture components.

    Returns:
        The fitted GMM instance.
    """
    gmm = GMM(n_components=ncomp, covariance_type='diag')
    gmm.fit(features)
    return gmm

# --- script body (continues beyond this view) ----------------------------
# Color model over RGB pixels; rgb_data is defined earlier in the file.
clr_model = create_model(rgb_data, ncomp=2)
kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (5, 5))
kernel2 = cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3))
# NOTE(review): the file handles opened here (pickle source and out_file)
# are never closed in this view — consider `with` blocks; confirm against
# the rest of the script.
shp_model = pickle.load(open(model_file, 'rb'))
out_file = open(out_filename, 'w')
writer = csv.writer(out_file)
sc = cv2.createShapeContextDistanceExtractor(nAngularBins=12, nRadialBins=5,
                                             innerRadius=0.125, outerRadius=2)
sc.setRotationInvariant(True)
for f in files[2:]:
    ind = 0
    # Only process JPEG files.
    if not (f.endswith('.JPG') or f.endswith('.jpg')):
        continue
    print(f)
    img = cv2.imread(os.path.join(img_dir, f))
    h, w = img.shape[:2]
    img_vis = img.copy()
    #img_hsv = cv2.cvtColor(img,cv2.COLOR_BGR2HSV)
    # Flatten to one BGR pixel per row and score against the color model.
    X = img.reshape(-1, 3)
    prob = clr_model.score(X)
#!/usr/bin/python # coding=utf-8 import cv2 as cv import time import numpy as np from utility.threshold import getTresholdByReExtrem from img_auto_canny_thresh import auto_canny import random as rng mysc = cv.createShapeContextDistanceExtractor() mync = cv.createHausdorffDistanceExtractor() def get_main_cnt(): """ 找到主要的轮廓 :return: """ def match_shape(f1, f2): img1 = cv.imread(f1, 0) img2 = cv.imread(f2, 0) # ########################################################use canny# ##################################### t1 = time.time() canny_thr1 = auto_canny(img1) query_canny = cv.Canny(img1, canny_thr1, canny_thr1 * 2, apertureSize=3) canny_thr2 = auto_canny(img2)
def run(self):
    """Worker-thread entry point: rank self.img_paths by similarity to the
    query image using the configured method ('hu', 'emd', 'ssim', 'idsc',
    'scd' or 'hausdorff'), then emit the sorted results.

    Emits step_signal per processed image (some branches only), then
    done_signal with a list of (image_path, score) pairs and quits the
    thread. Lower score means more similar except for 'ssim', which sorts
    descending.
    """
    if self.query_path == '':
        return
    sim_dict = {}
    sim_dict_flipped = {}  # NOTE(review): never used in this view
    img = cv_imread(self.query_path)
    resized_q = cv2.resize(img, (self.resize_w, self.resize_h)) if self.do_resize else img
    resized_qbinary = cv_toBinary(resized_q, self.do_smooth)
    self.img_query_group.clear()
    self.counter_query_group.clear()
    # Build query variants: every rotation step, original and mirrored.
    # Group layout: even indices = original, odd indices = flipped.
    for i in range(360 // self.rot_angle):
        rot = cv_rotate(img, i * self.rot_angle)
        rot_flip = cv_rotate(cv2.flip(img, 1), i * self.rot_angle)
        resized_q = cv2.resize(rot, (self.resize_w, self.resize_h)) if self.do_resize else rot
        resized_qbinary = cv_toBinary(resized_q, self.do_smooth)
        resized_q_flip = cv2.resize(
            rot_flip, (self.resize_w, self.resize_h)) if self.do_resize else rot_flip
        resized_qbinary_flip = cv_toBinary(resized_q_flip, self.do_smooth)
        c0, _ = cv2.findContours(resized_qbinary, cv2.RETR_CCOMP, cv2.CHAIN_APPROX_NONE)
        # Longest contour is taken as the shape outline.
        cq = self._sample_contour_points(max(
            c0, key=len), self.sample_rate) if self.do_sample else max(
                c0, key=len)
        c0_flip, _ = cv2.findContours(resized_qbinary_flip, cv2.RETR_CCOMP, cv2.CHAIN_APPROX_NONE)
        # NOTE(review): the non-sampled fallback below uses max(c0, ...) —
        # the UNFLIPPED contours — where max(c0_flip, ...) looks intended;
        # confirm before relying on the do_sample=False path.
        cq_flip = self._sample_contour_points(
            max(c0_flip, key=len),
            self.sample_rate) if self.do_sample else max(c0, key=len)
        self.img_query_group.append(rot)
        self.img_query_group.append(rot_flip)
        self.counter_query_group.append(cq)
        self.counter_query_group.append(cq_flip)
    if self.method == 'hu':
        # Hu-moment similarity (lower = more similar).
        for i, image_path in enumerate(self.img_paths):
            self.step_signal.emit(i + 1)
            sim = measure_img_sim.hu_moment_sim(self.query_path, image_path)
            sim_dict[image_path] = sim
        sorted_list = sorted(sim_dict.items(), key=lambda x: x[1])
    elif self.method == 'emd':
        # Earth mover's distance (lower = more similar).
        for image_path in self.img_paths:
            sim = measure_img_sim.earth_movers_distance(
                self.query_path, image_path)
            sim_dict[image_path] = sim
        sorted_list = sorted(sim_dict.items(), key=lambda x: x[1])
    elif self.method == 'ssim':
        # Structural similarity (higher = more similar, hence reverse sort).
        for image_path in self.img_paths:
            sim = measure_img_sim.structural_sim(self.query_path, image_path)
            sim_dict[image_path] = sim
        sorted_list = sorted(sim_dict.items(),
                             key=lambda x: x[1], reverse=True)
    else:
        ''' Shape based methods '''
        if self.method == 'idsc':
            # Inner-distance shape context with cached .npy descriptors.
            dist = euclidean
            idsc = IDSC()
            query_descriptor = idsc.describe(resized_qbinary)
            for image_path in self.img_paths:
                print('comparing' + image_path)
                npy_path = image_path.replace('.jpg', '.npy')
                if not os.path.exists(npy_path):
                    img_descriptor = idsc.describe(cv_toBinary(image_path),
                                                   self.do_smooth)
                    np.save(npy_path, img_descriptor)
                else:
                    img_descriptor = np.load(npy_path)
                sim = dist(query_descriptor.flat, img_descriptor.flat)
                sim_dict[image_path] = sim
            sorted_list = sorted(sim_dict.items(), key=lambda x: x[1])
        elif self.method == 'scd':
            # OpenCV shape-context distance against every query variant;
            # keep the best (minimum) distance plus the flip/rotation that
            # produced it.
            scd = cv2.createShapeContextDistanceExtractor()
            scd.setRotationInvariant(self.rotation_invariant)
            for i, image_path in enumerate(self.img_paths):
                print(image_path, end=" ")
                self.step_signal.emit(i + 1)
                resized_mbinary = cv_toBinary(
                    cv2.resize(cv_imread(image_path),
                               (self.resize_w,
                                self.resize_h)) if self.do_resize else
                    cv_imread(image_path), self.do_smooth)
                blank = 255 - np.zeros(resized_mbinary.shape, np.uint8)
                c1, _ = cv2.findContours(resized_mbinary, cv2.RETR_CCOMP,
                                         cv2.CHAIN_APPROX_NONE)
                cm = self._sample_contour_points(
                    max(c1, key=len),
                    self.sample_rate) if self.do_sample else max(c1, key=len)
                if self.visualize:
                    resized_mbinary = cv2.drawContours(cv2.cvtColor(
                        resized_mbinary, cv2.COLOR_GRAY2BGR), [cm], 0,
                        (0, 0, 255), thickness=1)
                    cv2.imshow('aa', resized_mbinary)
                    cv2.waitKey(1)
                if self.save_countours:
                    # Dump the sampled contour points as a white image with
                    # black dots, mirroring the source directory layout.
                    print(cm.shape)
                    save_path = image_path.replace('\\', '/')
                    save_path = save_path.replace('轮廓图', 'counters')
                    name = save_path.split('/')[-1]
                    save_dir = save_path.replace(name, '')
                    if not os.path.exists(save_dir):
                        os.makedirs(save_dir)
                    # NOTE(review): `i` here shadows the enumerate index of
                    # the outer image loop — harmless only because `i` is
                    # not read again before the next iteration rebinds it.
                    for i in cm:
                        cv2.circle(blank, (i[0][0], i[0][1]),
                                   radius=1, color=(0, 0, 0), thickness=-1)
                        # blank[int(i[0][1]), int(i[0][0])] = 0
                    cv2.imencode('.jpg', blank)[1].tofile(save_path)
                print(len(cm), end=" ")
                scd.setIterations(self.iterations)
                min_dist = sys.maxsize
                flipped = False
                rot_angle = 0
                for i, cq in enumerate(self.counter_query_group):
                    dist = scd.computeDistance(cq, cm)
                    if dist < min_dist:
                        min_dist = dist
                        flipped = i % 2 == 1          # odd index = mirrored variant
                        rot_angle = i // 2 * self.rot_angle
                print(dist, "flip:", flipped, "rot_angle:", rot_angle)
                sim_dict[image_path] = (min_dist, flipped, rot_angle)
            sorted_list = sorted(sim_dict.items(), key=lambda x: x[1][0])
        elif self.method == 'hausdorff':
            hsd = cv2.createHausdorffDistanceExtractor()
            for i, image_path in enumerate(self.img_paths):
                print(image_path)
                self.step_signal.emit(i + 1)
                resized_mbinary = cv_toBinary(
                    cv2.resize(cv_imread(image_path),
                               (self.resize_w,
                                self.resize_h)) if self.do_resize else
                    cv_imread(image_path), self.do_smooth)
                c1, _ = cv2.findContours(resized_mbinary, cv2.RETR_CCOMP,
                                         cv2.CHAIN_APPROX_TC89_KCOS)
                cm = self._sample_contour_points(
                    max(c1, key=len),
                    self.sample_rate) if self.do_sample else max(c1, key=len)
                # NOTE(review): `cq` is whatever value was left over from the
                # query-variant loop above (the last variant), not a choice
                # per image — looks unintentional; confirm.
                sim = hsd.computeDistance(cq, cm)
                sim_dict[image_path] = sim
            sorted_list = sorted(sim_dict.items(), key=lambda x: x[1])
    self.done_signal.emit(sorted_list)
    self.quit()