def test_FFT_fft_vs_raw_calculation(self):
    """FFT.fft() on an equalized grayscale image must match numpy.fft.fft2."""
    grayscale = cv2.cvtColor(self.image, cv2.COLOR_BGR2GRAY)
    equalized = Normalization.equalizeHistograms(grayscale)
    wrapped = self.fft.fft(equalized)
    expected = numpy.fft.fft2(equalized)
    self.assertTrue((wrapped == expected).all())
def test_Features_detect_kp_ORB_vs_clean_class(self):
    """detect_kp_ORB() on self.feats must agree with a freshly built Features."""
    grayscale = cv2.cvtColor(self.image, cv2.COLOR_BGR2GRAY)
    equalized = Normalization.equalizeHistograms(grayscale)
    detected = self.feats.detect_kp_ORB(equalized)
    fresh = Features()
    expected = fresh.detect_kp_ORB(equalized)
    self.assertEqual(detected, expected)
def test_Edges_sumCanny_vs_raw_calculation(self):
    """Edges.sumCanny() must equal numpy.sum over a raw cv2.Canny result."""
    grayscale = cv2.cvtColor(self.image, cv2.COLOR_BGR2GRAY)
    equalized = Normalization.equalizeHistograms(grayscale)
    wrapped_sum = self.edges.sumCanny(equalized, 1, 255)
    raw_edges = cv2.Canny(equalized, 1, 255)
    self.assertEqual(numpy.sum(raw_edges), wrapped_sum)
def test_equalizeHistograms(self):
    """Normalization.equalizeHistograms() must match cv2.equalizeHist.

    A 3-channel input is converted to grayscale first, mirroring what the
    wrapper is expected to do internally.
    """
    normalized = Normalization.equalizeHistograms(self.img)
    source = self.img
    if len(source.shape) == 3:
        source = cv2.cvtColor(source, cv2.COLOR_BGR2GRAY)
    expected = cv2.equalizeHist(source)
    self.assertTrue((normalized == expected).all())
def test_Features_numberKeyPoints_vs_clean_class(self):
    """numberKeyPoints() must agree between self.feats and a fresh Features."""
    grayscale = cv2.cvtColor(self.image, cv2.COLOR_BGR2GRAY)
    equalized = Normalization.equalizeHistograms(grayscale)
    self.feats.detect_kp_ORB(equalized)
    fresh = Features()
    fresh.detect_kp_ORB(equalized)
    self.assertEqual(self.feats.numberKeyPoints(), fresh.numberKeyPoints())
def test_Edges_Canny_vs_raw_calculation(self):
    """Edges.Canny() must match cv2.Canny element-for-element.

    !!!Note: Edges.Canny is a helper method called within Edges.sumCanny().
    """
    grayscale = cv2.cvtColor(self.image, cv2.COLOR_BGR2GRAY)
    equalized = Normalization.equalizeHistograms(grayscale)
    wrapped = self.edges.Canny(equalized, 1, 255)
    expected = cv2.Canny(equalized, 1, 255)
    self.assertTrue((wrapped == expected).all())
def test_Features_meanKeyPointSize_vs_clean_class(self):
    """meanKeyPointSize() must equal numpy.mean of a fresh instance's sizes."""
    grayscale = cv2.cvtColor(self.image, cv2.COLOR_BGR2GRAY)
    equalized = Normalization.equalizeHistograms(grayscale)
    self.feats.detect_kp_ORB(equalized)
    wrapped_mean = self.feats.meanKeyPointSize()
    fresh = Features()
    fresh.detect_kp_ORB(equalized)
    self.assertEqual(wrapped_mean, numpy.mean(fresh.keypointsizes))
def test_Laplacian_sum_vs_raw_calculation(self):
    """Laplacian.sum() must equal the count of responses above threshold 10."""
    grayscale = cv2.cvtColor(self.image, cv2.COLOR_BGR2GRAY)
    equalized = Normalization.equalizeHistograms(grayscale)
    kernel = 9
    wrapped_sum = self.lap.sum(equalized, kernel)
    fresh = Laplacian()
    response = fresh.calculate(equalized, kernel)
    # Threshold at 10, then count the True entries — mirrors the wrapper.
    expected = numpy.sum(response > 10)
    self.assertEqual(wrapped_sum, expected)
def test_Features_detect_kp_ORB_vs_raw_calculation(self):
    """detect_kp_ORB() must record the same keypoint sizes as a raw detector."""
    grayscale = cv2.cvtColor(self.image, cv2.COLOR_BGR2GRAY)
    equalized = Normalization.equalizeHistograms(grayscale)
    self.feats.detect_kp_ORB(equalized)
    # Legacy OpenCV 2.x factory API — mirrors what the wrapper presumably uses.
    detector = cv2.FeatureDetector_create('ORB')
    raw_sizes = numpy.array([kp.size for kp in detector.detect(equalized, None)])
    self.assertTrue((self.feats.keypointsizes == raw_sizes).all())
def test_Laplacian_calculate_vs_raw_calcuation(self):
    """Laplacian.calculate() must match cv2.Laplacian on the same input.

    !!!Note: Laplacian.calculate is a helper method called from within
    Laplacian.sum().

    Bug fix: the original passed the BGR image to calculate() while the raw
    cv2.Laplacian call used the grayscale image, so the two arrays were
    computed from different inputs (the comparison could never be a valid
    check). Both sides now receive the same grayscale image; the unused
    equalized image was removed.
    """
    img = self.image
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    kern = 3
    lapCalc = self.lap.calculate(gray, kern)
    tmpLapCalc = cv2.Laplacian(gray, cv2.CV_16U, ksize=kern)
    self.assertTrue((lapCalc == tmpLapCalc).all())
def imageHandler(imgfile):
    """Compute ORB keypoint statistics for one image file.

    Returns a list of tab-separated string fragments for the image, or None
    when the file is skipped: already in `seen`, a directory (its contents
    are queued instead), a .bin GPS file, or an unrecognized extension.

    NOTE(review): relies on module-level state — `seen`, `images`, `args`,
    `feats` — plus the ImageIO/Normalization helpers; confirm they are
    initialized before this is called. Directories are expanded by appending
    their entries onto the shared `images` work list.
    """
    retStrings = []
    if imgfile in seen:
        return
    if os.path.isdir(os.path.join(args.directory, imgfile)):
        # Queue the directory's contents for later processing.
        for f in os.listdir(os.path.join(args.directory, imgfile)):
            images.append(os.path.join(imgfile, f))
        return
    # silently skip the bin files that have the gps data
    if imgfile.endswith('bin'):
        return
    # alert to other files that were skipped
    # (endswith accepts a tuple — replaces the non-idiomatic bitwise '|')
    if not imgfile.endswith(('png', 'jpg')):
        sys.stderr.write("Skipped file: " + imgfile + "\n")
        return
    if args.verbose:
        sys.stderr.write("Parsing " + imgfile + "\n")
    retStrings.append(imgfile + "\t")
    img = ImageIO.cv2read(os.path.join(args.directory, imgfile))
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    ngray = Normalization.equalizeHistograms(gray)
    # Gaussian blur suppresses pixel noise before keypoint detection.
    ngray = cv2.GaussianBlur(ngray, (3, 3), 0)
    feats.detect_kp_ORB(ngray)
    retStrings.append(str(feats.numberKeyPoints()) + "\t" +
                      str(feats.medianKeyPointSize()) + "\t" +
                      str(feats.meanKeyPointSize()))
    # Keypoint counts at size thresholds 0, 10, ..., 140.
    for i in range(15):
        retStrings.append("\t" + str(feats.numKeyPoints(i * 10)))
    retStrings.append("\n")
    return retStrings
# NOTE(review): fragment — this chunk begins mid-way through a per-image
# loop whose header is not visible here; each bare `continue` below targets
# that enclosing loop, not the directory-listing loop that opens the block.
for f in temp:
    # queue directory entries onto the shared work list
    images.append(os.path.join(imgfile, f))
continue
# silently skip the bin files that have the gps data
if imgfile.endswith('bin'):
    continue
# alert to other files that were skipped
if not (imgfile.endswith('png') | imgfile.endswith('jpg')):
    sys.stderr.write("Skipped file: " + imgfile + "\n")
    continue
if args.verbose:
    sys.stderr.write("Parsing " + imgfile + "\n")
fout.write( imgfile + "\t" )
img = ImageIO.cv2read(os.path.join(args.directory, imgfile))
gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
ngray = Normalization.equalizeHistograms(gray)
# blur to reduce noise before ORB keypoint detection
ngray = cv2.GaussianBlur(ngray, (3,3), 0)
feats.detect_kp_ORB(ngray)
fout.write( str(feats.numberKeyPoints()) + "\t" + str(feats.medianKeyPointSize()) + "\t" + str(feats.meanKeyPointSize()) )
# keypoint counts at size thresholds 0, 10, ..., 140
for i in range(15):
    fout.write("\t" + str(feats.numKeyPoints(i*10)))
fout.write("\n")
def imageWriter(images,seen,args,fout,classification,stats,fft,lap,edge):
    """Write one tab-separated feature row per image to fout.

    For each entry in `images`: entries in `seen` are skipped; directories
    have their contents appended back onto `images` for later iterations;
    .bin GPS files and non png/jpg files are skipped. For each remaining
    image the row contains: filename, classification label, grayscale and
    equalized-image statistics, per-channel statistics, FFT energies,
    optional ORB keypoint features (when args.features), Laplacian sums,
    Canny edge sums, and contour statistics at five thresholds.

    NOTE(review): uses the module-level `feats` object (not a parameter) —
    confirm it is initialized before this runs. `e.message` in the contour
    handler is Python 2 only.
    """
    for imgfile in images:
        if imgfile in seen:
            continue
        if os.path.isdir(os.path.join(args.directory, imgfile)):
            # queue the directory's contents for processing on later iterations
            temp = os.listdir(os.path.join(args.directory, imgfile))
            for f in temp:
                images.append(os.path.join(imgfile, f))
            continue
        #rewrite of above if statement
        #if os.path.isdir(os.path.join(args.directory,imgfile)):
        #    temp = os.listdir(os.path.join(args.directory,imgfile))
        #    pool = Pool()
        #    pool.map(images.append, os.path.join(imgfile, f))
        #    pool.close()
        #    pool.join()
        #    continue
        # unless --all was requested, only process images with a classification
        if not args.all and imgfile not in classification:
            continue
        # silently skip the bin files that have the gps data
        if imgfile.endswith('bin'):
            continue
        # alert to other files that were skipped
        if not (imgfile.endswith('png') | imgfile.endswith('jpg')):
            sys.stderr.write("Skipped file: " + imgfile + "\n")
            continue
        if args.verbose:
            sys.stderr.write("Parsing " + imgfile + "\n")
        fout.write( imgfile + "\t" )
        # classification label column ("unknown" when not classified)
        if imgfile in classification:
            fout.write( classification[imgfile] + "\t")
        else:
            fout.write( "unknown\t" )
        img = ImageIO.cv2read(os.path.join(args.directory, imgfile))
        gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
        # min/max/median/mean of the raw grayscale image
        fout.write( ('\t'.join(map(str, [stats.min(gray), stats.max(gray), stats.median(gray), stats.mean(gray)]))) + "\t" )
        ngray = Normalization.equalizeHistograms(gray)
        # apply a gaussian blur to remove edge effects
        ngray = cv2.GaussianBlur(ngray, (3,3), 0)
        # same statistics on the equalized, blurred image
        fout.write( ('\t'.join(map(str, [stats.min(ngray), stats.max(ngray), stats.median(ngray), stats.mean(ngray)]))) + "\t")
        # per-channel statistics (index order is OpenCV's B, G, R)
        for i in range(3):
            imp = img[:,:,i]
            fout.write( ('\t'.join(map(str, [stats.min(imp), stats.max(imp), stats.median(imp), stats.mean(imp)]))) + "\t" )
        # FFT energy of the raw and the equalized image
        fout.write( str(fft.energy(gray)) + "\t" + str(fft.energy(ngray)) + "\t")
        if args.features:
            feats.detect_kp_ORB(ngray)
            fout.write( str(feats.numberKeyPoints()) + "\t" + str(feats.medianKeyPointSize()) + "\t" + str(feats.meanKeyPointSize()) + "\t")
            # keypoint counts at size thresholds 0, 10, ..., 140
            for i in range(15):
                fout.write( str(feats.numKeyPoints(i*10)) + "\t")
        else:
            # placeholder zero columns keep the row width constant
            fout.write("0\t0\t0\t");
            for i in range(15):
                fout.write("0\t")
        # Laplacian sums for odd kernel sizes 1, 3, ..., 29
        for i in range(15):
            k=2*i+1
            fout.write( str(lap.sum(ngray, k)) + "\t")
        # Canny edge sums for upper thresholds 0, 10, ..., 240
        for i in range(25):
            t2 = 10*i
            fout.write( str(edge.sumCanny(ngray, 1, t2)) + "\t")
        #edge.sumCanny(gray)
        # Contour detection at thresholds 0, 50, ..., 200
        ctr = Contours.contours(ngray)
        for i in range(5):
            threshold=50*i
            ctr.withCanny(1, threshold)
            if ctr.numberOfContours() == 0:
                # ten zero columns keep the row width constant
                fout.write( "0\t0\t0\t0\t0\t0\t0\t0\t0\t0\t" )
            else:
                try:
                    fout.write( "\t".join(map(str, [ctr.numberOfContours(), ctr.numberOfClosedContours(), ctr.numberOfOpenContours(), ctr.totalContourArea(), cv2.contourArea(ctr.largestContourByArea()), ctr.totalPerimeterLength()])) + "\t")
                    ctr.linelengths()
                    fout.write( "\t".join(map(str, [ctr.maxLineLength(), ctr.meanLineLength(), ctr.medianLineLength(), ctr.modeLineLength()])) + "\t")
                except Exception as e:
                    # best-effort: log the failure and emit zero columns instead
                    sys.stderr.write("There was an error calculating the contours for " + imgfile +": " + e.message + "\n")
                    fout.write( "0\t0\t0\t0\t0\t0\t0\t0\t0\t0\t" )
        fout.write("\n")
A test program to make sure that the Normalization methods work.
This normalizes the images, and displays them.
'''

#test = 'test.jpg'
# NOTE(review): hard-coded absolute path to the test image
test = "/home/redwards/Dropbox/ComputerVision/TestCode/test.png"
im = ImageIO.cv2read(test)
print im.shape
print "Testing Tylers normalization"
tn = Normalization.Tyler(im)
print "Testing histogram equalization"
he = Normalization.equalizeHistograms(im)
# replicate the single-channel equalized image across three channels so it
# can be stacked next to the color images below
heo = numpy.ones_like(im)
heo[:,:,0]=he
heo[:,:,1]=he
heo[:,:,2]=he
print "Simple normaliztion"
nh = Normalization.simpleNorm(im)
print nh.shape
# build a 2x2 montage: original over equalized, Tyler over simple-normalized
partone = numpy.vstack([im, heo])
parttwo = numpy.vstack([tn, nh])
allim = numpy.hstack([partone, parttwo])
print "Press any key to exit\n"