def binary_steg_reveal(steg_image, out):
    """Extract a binary payload hidden in *steg_image* (LSB steganography)
    and write it to the file *out*.

    :param steg_image: path of the carrier image containing hidden data
    :param out: path of the file to write the recovered bytes to
    """
    carrier = cv.LoadImage(steg_image)
    steg = LSBSteg(carrier)
    payload = steg.unhideBin()  # renamed from 'bin', which shadowed the builtin
    # 'with' guarantees the file is closed even if write() raises
    with open(out, "wb") as f:
        f.write(payload)
def detect(self, obj, event):
    """Detect faces on the active Media object and draw the results.

    Reloads the image first (clearing any previous detections), runs a
    Haar-cascade face detector, converts each face box to fractions of
    the full image size and passes them to draw_rectangles().
    """
    # First, reset image, in case of previous detections:
    active_handle = self.get_active('Media')
    media = self.dbstate.db.get_media_from_handle(active_handle)
    self.load_image(media)
    min_face_size = (50, 50)  # FIXME: get from setting
    # BUG FIX: the original called cv2.LoadImage / cv2.EqualizeHist /
    # cv2.HaarDetectObjects -- removed OpenCV 1.x names that raise
    # AttributeError on the cv2 module.  Rewritten with cv2 equivalents.
    self.cv_image = cv2.imread(self.full_path, cv2.IMREAD_GRAYSCALE)
    self.cv_image = cv2.equalizeHist(self.cv_image)
    # numpy images are (rows, cols) = (height, width)
    o_height, o_width = self.cv_image.shape[:2]
    cascade = cv2.CascadeClassifier(HAARCASCADE_PATH)
    faces = cascade.detectMultiScale(
        self.cv_image, scaleFactor=1.2, minNeighbors=2, minSize=min_face_size)
    references = self.find_references()
    rects = []
    o_width, o_height = float(o_width), float(o_height)
    for (x, y, width, height) in faces:
        # store each box as percentages of the full image size
        rects.append((x / o_width, y / o_height,
                      width / o_width, height / o_height))
    self.draw_rectangles(rects, references)
def detect_faces(self, image_filename):
    """ Detects all faces and returns a list with images and corresponding coordinates"""
    # Legacy OpenCV 1.x ('cv') API throughout -- keep as-is.
    logging.debug(
        'Start method "detect_faces" for file %s (face-detector.py)' % image_filename)
    cascade = cv.Load(parameter.cascadefile)  # load face cascade
    image = cv.LoadImage(image_filename)  # loads and converts image
    # detect and save coordinates of detected faces
    # (scale factor, min neighbours, flags and minimum face size all come
    # from the project-wide 'parameter' module)
    coordinates = cv.HaarDetectObjects(
        image, cascade, cv.CreateMemStorage(), parameter.scaleFactor,
        parameter.minNeighbors, parameter.flags, parameter.min_facesize)
    # Convert to greyscale - better results when converting AFTER facedetection with viola jones
    if image.channels == 3:
        # (log message below is intentionally left in German -- runtime string)
        logging.debug(
            'Bild %s wird in Graustufenbild umgewandelt (face-detector.py)' % image_filename)
        grey_face = (cv.CreateImage((image.width, image.height), 8, 1))  # Create grey-scale Image
        cv.CvtColor(image, grey_face, cv.CV_RGB2GRAY )  # convert Image to Greyscale (necessary for SURF)
        image = grey_face
    logging.debug(
        '%d faces successfully detected in file %s (face-detector.py)' % (len(coordinates), image_filename))
    # returns the (possibly greyscaled) image plus the Haar detections
    return image, coordinates
def getNextImage(self): """returns a image which can be used for detection""" #return kinect frame if self.source == "kinect_depth": # if standalone is true, the depth images will be retrieved directly from the kinect if self.standalone == True: if self.imageDepth == 8: return (kv.GetDepth8(), time.time()) elif self.imageDepth == 11 or self.imageDepth == 16: return (kv.GetDepth11(), time.time()) else: print "Illegal image depth: '" + str( self.imageDepth) + "'. Using 8 bit" return (kv.GetDepth8(), time.time()) # the depth image will be retrieved from the memory (using architecture) else: last = self.vid_mem_reader.get_latest_image(mtimes=True) if self.vid_mem_reader.get_status(self.source): self.fail_count = 0 else: self.fail_count += 1 # Make sure the image is not too old img, mtime = last[0] age = time.time() - mtime if age > 2: self.fail_count += 1 return None, None return last[0] elif ros_pattern.match(self.source): last = self.vid_mem_reader.get_latest_image(mtimes=True) if (not self.vid_mem_reader.get_status(self.source)) or not last: self.fail_count += 1 return None, None # Make sure the image is not too old img, mtime = last[0] age = time.time() - mtime if age > 2: self.fail_count += 1 return None, None self.fail_count = 0 img = convert_16to8(img) return (img, mtime) # return testimage (used for testing elif self.source == "file": self.framenumber += 1 if self.framenumber == len(self.filelist): self.framenumber = 0 return (cv.LoadImage(self.filelist[self.framenumber], cv.CV_LOAD_IMAGE_GRAYSCALE), time.time())
def get_latest_image_file(self, source):
    """Return (mtime, image) for the newest image stored for *source*.

    Images live in /dev/shm/images/<source>; a symlink "lastimage" should
    point at the most recent file.  When the symlink is missing or
    dangling, fall back to the second-newest file (the newest one may
    still be mid-write).  Returns (None, None) on any failure.
    """
    dirname = "/dev/shm/images/" + source
    if not os.path.exists(dirname):
        self.logger.error(
            "Directory %s does not exist; returning empty image" % dirname)
        return None, None
    files = glob.glob(os.path.join(dirname, "*.png")) + glob.glob(
        os.path.join(dirname, "*.jpg"))
    if not files:
        self.logger.error(
            "No images in directory %s; returning empty image" % dirname)
        return None, None
    # Resolve the "lastimage" symlink to the actual file
    linkname = os.path.join(dirname, "lastimage")
    try:
        filename = os.path.realpath(linkname)
    except OSError as e:
        if e.errno != 2:
            raise  # OSError 2 = File not found; use fallback
        filename = linkname
    if linkname == filename or not os.path.exists(filename):
        # Symlink missing or dangling: pick the second-newest image.
        # Sorting by mtime in Python replaces the original fragile
        # "ls -tr | head | tail" shell pipeline (which also warned on
        # empty directories).
        files.sort(key=os.path.getmtime)
        filename = files[-2] if len(files) > 1 else files[-1]
    try:
        img = cv.LoadImage(filename)
        mtime = os.path.getmtime(filename)
        return mtime, img
    except Exception:
        # narrowed from bare 'except:' so SystemExit/KeyboardInterrupt
        # are no longer swallowed; load errors still yield (None, None)
        return None, None
def imgcompare(image1, image2):
    """Compare two image files by histogram correlation.

    :param image1: path of the first image file
    :param image2: path of the second image file
    :return: correlation coefficient in [-1, 1]; 1.0 = identical histograms
    """
    # BUG FIX: cv2.LoadImage / cv2.CompareHist / cv2.CV_COMP_CORREL are
    # removed OpenCV 1.x names that raise AttributeError on cv2.
    hist1 = createHist(cv2.imread(image1))
    hist2 = createHist(cv2.imread(image2))
    return cv2.compareHist(hist1, hist2, cv2.HISTCMP_CORREL)
#!/usr/bin/env python
# -*- coding:utf8 -*-
import cv2

# Read the image.
# BUG FIX: cv2.LoadImage / cv2.NamedWindow / cv2.ShowImage are removed
# OpenCV 1.x names that do not exist on the cv2 module.
image = cv2.imread('G:/python/opencv_dir/wx.jpg', cv2.IMREAD_COLOR)  # Load the image
# Or just: image = cv2.imread('img/image.png')
cv2.namedWindow('a_window', cv2.WINDOW_AUTOSIZE)  # Facultative
cv2.imshow('a_window', image)  # Show the image
    # NOTE(review): fragment -- the enclosing function's 'def' line is not
    # in this chunk; the lines below are its tail (log-polar unwrap of an
    # iris image into a rectangular strip).
    c = (float(imgSize[0]/2.0), float(imgSize[1]/2.0))  # image centre
    imgRes = cv.CreateImage((rad*3, int(360)), 8, 3)  # one row per degree
    #cv.LogPolar(image,imgRes,c,50.0, cv.CV_INTER_LINEAR+cv.CV_WARP_FILL_OUTLIERS)
    cv.LogPolar(image,imgRes,c,60.0, cv.CV_INTER_LINEAR+cv.CV_WARP_FILL_OUTLIERS)
    return (imgRes)

# Window creation for showing input, output
cv.NamedWindow("input", cv.CV_WINDOW_AUTOSIZE)
cv.NamedWindow("output", cv.CV_WINDOW_AUTOSIZE)
cv.NamedWindow("normalized", cv.CV_WINDOW_AUTOSIZE)
eyesList = os.listdir('images/eyes')
key = 0
while True:
    # getNewEye / getPupil / getIris are helpers defined elsewhere
    eye = getNewEye(eyesList)
    frame = cv.LoadImage("images/eyes/"+eye)
    iris = cv.CloneImage(frame)
    output = getPupil(frame)
    iris = getIris(output)
    cv.ShowImage("input", frame)
    cv.ShowImage("output", iris)
    normImg = cv.CloneImage(iris)
    normImg = getPolar2CartImg(iris,radius)
    cv.ShowImage("normalized", normImg)
    key = cv.WaitKey(3000)
    # seems like Esc with NumLck equals 1048603
    if (key == 27 or key == 1048603):
        break
cv.DestroyAllWindows()
import cv2

# Load directly as greyscale.
# BUG FIX: cv2.LoadImage is the removed OpenCV 1.x API (AttributeError on cv2).
im = cv2.imread("contours.jpg", cv2.IMREAD_GRAYSCALE)
# BUG FIX: the original wrote the undefined name 'imgray' (NameError);
# the greyscale image loaded above is 'im'.
cv2.imwrite('contours_gray.jpg', im)
# (A large commented-out OpenCV 1.x Harris-corner experiment was removed
# here as dead code.)
def getIndividualContoursRectangles(
        contours):  #Return the bounding rect for every contours
    # Walk the legacy-cv contour linked list via h_next(), collecting one
    # bounding rectangle (x, y, w, h) per contour.
    contourscopy = contours
    rectangleList = []
    while contourscopy:
        x, y, w, h = cv.BoundingRect(contourscopy)
        rectangleList.append((x, y, w, h))
        contourscopy = contourscopy.h_next()
    return rectangleList


if __name__ == "__main__":
    orig = cv.LoadImage(
        r"C:\git\Python-Snippets\Image Recognition\images\D2C-Logins - RunDate 2019-10-03 - Part (22).image"
    )
    #Convert in black and white
    res = cv.CreateImage(cv.GetSize(orig), 8, 1)
    cv.CvtColor(orig, res, cv.CV_BGR2GRAY)
    #Operations on the image -- these helpers are defined elsewhere in the file
    openCloseImage(res)
    dilateImage(res, 2)
    erodeImage(res, 2)
    smoothImage(res, 5)
    thresholdImage(res, 150, cv.CV_THRESH_BINARY_INV)
    #Get contours approximated
    contourLow = getContours(res, 3)
    # NOTE(review): fragment -- these first lines are the tail of an
    # erosion trackbar callback whose 'def' line is outside this chunk.
    element = cv.CreateStructuringElementEx(pos * 2 + 1, pos * 2 + 1, pos, pos,
                                            element_shape)
    cv.Erode(src, dest, element, 1)
    cv.ShowImage("Erosion & Dilation", dest)


def Dilation(pos):
    # Trackbar callback: dilate with a (2*pos+1)-square structuring element.
    element = cv.CreateStructuringElementEx(pos * 2 + 1, pos * 2 + 1, pos, pos,
                                            element_shape)
    cv.Dilate(src, dest, element, 1)
    cv.ShowImage("Erosion & Dilation", dest)


if __name__ == "__main__":
    if len(sys.argv) > 1:
        src = cv.LoadImage(sys.argv[1], cv.CV_LOAD_IMAGE_COLOR)
    else:
        # No file given: download the OpenCV sample image (Python 2 urllib2)
        # and decode it from the raw bytes.
        url = 'https://code.ros.org/svn/opencv/trunk/opencv/samples/c/fruits.jpg'
        filedata = urllib2.urlopen(url).read()
        imagefiledata = cv.CreateMatHeader(1, len(filedata), cv.CV_8UC1)
        cv.SetData(imagefiledata, filedata, len(filedata))
        src = cv.DecodeImage(imagefiledata, cv.CV_LOAD_IMAGE_COLOR)

    image = cv.CloneImage(src)
    dest = cv.CloneImage(src)
    cv.NamedWindow("Opening & Closing", 1)
    cv.NamedWindow("Erosion & Dilation", 1)
    cv.ShowImage("Opening & Closing", src)
    cv.ShowImage("Erosion & Dilation", src)
    # Opening/Closing callbacks are defined elsewhere in this file
    cv.CreateTrackbar("Open", "Opening & Closing", 0, 10, Opening)
    cv.CreateTrackbar("Close", "Opening & Closing", 0, 10, Closing)
# @Description: createImage.py
# @Author: 孤烟逐云zjy
# @Date: 2020/5/3 9:49
# @SoftWare: PyCharm
# @CSDN: https://blog.csdn.net/zjy123078_zjy
# @博客园: https://www.cnblogs.com/guyan-2020/
import cv2 as cv

# BUG FIX: cv2 has no LoadImage/CreateImage/Resize/SaveImage (removed
# OpenCV 1.x API); use imread/resize/imwrite instead.
im = cv.imread('./images/photo01.jpg')  # load the source image
h, w = im.shape[:2]
# Half-size thumbnail; '//' keeps the target size integral (the original
# 'im.width / 2' would be a float under Python 3).
thum = cv.resize(im, (w // 2, h // 2))
cv.imwrite('thum.jpg', thum)
    # NOTE(review): fragment -- tail of a frequency-domain filter function
    # whose 'def' line is outside this chunk.  flag == 0 appears to keep
    # only the centre block (low-pass); otherwise the centre is zeroed
    # (high-pass) -- TODO confirm against the missing signature.
    mFilter = cv.CreateMat(mat.rows,mat.cols,cv.CV_32FC2)
    for i in range(mat.rows):
        for j in range(mat.cols):
            if flag == 0:
                mFilter[i,j] = (0,0)
            else:
                mFilter[i,j] = mat[i,j]
    # the central (2*num x 2*num) block gets the opposite treatment
    # (Python 2 integer division on mat.rows/2 -- intentional, do not change)
    for i in range(mat.rows/2-num,mat.rows/2+num):
        for j in range(mat.cols/2-num,mat.cols/2+num):
            if flag == 0:
                mFilter[i,j] = mat[i,j]
            else:
                mFilter[i,j] = (0,0)
    return mFilter


# Demo script: FFT / filter / inverse-FFT round trips on lena.jpg.
# FFT, IFFT, FImage, Filter and Restore are defined elsewhere in this file.
image = cv.LoadImage('lena.jpg',0)  # 0 = load as greyscale
mFFT = FFT(image)
mIFFt = IFFT(mFFT)
iAfter = FImage(mFFT)
mLP = Filter(mFFT)       # low-pass filtered spectrum
mIFFt1=IFFT(mLP)
iLP = FImage(mLP)
iRestore = Restore(mIFFt1)
mHP = Filter(mFFT,1)     # high-pass filtered spectrum
mIFFt2 = IFFT(mHP)
iHP = FImage(mHP)
iRestore2 = Restore(mIFFt2)
cv.ShowImage('image',image)
cv.ShowImage('iAfter',iAfter)
def binary_steg_hide(image, binary, result):
    """Embed *binary* data into the carrier *image* via LSB steganography
    and save the resulting stego image to *result*."""
    container = cv.LoadImage(image)
    encoder = LSBSteg(container)
    encoder.hideBin(binary)
    encoder.saveImage(result)
def main():
    """Interactive labelling tool: step through all images matching
    image_file_glob and mark rectangular ROIs via mouse + keyboard."""
    global current_image
    global current_img_file_name
    global has_roi
    global roi_x0
    global roi_y0
    global roi_x1
    global roi_y1
    iKey = 0
    files = glob.glob(image_file_glob)
    if len(files) == 0:
        print("No files match glob pattern")
        return
    files = [os.path.abspath(f) for f in files]
    files.sort()
    # init GUI
    cv.NamedWindow(window_name, 1)
    cv.SetMouseCallback(window_name, on_mouse, None)
    sys.stderr.write("Opening directory...")
    # init output of rectangles to the info file
    #os.chdir(input_directory)
    sys.stderr.write("done.\n")
    current_file_index = 0
    while True:
        current_img_file_name = files[current_file_index]
        num_of_rec = 0
        sys.stderr.write(
            "Loading current_image (%d/%d) %s...\n" %
            (current_file_index + 1, len(files), current_img_file_name))
        try:
            current_image = cv.LoadImage(current_img_file_name, 1)
        except IOError:
            sys.stderr.write("Failed to load current_image %s.\n" %
                             current_img_file_name)
            return -1
        # Work on current current_image
        #cv.ShowImage(window_name, current_image)
        redraw()
        # Need to figure out waitkey returns.
        # <Space> = 32 add rectangle to current image
        # <left> = 81 save & next
        # <right> = 83 save & prev
        # <a> = 97 add rect to table
        # <b> = 98 toggle file is background or not
        # <d> = 100 remove old rect
        # <q> = 113 exit program
        # <s> = 115 save rect table
        # <x> = 136 skip image
        iKey = cv.WaitKey(0) % 255
        # This is ugly, but is actually a simplification of the C++.
        #sys.stderr.write(str(iKey) + '\n')
        if draging:
            continue  # ignore keys while a rectangle drag is in progress
        if iKey == 81:
            # previous image, wrapping to the last one
            current_file_index -= 1
            if current_file_index == -1:
                current_file_index = len(files) - 1
            clear_roi()
        elif iKey == 83:
            # next image, wrapping to the first one
            current_file_index += 1
            if current_file_index == len(files):
                current_file_index = 0
            clear_roi()
        elif iKey == 113:
            cv.DestroyWindow(window_name)
            return 0
        elif iKey == 97:
            # record the current ROI as (x, y, w, h) and persist the table
            rect_table.setdefault(current_img_file_name, set()).add(
                (roi_x0, roi_y0, roi_x1 - roi_x0, roi_y1 - roi_y0))
            clear_roi()
            write_rect_table()
            redraw()
        elif iKey == 98:
            # toggle the background-image flag for this file
            if current_img_file_name in background_files:
                background_files.remove(current_img_file_name)
            else:
                background_files.add(current_img_file_name)
        elif iKey == 100:
            remove_rect(cur_mouse_x, cur_mouse_y)
        elif iKey == 115:
            write_rect_table()
        elif iKey == 136:
            sys.stderr.write("Skipped %s.\n" % current_file_index)
import cv2
import tesseract

# BUG FIX: cv2.LoadImage / cv2.Threshold are removed OpenCV 1.x names
# (AttributeError on cv2); load and binarise with the cv2 equivalents.
gray = cv2.imread('sample.png', cv2.IMREAD_GRAYSCALE)
_, gray = cv2.threshold(gray, 231, 255, cv2.THRESH_BINARY)

# OCR the binarised image with a lowercase-alphanumeric whitelist,
# treating the whole image as a single word.
api = tesseract.TessBaseAPI()
api.Init(".", "eng", tesseract.OEM_DEFAULT)
api.SetVariable("tessedit_char_whitelist",
                "0123456789abcdefghijklmnopqrstuvwxyz")
api.SetPageSegMode(tesseract.PSM_SINGLE_WORD)
tesseract.SetCvImage(gray, api)
print(api.GetUTF8Text())
import cv2

# BUG FIX: cv2.LoadImage / cv2.CreateImage / cv2.Resize / cv2.SaveImage
# are removed OpenCV 1.x names; use imread/resize/imwrite.
imageBuffer = cv2.imread('images/digits_sudoku2.png')

nW = 468  # target width
nH = 99   # target height
# cv2.resize takes (width, height).  NOTE: the original passed (nH, nW)
# as CreateImage's (width, height), swapping the dimensions.
smallerImage = cv2.resize(imageBuffer, (nW, nH),
                          interpolation=cv2.INTER_CUBIC)
cv2.imwrite('images/digits_sudoku3.png', smallerImage)
ret, frame = video_capture.read() gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) faces = faceCascade.detectMultiScale(gray, scaleFactor=1.1, minNeighbors=5, minSize=(30, 30), flags=0) # Draw a rectangle around the faces for (x, y, w, h) in faces: cv2.rectangle(image, (x, y), (x + w, y + h), (0, 255, 0), 2) roi_gray = gray[y:y + h, x:x + w] roi_color = image[y:y + h, x:x + w] face_file_name = "img.jpg" cv2.imwrite(face_file_name, roi_color) src = cv2.LoadImage("img.jpg", cv2.CV_LOAD_IMAGE_COLOR) src0 = cv2.LoadImage("img0.jpg", cv2.CV_LOAD_IMAGE_COLOR) # Display the resulting frame cv2.imshow('Face found', image) if cv2.waitKey(1) & 0xFF == ord('q'): break # When everything is done, release the capture video_capture.release() cv2.destroyAllWindows()
import cv2 as cv

# BUG FIX: the original mixed 'import cv2 as cv' with the removed
# OpenCV 1.x API (cv.LoadImage, cv.HaarDetectObjects, cv.SetImageROI...),
# none of which exist on cv2.  Rewritten with the cv2 equivalents.
img = cv.imread("friend1.jpg")                   # load the colour image
greyscale = cv.cvtColor(img, cv.COLOR_BGR2GRAY)  # convert to greyscale
# histogram equalisation improves detection on low-contrast images
greyscale = cv.equalizeHist(greyscale)

# load the pre-trained frontal-face Haar cascade
cascade = cv.CascadeClassifier('haarcascade_frontalface_alt2.xml')
# detect faces; returns one (x, y, w, h) box per face, minimum 50x50
faces = cascade.detectMultiScale(greyscale,
                                 scaleFactor=1.2,
                                 minNeighbors=2,
                                 minSize=(50, 50))

# save each detected face region to its own file (numpy slicing
# replaces the old SetImageROI/SaveImage pair)
j = 0  # face counter
for (x, y, w, h) in faces:
    j += 1
    cv.imwrite("face" + str(j) + ".jpg", img[y:y + h, x:x + w])
# NOTE(review): top-level script fragment -- BinarySocket and Ticker are
# project classes defined elsewhere; 'cv' is the legacy OpenCV module.
server = True
# role-dependent identifier
name = "PictureServer" if server else "PictureClient"
host = "localhost"
# NOTE(review): a string "port" -- presumably a named channel rather than
# a TCP port number; verify against BinarySocket
port = "Pictures"
if not server:
    # The client loads images to send
    path = os.path.join(os.environ['BORG'], "brain", "data", "models",
                        "RobotLab_1", "*.jpg")
    path = os.path.expanduser(path)
    image_names = glob.glob(path)
    images = []
    for filename in image_names:
        print "Loading %s..." % filename
        img = cv.LoadImage(filename)
        images.append(img)
    print "Loaded %d images" % len(images)

# Start the BinarySocket -- only the client side compresses its stream
comp = True if not server else False
sock = BinarySocket(host,
                    port,
                    server=server,
                    bufsize=1024 * 256,
                    compress=comp,
                    compress_level=9)
client = sock if not server else None
ticker = Ticker(5)
import cv2 as cv
import tesseract

# BUG FIX: cv2 has no LoadImage/Threshold (removed OpenCV 1.x API);
# load and binarise with imread/threshold instead.
gray = cv.imread('Capture.JPG', cv.IMREAD_GRAYSCALE)
_, gray = cv.threshold(gray, 231, 255, cv.THRESH_BINARY)

# OCR the binarised image with a lowercase-alphanumeric whitelist,
# treating the whole image as a single word.
api = tesseract.TessBaseAPI()
api.Init(".", "eng", tesseract.OEM_DEFAULT)
api.SetVariable("tessedit_char_whitelist",
                "0123456789abcdefghijklmnopqrstuvwxyz")
api.SetPageSegMode(tesseract.PSM_SINGLE_WORD)
tesseract.SetCvImage(gray, api)
print(api.GetUTF8Text())