def test():
    """Debug helper: crop the same region from two screenshots, save the
    crops, and print the chi-square distance between their histograms.

    NOTE(review): relies on module-level compute_histogram() — not visible
    in this chunk; confirm it accepts an IplImage.
    """
    #start = time.time()
    #
    src1 = cv.LoadImage("target_all_nl1.png", 0)
    src2 = cv.LoadImage("target_all.png", 0)
    # crop area
    pt1 = (100, 100)
    pt2 = (200, 200)
    #get img size and compare
    # convert cvMat to IplImage (slicing a loaded image yields a cvmat view)
    crop1 = cv.GetImage(src1[pt1[1]:pt2[1], pt1[0]:pt2[0]])
    crop2 = cv.GetImage(src2[pt1[1]:pt2[1], pt1[0]:pt2[0]])
    # save image
    cv.SaveImage("c1.jpg", crop1)
    cv.SaveImage("c2.jpg", crop2)
    print type(crop2)
    print type(src2)
    #cv2.GetMat
    # compute
    hist1 = compute_histogram(crop1)
    hist2 = compute_histogram(crop2)
    # compare (lower chi-square score = more similar histograms)
    sc = cv.CompareHist(hist1, hist2, cv.CV_COMP_CHISQR)
    print sc
def reload_mosaic(mosaic): '''reload state into mosaic''' state = mpstate.camera_state regions = [] last_thumbfile = None last_joe = None joes = cuav_joe.JoeIterator(state.joelog.filename) for joe in joes: print joe if joe.thumb_filename == last_thumbfile or last_thumbfile is None: regions.append(joe.r) last_joe = joe last_thumbfile = joe.thumb_filename else: try: composite = cv.LoadImage(last_joe.thumb_filename) thumbs = cuav_mosaic.ExtractThumbs(composite, len(regions)) mosaic.set_brightness(state.settings.brightness) mosaic.add_regions(regions, thumbs, last_joe.image_filename, last_joe.pos) except Exception: pass regions = [] last_joe = None last_thumbfile = None if last_joe: try: composite = cv.LoadImage(last_joe.thumb_filename) thumbs = cuav_mosaic.ExtractThumbs(composite, len(regions)) mosaic.set_brightness(state.settings.brightness) mosaic.add_regions(regions, thumbs, last_joe.image_filename, last_joe.pos) except Exception: pass
def main():
    """Render two animations frame-by-frame (via drawImage writing img_name
    to disk) and encode them into original.avi and output.avi.

    NOTE(review): depends on module globals n, s, fps, width, height,
    img_name and helpers getArgs()/drawImage() — none visible here; the
    reconstruction of loop bodies assumes each rendered frame is written
    immediately after drawImage.
    """
    global degree_value
    global pi
    global s
    global fps
    # getArgs() == 1 signals a usage/argument error; bail out
    if getArgs() == 1:
        return
    degree_value = 2 * pi / n
    #drawImage(0)
    #return
    # first writer runs at 7*s fps, second at the configured fps
    writer = cv.CreateVideoWriter("original.avi", 0, 7 * s, (width, height))
    writer2 = cv.CreateVideoWriter("output.avi", 0, fps, (width, height))
    nFrames = int(14 * s)
    addon = pi * 2 / 7
    c = addon
    drawImage(0)
    for i in range(nFrames):
        c = c + addon
        drawImage(c)
        # drawImage renders to img_name on disk; re-load it for the encoder
        img2 = cv.LoadImage(img_name)
        cv.WriteFrame(writer, img2)
    # second pass: 2 seconds of output at the real fps
    nFrames = int(2 * fps)
    addon2 = pi * 2 * s / fps
    c2 = addon2
    drawImage(0)
    for j in range(nFrames):
        c2 = c2 + addon2
        drawImage(c2)
        img2 = cv.LoadImage(img_name)
        cv.WriteFrame(writer2, img2)
def imgResizer(crop_img_string, width, height, crop_color_img):
    """Resize the greyscale crop at *crop_img_string* and the colour crop
    stored in Z_Cropped_color_image.png to (width, height).

    Both resized images are saved to disk; returns (grey_dst, color_dst).
    """
    # greyscale pass
    grey_src = cv.LoadImage(crop_img_string, cv.CV_LOAD_IMAGE_GRAYSCALE)
    grey_dst = cv.CreateImage((width, height), 8, 1)
    cv.Resize(grey_src, grey_dst, interpolation=cv.CV_INTER_LINEAR)
    cv.SaveImage('Z_Resized_image.png', grey_dst)
    # colour pass — note the colour crop is re-read from disk, the
    # crop_color_img argument is not used
    color_src = cv.LoadImage('Z_Cropped_color_image.png')
    color_dst = cv.CreateImage((width, height), 8, 3)
    cv.Resize(color_src, color_dst, interpolation=cv.CV_INTER_LINEAR)
    cv.SaveImage('Z_Resized_color_image.png', color_dst)
    return grey_dst, color_dst
def load_sample(name=None):
    """Load a sample image.

    Priority: the command-line path argv[1] if given, otherwise *name* from
    disk, otherwise download *name*'s basename from the OpenCV samples repo.

    Raises:
        ValueError: when neither argv[1] nor name supplies an image
            (previously this fell through and raised UnboundLocalError).
    """
    if len(argv) > 1:
        img0 = cv.LoadImage(argv[1], cv.CV_LOAD_IMAGE_COLOR)
    elif name is not None:
        try:
            img0 = cv.LoadImage(name, cv.CV_LOAD_IMAGE_COLOR)
        except IOError:
            # not on disk: fetch the raw sample from GitHub and decode it
            # straight from the downloaded bytes
            urlbase = 'https://raw.github.com/Itseez/opencv/master/samples/c/'
            fname = name.split('/')[-1]  # renamed: was shadowing builtin 'file'
            resp = urllib2.urlopen(urlbase + fname)
            try:
                filedata = resp.read()
            finally:
                resp.close()  # fix: the HTTP response was never closed
            imagefiledata = cv.CreateMatHeader(1, len(filedata), cv.CV_8UC1)
            cv.SetData(imagefiledata, filedata, len(filedata))
            img0 = cv.DecodeImage(imagefiledata, cv.CV_LOAD_IMAGE_COLOR)
    else:
        raise ValueError('load_sample: no image specified (argv or name)')
    return img0
def load_sample(name=None):
    """Return a sample image: argv[1] when supplied on the command line,
    else *name* — downloading it from code.opencv.org when the file is
    not present locally."""
    if len(argv) > 1:
        img0 = cv.LoadImage(argv[1], cv.CV_LOAD_IMAGE_COLOR)
    elif name is not None:
        try:
            img0 = cv.LoadImage(name, cv.CV_LOAD_IMAGE_COLOR)
        except IOError:
            # local load failed: pull the sample from the OpenCV repository
            # and decode it directly from the downloaded bytes
            urlbase = 'http://code.opencv.org/projects/opencv/repository/revisions/master/raw/samples/c/'
            basename = name.rsplit('/', 1)[-1]
            payload = urllib2.urlopen(urlbase + basename).read()
            header = cv.CreateMatHeader(1, len(payload), cv.CV_8UC1)
            cv.SetData(header, payload, len(payload))
            img0 = cv.DecodeImage(header, cv.CV_LOAD_IMAGE_COLOR)
    return img0
def render_outline_image(image_id, threshold):
    """Build a Canny edge image for Image<image_id>.bmp and return it as a
    wx.Bitmap.

    Args:
        image_id: numeric suffix of the source bitmap.
        threshold: lower Canny hysteresis threshold; the upper threshold is
            fixed at 375 (125 * 3).
    """
    im = cv.LoadImage("Image" + str(image_id) + ".bmp", cv.CV_LOAD_IMAGE_COLOR)
    gray = cv.CreateImage((im.width, im.height), 8, 1)
    edge = cv.CreateImage((im.width, im.height), 8, 1)
    # fix: removed dead locals im_bw1/im_white/white — 'white' was never
    # used, and loading white.bmp for it was a pointless disk read
    cv.CvtColor(im, gray, cv.CV_BGR2GRAY)
    cv.Not(gray, edge)  # NOTE(review): Canny overwrites 'edge'; kept for parity
    cv.Canny(gray, edge, threshold, 125 * 3, 3)
    # round-trip through a temp file because wx.Image wants a file source
    cv.SaveImage("edge_image.png", edge)
    jpg1 = wx.Image('edge_image.png', wx.BITMAP_TYPE_ANY).ConvertToBitmap()
    os.remove("edge_image.png")
    return jpg1
def process(infile):
    """Detect the single face in *infile*, crop it with PIL, resize the crop
    to image_size x image_size and save it as <infile>.jpg.

    Returns the new filename, or None when no face was detected.
    """
    image = cv.LoadImage(infile)
    #    if image:
    faces = detect_object(image)
    if not faces:
        return None
    # The project guarantees exactly one face per picture, so this loop
    # returns on its first iteration.
    for (x1, y1, x2, y2) in faces:
        file_name = infile + '.jpg'
        cropped = Image.open(infile).convert('RGB').crop((x1, y1, x2, y2))
        cropped.save(file_name)
        img1 = cv.LoadImage(file_name)
        re_img = cv.CreateImage((image_size, image_size), 8, 3)
        cv.Resize(img1, re_img, cv.CV_INTER_LINEAR)
        cv.SaveImage(file_name, re_img)
        return infile + '.jpg'
def drop(host, port, image, payload):
    """Fetch a stego image from http://host:port/image, extract the hidden
    binary with LSBSteg and write it to *payload*."""
    # here is where we set all of our proxy settings
    if PROXY_SUPPORT == "ON":
        auth_handler = urllib2.HTTPBasicAuthHandler()
        auth_handler.add_password(
            realm='RESTRICTED ACCESS',
            uri=PROXY_URL,    # PROXY SPECIFIED ABOVE
            user=USERNAME,    # USERNAME SPECIFIED ABOVE
            passwd=PASSWORD)  # PASSWORD SPECIFIED ABOVE
        opener = urllib2.build_opener(auth_handler)
        urllib2.install_opener(opener)
    # Grab our file from the web server and save it to a temp file
    req = urllib2.Request('http://%s:%s/%s' % (host, port, image))
    message = urllib2.urlopen(req)
    # fix: open in binary mode ('wb'); text mode corrupts PNG bytes on Windows
    localFile = open('temp.png', 'wb')
    try:
        localFile.write(message.read())
    finally:
        localFile.close()
    # Destego binary
    inp = cv.LoadImage('temp.png')
    steg = LSBSteg(inp)
    hidden = steg.unhideBin()  # renamed: was shadowing builtin 'bin'
    # Write the binary back to a file
    f = open(payload, "wb")
    try:
        f.write(hidden)
    finally:
        f.close()
    # fix: portable cleanup instead of shelling out to 'rm'
    os.remove('temp.png')
def binary_steg_reveal(steg_image, out):
    """Extract the binary payload hidden in *steg_image* and write it to
    the file *out*."""
    inp = cv.LoadImage(steg_image)
    steg = LSBSteg(inp)
    hidden = steg.unhideBin()  # renamed: was shadowing builtin 'bin'
    # fix: context manager guarantees the output file is closed on error
    with open(out, "wb") as f:
        f.write(hidden)
def convertToGreyImage(imagePath, newImagePath):
    """Convert the image at *imagePath* to greyscale (ITU-R 601 luma
    weights) and save it to *newImagePath*.

    cv.LoadImage returns pixels in BGR order: channel 0 is blue,
    channel 2 is red.
    """
    image = cv.LoadImage(imagePath)
    new = cv.CreateImage(cv.GetSize(image), image.depth, 1)
    for i in range(image.height):
        for j in range(image.width):
            pixel = image[i, j]
            # BUG FIX: the weights were applied in RGB order to BGR data,
            # so blue received the 0.3 red weight and red the 0.11 blue
            # weight. Correct 601 luma is 0.3*R + 0.59*G + 0.11*B.
            new[i, j] = 0.11 * pixel[0] + 0.59 * pixel[1] + 0.3 * pixel[2]
    cv.SaveImage(newImagePath, new)
def __init__(self, orig):
    """Load *orig*, open the display window and hook up mouse handling."""
    # keep the source path for later reference
    self.name = orig
    self.image = cv.LoadImage(orig)
    # currently selected rectangle; None until the user drags one
    self.region = None
    # anchor point of an in-progress mouse drag; None when idle
    self.drag_start = None
    cv.NamedWindow("Image")
    # route mouse events on the window to our handler
    cv.SetMouseCallback("Image", self.on_mouse)
def ocr():
    """Run Tesseract OCR over eurotext.jpg and print the recognized text
    plus per-word confidence counts computed two different ways.

    NOTE(review): depends on module-level helpers header(), countWords(),
    countWords2() — not visible in this chunk.
    """
    # local import: only this function needs the legacy cv API
    import cv2.cv as cv
    api = tesseract.TessBaseAPI()
    api.Init(".", "eng", tesseract.OEM_DEFAULT)
    api.SetPageSegMode(tesseract.PSM_AUTO)
    image = cv.LoadImage("eurotext.jpg", cv.CV_LOAD_IMAGE_GRAYSCALE)
    tesseract.SetCvImage(image, api)
    text = api.GetUTF8Text()
    conf = api.MeanTextConf()
    print(text, len(text))
    print("Cofidence Level: %d %%" % conf)
    print("Confidences of All words")
    # method 1: ask the API for each word's confidence directly
    header("Method 1", "*" * 10)
    confOfText = api.AllWordConfidences()
    print(confOfText)
    print("Number of Words:")
    print("counted by tesseract: %d" % len(confOfText))
    print("counted by me: %d[%d]" % (countWords(text), countWords2(text)))
    if len(confOfText) != countWords(text):
        print("Why the words counted by tesseract are different from mine!!!!")
    # method 2: module-level helper variant of the same query
    header("Method 2", "*" * 10)
    confs = tesseract.AllWordConfidences(api)
    print(confs, len(confs))
def capture():
    """
    Using the intel training set to capture the face in the video.
    Most of them are frameworks in OpenCV.

    Walks every image under origin/, detects faces with a Haar cascade and
    saves each face crop to captured/face<N>.png.
    """
    j = 0
    # perf fix: the cascade XML was re-parsed for every single image;
    # it is loop-invariant, so load it once up front
    cascade = cv.Load('haarcascade_frontalface_alt2.xml')
    g = os.walk("origin")
    for path, d, filelist in g:
        for filename in filelist:
            img = cv.LoadImage(os.path.join(path, filename))
            image_size = cv.GetSize(img)
            # detection runs on an equalized greyscale copy
            greyscale = cv.CreateImage(image_size, 8, 1)
            cv.CvtColor(img, greyscale, cv.CV_BGR2GRAY)
            storage = cv.CreateMemStorage(0)
            cv.EqualizeHist(greyscale, greyscale)
            faces = cv.HaarDetectObjects(greyscale, cascade, storage, 1.2, 2,
                                         cv.CV_HAAR_DO_CANNY_PRUNING,
                                         (50, 50))
            for (x, y, w, h), n in faces:
                j += 1
                # ROI restricts SaveImage to just the face rectangle
                cv.SetImageROI(img, (x, y, w, h))
                cv.SaveImage("captured/face" + str(j) + ".png", img)
def extractor(self):
    """Walk self.path, compute a HoG descriptor for every image found and
    store it via self.store_numbers; returns the number of images processed.

    NOTE(review): 'column' and 'row' are read from module globals — not
    visible in this chunk; confirm where they are defined.
    """
    self.IDnumbers = 0
    for dirpath, _, filenames in os.walk(self.path):
        for file in filenames:
            # accumulator type depends on the configured writing method
            if self.writing_method == "cv":
                currentHog = []
            else:
                currentHog = numpy.zeros((0, 1))
            # skip histogram output directories entirely
            if dirpath.find("hist") != -1:
                continue
            filePath = os.path.join(dirpath, file)
            filename, fileExtension = os.path.splitext(file)
            # only process actual images — skip known data/output extensions
            if not (fileExtension == ".cv" or fileExtension == ".hist"
                    or fileExtension == ".sa" or fileExtension == ".dat"
                    or fileExtension == ".npy" or fileExtension == ".txt"):
                zimbo = time.time()  # per-image timing (reported in the
                                     # commented-out block below)
                if filename == "tp_matrix":
                    continue
                try:
                    currentPicture = cv.LoadImage(filePath)
                except:
                    # unreadable file: skip it silently
                    continue
                # gradient field -> per-cell tangent/magnitude -> HoG
                (dx, dy) = self.gradient.sobelGradient(currentPicture)
                Tangent = self.gradient.tangent(dx, dy)
                Magnitude = self.gradient.Magnitude(dx, dy)
                tangentList = self.divider.divide(
                    Tangent, column, row, option=self.divisionOption)
                MagnitudeList = self.divider.divide(
                    Magnitude, column, row, option=self.divisionOption)
                List = zip(tangentList, MagnitudeList)
                for tangent, magnitude in List:
                    if self.writing_method == "cv":
                        currentHog.append(
                            self.hog.HoG(tangent, magnitude,
                                         self.writing_method))
                    elif self.writing_method == "numpy":
                        tempHist = self.hog.HoG(tangent, magnitude,
                                                self.writing_method)
                        currentHog = numpy.vstack((currentHog, tempHist))
                    else:
                        continue
                #a = time.time()
                self.store_numbers(currentHog, dirpath, self.writing_method,
                                   filename)
                self.IDnumbers += 1
                #if self.IDnumbers % 100 == 1:
                #    print "Time:", (time.time() - zimbo) * 1000, ' ms'
                #print time.time() - a
    return self.IDnumbers
def __init__(self, img_path):
    """Load the image at *img_path* and cache its basic geometry."""
    self.image = cv.LoadImage(img_path)
    self.width = self.image.width
    self.height = self.image.height
    self.channels = self.image.channels
    # NOTE(review): cw/ch/cc look like current width/height/channel
    # cursors for walking the image — confirm against this class's users.
    self.cw = 0
    self.ch = 0
    self.cc = 0
def test_wrong_mn(self):
    """The divider must report -2 for an invalid 0x0 grid request."""
    img_path = os.path.abspath(
        os.environ['BORG'] + '/Brain/data/test_img/faces.jpg')
    image = cv.LoadImage(img_path)
    outcome = self.divider.divide(image, 0, 0)
    self.assertEqual(
        outcome, -2,
        "The divider could not give error on wrong width and height")
def main(av):
    """Embed inp/secret.txt into inp/image.jpg via LSB steganography and
    save the result to out/image_steg.png (paths relative to this file)."""
    fpath = os.path.dirname(os.path.abspath(__file__))
    # fix: close the secret file (it was leaked) and stop shadowing the
    # builtin 'str' with the local variable name
    with open(fpath + "/inp/secret.txt") as f:
        secret = f.read()
    carrier = cv.LoadImage(fpath + "/inp/image.jpg")
    steg = LSBSteg(carrier)
    steg.hideText(secret)
    steg.saveImage(fpath + "/out/image_steg.png")
def load_tile_lowres(self, tile):
    '''load a lower resolution tile from cache to fill in a map
    while waiting for a higher resolution tile

    Walks down the zoom levels from tile.zoom-1 to min_zoom, looking for a
    cached (or on-disk) coarser tile that fully contains this tile's area,
    then upscales the matching quadrant to full tile size. Returns the
    scaled image, or None if no usable lower-res tile exists.
    '''
    if tile.zoom == self.min_zoom:
        return None
    # find the equivalent lower res tile
    (lat, lon) = tile.coord()
    width2 = TILES_WIDTH
    height2 = TILES_HEIGHT
    for zoom2 in range(tile.zoom - 1, self.min_zoom - 1, -1):
        # each zoom step halves the sub-region of the coarser tile that
        # covers us (Python 2 integer division keeps these ints)
        width2 /= 2
        height2 /= 2
        if width2 == 0 or height2 == 0:
            break
        tile_info = self.coord_to_tile(lat, lon, zoom2)
        # see if its in the tile cache
        key = tile_info.key()
        if key in self._tile_cache:
            img = self._tile_cache[key]
            if img == self._unavailable:
                # known-missing tile: try the next coarser zoom level
                continue
        else:
            path = self.tile_to_path(tile_info)
            try:
                img = cv.LoadImage(path)
                # add it to the tile cache
                self._tile_cache[key] = img
                # evict oldest entries beyond the cache budget
                # NOTE(review): dict.popitem() takes no argument — this
                # presumably relies on an OrderedDict-like cache; confirm.
                while len(self._tile_cache) > self.cache_size:
                    self._tile_cache.popitem(0)
            except IOError as e:
                continue
        # copy out the quadrant we want — but only if the coarser tile
        # actually contains the full quadrant
        availx = min(TILES_WIDTH - tile_info.offsetx, width2)
        availy = min(TILES_HEIGHT - tile_info.offsety, height2)
        if availx != width2 or availy != height2:
            continue
        cv.SetImageROI(
            img, (tile_info.offsetx, tile_info.offsety, width2, height2))
        img2 = cv.CreateImage((width2, height2), 8, 3)
        try:
            cv.Copy(img, img2)
        except Exception:
            continue
        cv.ResetImageROI(img)
        # and scale it
        scaled = cv.CreateImage((TILES_WIDTH, TILES_HEIGHT), 8, 3)
        cv.Resize(img2, scaled)
        #cv.Rectangle(scaled, (0,0), (255,255), (0,255,0), 1)
        return scaled
    return None
def __init__(self, input_filename, output_filename):
    """Load *input_filename*, build its LSB channel view and wire up the
    image/LSB display windows plus mouse handling."""
    # last mouse point during a draw/drag; None when idle
    self.prev_pt = None
    self.outname = output_filename
    self.orig = cv.LoadImage(input_filename)
    # work on a copy so the original stays untouched
    self.image = cv.CloneImage(self.orig)
    # per-channel least-significant-bit planes of the image
    self.chans = self.im_to_lsb()
    cv.ShowImage("image", self.image)
    cv.ShowImage("LSB", self.chans[0])
    cv.SetMouseCallback("image", self.on_mouse)
def histogramequalization():
    """Equalize the histogram of the user-selected greyscale image and show
    the source and the equalized result side by side (blocks on a key)."""
    source = cv.LoadImage(getpath(), cv.CV_LOAD_IMAGE_GRAYSCALE)
    equalized = cv.CreateImage((source.width, source.height),
                               source.depth, source.channels)
    cv.EqualizeHist(source, equalized)
    for title in ("SourceImage", "EqualizedImage"):
        cv.NamedWindow(title, 1)
    cv.ShowImage("SourceImage", source)
    cv.ShowImage("EqualizedImage", equalized)
    cv.WaitKey(0)
def load_video(directory): imgs = [] for r, d, f in os.walk(directory): for files in f: if files.endswith(".png"): imgs.append(os.path.join(r, files)) frame_count = 10 width = cv.LoadImage(imgs[0]).width height = cv.LoadImage(imgs[0]).height orig_vid = [] print width, height for img in imgs: orig_vid.append(cv2.imread(img)) return numpy.array(orig_vid), frame_count
def lines2(): im = cv.LoadImage('roi_edges.jpg', cv.CV_LOAD_IMAGE_GRAYSCALE) pi = math.pi x = 0 dst = cv.CreateImage(cv.GetSize(im), 8, 1) cv.Canny(im, dst, 200, 200) cv.Threshold(dst, dst, 100, 255, cv.CV_THRESH_BINARY) color_dst_standard = cv.CreateImage(cv.GetSize(im), 8, 3) cv.CvtColor(im, color_dst_standard, cv.CV_GRAY2BGR) #Create output image in RGB to put red lines lines = cv.HoughLines2(dst, cv.CreateMemStorage(0), cv.CV_HOUGH_STANDARD, 1, pi / 100, 71, 0, 0) klsum = 0 klaver = 0 krsum = 0 kraver = 0 #global k #k=0 for (rho, theta) in lines[:100]: kl = [] kr = [] a = math.cos(theta) b = math.sin(theta) x0 = a * rho y0 = b * rho pt1 = (cv.Round(x0 + 1000 * (-b)), cv.Round(y0 + 1000 * (a))) pt2 = (cv.Round(x0 - 1000 * (-b)), cv.Round(y0 - 1000 * (a))) k = ((y0 - 1000 * (a)) - (y0 + 1000 * (a))) / ((x0 - 1000 * (-b)) - (x0 + 1000 * (-b))) if abs(k) < 0.4: pass elif k > 0: kr.append(k) len_kr = len(kr) for i in kr: krsum = krsum + i kraver = krsum / len_kr cv.Line(color_dst_standard, pt1, pt2, cv.CV_RGB(255, 0, 0), 2, 4) elif k < 0: kr.append(k) kl.append(k) len_kl = len(kl) for i in kl: klsum = klsum + i klaver = klsum / len_kl cv.Line(color_dst_standard, pt1, pt2, cv.CV_RGB(255, 0, 0), 2, 4) #print k # cv.Line(color_dst_standard, pt1, pt2, cv.CV_RGB(255, 0, 0), 2, 4) cv.SaveImage('lane.jpg', color_dst_standard) print '左车道平均斜率:', klaver, ' 右车道平均斜率:', kraver cv.ShowImage("Hough Standard", color_dst_standard) cv.WaitKey(0)
def process_all(results): dir = "Clubic" for file, r in zip(os.listdir(dir), results): im = cv.LoadImage(os.path.join(dir, file)) res = crack(im) if res == r: print file + ": " + res + " | " + r + " OK" else: print file + ": " + res + " | " + r + " NO"
def test():
    """Debug helper: compare the same sub-rectangle of two screenshots by
    chi-square histogram distance, saving the crops along the way.

    NOTE(review): relies on module-level compute_histogram() — not visible
    in this chunk.
    """
    #start = time.time()
    #
    src1 = cv.LoadImage("all.png", 0)
    src2 = cv.LoadImage("dark3.png", 0)
    # crop area
    w = 100
    pt1 = (535, 60)
    pt2 = (pt1[0] + w, pt1[1] + w)
    print pt1
    print pt2
    #get img size and compare
    # convert cvMat to IplImage — crop1/crop2 are mat slices, crop3/crop4
    # go through GetSubRect + GetImage to get real IplImages
    crop1 = src1[pt1[1]:pt2[1], pt1[0]:pt2[0]]
    crop2 = src2[pt1[1]:pt2[1], pt1[0]:pt2[0]]
    crop3 = cv.GetImage(cv.GetSubRect(src1, (10, 10, 100, 100)))
    crop4 = cv.GetImage(cv.GetSubRect(src2, (10, 10, 100, 100)))
    # save image
    cv.SaveImage("c01.jpg", crop1)
    cv.SaveImage("c02.jpg", crop2)
    cv.SaveImage("c03.jpg", crop3)
    cv.SaveImage("c04.jpg", crop4)
    print type(src1)
    print type(crop3)
    #cv2.GetMat
    # compute
    #return 1
    hist1 = compute_histogram(crop3)
    hist2 = compute_histogram(crop4)
    # compare (lower chi-square score = more similar histograms)
    sc = cv.CompareHist(hist1, hist2, cv.CV_COMP_CHISQR)
    print sc
def readWholeImg(imgname):
    """Load *imgname*, normalize it to an equalized 48x48 greyscale patch,
    and return [[flattened_face_vector], original_frame]."""
    curFrame = cv.LoadImage(imgname, 1)
    # greyscale at original resolution
    grey = cv.CreateImage((curFrame.width, curFrame.height), 8, 1)
    cv.CvtColor(curFrame, grey, cv.CV_BGR2GRAY)
    # shrink to the 48x48 patch the recognizer expects, then equalize
    patch = cv.CreateImage((48, 48), 8, 1)
    cv.Resize(grey, patch, cv.CV_INTER_LINEAR)
    cv.EqualizeHist(patch, patch)
    face_vector = np.asarray(patch[:, :]).reshape(48 * 48)
    return [[face_vector], curFrame]
def load_tile(self, tile): '''load a tile from cache or tile server''' # see if its in the tile cache key = tile.key() if key in self._tile_cache: img = self._tile_cache[key] if img == self._unavailable: img = self.load_tile_lowres(tile) if img is None: img = cv.LoadImage(self._unavailable) return img path = self.tile_to_path(tile) try: ret = cv.LoadImage(path) # add it to the tile cache self._tile_cache[key] = ret while len(self._tile_cache) > self.cache_size: self._tile_cache.popitem(0) return ret except IOError as e: # windows gives errno 0 for some versions of python, treat that as ENOENT # and try a download if not e.errno in [errno.ENOENT, 0]: raise pass if not self.download: img = self.load_tile_lowres(tile) if img is None: img = cv.LoadImage(self._unavailable) return img try: self._download_pending[key].refresh_time() except Exception: self._download_pending[key] = tile self.start_download_thread() img = self.load_tile_lowres(tile) if img is None: img = cv.LoadImage(self._loading) return img
def eachFile(filepath):
    """Resize every image directly under *filepath* to
    (width_scale, height_scale) greyscale and save it under the same name
    in write_path (both module globals; filepath must end with a separator)."""
    for entry in os.listdir(filepath):
        src_name = os.path.join('%s%s' % (filepath, entry))
        dst_name = os.path.join('%s%s' % (write_path, entry))
        # flag 0 = load as single-channel greyscale
        img = cv.LoadImage(src_name, 0)
        shrunk = cv.CreateImage((width_scale, height_scale), img.depth, 1)
        cv.Resize(img, shrunk, cv2.INTER_AREA)
        cv.SaveImage(dst_name, shrunk)
def auto_search():
    """Tap the in-game search button, screenshot the device, OCR the loot
    numbers and return True (plus an audio alert) when both gold and elixir
    loot are worth raiding."""
    # click search button (press + release at fixed screen coordinates)
    genymotion_session.console.mouse.put_mouse_event_absolute(660, 290, 0, 0, 0)
    genymotion_session.console.mouse.put_mouse_event_absolute(660, 290, 0, 0, 1)
    genymotion_session.console.mouse.put_mouse_event_absolute(660, 290, 0, 0, 0)
    # wait for the next base to load
    sleep(10)
    # processing: screenshot the device and pull it locally
    subprocess.call("adb shell screencap -p /sdcard/screen.png", shell=True)
    subprocess.call("adb pull /sdcard/screen.png /tmp/screen.png", shell=True)
    im = Image.open("/tmp/screen.png")
    # loot counter region (earlier attempts kept for reference)
    #box = (60, 80, 165, 180)
    #box = (53, 72, 140, 165)
    box = (57, 75, 140, 138)
    # greyscale + invert gives dark digits on light background for OCR
    loot = im.crop(box).convert('L')
    loot = ImageOps.invert(loot)
    loot.save("/tmp/loot.png", "png")
    # OCR restricted to digits, using the custom "coc" language data
    api = tesseract.TessBaseAPI()
    api.Init("/home/mrtux/app/bin/", "coc", tesseract.OEM_DEFAULT)
    api.SetVariable("tessedit_char_whitelist", "0123456789")
    api.SetPageSegMode(tesseract.PSM_AUTO)
    image = cv.LoadImage("/tmp/loot.png", cv.CV_LOAD_IMAGE_UNCHANGED)
    tesseract.SetCvImage(image, api)
    text = api.GetUTF8Text()
    conf = api.MeanTextConf()
    # first two OCR lines are the gold and elixir amounts
    total_loot = text.splitlines()
    gold_loot, elixir_loot = total_loot[0:2]
    gold_loot_text_element = gold_loot.split(" ")
    elixir_loot_text_element = elixir_loot.split(" ")
    # NOTE(review): tokens longer than 3 chars get their first character
    # stripped — presumably compensating for a recurring OCR artifact;
    # confirm against sample screenshots.
    for i in range(len(gold_loot_text_element)):
        if len(gold_loot_text_element[i]) > 3:
            gold_loot_text_element[i] = gold_loot_text_element[i][1:]
    for i in range(len(elixir_loot_text_element)):
        if len(elixir_loot_text_element[i]) > 3:
            elixir_loot_text_element[i] = elixir_loot_text_element[i][1:]
    # worth raiding: first space at index 3 (i.e. a 6+ digit amount once
    # grouped) and the leading group at least 200
    gold_expr = gold_loot.find(" ") == 3 and int(gold_loot_text_element[0]) >= 200
    elixir_expr = elixir_loot.find(" ") == 3 and int(elixir_loot_text_element[0]) >= 200
    print gold_loot
    print gold_loot_text_element
    print elixir_loot
    print elixir_loot_text_element
    if gold_expr and elixir_expr:
        # audible alert so the operator can take over
        subprocess.call("mplayer /home/mrtux/app/bin/gun.mp3", shell=True)
        api.End()
        return True
    return False
def detect_and_draw(img, cascade, c):
    """Detect faces in *img*, draw red boxes, display the frame, and return
    True when at least one face was found.

    Args:
        img: BGR frame to process (modified in place by the drawing).
        cascade: loaded Haar cascade, or falsy to skip detection.
        c: consecutive-detection counter from the caller; once c > 4 the
           frame and the face crop are written to /home/pi/fd/.

    NOTE(review): image_scale, haar_scale, min_neighbors, haar_flags,
    min_size and counter are module globals — not visible in this chunk.
    """
    # allocate temporary images
    gray = cv.CreateImage((img.width, img.height), 8, 1)
    small_img = cv.CreateImage((cv.Round(
        img.width / image_scale), cv.Round(img.height / image_scale)), 8, 1)
    # convert color input image to grayscale
    cv.CvtColor(img, gray, cv.CV_BGR2GRAY)
    # scale input image for faster processing
    cv.Resize(gray, small_img, cv.CV_INTER_LINEAR)
    cv.EqualizeHist(small_img, small_img)
    face_flag = False
    if (cascade):
        t = cv.GetTickCount()
        faces = cv.HaarDetectObjects(small_img, cascade,
                                     cv.CreateMemStorage(0), haar_scale,
                                     min_neighbors, haar_flags, min_size)
        t = cv.GetTickCount() - t
        print "detection time = %gms" % (t / (cv.GetTickFrequency() * 1000.))
        if faces:
            face_flag = True
            for ((x, y, w, h), n) in faces:
                # the input to cv.HaarDetectObjects was resized, so scale the
                # bounding box of each face and convert it to two CvPoints
                pt1 = (int(x * image_scale), int(y * image_scale))
                pt2 = (int((x + w) * image_scale), int((y + h) * image_scale))
                # once the face has been seen in enough consecutive frames
                if c > 4:
                    # save the full frame (and reset the caller's counter)
                    global counter
                    counter = -1
                    d = datetime.today()
                    datestr = d.strftime('%Y-%m-%d_%H-%M-%S')
                    outputname = '/home/pi/fd/fd_' + datestr + '.jpg'
                    cv.SaveImage(outputname, img)
                    print 'Face Detect'
                    # re-load the saved frame and crop out the face region
                    fimg = cv.LoadImage(outputname)
                    fimg_trim = fimg[pt1[1]:pt2[1], pt1[0]:pt2[0]]
                    outputname2 = '/home/pi/fd/face_' + datestr + '.jpg'
                    cv.SaveImage(outputname2, fimg_trim)
                    print 'Face Image Save'
                cv.Rectangle(img, pt1, pt2, cv.RGB(255, 0, 0), 3, 8, 0)
    cv.ShowImage("result", img)
    return face_flag