def run(self):
    started = time.time()
    while True:
        curframe = cv.QueryFrame(self.capture)
        instant = time.time()  # Get timestamp of the frame

        self.processImage(curframe)  # Process the image

        if not self.isRecording:
            if self.somethingHasMoved():
                self.trigger_time = instant  # Update the trigger_time
                if instant > started + 5:  # Wait 5 seconds after the webcam starts, for luminosity adjustment etc.
                    print("Something is moving !")
                    if self.doRecord:  # Set isRecording=True only if we record a video
                        self.isRecording = True
        else:
            if instant >= self.trigger_time + 10:  # Record during 10 seconds
                print("Stop recording")
                self.isRecording = False
            else:
                cv.PutText(curframe, datetime.now().strftime("%b %d, %H:%M:%S"),
                           (25, 30), self.font, 0)  # Put the date on the frame
                cv.WriteFrame(self.writer, curframe)  # Write the frame

        if self.show:
            cv.ShowImage("Image", curframe)
            cv.ShowImage("Res", self.res)

        cv.Copy(self.frame2gray, self.frame1gray)
        c = cv.WaitKey(1)
        if c == 27 or c == 1048603:  # Break if the user presses 'Esc'
            break
def __init__(self, img0):
    self.thresh1 = 255
    self.thresh2 = 30
    self.level = 4
    self.storage = cv.CreateMemStorage()

    cv.NamedWindow("Source", 0)
    cv.ShowImage("Source", img0)
    cv.NamedWindow("Segmentation", 0)
    cv.CreateTrackbar("Thresh1", "Segmentation", self.thresh1, 255, self.set_thresh1)
    cv.CreateTrackbar("Thresh2", "Segmentation", self.thresh2, 255, self.set_thresh2)

    self.image0 = cv.CloneImage(img0)
    self.image1 = cv.CloneImage(img0)
    cv.ShowImage("Segmentation", self.image1)
def show_detector():
    image = frame_convert.video_cv(freenect.sync_get_video()[0])

    # cascade classifiers
    face_cascade = cv2.CascadeClassifier('opencv_data/haarcascades/haarcascade_frontalface_default.xml')
    eye_cascade = cv2.CascadeClassifier('opencv_data/haarcascades/haarcascade_eye.xml')

    # convert the image to grayscale to use it with the classifiers
    gray = cv2.cvtColor(cv2array(image), cv2.COLOR_BGR2GRAY)

    # draw detections directly on the frame
    img = image

    # detect and highlight faces
    faces = face_cascade.detectMultiScale(gray, 1.3, 5)
    for (x, y, w, h) in faces:
        cv.Rectangle(img, (int(x), int(y)), (int(x + w), int(y + h)), (0, 0, 255), 2)

    # detect and highlight eyes
    eyes = eye_cascade.detectMultiScale(gray)
    for (ex, ey, ew, eh) in eyes:
        cv.Rectangle(img, (int(ex), int(ey)), (int(ex + ew), int(ey + eh)), (0, 255, 0), 2)

    # show detector window
    cv.ShowImage('Detector', img)
def run(self):
    started = time.time()
    while True:
        currentframe = cv.QueryFrame(self.capture)
        instant = time.time()  # Get timestamp of the frame

        self.processImage(currentframe)  # Process the image

        if not self.isRecording:
            if self.somethingHasMoved():
                self.trigger_time = instant  # Update the trigger_time
                if instant > started + 10:  # Wait 10 seconds after the webcam starts, for luminosity adjustment etc.
                    print("Something is moving !")
                    if self.doRecord:  # Set isRecording=True only if we record a video
                        self.isRecording = True
            cv.DrawContours(currentframe, self.currentcontours,
                            (0, 0, 255), (0, 255, 0), 1, 2, cv.CV_FILLED)
        else:
            if instant >= self.trigger_time + 10:  # Record during 10 seconds
                print("Stop recording")
                self.isRecording = False
            else:
                cv.PutText(currentframe, datetime.now().strftime("%b %d, %H:%M:%S"),
                           (25, 30), self.font, 0)  # Put the date on the frame
                cv.WriteFrame(self.writer, currentframe)  # Write the frame

        if self.show:
            cv.ShowImage("Image", currentframe)

        c = cv.WaitKey(1) % 0x100
        if c == 27 or c == 10:  # Break if the user presses 'Esc' or Enter
            break
def maxValueGarylize(image):
    # Gray value = max of the three channels for each pixel (legacy cv API)
    grayimg = cv.CreateImage(cv.GetSize(image), image.depth, 1)
    for i in range(image.height):
        for j in range(image.width):
            grayimg[i, j] = max(image[i, j][0], image[i, j][1], image[i, j][2])
    # cv.ShowImage('srcImage', image)
    cv.ShowImage('maxGrayImage', grayimg)
def scanner_procces(frame, set_zbar):
    set_width = 100.0 / 100
    set_height = 90.0 / 100

    coord_x = int(frame.width * (1 - set_width) / 2)
    coord_y = int(frame.height * (1 - set_height) / 2)
    width = int(frame.width * set_width)
    height = int(frame.height * set_height)

    get_sub = cv.GetSubRect(frame, (coord_x + 1, coord_y + 1, width - 1, height - 1))
    cv.Rectangle(frame, (coord_x, coord_y), (coord_x + width, coord_y + height), (255, 0, 0))

    cm_im = cv.CreateImage((get_sub.width, get_sub.height), cv.IPL_DEPTH_8U, 1)
    cv.ConvertImage(get_sub, cm_im)

    image = zbar.Image(cm_im.width, cm_im.height, 'Y800', cm_im.tostring())
    set_zbar.scan(image)
    for symbol in image:
        print('\033[1;32mResult : %s symbol "%s" \033[1;m' % (symbol.type, symbol.data))

    cv.ShowImage("webcame", frame)
    #cv.ShowImage("webcame2", get_sub)
    cv.WaitKey(10)
def show_shapes(shapes):
    """ Function to show all of the shapes which are passed to it
    """
    cv.NamedWindow("Shape Model", cv.CV_WINDOW_AUTOSIZE)
    # Get size for the window
    max_x = int(max([pt.x for shape in shapes for pt in shape.pts]))
    max_y = int(max([pt.y for shape in shapes for pt in shape.pts]))
    min_x = int(min([pt.x for shape in shapes for pt in shape.pts]))
    min_y = int(min([pt.y for shape in shapes for pt in shape.pts]))

    i = cv.CreateImage((max_x - min_x + 20, max_y - min_y + 20), cv.IPL_DEPTH_8U, 3)
    cv.Set(i, (0, 0, 0))
    for shape in shapes:
        r = randint(0, 255)
        g = randint(0, 255)
        b = randint(0, 255)
        #r = 0
        #g = 0
        #b = 0
        for pt_num, pt in enumerate(shape.pts):
            # Draw normals
            #norm = shape.get_normal_to_point(pt_num)
            #cv.Line(i, (pt.x - min_x, pt.y - min_y), \
            #    (norm[0]*10 + pt.x - min_x, norm[1]*10 + pt.y - min_y), (r, g, b))
            cv.Circle(i, (int(pt.x - min_x), int(pt.y - min_y)), 2, (r, g, b), -1)
    cv.ShowImage("Shape Model", i)
def grab_images(video_file, frame_inc=100, delay=100):
    """
    Walks through the entire video and saves an image for each increment
    """
    my_video = init_video(video_file)
    if my_video is not None:
        # Display the video and save every `frame_inc` frames
        cpt = 0
        img = cv.QueryFrame(my_video)

        if img is not None:
            cv.NamedWindow("Vid", cv.CV_WINDOW_AUTOSIZE)
        else:
            return None

        nFrames = int(cv.GetCaptureProperty(my_video, cv.CV_CAP_PROP_FRAME_COUNT))
        while cpt < nFrames:
            for ii in range(frame_inc):
                img = cv.QueryFrame(my_video)
                cpt += 1

            cv.ShowImage("Vid", img)
            out_name = "" + str(cpt) + ".jpg"
            cv.SaveImage(out_name, img)
            print(out_name, str(nFrames))
            cv.WaitKey(delay)
    else:
        return None
def display_video(my_video, frame_inc=100, delay=100):
    """
    Displays frames of the video in a dumb way.
    Used to see if everything is working fine.
    my_video  = cv capture object
    frame_inc = number of increments between each frame displayed
    delay     = time delay between each image
    """
    cpt = 0
    img = cv.QueryFrame(my_video)

    if img is not None:
        cv.NamedWindow("Vid", cv.CV_WINDOW_AUTOSIZE)
    else:
        return None

    nFrames = int(cv.GetCaptureProperty(my_video, cv.CV_CAP_PROP_FRAME_COUNT))
    while cpt < nFrames:
        for ii in range(frame_inc):
            img = cv.QueryFrame(my_video)
            cpt += 1

        cv.ShowImage("Vid", img)
        cv.WaitKey(delay)
def display_img(img, delay=1000):
    """
    One liner that displays the given image on screen
    """
    cv.NamedWindow("Vid", cv.CV_WINDOW_AUTOSIZE)
    cv.ShowImage("Vid", img)
    cv.WaitKey(delay)
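# For reference, a minimal modern-API sketch of the same helper; this is my own
# illustration (the snippets above use the legacy `cv` interface) and it assumes
# `img` is a NumPy ndarray such as the one returned by cv2.imread or VideoCapture.
import cv2

def display_img_cv2(img, delay=1000):
    """Show a NumPy image in a window for `delay` milliseconds."""
    cv2.namedWindow("Vid", cv2.WINDOW_AUTOSIZE)
    cv2.imshow("Vid", img)
    cv2.waitKey(delay)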
def redraw():
    global draging
    global has_roi
    global roi_x0
    global roi_y0
    global cur_mouse_x
    global cur_mouse_y

    #Redraw ROI selection
    image2 = cv.CloneImage(current_image)

    # redraw old rects
    pen_width = 4
    if current_img_file_name in rect_table:
        rects_in_table = rect_table[current_img_file_name]
        for r in rects_in_table:
            cv.Rectangle(image2, (r[0], r[1]), (r[0] + r[2], r[1] + r[3]),
                         cv.CV_RGB(0, 255, 0), pen_width)

    # redraw new rect
    if has_roi:
        cv.Rectangle(image2, (roi_x0, roi_y0), (cur_mouse_x, cur_mouse_y),
                     cv.CV_RGB(255, 0, 255), pen_width)

    # mark background images with a red cross
    if current_img_file_name in background_files:
        cv.Line(image2, (0, 0), (image2.width, image2.height), cv.CV_RGB(255, 0, 0))
        cv.Line(image2, (0, image2.height), (image2.width, 0), cv.CV_RGB(255, 0, 0))

    cv.ShowImage(window_name, image2)
def weightedAverageValueGary(image):
    # Weighted sum of the three channels (channel order in OpenCV is B, G, R)
    grayimg = cv.CreateImage(cv.GetSize(image), image.depth, 1)
    for i in range(image.height):
        for j in range(image.width):
            grayimg[i, j] = 0.3 * image[i, j][0] + 0.59 * image[i, j][1] + 0.11 * image[i, j][2]
    # cv.ShowImage('srcImage', image)
    cv.ShowImage('weightedGrayImage', grayimg)
def averageValueGary(image):
    # Gray value = mean of the three channels
    grayimg = cv.CreateImage(cv.GetSize(image), image.depth, 1)
    for i in range(image.height):
        for j in range(image.width):
            grayimg[i, j] = (image[i, j][0] + image[i, j][1] + image[i, j][2]) / 3
    # cv.ShowImage('srcImage', image)
    cv.ShowImage('averageGrayImage', grayimg)
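# The three pixel-by-pixel grayscale loops above are very slow in Python. Below is
# a vectorized sketch of the same three conversions with NumPy/cv2; it is my own
# illustration (not part of the original snippets) and assumes the input is a BGR
# uint8 ndarray such as the one returned by cv2.imread().
import cv2
import numpy as np

def gray_variants(bgr):
    b = bgr[:, :, 0].astype(np.float32)
    g = bgr[:, :, 1].astype(np.float32)
    r = bgr[:, :, 2].astype(np.float32)
    max_gray = bgr.max(axis=2)                                     # max of the channels
    avg_gray = ((b + g + r) / 3).astype(np.uint8)                  # simple mean
    weighted = (0.11 * b + 0.59 * g + 0.30 * r).astype(np.uint8)   # luma-style weights
    return max_gray, avg_gray, weighted

# Usage sketch:
# max_g, avg_g, w_g = gray_variants(cv2.imread('lena.png'))
# cv2.imshow('weightedGrayImage', w_g); cv2.waitKey(0)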
def draw_model_fitter(f):
    cv.NamedWindow("Model Fitter", cv.CV_WINDOW_AUTOSIZE)
    # Copy the image
    i = cv.CreateImage(cv.GetSize(f.image), f.image.depth, 3)
    cv.Copy(f.image, i)
    for pt_num, pt in enumerate(f.shape.pts):
        # Draw the model points
        cv.Circle(i, (int(pt.x), int(pt.y)), 2, (0, 0, 0), -1)
    cv.ShowImage("Model Fitter", i)
    cv.WaitKey()
def display_rgb(dev, data, timestamp):
    global keep_running
    image = frame_convert.video_cv(data)
    img = cv.CreateImage(cv.GetSize(image), cv.IPL_DEPTH_16S, 3)
    cv.ShowImage('RGB', image)
    for x in range(1, 5):
        name = "img%d" % (x)
        cv.SaveImage(name + '.png', image)
        time.sleep(1)
    if cv.WaitKey(10) == 27:
        keep_running = False
def show_images(images):
    """ Shows all images in a window """
    if images is None:
        logging.error('Cannot Show Images (No image saved). Image-Type: %s (tools.py)'
                      % str(type(images).__name__))
    elif type(images).__name__ == 'list':
        for i in range(len(images)):
            print(type(images[i]))
            if type(images[i]).__name__ == 'ndarray':
                tmpimage = array2cv(images[i])
                cv.ShowImage("Image", tmpimage)
                if cv.WaitKey() == 27:
                    cv.DestroyWindow("Image")
            else:
                cv.ShowImage("Image", images[i])
                if cv.WaitKey() == 27:
                    cv.DestroyWindow("Image")
    elif type(images).__name__ == 'cvmat':
        cv.ShowImage("Image", images)
        if cv.WaitKey() == 27:
            cv.DestroyWindow("Image")
    elif type(images).__name__ == 'iplimage':
        cv.ShowImage("Image", images)
        if cv.WaitKey() == 27:
            cv.DestroyWindow("Image")
    elif type(images).__name__ == 'ndarray':
        images = array2cv(images)
        cv.ShowImage("Image", images)
        if cv.WaitKey() == 27:
            cv.DestroyWindow("Image")
    elif type(images).__name__ == 'str':
        logging.error('TypeError: Cannot Show Images (No image saved?). Image-Type: %s (tools.py)'
                      % str(type(images).__name__))
    else:
        logging.error('TypeError: Cannot Show Images. Image-Type: %s (tools.py)'
                      % str(type(images).__name__))
def show_threshold():
    global threshold
    global current_depth

    depth, timestamp = freenect.sync_get_depth()
    depth = 255 * np.logical_and(depth >= current_depth - threshold,
                                 depth <= current_depth + threshold)
    depth = depth.astype(np.uint8)

    threshold_image = cv.CreateImageHeader((depth.shape[1], depth.shape[0]),
                                           cv.IPL_DEPTH_8U, 1)
    cv.SetData(threshold_image, depth.tostring(),
               depth.dtype.itemsize * depth.shape[1])
    cv.ShowImage('Threshold', resize_image(threshold_image))
def run(self):
    while True:
        img = cv.QueryFrame(self.capture)

        #blur the source image to reduce color noise
        cv.Smooth(img, img, cv.CV_BLUR, 3)

        #convert the image to HSV (Hue, Saturation, Value) so it is
        #easier to determine the color to track (hue)
        hsv_img = cv.CreateImage(cv.GetSize(img), 8, 3)
        cv.CvtColor(img, hsv_img, cv.CV_BGR2HSV)

        #limit all pixels that don't match our criteria; here we are looking
        #for a yellowish hue (20-30). To track another colour, adjust the first
        #value in both tuples, which is the hue range. OpenCV uses 0-180 as
        #the hue range for the HSV color model.
        greenLower = (20, 190, 165)
        greenUpper = (30, 225, 220)
        thresholded_img = cv.CreateImage(cv.GetSize(hsv_img), 8, 1)
        cv.InRangeS(hsv_img, greenLower, greenUpper, thresholded_img)

        #determine the object's moments and check that the area is large
        #enough to be our object
        moments = cv.Moments(thresholded_img, 0)
        area = cv.GetCentralMoment(moments, 0, 0)

        #there can be noise in the video so ignore objects with small areas
        if (area > 100000):
            #determine the x and y coordinates of the center of the object
            #we are tracking by dividing the 1, 0 and 0, 1 moments by the area
            x = cv.GetSpatialMoment(moments, 1, 0) / area
            y = cv.GetSpatialMoment(moments, 0, 1) / area
            #print('x: ' + str(x) + ' y: ' + str(y) + ' area: ' + str(area))

            #create an overlay to mark the center of the tracked object
            overlay = cv.CreateImage(cv.GetSize(img), 8, 3)
            cv.Circle(overlay, (int(x), int(y)), 2, (255, 255, 255), 20)
            cv.Add(img, overlay, img)

            #add the thresholded image back to the img so we can see what was
            #left after it was applied
            cv.Merge(thresholded_img, None, None, None, img)

        #display the image
        cv.ShowImage(color_tracker_window, img)

        if cv.WaitKey(10) == 27:
            break
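# A short modern-API sketch of the same HSV colour-threshold tracking idea, for
# comparison; the HSV bounds are copied from the snippet above, everything else
# (camera index, blob threshold) is an assumption on my part.
import cv2
import numpy as np

cap = cv2.VideoCapture(0)
lower, upper = np.array([20, 190, 165]), np.array([30, 225, 220])
while True:
    ok, img = cap.read()
    if not ok:
        break
    hsv = cv2.cvtColor(cv2.blur(img, (3, 3)), cv2.COLOR_BGR2HSV)
    mask = cv2.inRange(hsv, lower, upper)
    m = cv2.moments(mask)
    if m["m00"] > 100000:                       # ignore small/noisy blobs
        x, y = int(m["m10"] / m["m00"]), int(m["m01"] / m["m00"])
        cv2.circle(img, (x, y), 10, (255, 255, 255), -1)
    cv2.imshow("ColorTracker (cv2)", img)
    if cv2.waitKey(10) & 0xFF == 27:
        break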
def detect_and_draw(self, img):
    # allocate temporary images
    gray = cv.CreateImage((img.width, img.height), 8, 1)
    small_img = cv.CreateImage((cv.Round(img.width / self.image_scale),
                                cv.Round(img.height / self.image_scale)), 8, 1)

    # convert color input image to grayscale
    cv.CvtColor(img, gray, cv.CV_BGR2GRAY)

    # scale input image for faster processing
    cv.Resize(gray, small_img, cv.CV_INTER_LINEAR)
    cv.EqualizeHist(small_img, small_img)

    face_found = False
    if self.cascade:
        t = cv.GetTickCount()
        faces = cv.HaarDetectObjects(small_img, self.cascade, cv.CreateMemStorage(0),
                                     self.haar_scale, self.min_neighbors,
                                     self.haar_flags, self.min_size)
        t = cv.GetTickCount() - t
        # print("time taken for detection = %gms" % (t / (cv.GetTickFrequency() * 1000.)))
        if faces:
            face_found = True
            for ((x, y, w, h), n) in faces:
                # the input to cv.HaarDetectObjects was resized, so scale the
                # bounding box of each face and convert it to two CvPoints
                pt1 = (int(x * self.image_scale), int(y * self.image_scale))
                pt2 = (int((x + w) * self.image_scale), int((y + h) * self.image_scale))
                cv.Rectangle(img, pt1, pt2, cv.RGB(255, 0, 0), 3, 8, 0)

    cv.ShowImage("video", img)
    return face_found
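# A rough modern-API equivalent of the legacy HaarDetectObjects flow above, using
# cv2.CascadeClassifier; the cascade path and the scale/neighbor parameters here
# are illustrative assumptions, not values taken from the original code.
import cv2

def detect_faces_cv2(frame_bgr, cascade_path='haarcascade_frontalface_default.xml'):
    cascade = cv2.CascadeClassifier(cascade_path)
    gray = cv2.cvtColor(frame_bgr, cv2.COLOR_BGR2GRAY)
    gray = cv2.equalizeHist(gray)
    faces = cascade.detectMultiScale(gray, scaleFactor=1.2, minNeighbors=3, minSize=(20, 20))
    for (x, y, w, h) in faces:
        cv2.rectangle(frame_bgr, (x, y), (x + w, y + h), (0, 0, 255), 3)
    cv2.imshow("video", frame_bgr)
    return len(faces) > 0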
def get_hands(image):
    """ Returns the hands as white on black. Uses hue and saturation in HSV to
    find the hands."""
    size = cv.GetSize(image)
    hsv = cv.CreateImage(size, 8, 3)
    hue = cv.CreateImage(size, 8, 1)
    sat = cv.CreateImage(size, 8, 1)
    val = cv.CreateImage(size, 8, 1)
    hands = cv.CreateImage(size, 8, 1)
    cv.CvtColor(image, hsv, cv.CV_BGR2HSV)
    cv.Split(hsv, hue, sat, val, None)

    cv.ShowImage('Live', image)
    cv.ShowImage('Hue', hue)
    cv.ShowImage('Saturation', sat)

    cv.Threshold(hue, hue, 10, 255, cv.CV_THRESH_TOZERO)       #set to 0 if <= 10, otherwise leave as is
    cv.Threshold(hue, hue, 244, 255, cv.CV_THRESH_TOZERO_INV)  #set to 0 if > 244, otherwise leave as is
    cv.Threshold(hue, hue, 0, 255, cv.CV_THRESH_BINARY_INV)    #set to 255 if = 0, otherwise 0
    cv.Threshold(sat, sat, 64, 255, cv.CV_THRESH_TOZERO)       #set to 0 if <= 64, otherwise leave as is
    cv.EqualizeHist(sat, sat)
    cv.Threshold(sat, sat, 64, 255, cv.CV_THRESH_BINARY)       #set to 0 if <= 64, otherwise 255

    cv.ShowImage('Saturation threshold', sat)
    cv.ShowImage('Hue threshold', hue)

    cv.Mul(hue, sat, hands)

    #smooth + threshold to filter noise
    #cv.Smooth(hands, hands, smoothtype=cv.CV_GAUSSIAN, param1=13, param2=13)
    #cv.Threshold(hands, hands, 200, 255, cv.CV_THRESH_BINARY)

    cv.ShowImage('Hands', hands)
    return hands
for metadata, binary_data in data:
    # Display the received image
    shape = metadata['shape']
    nchannels = metadata['nChannels']
    depth = metadata['depth']
    digest = metadata['md5']

    h = hashlib.md5()
    h.update(binary_data)
    dig = h.hexdigest()
    if dig == digest:
        print("Correct MD5 sum on binary data: %s" % dig)
    else:
        print("Incorrect MD5 sum: %s (should be %s)" % (dig, digest))

    img = cv.CreateImageHeader(shape, depth, nchannels)
    cv.SetData(img, binary_data)
    cv.ShowImage(name, img)
    cv.WaitKey(30)

if not server:
    # Send an image to the server
    img = random.choice(images)
    metadata = {
        "shape": (img.width, img.height),
        "nChannels": img.nChannels,
        "depth": img.depth
    }
    binary_data = img.tostring()
    h = hashlib.md5()
    h.update(binary_data)
    metadata['md5'] = h.hexdigest()
    print("Sending image with checksum: %s" % metadata['md5'])
for (rho, theta) in lines[:100]:
    a = math.cos(theta)  #Calculate the line orientation in order to draw it
    b = math.sin(theta)
    x0 = a * rho
    y0 = b * rho
    pt1 = (cv.Round(x0 + 1000 * (-b)), cv.Round(y0 + 1000 * (a)))
    pt2 = (cv.Round(x0 - 1000 * (-b)), cv.Round(y0 - 1000 * (a)))
    cv.Line(color_dst_standard, pt1, pt2, cv.CV_RGB(255, 0, 0), 2, 4)  #Draw the line

#---- Probabilistic ----
color_dst_proba = cv.CreateImage(cv.GetSize(im), 8, 3)
cv.CvtColor(im, color_dst_proba, cv.CV_GRAY2BGR)  # idem

rho = 1
theta = pi / 180
thresh = 50
minLength = 120  # Values can be adjusted to fit your image edges
maxGap = 20

lines = cv.HoughLines2(dst, cv.CreateMemStorage(0), cv.CV_HOUGH_PROBABILISTIC,
                       rho, theta, thresh, minLength, maxGap)
for line in lines:
    cv.Line(color_dst_proba, line[0], line[1], cv.CV_RGB(255, 0, 0), 2, 8)

cv.ShowImage('Image', im)
cv.ShowImage("Cannied", dst)
cv.ShowImage("Hough Standard", color_dst_standard)
cv.ShowImage("Hough Probabilistic", color_dst_proba)
cv.WaitKey(0)
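# For comparison, a minimal probabilistic Hough sketch with the modern cv2 API;
# the input file name and the Canny/Hough parameter values below are illustrative
# assumptions, not taken from the code above.
import cv2
import numpy as np

img = cv2.imread('road.png', cv2.IMREAD_GRAYSCALE)   # hypothetical input image
edges = cv2.Canny(img, 50, 200)
color = cv2.cvtColor(edges, cv2.COLOR_GRAY2BGR)
lines = cv2.HoughLinesP(edges, 1, np.pi / 180, threshold=50,
                        minLineLength=120, maxLineGap=20)
if lines is not None:
    for x1, y1, x2, y2 in lines[:, 0]:
        cv2.line(color, (x1, y1), (x2, y2), (0, 0, 255), 2)
cv2.imshow('Hough Probabilistic (cv2)', color)
cv2.waitKey(0)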
def show_scaled(win, img):
    # Linearly rescale the image to the full [0, 1] range before display
    min, max, pt1, pt2 = cv.MinMaxLoc(img)
    cols, rows = cv.GetSize(img)
    tmp = cv.CreateMat(rows, cols, cv.CV_32FC1)
    cv.Scale(img, tmp, 1.0 / (max - min), 1.0 * (-min) / (max - min))
    cv.ShowImage(win, tmp)
def getPolar2CartImg(image, rad):
    imgSize = cv.GetSize(image)
    c = (float(imgSize[0] / 2.0), float(imgSize[1] / 2.0))
    imgRes = cv.CreateImage((rad * 3, int(360)), 8, 3)
    #cv.LogPolar(image, imgRes, c, 50.0, cv.CV_INTER_LINEAR + cv.CV_WARP_FILL_OUTLIERS)
    cv.LogPolar(image, imgRes, c, 60.0, cv.CV_INTER_LINEAR + cv.CV_WARP_FILL_OUTLIERS)
    return (imgRes)

# Window creation for showing input, output
cv.NamedWindow("input", cv.CV_WINDOW_AUTOSIZE)
cv.NamedWindow("output", cv.CV_WINDOW_AUTOSIZE)
cv.NamedWindow("normalized", cv.CV_WINDOW_AUTOSIZE)

eyesList = os.listdir('images/eyes')
key = 0
while True:
    eye = getNewEye(eyesList)
    frame = cv.LoadImage("images/eyes/" + eye)
    iris = cv.CloneImage(frame)
    output = getPupil(frame)
    iris = getIris(output)
    cv.ShowImage("input", frame)
    cv.ShowImage("output", iris)
    normImg = cv.CloneImage(iris)
    normImg = getPolar2CartImg(iris, radius)
    cv.ShowImage("normalized", normImg)
    key = cv.WaitKey(3000)
    # seems like Esc with NumLock equals 1048603
    if (key == 27 or key == 1048603):
        break

cv.DestroyAllWindows()
def detect_and_draw(img, cascade):
    t = cv.GetTickCount()  ## start counter
    cv.CvtColor(img, gray, cv.CV_BGR2GRAY)
    cv.Resize(gray, small_img, cv.CV_INTER_LINEAR)

    #Age all trackedFaces
    for f in trackedFaces:
        f.updateLife()
    #Remove expired faces
    for f in trackedFaces:
        if (f.isTooOld()):
            trackedFaces.remove(f)

    faces = cv.HaarDetectObjects(small_img, cascade, storage, haar_scale,
                                 min_neighbors, haar_flags, min_size)
    drawline = 0

    if faces:  #found a face
        for ((x, y, w, h), n) in faces:
            matchedFace = False
            pt1 = (int(x * image_scale), int(y * image_scale))
            pt2 = (int((x + w) * image_scale), int((y + h) * image_scale))
            pt3 = (int(x * image_scale) + int(((x + w) * image_scale - x * image_scale) / 3),
                   int(y * image_scale))
            pt4 = (int((x + w) * image_scale) - int(((x + w) * image_scale - x * image_scale) / 3),
                   int((y * image_scale) + int(((y + h) * image_scale) - int(y * image_scale)) / 3))

            #check if there are trackedFaces
            if (len(trackedFaces) > 0):
                #each face being tracked
                for f in trackedFaces:
                    #the face is found (small movement)
                    if ((abs(f.xpt - pt1[0]) < FACE_MAX_MOVEMENT) and
                            (abs(f.ypt - pt1[1]) < FACE_MAX_MOVEMENT)):
                        matchedFace = True
                        f.updateFace(int(w * image_scale), int(h * image_scale), pt1[0], pt1[1])
                        mf = f
                        break

                #if the face was not found, add a new face
                if (matchedFace == False):
                    f = Face(0, int(w * image_scale), int(h * image_scale), pt1[0], pt1[1], 0)
                    trackedFaces.append(f)
                    mf = f
            #No tracked faces: adding one
            else:
                f = Face(0, int(w * image_scale), int(h * image_scale), pt1[0], pt1[1], 0)
                trackedFaces.append(f)
                mf = f

            #where to draw the face and its properties
            if (mf.age > 5):
                #draw attention line
                lnpt1 = (int(mf.xpt * scale), int(mf.ypt * scale - 5) - 5)
                if (mf.age > mf.width):
                    lnpt2 = (int(mf.xpt * scale + mf.width), int(mf.ypt * scale - 5))
                else:
                    lnpt2 = (int(mf.xpt * scale + mf.age), int(mf.ypt * scale - 5))

                cv.Rectangle(img, lnpt1, lnpt2, RED, 4, 8, 0)  ## draw bolded attention line

                ### draw eyes
                cv.Rectangle(img, mf.eyeLeft1, mf.eyeLeft2, MAGENTA, 3, 8, 0)
                cv.Rectangle(img, mf.eyeRight1, mf.eyeRight2, MAGENTA, 3, 8, 0)

                ### draw mouth
                cv.Rectangle(img, mf.mouthTopLeft, mf.mouthBotRight, ORANGE, 3, 8, 0)

                ### draw face
                cv.Rectangle(img, pt1, pt2, getColor(mf), 3, 8, 0)
                #cv.Rectangle(img, pt3, pt4, MAGENTA, 1, 8, 0)  #forehead

                drawline = mf.age

    if (CAPTURING):
        saveAsJPG(img)
    if (osName == "nt"):
        cv.Flip(img, img, 0)
    cv.ShowImage('Camera', img)
    t = cv.GetTickCount() - t  ## counter for FPS
    print("%i fps." % (cv.GetTickFrequency() * 1000000. / t))  ## print FPS
from time import time as timer
import tensorflow as tf
import numpy as np
import sys
import cv2.cv as cv  # legacy OpenCV API (CaptureFromFile, QueryFrame, ...)
import os

vidFile = cv.CaptureFromFile('Test_Avi')
nFrames = int(cv.GetCaptureProperty(vidFile, cv.CV_CAP_PROP_FRAME_COUNT))
fps = cv.GetCaptureProperty(vidFile, cv.CV_CAP_PROP_FPS)
waitPerFrameInMillisec = int(1 / fps * 1000 / 1)

print('Num. Frames = ', nFrames)
print('Frame Rate = ', fps, ' frames per sec')

for f in range(nFrames):
    frameImg = cv.QueryFrame(vidFile)
    cv.ShowImage("My Video Window", frameImg)
    cv.WaitKey(waitPerFrameInMillisec)

# When playing is done, delete the window.
# NOTE: this step is not strictly necessary; when the script terminates
# it will close all windows it owns anyway.
cv.DestroyWindow("My Video Window")
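# The same playback loop with the modern cv2.VideoCapture API, for reference; a
# minimal sketch that assumes 'Test_Avi' (taken from the snippet above) is a
# readable video file.
import cv2

cap = cv2.VideoCapture('Test_Avi')
n_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
fps = cap.get(cv2.CAP_PROP_FPS)
delay = int(1000 / fps) if fps > 0 else 40   # fall back to ~25 fps if FPS is unknown
for _ in range(n_frames):
    ok, frame = cap.read()
    if not ok:
        break
    cv2.imshow("My Video Window", frame)
    cv2.waitKey(delay)
cv2.destroyWindow("My Video Window")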
import cv2.cv as cv  # legacy OpenCV API (Threshold, FindContours, DrawContours, ...)

orig = cv.LoadImage('lena.png')
im = cv.CreateImage(cv.GetSize(orig), 8, 1)
cv.CvtColor(orig, im, cv.CV_BGR2GRAY)
#Keep the original in colour to draw contours on at the end

cv.Threshold(im, im, 128, 255, cv.CV_THRESH_BINARY)
cv.ShowImage("Threshold 1", im)

element = cv.CreateStructuringElementEx(5 * 2 + 1, 5 * 2 + 1, 5, 5, cv.CV_SHAPE_RECT)
cv.MorphologyEx(im, im, None, element, cv.CV_MOP_OPEN)   #Open and close to make the contours appear
cv.MorphologyEx(im, im, None, element, cv.CV_MOP_CLOSE)

cv.Threshold(im, im, 128, 255, cv.CV_THRESH_BINARY_INV)
cv.ShowImage("After MorphologyEx", im)
# --------------------------------

vals = cv.CloneImage(im)  #Make a clone because FindContours can modify the image
contours = cv.FindContours(vals, cv.CreateMemStorage(0), cv.CV_RETR_LIST,
                           cv.CV_CHAIN_APPROX_SIMPLE, (0, 0))

_red = (0, 0, 255)    #Red for external contours
_green = (0, 255, 0)  #Green for internal contours
levels = 2  #1 = only external contours drawn, 2 = internal contours as well, 3 ...
cv.DrawContours(orig, contours, _red, _green, levels, 2, cv.CV_FILLED)  #Draw contours on the colour image

cv.ShowImage("Image", orig)
cv.WaitKey(0)
def run(self):
    # Capture first frame to get size
    frame = cv.QueryFrame(self.capture)
    frame_size = cv.GetSize(frame)
    width = frame.width
    height = frame.height
    surface = width * height  #Surface area of the image
    cursurface = 0  #Hold the current surface that has changed

    grey_image = cv.CreateImage(cv.GetSize(frame), cv.IPL_DEPTH_8U, 1)
    moving_average = cv.CreateImage(cv.GetSize(frame), cv.IPL_DEPTH_32F, 3)
    difference = None

    while True:
        color_image = cv.QueryFrame(self.capture)

        cv.Smooth(color_image, color_image, cv.CV_GAUSSIAN, 3, 0)  #Remove false positives

        if not difference:  #For the first frame, initialise difference, temp and moving_average
            difference = cv.CloneImage(color_image)
            temp = cv.CloneImage(color_image)
            cv.ConvertScale(color_image, moving_average, 1.0, 0.0)
        else:
            cv.RunningAvg(color_image, moving_average, 0.020, None)  #Compute the running average

        # Convert the scale of the moving average.
        cv.ConvertScale(moving_average, temp, 1.0, 0.0)

        # Subtract the current frame from the moving average.
        cv.AbsDiff(color_image, temp, difference)

        #Convert the image so that it can be thresholded
        cv.CvtColor(difference, grey_image, cv.CV_RGB2GRAY)
        cv.Threshold(grey_image, grey_image, 70, 255, cv.CV_THRESH_BINARY)

        cv.Dilate(grey_image, grey_image, None, 18)  #to get object blobs
        cv.Erode(grey_image, grey_image, None, 10)

        # Find contours
        storage = cv.CreateMemStorage(0)
        contours = cv.FindContours(grey_image, storage, cv.CV_RETR_EXTERNAL, cv.CV_CHAIN_APPROX_SIMPLE)

        backcontours = contours  #Save the contours

        while contours:  #For all contours compute the area
            cursurface += cv.ContourArea(contours)
            contours = contours.h_next()

        avg = (cursurface * 100) / surface  #Percentage of the image area covered by contours
        if avg > self.ceil:
            print("Something is moving !")
        #print(avg, "%")
        cursurface = 0  #Reset the current surface to 0

        #Draw the contours on the image
        _red = (0, 0, 255)    #Red for external contours
        _green = (0, 255, 0)  #Green for internal contours
        levels = 1  #1 = only external contours drawn, 2 = internal contours as well, 3 ...
        cv.DrawContours(color_image, backcontours, _red, _green, levels, 2, cv.CV_FILLED)

        cv.ShowImage("Target", color_image)

        # Listen for ESC or ENTER key
        c = cv.WaitKey(7) % 0x100
        if c == 27 or c == 10:
            break
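# A compact modern-API sketch of the same running-average motion detector, for
# reference; this is my own illustration rather than the original author's code,
# and the camera index, learning rate and the 10% trigger below are assumptions.
import cv2
import numpy as np

cap = cv2.VideoCapture(0)
avg = None
while True:
    ok, frame = cap.read()
    if not ok:
        break
    blur = cv2.GaussianBlur(frame, (3, 3), 0)
    if avg is None:
        avg = blur.astype(np.float32)
    cv2.accumulateWeighted(blur, avg, 0.02)               # update the background model
    diff = cv2.absdiff(blur, cv2.convertScaleAbs(avg))    # difference to the background
    gray = cv2.cvtColor(diff, cv2.COLOR_BGR2GRAY)
    _, mask = cv2.threshold(gray, 70, 255, cv2.THRESH_BINARY)
    mask = cv2.dilate(mask, None, iterations=18)
    mask = cv2.erode(mask, None, iterations=10)
    # [-2] keeps this working whether findContours returns 2 or 3 values
    contours = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[-2]
    moved = sum(cv2.contourArea(c) for c in contours) * 100.0 / (frame.shape[0] * frame.shape[1])
    if moved > 10:                                         # assumed trigger threshold (percent)
        print("Something is moving !")
    cv2.drawContours(frame, contours, -1, (0, 0, 255), 2)
    cv2.imshow("Target", frame)
    if cv2.waitKey(7) & 0xFF == 27:
        break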
cv.SetCaptureProperty(capture, cv.CV_CAP_PROP_FRAME_WIDTH, 1280)
cv.SetCaptureProperty(capture, cv.CV_CAP_PROP_FRAME_HEIGHT, 720)

frame = cv.QueryFrame(capture)
test = cv.CreateImage(cv.GetSize(frame), 8, 3)
cv.NamedWindow("output")
posx = 0
posy = 0

while (1):
    frame = cv.QueryFrame(capture)
    cv.Flip(frame, frame, 1)
    # we make all drawings on imdraw.
    imdraw = cv.CreateImage(cv.GetSize(frame), 8, 3)
    # we get coordinates from imgyellowthresh
    imgyellowthresh = getthresholdedimg(frame)
    # eroding removes small noise
    cv.Erode(imgyellowthresh, imgyellowthresh, None, 1)

    (leftmost, rightmost, topmost, bottommost) = getpositions(imgyellowthresh)

    if (leftmost - rightmost != 0) or (topmost - bottommost != 0):
        lastx = posx
        lasty = posy
        posx = cv.Round((rightmost + leftmost) / 2)
        posy = cv.Round((bottommost + topmost) / 2)
        if lastx != 0 and lasty != 0:
            win32api.SetCursorPos((posx, posy))

    cv.Add(test, imdraw, test)
    cv.ShowImage("output", test)
    if cv.WaitKey(10) >= 0:
        break

cv.DestroyWindow("output")
def on_segment(self):
    comp = cv.PyrSegmentation(self.image0, self.image1, self.storage,
                              self.level, self.thresh1 + 1, self.thresh2 + 1)
    cv.ShowImage("Segmentation", self.image1)