def eyeRemove(self, region): """ Crops an eye from the facePhoto and returns it as a seperate photo This method takes in a region which is interpreted to be a region representing and eye and crops the eye out. It then returns the cropped photo Args: region region - a region representing the eye Return: cv2.cv.cvmat eyePhoto - a photo of just the eye """ # really takes in four points per region crop = (region[0],region[1], region[2] - region[0], region[3] - region[1]) if DEBUG: print "Region passed to eye remove: " + str(region) print "And here's crop: " + str(crop) print "Before crop we have type: " + str(type(self.facePhoto)) print self.facePhoto cv.ShowImage("We're cropping", self.facePhoto) cv.WaitKey(0) cv.DestroyWindow("We're cropping") eye = cv.GetSubRect(self.facePhoto, crop) #eye = face.crop(region) if DEBUG: print "After crop we have type: " + str(type(eye)) cv.ShowImage("Cropped", eye) cv.WaitKey(0) cv.DestroyWindow("Cropped") return eye
def salir(self):
    """Stop the capture timer, close all four preview windows and
    schedule this widget for deletion."""
    self.ctimer.stop()
    for window in (self.window1, self.window2, self.window3, self.window4):
        cv.DestroyWindow(window)
    self.deleteLater()
def collectCheckboardPoints(self): self.pointsArray1 = np.zeros((nimages, num_pts, 2)) self.pointsArray2 = np.zeros((nimages, num_pts, 2)) cv.NamedWindow("camera") cv.NamedWindow("camera2") i = 0 while True : frame = cv.QueryFrame(self.video1) # print type(frame) # [rows1, cols] = cv.GetSize(frame) image = cv.CreateImage(cv.GetSize(frame), cv.IPL_DEPTH_8U, frame.nChannels) cv.Copy(frame, image) cv.ShowImage("camera", frame) grayScaleFullImage = cv.CreateImage((image.width, image.height), 8, 1) cv.CvtColor(image, grayScaleFullImage, cv.CV_BGR2GRAY) frame2 = cv.QueryFrame(self.video2) image2 = cv.CreateImage(cv.GetSize(frame2), cv.IPL_DEPTH_8U, frame2.nChannels) cv.Copy(frame2, image2) cv.ShowImage("camera2", frame2) grayScaleFullImage2 = cv.CreateImage((image2.width, image2.height), 8, 1) cv.CvtColor(image2, grayScaleFullImage2, cv.CV_BGR2GRAY) found, points = cv.FindChessboardCorners(grayScaleFullImage, dims, cv.CV_CALIB_CB_ADAPTIVE_THRESH) if found != 0: print "found chess board " + str(np.shape(points)) cv.DrawChessboardCorners(image, dims, points, found) cv.ShowImage("win2", image) cv.WaitKey(2) # else: # print "no chess" found2, points2 = cv.FindChessboardCorners(grayScaleFullImage2, dims, cv.CV_CALIB_CB_ADAPTIVE_THRESH) if found2 != 0: print "found chess board2" cv.DrawChessboardCorners(image2, dims, points2, found2) cv.ShowImage("win3", image2) cv.WaitKey(2) if found and found2: print "entered here!!!!!" self.pointsArray1[i, :] = points self.pointsArray2[i, :] = points2 i = i + 1 if i == nimages: self.size = cv.GetSize(image) break if cv.WaitKey(10) == 27: break cv.DestroyWindow("Camera 1") cv.DestroyWindow("Camera 2")
def pupilRemove(image, region): """ Crops the eye photo to show only the pupil and then returns it. Args: tuple region - the coordinates of the pupil circle in the form (centerX, centerY, radius) Return: photo - TODO: I'm not sure of the type """ # Converting to (topLeftX, topLeftY, width, length) if region[0] - region[2] < 0: topLeftX = 0 else: topLeftX = region[0] - region[2] if region[1] - region[2] < 0: topLeftY = 0 else: topLeftY = region[1] - region[2] if region[2] < 0: width = 0 else: width = region[2] + region[2] if region[2] < 0: length = 0 else: length = region[2] + region[2] crop = (topLeftX, topLeftY, width, length) if DEBUG: print "Region passed to pupil remove: " + str(region) print "And here's crop: " + str(crop) print "Before crop we have type: " + str(type(image)) print image cv.ShowImage("We're cropping", image) cv.WaitKey(0) cv.DestroyWindow("We're cropping") if crop[0] < 0: crop[0] = 0 if crop[1] < 0: crop[1] = 0 if crop[2] < 0: crop[2] = abs(crop[2]) else: pupil = cv.GetSubRect(image, crop) if DEBUG: print "After crop we have type: " + str(type(pupil)) cv.ShowImage("Cropped", pupil) cv.WaitKey(0) cv.DestroyWindow("Cropped") return pupil return None
def display_grid(img):
    """Blink a calibration dot at each point of a fixed 4x4 screen grid.

    Iterates over a 4x4 grid of screen positions, blinking a dot at
    each one on a fresh copy of *img*.  Stops early when the blink
    helper signals completion (returns 1) or ESC is pressed.
    """
    cols, rows = np.meshgrid(
        np.linspace(120, 1800, 4).astype('int'),
        np.linspace(60, 1020, 4).astype('int'))
    points = np.vstack((rows.flatten(), cols.flatten())).T
    for idx in range(points.shape[0]):
        pt = points[idx, :]
        print('pt: ' + str(pt))
        # Work on a copy so the background image stays clean.
        canvas = copy.deepcopy(img)
        finished = blink_dot_grid(canvas, (pt[1], pt[0]), 5)
        key = cv.WaitKey(500)
        if finished == 1 or key == 27:
            break
    cv.DestroyWindow("CALIB_SCREEN")
def findEyes(self): """ Detects eyes in a photo and initializes relevant attributes Uses opencv libarary methods to detect a face and then detect the eyes in that face. If there are exactly two eye regions found it populates the region attributes. If not exactly two eye regions are found the method returns false. Args: None Return: bool - True if there were no issues. False for any error """ #imcolor = cv.LoadImage(self.path) imcolor = self.facePhoto #Path setups cwd = os.path.dirname(os.path.abspath(sys.argv[0])) cwd += "/opencv/haarcascades/" frontalface = cwd + "haarcascade_frontalface_default.xml" eye = cwd + "haarcascade_eye.xml" faceCascade = cv.Load(frontalface) eyeCascade = cv.Load(eye) haarEyes = cv.Load(eye) storage = cv.CreateMemStorage() detectedEyes = cv.HaarDetectObjects(imcolor,haarEyes,storage) if DEBUG: print "detectedEyes = " + str(detectedEyes) if len(detectedEyes) == 2: if DEBUG: # Draw the rectangle cv.Rectangle(imcolor,(detectedEyes[0][0][0], detectedEyes[0][0][1]), (detectedEyes[0][0][0] + detectedEyes[0][0][2], detectedEyes[0][0][1] + detectedEyes[0][0][3]),cv.RGB(155,155,200),2) cv.Rectangle(imcolor,(detectedEyes[1][0][0], detectedEyes[1][0][1]), (detectedEyes[1][0][0] + detectedEyes[1][0][2], detectedEyes[1][0][1] + detectedEyes[1][0][3]),cv.RGB(155,155,200),2) cv.ShowImage("Face with eyes",imcolor) cv.WaitKey(0) cv.DestroyWindow("Face with eyes") left = (detectedEyes[0][0][0], detectedEyes[0][0][1], detectedEyes[0][0][0] + detectedEyes[0][0][2], detectedEyes[0][0][1] + detectedEyes[0][0][3]) right = (detectedEyes[1][0][0], detectedEyes[1][0][1], detectedEyes[1][0][0] + detectedEyes[1][0][2], detectedEyes[1][0][1] + detectedEyes[1][0][3]) if DEBUG: print "left: " + str(left) print "right: " + str(right) self.setEyes(left, right) return True if DEBUG: print "Found more or less than 2 eyes, returning false" return False
def dilationofimage():
    """Dilate the global src_image into dst_image and display both.

    Uses a 10x10 rectangular structuring element anchored at (5, 5)
    and a single dilation iteration.
    """
    display(src_image, "Source Image")
    kernel = cv.CreateStructuringElementEx(10, 10, 5, 5, cv.CV_SHAPE_RECT)
    cv.Dilate(src_image, dst_image, kernel, 1)
    display(dst_image, "Dilation")
    cv.WaitKey(0)
    cv.DestroyWindow("Dilation")
def LoadDisplay(): img = cv.LoadImageM(k, cv.CV_LOAD_IMAGE_COLOR) print img cv.NamedWindow("LoadAndDisplay", cv.CV_WINDOW_AUTOSIZE) cv.ShowImage("LoadAndDisplay", img) cv.WaitKey(0) cv.DestroyWindow("LoadAndDisplay")
def process_symbol(self, symbol):
    """Consume one decoded symbol of the framed transfer protocol.

    Protocol order: MESSAGE_BEGIN, HEADER_BEGIN, LEN:/HASH: fields,
    HEADER_END, then numbered base64 payload chunks ("i:<b64>"),
    terminated by MESSAGE_END (which triggers a SHA-1 integrity check
    against the HASH header and closes the preview window).

    Returns:
        bool - True to keep receiving, False once MESSAGE_END was
        processed.

    Raises:
        Exception - on a HEADER_END without LEN/HASH, or on payload
        received before MESSAGE_BEGIN.
    """
    if symbol.data == MESSAGE_BEGIN:
        self.start = True
        return True
    if symbol.data == HEADER_BEGIN:
        return True
    if 'LEN' in symbol.data:
        # NOTE(review): stored as a string, not an int — callers must
        # not rely on arithmetic with self.length.
        self.length = symbol.data.split(':')[1]
        click.secho('[*] The message will come in {0} parts'.format(self.length), fg='green')
        return True
    if 'HASH' in symbol.data:
        self.hash = symbol.data.split(':')[1]
        click.secho('[*] The message has hash: {0}'.format(self.hash), fg='green')
        return True
    if symbol.data == HEADER_END:
        if not self.length or not self.hash:
            raise Exception('Header read failed. No lengh or hash data.')
        return True
    if not self.start:
        raise Exception('Received message without proper Message Start Header')
    # Cleanup On Message End
    if symbol.data == MESSAGE_END:
        # integrity check!
        final_hash = hashlib.sha1(''.join(self.data)).hexdigest()
        if final_hash != self.hash:
            click.secho('[*] Warning! Hashcheck failed!', fg='red')
            click.secho('[*] Expected: {0}, got: {1}'.format(self.hash, final_hash), fg='red', bold=True)
        else:
            click.secho('[*] Data checksum check passed.', fg='green')
        cv.DestroyWindow(self.window_name)
        return False
    # Payload chunk: "<iteration>:<base64 data>".
    iteration, data = int(symbol.data.split(':')[0]), base64.b64decode(symbol.data.split(':')[1])
    # Drop duplicate chunks (the sender may repeat frames).
    if iteration in self.received_iterations:
        return True
    else:
        self.received_iterations.append(iteration)
    # Resynchronize on a position mismatch, warning loudly.
    if self.position != iteration:
        click.secho(
            '[*] Position lost! Transfer will fail! Expected {0} but got {1}'.format(self.position, iteration),
            fg='red')
        self.position = iteration
    self.position += 1
    self.data = self.data + data
    click.secho('[*] {0}:{1}'.format(iteration, data), fg='green', bold=True)
    return True
def main(self):
    """Capture the six faces of a Rubik's cube from the camera.

    Loops until all 6 faces are recorded: samples the average color of
    a 3x3 grid of patches from each frame (in HSV), paints the sampled
    swatches back onto the preview, and saves a face either
    automatically (robot mode, after ROTATE_DELAY seconds) or when the
    user presses space (without_robot mode).  ESC aborts.

    Return:
        The result of self.process_colors(), or None when the last
        face was never captured.
    """
    if not self.without_robot:
        self.last_time = time.time()
    # NOTE(review): placement of this prompt relative to the `if`
    # above is ambiguous in the mangled source — confirm.
    print "[!!] capture white face first"
    while self.facenum < 6:
        # Robot mode auto-saves a face after the rotation delay.
        if not self.without_robot and (time.time() - self.last_time > ROTATE_DELAY):
            self.save_color()
        frame = cv.QueryFrame(self.capture)
        if not frame:
            raise IOError, "Camera Error"
        cv.Resize(frame, self.img)
        cv.CvtColor(self.img, self.hsv, cv.CV_RGB2HSV)
        # Sample a 3x3 grid of patches; each patch is one cube sticker.
        for iy in xrange(3):
            for ix in xrange(3):
                lefttop = (int(self.left_top[0] + self.one_side * 3 / 2 * ix),
                           int(self.left_top[1] + self.one_side * 3 / 2 * iy))
                rightbottom = (int(self.left_top[0] + self.one_side * 3 / 2 * ix + self.one_side),
                               int(self.left_top[1] + self.one_side * 3 / 2 * iy + self.one_side))
                # Average RGB color of the patch, drawn back as a
                # swatch with a black border.
                col = cv.Avg(self.img[lefttop[1]:rightbottom[1], lefttop[0]:rightbottom[0]])
                cv.Rectangle(self.img, lefttop, rightbottom, (0, 0, 0), cv.CV_FILLED)
                cv.Rectangle(self.img, (lefttop[0] + 3, lefttop[1] + 3),
                             (rightbottom[0] - 3, rightbottom[1] - 3), col, cv.CV_FILLED)
                # Average HSV color is what actually gets stored.
                hsv_col = cv.Avg(self.hsv[lefttop[1]:rightbottom[1], lefttop[0]:rightbottom[0]])
                #print hsv_col
                self.colors[iy][ix] = hsv_col[:]
        cv.ShowImage("cube_capture.py", self.img)
        c = cv.WaitKey(10) % 0x100
        if c == 27:  # ESC aborts capture
            break
        elif self.without_robot and chr(c) == " ":
            # Manual mode: space saves the current face.
            self.save_color()
    #print self.colors_list
    if self.colors_list[-1]:
        r = self.process_colors()
    else:
        r = None
    cv.DestroyWindow("cube_capture.py")
    return r
def capture(key_value): capture = cv.CaptureFromCAM(0) key = key_value while True: img = cv.QueryFrame(capture) cv.ShowImage("camera", img) if key == 'capture': filename = "face.jpg" cv.SaveImage(filename, img) print "图片截取成功" break del (capture) cv.DestroyWindow("camera")
def Color_callibration(capture):
    """Interactively calibrate a color range from the camera feed.

    Phase 1: show a live preview with a target circle at (200, 300)
    until the user presses ESC.  Phase 2: sample the YCrCb value at
    that point over 100 frames, tracking per-channel min/max, then pad
    the range (+/-35 on channel 0, +/-15 on the others) and clamp to
    [0, 255].

    Args:
        capture: an opened cv capture to read frames from.

    Return:
        tuple (mini, maxi) - per-channel lower and upper bounds.
    """
    vals = []
    bgr = []
    mini = [255, 255, 255]
    maxi = [0, 0, 0]
    cv.NamedWindow("BGR", 0)
    print 'Please Put Your color in the circular area.Press ESC to start Callibration:'
    # Phase 1: let the user position the object under the circle.
    while 1:
        image = cv.QueryFrame(capture)
        cv.Flip(image, image, 1)
        cv.Circle(image, (int(200), int(300)), 10, cv.CV_RGB(255, 255, 255), 4)
        cv.ShowImage("BGR", image)
        c = cv.WaitKey(33)
        if c == 27:  # ESC starts sampling
            break
    print 'Starting Callibration...Analyzing the Object...'
    # Phase 2: sample the pixel under the circle over 100 frames.
    for i in range(0, 100):
        image = cv.QueryFrame(capture)
        cv.Flip(image, image, 1)
        cv.Smooth(image, image, cv.CV_MEDIAN, 3, 0)
        # NOTE(review): despite the "BGR"/hsv naming, the conversion
        # target is actually YCrCb.
        imagehsv = cv.CreateImage(cv.GetSize(image), 8, 3)
        cv.CvtColor(image, imagehsv, cv.CV_BGR2YCrCb)
        # Get2D takes (row, col), so this reads the pixel at x=200, y=300.
        vals = cv.Get2D(imagehsv, 300, 200)
        font = cv.InitFont(cv.CV_FONT_HERSHEY_SIMPLEX, 0.5, 1, 0, 2, 8)
        cv.PutText(
            image, " " + str(vals[0]) + "," + str(vals[1]) + "," + str(vals[2]),
            (200, 300), font, (55, 25, 255))
        # Track the running min/max per channel.
        for j in range(0, 3):
            if (vals[j] < mini[j]):
                mini[j] = vals[j]
            if (vals[j] > maxi[j]):
                maxi[j] = vals[j]
        cv.Circle(image, (int(200), int(300)), 10, cv.CV_RGB(255, 255, 255), 4)
        cv.ShowImage("BGR", image)
        c = cv.WaitKey(33)
        if c == 27:  # ESC cuts sampling short
            break
    print 'Analyzation Completed'
    # Pad the measured range, then clamp into the valid [0, 255] band.
    mini[0] -= 35
    mini[1] -= 15
    mini[2] -= 15
    maxi[0] += 35
    maxi[1] += 15
    maxi[2] += 15
    for i in range(0, 3):
        if (mini[i] < 0):
            mini[i] = 0
        if (maxi[i] > 255):
            maxi[i] = 255
    cv.DestroyWindow("BGR")
    bgr = (mini, maxi)
    return bgr
def display_pts(img, RAND_FLAG, pt_vect, label_dict):
    """Blink calibration dots at a set of screen points.

    Args:
        img: background image to draw dots on (copied per point).
        RAND_FLAG: when truthy, pick points from a shuffled 24x14 grid
            indexed by pt_vect; otherwise use a fixed 4x4 grid subset.
        pt_vect: indices selecting which points to display.
        label_dict: out-parameter; in random mode, filled with
            shuffled-index -> point mappings for the whole grid.
    """
    if (RAND_FLAG):
        # Random pattern: dense grid, randomly permuted.
        [col_ind, row_ind] = np.meshgrid(
            np.linspace(40, N - 40, 24).astype('int'),
            np.linspace(40, M - 40, 14).astype('int'))
        pt_list_orig = np.vstack((row_ind.flatten(), col_ind.flatten())).T
        ind = np.random.permutation(range(0, pt_list_orig.shape[0]))
        pt_list = pt_list_orig[ind[pt_vect], :]
    else:
        # Fixed 4x4 grid.
        [col_ind, row_ind] = np.meshgrid(
            np.linspace(120, 1800, 4).astype('int'),
            np.linspace(60, 1020, 4).astype('int'))
        pt_list = np.vstack((row_ind.flatten(), col_ind.flatten())).T
        pt_list = pt_list[pt_vect]
    for i in range(0, pt_list.shape[0]):
        pt = pt_list[i, :]
        print('pt: ' + str(pt))
        DOT_IMG = copy.deepcopy(img)
        # Display blinking dot, labelled with its index.
        blink_dot(DOT_IMG, (pt[1], pt[0]), pt_vect[i])
        key = cv.WaitKey(500)
        print('\n')
    cv.DestroyWindow("CALIB_SCREEN")
    # BUGFIX: pt_list_orig and ind only exist on the random branch;
    # the original ran this loop unconditionally and raised NameError
    # whenever RAND_FLAG was False.
    if (RAND_FLAG):
        for i in range(0, pt_list_orig.shape[0]):
            label_dict[ind[i]] = pt_list_orig[i, :]
def main():
    """Track the laser dot: grab frames, threshold, erode, find
    contours, and display the result in three windows until a key is
    pressed.
    """
    # Create windows for use later.
    # BUGFIX: the threshold window was created as "Theshold_IMG"
    # (typo) and the histogram window as "HSV Histogram", while
    # ShowImage/DestroyWindow used "Threshold_IMG"/"HSV_Histogram" —
    # the mismatched names left blank orphan windows open.  One
    # consistent name is used for each window now.
    cv.NamedWindow("LaserDuckOut", 1)
    cv.NamedWindow("Threshold_IMG", 1)
    cv.NamedWindow("HSV_Histogram", 1)
    # Initiate camera.
    capture = cv.CreateCameraCapture(0)
    # Grab frames from the camera until any key is pressed.
    while True:
        frame = cv.QueryFrame(capture)
        hist = histogram(frame)
        img = thresholdImage(frame)
        img = erodeImage(img)
        findImageContour(img, frame)
        # Mark out sampling region for histogram.
        cv.Rectangle(frame, (10, 10), (110, 110), (0, 255, 0), 1, 0)
        # Output images to the windows created previously.
        cv.ShowImage("Threshold_IMG", img)
        cv.ShowImage("LaserDuckOut", frame)
        cv.ShowImage("HSV_Histogram", hist)
        if cv.WaitKey(10) >= 0:
            break
    cv.DestroyWindow("LaserDuckOut")
    cv.DestroyWindow("Threshold_IMG")
    cv.DestroyWindow("HSV_Histogram")
def drawrandline():
    """Animate 100 random colored lines converging on (300, 200),
    then overlay a greeting and wait for a key press."""
    rng = Random()
    canvas = cv.CreateImage((700, 1000), 8, 3)
    cv.SetZero(canvas)
    cv.NamedWindow("RandomViewer", 1)
    for _ in range(100):
        # Random start point first, then random color — this call
        # order matters for reproducing the RNG sequence.
        start = (rng.randrange(0, 700), rng.randrange(0, 1000))
        color = (rng.randrange(0, 256), rng.randrange(0, 256),
                 rng.randrange(0, 256))
        cv.Line(canvas, start, (300, 200), color, 1, 8, 0)
        cv.ShowImage("RandomViewer", canvas)
        cv.WaitKey(5)
    cv.PutText(canvas, "Hello OpenCV", (100, 200),
               cv.InitFont(cv.CV_FONT_HERSHEY_SIMPLEX, 5, 10, 0, 1, 8),
               (255, 255, 255))
    cv.ShowImage("RandomViewer", canvas)
    cv.WaitKey(0)
    cv.DestroyWindow("RandomViewer")
def _run_camera(self, camera_position): cascade = cv.Load("haarcascades/haarcascade_frontalface_alt2.xml") capture = cv.CreateCameraCapture(self.camera_number[camera_position]) if self.show_main_view[camera_position]: cv.NamedWindow("result" + str(camera_position), 1) if capture: frame_copy = None #i=0 prev_t, now_t = time.time(), 0 while self.should_camera_be_able_to_run: frame = cv.QueryFrame(capture) if self.flip_image_verticaly[camera_position]: cv.Flip(frame, frame) if not frame: print "not frame" else: now_t = time.time() fps = 1 / (now_t - prev_t) prev_t = now_t print fps self.detect_and_draw(frame, cascade, camera_position) #cv.WaitKey(1) #continue #if self.show_main_view[camera_position]: cv.ShowImage("result"+str(camera_position), frame) #if not frame_copy: # frame_copy = cv.CreateImage((frame.width,frame.height),cv.IPL_DEPTH_8U, frame.nChannels) #if frame.origin == cv.IPL_ORIGIN_TL: # cv.Copy(frame, frame_copy) #else: # cv.Flip(frame, frame_copy, 0) #if cascade: #self.detect_and_draw(frame, cascade, camera_position) #else: #image = cv.LoadImage(input_name, 1) #cv.WaitKey(0) try: cv.DestroyWindow("result" + str(camera_position)) except: print "could not destroy window"
def crop_Images(images): global drawingBox folder = 'croped/' os.system('rm -r '+folder) os.system('mkdir '+folder) cv.NamedWindow('Crop') for im in images: print im try: image = cv.LoadImage(im) temp = cv.CloneImage(image) cv.SetMouseCallback('Crop', mouse_Callback, image) while True: cv.Copy(image,temp) if drawingBox: draw_box(temp) cv.ShowImage('Crop', temp) key = cv.WaitKey(10) if key == 13: break except: print 'Ocurrio un error' cv.DestroyWindow('Crop')
def camera_Capture(cam): global extension folder = 'captures/' os.system('rm -r '+folder) os.system('mkdir '+folder) name = 'out' n = 0 cv.NamedWindow("Camera", cv.CV_WINDOW_AUTOSIZE) while True: try: frame = cv.QueryFrame(cam) cv.ShowImage("Camera", frame) key = cv.WaitKey(10) if key == 13: image = name+str(n)+extension cv.SaveImage(folder+image, frame) print 'Se guardo imagen: ',image n += 1 if key == 27: cv.DestroyWindow('Camera') break except: print "Hubo un problema con la camara" break
def display_rand_pattern(img):
    """Blink a dot at two randomly chosen positions of a 100x100 grid.

    Builds a dense grid over the screen, randomly selects two points,
    and blinks a dot at each on a fresh copy of *img*.  Stops early if
    the blink helper signals completion (returns 1) or ESC is pressed.
    """
    cols, rows = np.meshgrid(
        np.linspace(0, N, 100).astype('int'),
        np.linspace(0, M, 100).astype('int'))
    all_points = np.vstack((rows.flatten(), cols.flatten())).T
    # Randomly pick two of the grid points.
    chosen = np.random.permutation(range(0, all_points.shape[0]))[0:2]
    points = all_points[chosen, :]
    for idx in range(points.shape[0]):
        pt = points[idx, :]
        print('pt: ' + str(pt))
        canvas = copy.deepcopy(img)
        finished = blink_dot(canvas, (pt[1], pt[0]), 5)
        key = cv.WaitKey(500)
        if finished == 1 or key == 27:
            break
    cv.DestroyWindow("CALIB_SCREEN")
def main():
    """Show live frames from camera 0 until ESC is pressed.

    Cooperates with other greenlets by sleeping via gevent between
    frames.  (Grayscale/Sobel/blur-difference experiments that used to
    live here were already disabled.)
    """
    cv.NamedWindow("camera", 1)
    capture = cv.CaptureFromCAM(0)
    while True:
        img = cv.QueryFrame(capture)
        cv.ShowImage('camera', img)
        if cv.WaitKey(10) == 27:  # ESC
            break
        gevent.sleep(0.5)  # yield to other greenlets
    cv.DestroyWindow("camera")
def __init__(self, photo, path):
    """Initialize a VerticalPhoto.

    Optionally previews the photo when DEBUG is on, then delegates to
    FacePhoto's __init__ to populate the eyes and related attributes.

    Args:
        photo: a photo of a face.
        path: filesystem path associated with the photo.
    Return:
        None
    """
    # Rotation of the incoming photo is currently disabled:
    # photo = self.rotateImage(photo, 270)
    # TODO(hardware team): confirm whether the camera delivers the
    # image right side up or upside down.
    if DEBUG:
        cv.ShowImage("Rotated Image (in VerticalPhoto init)", photo)
        cv.WaitKey(0)
        cv.DestroyWindow("Rotated Image (in VerticalPhoto init)")
    # Call FacePhoto (super)'s init.
    super(VerticalPhoto, self).__init__(photo, path)
# UPDATED 9/22: 20 X AND Y PIXEL MINIMUM TO BE APPENDED TO CENTROID LISTS if (55 < cv.Get2D(imghsv,centroidy,centroidx)[0] < 155) and ypix > 20 and xpix > 20: blue.append((centroidx,centroidy)) # draw colors in windows; exception handling is used to avoid IndexError. # after drawing is over, centroid from previous part is removed from list by pop. # so in next frame, centroids in this frame become initial points of line to draw # draw blue box around blue blimp blob try: cv.Circle(imdraw, blue[1], 5, (255,0,0)) cv.Line(imdraw, blue[0], blue[1], (255,0,0), 3, 8, 0) print('xpix:'+str(xpix)) blue.pop(0) print("centroid x:" + str(centroidx)) print("centroid y:" + str(centroidy)) print("") except IndexError: print "no blimp detected" # adds cv.Add(test,imdraw,test) # display windows previously created cv.ShowImage("Real", color_image) if cv.WaitKey(33) == 1048603: cv.DestroyWindow("Real") break ######################################################
def Display(image):
    """Show *image* in a "Smile Test" window until a key is pressed,
    then close the window."""
    window = "Smile Test"
    cv.NamedWindow(window)
    cv.ShowImage(window, image)
    cv.WaitKey(0)
    cv.DestroyWindow(window)
# draw the outer circle cv2.circle(imageArray, (i[0], i[1]), i[2], (0, 255, 0), 2) # draw the center of the circle cv2.circle(imageArray, (i[0], i[1]), 2, (0, 0, 255), 3) output = "X" + i[0].str() + "Y" + i[1].str() print "output = '" + output + "'" serialConnection.write(output + '\r\n') time.sleep(1) serialRead = '' while serialConnection.inWaiting() > 0: serialRead += serialConnection.read(1) if serialRead != '': print 'serialRead = ' + serialRead # display webcam image cv.ShowImage('Original', originalImage) cv.ShowImage('Threshold', thresholdImage) # handle events # As long as camera window has focus (e.g. is selected), this will intercept # pressed key; it will not work if the python terminal window has focus k = cv.WaitKey(100) if k == 0x1b: # ESC print 'ESC pressed. Exiting ...' cv.DestroyWindow("Original") # This may not work on a Mac cv.DestroyWindow("Threshold") # This may not work on a Mac serialConnection.close() break
# NOTE(review): fragment — the `elif` below continues an `if` (likely
# "open camera when no argument given") that precedes this excerpt;
# `laplace`, `planes` and `colorlaplace` are presumed initialized to
# None/empty earlier — TODO confirm.
elif len(sys.argv) == 2:
    # A single argument names a video file to process.
    capture = cv.CreateFileCapture(sys.argv[1])

if not capture:
    print "Could not initialize capturing..."
    sys.exit(-1)

cv.NamedWindow("Laplacian", 1)

while True:
    frame = cv.QueryFrame(capture)
    if frame:
        if not laplace:
            # Lazily allocate the per-channel work buffers once the
            # first frame reveals the video dimensions.
            planes = [cv.CreateImage((frame.width, frame.height), 8, 1)
                      for i in range(3)]
            laplace = cv.CreateImage((frame.width, frame.height),
                                     cv.IPL_DEPTH_16S, 1)
            colorlaplace = cv.CreateImage((frame.width, frame.height), 8, 3)
        # Laplacian is applied per channel, then the channels are
        # recombined into a color image.
        cv.Split(frame, planes[0], planes[1], planes[2], None)
        for plane in planes:
            cv.Laplace(plane, laplace, 3)
            cv.ConvertScaleAbs(laplace, plane, 1, 0)
        cv.Merge(planes[0], planes[1], planes[2], None, colorlaplace)
        cv.ShowImage("Laplacian", colorlaplace)
    if cv.WaitKey(10) != -1:
        break

cv.DestroyWindow("Laplacian")
def findWhiteDot(self):
    ## The code here is based on findPupil() from Eye.py
    """Detect the white flash-reflection dot inside the pupil.

    Reads PUPILPHOTO.jpg from this module's directory, blurs and
    thresholds it, extracts contours, and picks the contour whose
    centroid lies closest to the image center; that centroid is
    stored via self.setWhiteDotCenter().

    Args:
        None
    Return:
        None (despite the original docstring, no bool is returned).
    """
    # Image processing: read the image from disk using absolute path.
    im = cv2.imread(
        os.path.join(os.path.dirname(__file__), 'PUPILPHOTO.jpg'))
    # TODO - change all the random explicit numbers in this method
    # to descriptively named class level variables
    if DEBUG:
        print "im is of type: " + str(type(im))
    # Keep an untouched copy for the final outline-only display.
    im2 = im.copy()
    imblur = cv2.blur(im, (3, 3))
    imgray = cv2.cvtColor(imblur, cv2.COLOR_BGR2GRAY)
    if DEBUG:
        cv.ShowImage("Grayscaled", cv.fromarray(imgray))  # Grayscale Picture
        cv.WaitKey(0)
        cv.DestroyWindow("Grayscaled")
    # ret: type float, thresh: type numpy.ndarray
    ret, thresh = cv2.threshold(imgray, 127, 255, 0)
    if DEBUG:
        cv.ShowImage("Binary", cv.fromarray(thresh))  # Binary Picture
        cv.WaitKey(0)
        cv.DestroyWindow("Binary")
    contours, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE,
                                           cv2.CHAIN_APPROX_SIMPLE)
    if DEBUG:
        print("Number of Contours Found: " + str(len(contours)))
        # Final argument for drawContours(): 0 = Outline, -1 = Fill-In
        cv2.drawContours(im, contours, -1, (0, 255, 0), 0)
        cv.ShowImage("All Contours", cv.fromarray(im))
        cv.WaitKey(0)
        cv.DestroyWindow("All Contours")
    # Finding center coordinates of photo.
    photoCenterX = len(im[0]) / 2
    photoCenterY = len(im) / 2
    if DEBUG:
        print("Photo's Center Coordinates: (" + str(photoCenterX) + ", " +
              str(photoCenterY) + ")")
    # NOTE(review): min_area is set but never used below.
    min_area = maxint
    ## This is finding WhiteDot by comparing contour centroids
    shortestDist = maxint
    closestCnt = contours[0]
    closestX = closestY = 0
    for cnt in contours:
        M = cv2.moments(cnt)
        ## Ignores all contours with M00 = 0,
        ## because that will cause divide by 0 error
        if (M['m00'] != 0.0):
            centroid_x = int(M['m10'] / M['m00'])
            centroid_y = int(M['m01'] / M['m00'])
            if DEBUG:
                print cnt
                print("\n")
                print M['m10'], M['m00']
                print M['m01'], M['m00']
                print("\n\n")
            # Euclidean distance from this centroid to the image center.
            dist = np.sqrt(
                np.square(centroid_x - photoCenterX) +
                np.square(centroid_y - photoCenterY))
            if DEBUG:
                print("Distance to center = " + str(dist))
            ## At the end of the loop, the closest contour to center
            ## of image is stored.
            if (dist < shortestDist):
                closestX = centroid_x
                closestY = centroid_y
                shortestDist = dist
                closestCnt = cnt
    self.setWhiteDotCenter((closestX, closestY))
    if DEBUG:
        #print (shortestDist)
        print("Closest Contour: (" + str(closestX) + ", " + str(closestY) + ")")
        ## This only prints the one contour that is passed, on top of the image
        cv2.drawContours(im, [closestCnt], 0, (255, 0, 0), -1)
        cv2.drawContours(im2, [closestCnt], 0, (255, 0, 0), 1)
        cv.ShowImage("White Dot with Contours", cv.fromarray(im))
        cv.WaitKey(0)
        cv.DestroyWindow("White Dot with Contours")
        cv.ShowImage("White Dot only", cv.fromarray(im2))
        cv.WaitKey(0)
        cv.DestroyWindow("White Dot only")
def findCrescent(self): """ Detects a crescent within a pupil region. Uses opencv libarary methods to detect a crescent. Then initializes crescent to the area of the region found. Returns false if any errors are encountered Args: None Return: bool - True if there were no issues. False for any error """ if DEBUG: print "self.pupilPhoto is of type: " + str(type(self.pupilPhoto)) # Currently self.pupilPhoto is stored as a cvmat so we need to convert to a # numpy array before working with it. #im = np.asarray(self.pupilPhoto) # read the im from disc using absolute path im = cv2.imread( os.path.join(os.path.dirname(__file__), 'PUPILPHOTO.jpg')) if DEBUG: print "im is of type: " + str(type(im)) imblur = cv2.blur(im, (3, 3)) imgray = cv2.cvtColor(imblur, cv2.COLOR_BGR2GRAY) # TODO Take away magic (ex: 127,255,0) numbers here and make pretty # Variables at the top ret, thresh = cv2.threshold(imgray, 127, 255, 0) contours, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE) if DEBUG: print "Contours is of type: " + str(type(contours)) print "Contours is of id: " + str(hex(id(contours))) print "Countours: " + str(contours) cv.ShowImage("Thresholded", cv.fromarray(thresh)) cv.WaitKey(0) cv.DestroyWindow("Thresholded") cnt = contours[0] len(cnt) cv2.drawContours(im, contours, -1, (0, 255, 0), -1) cv.ShowImage("Coutours", cv.fromarray(im)) cv.WaitKey(0) cv.DestroyWindow("Contours") max_area = 0 for cnt in contours: area = cv2.contourArea(cnt) if area > max_area: max_area = area best_cnt = cnt #set the max_area found into the actual structure self.setCrescent(max_area) #show it, or exit on waitkey #cv2.imshow('imblur',imblur) if DEBUG: #find centroids of best_cnt M = cv2.moments(best_cnt) cx, cy = int(M['m10'] / M['m00']), int(M['m01'] / M['m00']) cv2.circle(imblur, (cx, cy), 5, 255, -1) cv2.imshow('thresh', thresh) if cv2.waitKey(33) == 27: cv.DestroyAllWindows() cnt = contours[0] len(cnt) cv2.drawContours(imblur, contours, -1, (0, 255, 0), -1) 
cv2.circle(imblur, (cx, cy), 5, 255, -1) cv.ShowImage("Contour Shading", cv.fromarray(imblur)) #cv.WaitKey(0) #cv.DestroyWindow("Testing") cv.WaitKey(0) cv.DestroyAllWindows()
# NOTE(review): fragment of a facedetect-style main.  The dedented
# "else:" below pairs with an "if capture:" that precedes this
# excerpt (camera vs. still-image mode), NOT with the while loop —
# rendered here as while/else only so the fragment parses.
while True:
    frame = cv.QueryFrame(capture)
    if not frame:
        cv.WaitKey(0)
        break
    if not frame_copy:
        # Allocate the working copy once the first frame reveals the
        # capture dimensions.
        frame_copy = cv.CreateImage((frame.width, frame.height),
                                    cv.IPL_DEPTH_8U, frame.nChannels)
    # Normalize origin: top-left images are copied straight, others
    # are flipped vertically.
    if frame.origin == cv.IPL_ORIGIN_TL:
        cv.Copy(frame, frame_copy)
    else:
        cv.Flip(frame, frame_copy, 0)
    detect_and_draw(frame_copy, cascade, jpg_cnt)
    jpg_cnt += 1
    #print(jpg_cnt)
    if cv.WaitKey(10) >= 0:
        break
else:
    # Still-image mode: process the single input file.
    image = cv.LoadImage(input_name, 1)
    detect_and_draw(image, cascade, jpg_cnt)
    jpg_cnt += 1
    cv.WaitKey(0)

cv.DestroyWindow("result")
import numpy as np
import cv2
import cv2.cv as cv

# Open the default camera.
# (cv2.VideoCapture(0) already opens the device; the redundant
# cap.open(0) call was removed.)
cap = cv2.VideoCapture(0)

# Loop until the user quits the program.
while (True):
    # Grab the current frame.
    ret, img = cap.read()
    # Show the frame only when the grab succeeded.
    if ret:
        cv2.imshow('img', img)
    # Wait for a key press; 'q' quits the program.
    key = cv2.waitKey(1)
    if key == ord('q'):
        break

# On exit, release the camera and close the window.
cap.release()
# BUGFIX: the window was created through the cv2 API but torn down
# with legacy cv.DestroyWindow; use the matching cv2 call.
cv2.destroyWindow('img')
def main():
    """NAO face/mouth data collection loop.

    Polls the robot's memory proxy for speech and face-detection
    events, grouping the face frames observed during each speech
    segment into a `mouth_data` record (with init/end timestamps and
    any recognized word), accumulating records in `main_data`.  When
    LOCAL, also renders the detected face points in a CV window and
    supports pause (any key) / quit (Enter or front tactile touch).
    """
    face_detected_count = 0
    speech_detected_count = 0
    loop_count = 0
    listening = False
    main_data = []
    #init_speech_time = 0
    mouth_data = {}  # current (temp) mouth data
    pause = False  # LOCAL only
    mouth_color = (255, 255, 255, 0)  # LOCAL only
    image = None  # LOCAL only
    if LOCAL:
        cv.NamedWindow(WINDOW_NAME, cv.CV_WINDOW_AUTOSIZE)
        image = cv.CreateImage((640, 480), cv.IPL_DEPTH_32F, 3)
    if DEBUG:
        mouth_data["Frames"] = []
    while True:
        # If running locally and the window is paused, skip this cycle.
        if LOCAL and pause:
            continue
        loop_count += 1
        FaceDetected = memoryProxy.getData("FaceDetected")
        SpeechDetected = memoryProxy.getData("SpeechDetected")
        WordRecognized = memoryProxy.getData("WordRecognized")
        FrontTactilTouched = memoryProxy.getData("FrontTactilTouched")
        # Exit (unsubscribe) when the front tactile sensor is touched.
        if FrontTactilTouched:
            if LOCAL:
                cv.DestroyWindow(WINDOW_NAME)
            break
        if SpeechDetected:
            # Start of a speech segment: open a new mouth_data record.
            if not listening:
                mouth_data['InitSpeechTime'] = time.time()
                mouth_data['Frames'] = []
                # NOTE: this timestamp lags by a few milliseconds!
                log("- SpeechDetected: Begin Listening @ " +
                    str(mouth_data['InitSpeechTime']))
                listening = True
                speech_detected_count += 1
                if LOCAL:
                    mouth_color = (0, 0, 255, 0)
        else:
            # End of a speech segment: close and store the record.
            if listening:
                mouth_data['EndSpeechTime'] = time.time()
                log("- SpeechDetected: Stop listening @ " +
                    str(mouth_data['EndSpeechTime']))
                listening = False
                main_data.append(mouth_data)
                mouth_data = {}
                if LOCAL:
                    mouth_color = (255, 255, 255, 0)
                # elapsed ms would be: 1000 * (end - init)
        # Face Detection
        FaceDetectedData = []
        if (  # listening and
                FaceDetected and isinstance(FaceDetected, list) and
                len(FaceDetected) >= 2):
            face_detected_count += 1
            log(str(face_detected_count) + "FD " + str(FaceDetected[0]))  # time.time
            FaceDetectedData = FaceDetected
            # Clear the event so the same detection is not re-read.
            memoryProxy.insertData("FaceDetected", [])
            # collect data
            current_frame = {
                'FaceDetectionTimestamp': FaceDetectedData[0]  #,
                # 'MouthPoints': FaceDetectedData[1][0][1][8]
            }
            # NOTE(review): assumes 'Frames' exists (set when listening
            # began, or at startup when DEBUG) — KeyError otherwise.
            mouth_data['Frames'].append(current_frame)
        # Observation: WordRecognized certainly arrives after the end
        # of SpeechDetected, so we can act on the last mouth_data,
        # which was already appended to main_data.
        # TODO: fix the synchronization issues here (this code should
        # be reworked) — what if the last mouth_data has not yet been
        # put into main_data?
        if WordRecognized[0] != '':
            log(WordRecognized)
            main_data[-1]['WordRecognized'] = WordRecognized
            memoryProxy.insertData("WordRecognized", [''])
        # CV window etc
        if LOCAL:
            # local sleep (cv): Enter quits, any other key toggles pause.
            k = cv.WaitKey(FacePeriod)
            if k == 13:
                cv.DestroyWindow(WINDOW_NAME)
                break
            elif k != -1:
                pause = not pause
            cv.Set(image, cv.CV_RGB(0, 0, 0))
            if FaceDetectedData != []:
                DrawPoints(FaceDetectedData, image, mouth_color)
            cv.ShowImage(WINDOW_NAME, image)
        else:
            # NAO sleep
            time.sleep(0.1)
    # end of loop
    #saveData(main_data)  # TODO: handle the missing folder when remote
    #log(main_data)
    if DEBUG:
        main_data = mouth_data
        global md
        md = main_data
    time.sleep(1)
    log("-- Main DONE")
    pass