def grab_screen():
    last_time = time.time()
    frame, accum_time, fps = 0, 0, 0
    while True:
        # screenshot normalization
        screen = np.array(ImageGrab.grab(bbox=(0, 40, 800, 640)))
        # screen = cv2.cvtColor(screen, cv2.COLOR_BGR2RGB)
        screen = cv2.cvtColor(screen, cv2.COLOR_BGR2GRAY)
        screen = cv2.GaussianBlur(screen, (3, 3), 0)

        # calculate fps
        this_time = time.time()
        print('loop took {} seconds'.format(this_time - last_time))
        accum_time += this_time - last_time
        last_time = time.time()
        frame += 1
        if accum_time >= 1:
            fps = frame
            print('fps:', frame)
            frame, accum_time = 0, 0
        cv2.putText(screen, 'fps:{}'.format(fps), (10, 30),
                    cv2.FONT_HERSHEY_COMPLEX, 0.8, (0, 255, 0), 2)

        # show screenshot
        cv2.imshow('screen', screen)
        if cv2.waitKey(25) & 0xFF == ord('q'):
            cv2.destroyAllWindows()
            break
def faceDetect():
    face_cascade = cv2.CascadeClassifier(HAAR_CASCADE_XML_FILE_FACE)
    video_capture = cv2.VideoCapture(gst_str, cv2.CAP_GSTREAMER)
    if video_capture.isOpened():
        cv2.namedWindow("faceDetect", cv2.WINDOW_NORMAL)
        cv2.resizeWindow("faceDetect", 800, 600)
        while True:
            return_key, image = video_capture.read()
            if not return_key:
                break
            grayscale_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
            detected_faces = face_cascade.detectMultiScale(grayscale_image, 1.3, 5)
            # draw a rectangle around each face
            for (x_pos, y_pos, width, height) in detected_faces:
                cv2.rectangle(image, (x_pos, y_pos),
                              (x_pos + width, y_pos + height), (0, 0, 0), 2)
            cv2.imshow("faceDetect", image)
            # Esc quits
            key = cv2.waitKey(30) & 0xff
            if key == 27:
                break
        video_capture.release()
        cv2.destroyAllWindows()
    else:
        print("failed to open camera")
def get_live_video(queue, state):
    recent_frames = deque()
    alt_frame = True
    video = cv2.VideoCapture(0)
    detector = dlib.get_frontal_face_detector()
    count = 1
    while True:
        (grabbed, frame) = video.read()
        if not grabbed:
            print("not grabbed")
            break
        if alt_frame:
            faces = detector(frame)
            frame_copy = frame.copy()
            face = None
            for f in faces:
                x1, y1 = f.left(), f.top()
                x2, y2 = f.right(), f.bottom()
                face = frame[y1:y2, x1:x2]
                cv2.rectangle(frame_copy, (x1, y1), (x2, y2), (255, 0, 0), thickness=7)
            cv2.imshow('Frame', frame_copy)
            # only preprocess and queue a crop when a face was actually found;
            # the original resized `face` unconditionally, which raises a
            # NameError on frames with no detection
            if face is not None:
                resized = cv2.resize(face, (224, 224)).astype("float32")
                norm_image = cv2.normalize(resized, None, alpha=0, beta=1,
                                           norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_32F)
                recent_frames.append(norm_image)
                if len(recent_frames) > 5:
                    recent_frames.popleft()
                if not state.empty():
                    state.get()
                    queue.put(recent_frames)
                count += 1
        alt_frame = not alt_frame
        if cv2.waitKey(10) == ord('q'):  # quit on 'q'
            break
    cv2.destroyAllWindows()
def trackbar():
    img = np.zeros((200, 512, 3), np.uint8)
    cv2.namedWindow('color_palette')
    cv2.createTrackbar('B', 'color_palette', 0, 255, onChange)
    cv2.createTrackbar('G', 'color_palette', 0, 255, onChange)
    cv2.createTrackbar('R', 'color_palette', 0, 255, onChange)
    switch = '0: OFF\n1: ON'
    cv2.createTrackbar(switch, 'color_palette', 0, 1, onChange)
    while True:
        cv2.imshow('color_palette', img)
        k = cv2.waitKey(1) & 0xFF
        if k == 27:
            break
        b = cv2.getTrackbarPos('B', 'color_palette')
        g = cv2.getTrackbarPos('G', 'color_palette')
        r = cv2.getTrackbarPos('R', 'color_palette')     # was reading 'G' again
        s = cv2.getTrackbarPos(switch, 'color_palette')  # was reading 'G' again
        if s == 0:
            img[:] = 0
        else:
            img[:] = [b, g, r]
    cv2.destroyAllWindows()
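# trackbar() above passes an `onChange` callback that is never defined in this
# snippet; since the loop polls getTrackbarPos() itself, a no-op stub is enough
# to make it runnable. A minimal sketch (the name matches the calls above; the
# empty body is an assumption):
def onChange(value):
    # cv2.createTrackbar requires a callback, but this demo reads the
    # trackbar positions in its own loop, so nothing needs to happen here
    pass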
def __del__(self):
    self.sock.close()
    # once the server-side connection succeeded, a window was created to show
    # the received video; try to close it here
    try:
        cv2.destroyAllWindows()
    except Exception:
        pass
def Drone_tracking_ex(cap):
    ret, frame = cap.read()

    # set ROI
    c, r, w, h = 900, 650, 70, 70
    track_window = (c, r, w, h)

    # build the mask / histogram
    roi = frame[r:r + h, c:c + w]
    hsv_roi = cv2.cvtColor(roi, cv2.COLOR_BGR2HSV)
    mask = cv2.inRange(hsv_roi, np.array((0., 30., 32.)), np.array((180., 255., 255.)))
    roi_hist = cv2.calcHist([hsv_roi], [0], mask, [180], [0, 180])
    cv2.normalize(roi_hist, roi_hist, 0, 255, cv2.NORM_MINMAX)
    term_crit = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 80, 1)

    while True:
        ret, frame = cap.read()
        hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
        dst = cv2.calcBackProject([hsv], [0], roi_hist, [0, 180], 1)  # was misspelled calcBcakProject
        ret, track_window = cv2.meanShift(dst, track_window, term_crit)
        x, y, w, h = track_window
        cv2.rectangle(frame, (x, y), (x + w, y + h), 255, 2)
        cv2.putText(frame, 'Tracked', (x - 25, y - 10), cv2.FONT_HERSHEY_SIMPLEX,
                    1, (255, 255, 255), 2, cv2.LINE_AA)  # cv.CV_AA is the old C-API name
        cv2.imshow('Tracking', frame)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    cap.release()
    cv2.destroyAllWindows()
def face_rec():
    names = ['wang', 'll']
    [faces_sample, labels] = read_images(GENERATE_PATH)
    labels = np.asarray(labels, dtype=np.int32)
    model = cv2.face.LBPHFaceRecognizer_create()
    model.train(np.asarray(faces_sample), np.asarray(labels))
    camera = cv2.VideoCapture(0)
    face_cascade = cv2.CascadeClassifier(
        './haarcascades/haarcascade_frontalface_default.xml')
    while True:
        status, frame = camera.read()
        faces = face_cascade.detectMultiScale(frame, 1.3, 5)
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        for (x, y, w, h) in faces:
            roi = gray[y:y + h, x:x + w]  # numpy indexing is [row, col], i.e. [y, x]
            # classify the face with the trained model
            try:
                params = model.predict(roi)
                print('label: %s, confidence: %.2f' % (params[0], params[1]))
                cv2.putText(frame, names[params[0]], (x, y - 20),
                            cv2.FONT_HERSHEY_SIMPLEX, 1, 255, 2)
            except Exception:
                continue
        cv2.imshow('camera', frame)
        if cv2.waitKey(100) & 0xff == ord('q'):
            break
    camera.release()
    cv2.destroyAllWindows()
def showImage():
    imgfile = 'images/cat.jpg'
    img = cv2.imread(imgfile, cv2.IMREAD_COLOR)
    cv2.namedWindow('cat', cv2.WINDOW_NORMAL)
    cv2.imshow('cat', img)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
def show_target(name):
    """
    Display the detected target.
    :param name: image to show
    :return: None
    """
    cv2.imshow('Show', name)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
def main(args):
    rospy.init_node('NCS_node', anonymous=False)
    ic = NCS_node()
    try:
        while True:
            ic.ncs()
    except KeyboardInterrupt:
        print("shutting down")
        cv2.destroyAllWindows()
def display(filename):
    im = cv2.imread(filename, 1)  # load the image
    if im is None:  # error handling
        print(filename, 'does not exist!')
        sys.exit('Error')
    height, width = im.shape[0:2]  # get the image size
    print('height:', height, 'width:', width)  # print the image size
    cv2.imshow(filename, im)
    cv2.waitKey(0)  # wait for a key press
    cv2.destroyAllWindows()  # close all windows
def show(self):
    if self.cap.isOpened():
        while True:
            _, img = self.cap.read()
            cv2.imshow("frame", img)
            if cv2.waitKey(1) & 0xFF == ord("q"):
                break
        self.cap.release()
        cv2.destroyAllWindows()
def showImage():
    imgfile = '/home/parksanghyeon/Downloads/practice1.png'
    img = cv2.imread(imgfile, cv2.IMREAD_COLOR)       # IMREAD_COLOR = 1, loads BGR
    img2 = cv2.imread(imgfile, cv2.IMREAD_GRAYSCALE)  # IMREAD_GRAYSCALE = 0
    # img3 = cv2.imread(imgfile, cv2.IMREAD_UNCHANGED)  # IMREAD_UNCHANGED = -1, keeps alpha

    cv2.namedWindow('practice1', cv2.WINDOW_NORMAL)
    cv2.namedWindow('practice2', cv2.WINDOW_AUTOSIZE)
    # cv2.namedWindow('practice3', cv2.WINDOW_NORMAL)

    cv2.imshow('cutting', img)
    subimg = img[300:400, 350:750]
    cv2.imshow('cutting', subimg)
    img[300:400, 0:400] = subimg
    cv2.imshow('modified', img)

    # b, g, r = cv2.split(img)
    b = img[:, :, 0]
    g = img[:, :, 1]
    r = img[:, :, 2]
    print(img[100, 100])
    print(b[100, 100], g[100, 100], r[100, 100])
    cv2.imshow('blue channel', b)
    cv2.imshow('green channel', g)
    cv2.imshow('red channel', r)
    merged_img = cv2.merge((b, g, r))
    cv2.imshow('merged', merged_img)

    print(img.shape)
    print(img2.shape)
    k = cv2.waitKey(0) & 0xFF
    if k == 27:
        cv2.destroyAllWindows()
    if k == ord('c'):
        # pressing 'c' saves a copy of practice1 as practice2
        cv2.imwrite('/home/parksanghyeon/Downloads/practice2.png', img)  # was the undefined img1
        cv2.destroyAllWindows()
def stop(self):
    self.cap.release()  # release() was missing its call parentheses
    cv2.destroyAllWindows()

# if __name__ == "__main__":
#     test = Vision()
#     while True:
#         test.update()
#         if cv2.waitKey(1) & 0xFF == ord('q'):
#             break
#     test.stop()
def main():
    img = cv2.imread("Bikesgray.jpg", 0)
    edges, img_grad = Sobel_op(img)
    ret, thresh = cv2.threshold(edges, 120, 255, cv2.THRESH_BINARY)
    H, v_h = HOG(edges, img_grad)
    cv2.imshow("th", thresh)
    cv2.imshow("H_v", v_h)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
def demo_img(net, detector, transform, img, save_dir):
    _t = {'inference': Timer(), 'misc': Timer()}
    scale = torch.Tensor(
        [img.shape[1], img.shape[0], img.shape[1], img.shape[0]])
    with torch.no_grad():
        x = transform(img).unsqueeze(0)
        if args.cuda:
            x = x.cuda()
            scale = scale.cuda()
        _t['inference'].tic()
        out = net(x)  # forward pass
        boxes, scores = detector.forward(out, priors)
        inference_time = _t['inference'].toc()
    boxes = boxes[0]
    scores = scores[0]
    boxes *= scale
    boxes = boxes.cpu().numpy()
    scores = scores.cpu().numpy()
    _t['misc'].tic()
    for j in range(1, num_classes):
        inds = np.where(scores[:, j] > args.threshold)[0]
        if len(inds) == 0:  # np.where never returns None; check for an empty result
            continue
        c_bboxes = boxes[inds]
        c_scores = scores[inds, j]
        c_dets = np.hstack((c_bboxes, c_scores[:, np.newaxis])).astype(
            np.float32, copy=False)
        # keep = nms(c_dets, args.threshold, force_cpu=args.cpu)
        keep = nms_py(c_dets, args.threshold)
        c_dets = c_dets[keep, :]
        label = labels[j - 1]
        for det in c_dets:
            # draw each box with its own score (the original reused the first
            # detection's score for every rectangle)
            bbox, score = det[:4], det[4]
            cv2.rectangle(img, (int(bbox[0]), int(bbox[1])),
                          (int(bbox[2]), int(bbox[3])), COLORS[1], 2)
            cv2.putText(img,
                        '{label}: {score:.2f}'.format(label=label, score=score),
                        (int(bbox[0]), int(bbox[1])), FONT, 1, COLORS[1], 2)
    nms_time = _t['misc'].toc()
    status = 't_inf: {:.3f} s || t_misc: {:.3f} s \r'.format(
        inference_time, nms_time)
    cv2.putText(img, status[:-2], (10, 20), FONT, 0.7, (0, 0, 0), 5)
    cv2.putText(img, status[:-2], (10, 20), FONT, 0.7, (255, 255, 255), 2)
    print(status)
    cv2.imwrite(save_dir, img)
    cv2.imshow('result', img)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
def contour():
    img = cv2.imread(r'D:\PycharmProject\data\IU.jpg')  # raw string for the Windows path
    imgray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    ret, thr = cv2.threshold(imgray, 127, 255, 0)
    # OpenCV 3 returns (image, contours, hierarchy); OpenCV 2 and 4+ return two values
    _, contours, _ = cv2.findContours(thr, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    cv2.drawContours(img, contours, -1, (0, 0, 255), 1)
    cv2.imshow('thresh', thr)
    cv2.imshow('contour', img)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
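# The three-value unpacking in contour() only works on OpenCV 3.x. A minimal
# version-agnostic sketch (the same trick imutils.grab_contours uses):
def find_contours_compat(binary):
    result = cv2.findContours(binary, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    return result[0] if len(result) == 2 else result[1]  # the contours list only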
def getFaceFromCamera(outDir):
    createDir(outDir)
    camera = cv2.VideoCapture(0)
    haar = cv2.CascadeClassifier(
        "/home/crq/opencv-python/opencv/data/haarcascades_cuda/haarcascade_frontalface_default.xml")
    # haar = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
    count = 1
    while count <= 200:
        print('process...')
        success, img = camera.read()
        if img is None:  # comparing a numpy array with == None is an error; use `is None`
            print("img is none")
            continue
        print(success)
        grayImage = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        faceImage = haar.detectMultiScale(grayImage, 1.3, 5)
        for f_x, f_y, f_w, f_h in faceImage:
            print(f_x, f_y, f_w, f_h)
            face = img[f_y:f_y + f_h, f_x:f_x + f_w]
            face = cv2.resize(face, (size, size))
            cv2.imwrite(os.path.join(outDir, str(count) + '.jpg'), face)
            cv2.rectangle(img, (f_x, f_y), (f_x + f_w, f_y + f_h), (255, 0, 0), 2)
            count += 1
        cv2.imshow('source', img)
        key = cv2.waitKey(30) & 0xff
        if key == 27:
            break
    camera.release()
    cv2.destroyAllWindows()
def picture():
    old_frame = cv2.imread("3.jpg")
    frame = cv2.imread("3.jpg")
    # Shi-Tomasi corner-detection parameters
    feature_params = dict(maxCorners=100, qualityLevel=0.05, minDistance=7, blockSize=7)
    # Lucas-Kanade optical-flow parameters
    lk_params = dict(winSize=(15, 15), maxLevel=2,
                     criteria=(cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03))
    # random colors for drawing
    color = np.random.randint(0, 255, (100, 3))
    # grayscale of the reference frame
    old_gray = cv2.cvtColor(old_frame, cv2.COLOR_BGR2GRAY)
    # detect corners; returned in p0
    p0 = cv2.goodFeaturesToTrack(old_gray, mask=None, **feature_params)
    # mask image for drawing the tracks
    mask = np.zeros_like(old_frame)
    frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)  # to grayscale
    # compute the optical flow
    p1, st, err = cv2.calcOpticalFlowPyrLK(old_gray, frame_gray, p0, None, **lk_params)
    # keep only the well-tracked points
    good_new = p1[st == 1]
    good_old = p0[st == 1]
    # draw the tracks
    for i, (new, old) in enumerate(zip(good_new, good_old)):
        a, b = new.ravel()  # flatten each point to scalar coordinates
        c, d = old.ravel()
        # drawing functions expect integer pixel coordinates
        mask = cv2.line(mask, (int(a), int(b)), (int(c), int(d)), color[i].tolist(), 2)
        frame = cv2.circle(frame, (int(a), int(b)), 1, color[i].tolist(), -1)
    img = cv2.add(frame, mask)  # overlay the drawn tracks on the frame
    cv2.imshow('frame', img)
    cv2.waitKey(0)
    cv2.destroyAllWindows()  # close all windows
def showImage2():
    imgfile = 'images/cat.jpg'
    img = cv2.imread(imgfile, cv2.IMREAD_COLOR)
    cv2.imshow('cat', img)
    while True:
        k = cv2.waitKey(0) & 0xFF
        if k == 27:
            cv2.destroyAllWindows()
            break
        elif k == ord('c'):
            cv2.imwrite('images/cat_copy.jpg', img)
            cv2.destroyAllWindows()
            break  # leave the loop once the window is gone
        else:
            print(k)
def addImage(imgfile1, imgfile2):
    img1 = cv2.imread(imgfile1)
    img2 = cv2.imread(imgfile2)
    cv2.imshow('img1', img1)
    cv2.imshow('img2', img2)
    add_img1 = img1 + img2          # numpy addition: wraps around on overflow
    add_img2 = cv2.add(img1, img2)  # OpenCV addition: saturates at 255
    cv2.imshow('img1+img2', add_img1)
    cv2.imshow('add(img1,img2)', add_img2)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
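# Why addImage() shows two different results: on uint8 arrays, numpy's `+`
# wraps around modulo 256 while cv2.add clips at the type maximum. A minimal,
# self-contained sketch:
import cv2
import numpy as np

a = np.uint8([250])
b = np.uint8([10])
print(a + b)          # [4]     -> (250 + 10) % 256, modular wraparound
print(cv2.add(a, b))  # [[255]] -> saturated at the uint8 maximum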
def take_snapshot():
    number = random.randint(0, 100)
    videoCaptureObject = cv2.VideoCapture(0, cv2.CAP_DSHOW)
    ret, frame = videoCaptureObject.read()
    img_name = "img_" + str(number) + ".png"
    cv2.imwrite(img_name, frame)
    print("Snapshot taken")
    # release the camera before returning; the original placed these lines
    # after `return`, where they could never run
    videoCaptureObject.release()
    cv2.destroyAllWindows()
    return img_name
def camPreview(previewName, camID):
    cv2.namedWindow(previewName)
    cam = cv2.VideoCapture(camID)
    if cam.isOpened():
        rval, frame = cam.read()
    else:
        rval = False
    while rval:
        cv2.imshow(previewName, frame)
        rval, frame = cam.read()
        key = cv2.waitKey(20)
        if key == 27:
            break
    # destroyAllWindows() takes no arguments; destroyWindow() closes one window by name
    cv2.destroyWindow(previewName)
def showImage(img_flag):
    img_file = '../resources/img/sana01.jpeg'
    window_title = 'sana'
    img = cv2.imread(img_file, img_flag)
    cv2.namedWindow(window_title, cv2.WINDOW_NORMAL)
    cv2.imshow(window_title, img)
    key = cv2.waitKey(0) & 0xFF
    if key == 27:
        cv2.destroyAllWindows()
    elif key == ord('c'):
        cv2.imwrite('../resources/img/sana_copy.jpg', img)
        cv2.destroyAllWindows()
def drawing():
    img = np.zeros((512, 512, 3), np.uint8)
    # draw shapes with various colors and line thicknesses
    cv2.line(img, (0, 0), (511, 511), (255, 0, 0), 5)
    cv2.rectangle(img, (384, 0), (510, 128), (0, 255, 0), 3)
    cv2.circle(img, (477, 63), 63, (0, 0, 255), -1)
    cv2.ellipse(img, (256, 256), (100, 50), 0, 0, 180, (255, 0, 0), -1)
    font = cv2.FONT_HERSHEY_SIMPLEX
    cv2.putText(img, ' OpenCV', (10, 500), font, 4, (255, 255, 255), 2)
    cv2.imshow('drawing', img)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
def save():
    cap = cv.VideoCapture(0)
    fourcc = cv.VideoWriter_fourcc(*'XVID')
    out = cv.VideoWriter('output.avi', fourcc, 20.0, (640, 480))
    while cap.isOpened():
        ret, frame = cap.read()
        if ret:
            out.write(frame)
            cv.imshow('frame', frame)
            if cv.waitKey(1) & 0xFF == ord('q'):
                break
        else:
            break
    cap.release()
    out.release()
    cv.destroyAllWindows()
def pyramid():
    img = cv2.imread(r'D:\PycharmProject\data\suji.jpg', cv2.IMREAD_ANYCOLOR)
    tmp = img.copy()
    win_titles = ['org', 'level1', 'level2', 'level3']
    g_down = [tmp]
    for i in range(3):
        tmp = cv2.pyrDown(tmp)
        g_down.append(tmp)
    for i in range(4):
        cv2.imshow(win_titles[i], g_down[i])
    cv2.waitKey(0)
    cv2.destroyAllWindows()
def main():
    try:
        cap = cv2.VideoCapture(VDEV)
    except Exception:
        # the original built this message string without ever printing it
        print("Failed to open " + VDEV)
        return
    showVideoInfo(cap)
    while True:
        ret, frame = cap.read()
        cv2.imshow("preview", frame)
        rotate_frame = cv2.rotate(frame, cv2.ROTATE_90_CLOCKWISE)
        cv2.imshow("rotate", rotate_frame)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    cap.release()
    cv2.destroyAllWindows()
def imgvector():
    img = cv2.imread("/home/zk/opencvtest/opencvlearn/people.jpg")
    vector = img.shape
    print(vector)
    raw = img.flatten()
    x = raw.shape
    means = cv2.mean(img)  # per-channel means, not an image
    print('means ', means)
    print('raw.shape ', raw.shape)
    print('raw', raw)
    cv2.imshow("img", img)
    if cv2.waitKey(0) == 27:
        cv2.destroyAllWindows()
def movTest():
    vc = cv2.VideoCapture("mp4/test01.mp4")
    if vc.isOpened():
        opened, frame = vc.read()  # read the video frame by frame; `opened` was `open`, shadowing the builtin
    else:
        opened = False
    while opened:
        ret, frame = vc.read()
        if frame is None:
            break
        if ret:
            gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            cv2.imshow("result", gray)
            if cv2.waitKey(1) & 0xFF == 27:
                break
    vc.release()
    cv2.destroyAllWindows()
'''
img = cv2.imread("cctv.jpg", 0)
shape = img.shape
img = cv2.GaussianBlur(img, (3, 3), 0)
canny = cv2.Canny(img, 10, 250)
cv2.imshow('Canny', canny)
cv2.imwrite("cctv.jpg", canny)

image = cv2.imread('cctv.jpg')
height = image.shape[0]  # image height
width = image.shape[1]   # image width
print(height)
print(width)
res = cv2.resize(image, (200, 100), interpolation=cv2.INTER_LINEAR)
cv2.imshow('iker', res)
cv2.imshow('image', image)
cv2.waitKey(0)
cv2.destroyAllWindows()
'''
'''
CV_INTER_NN     - nearest-neighbor interpolation
CV_INTER_LINEAR - bilinear interpolation (the default)
CV_INTER_AREA   - resampling using pixel area relation; avoids moire patterns
                  when shrinking an image, and behaves like CV_INTER_NN when
                  enlarging one
CV_INTER_CUBIC  - bicubic interpolation
'''
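# The CV_INTER_* constants listed above are the old C-API names; the cv2
# module exposes the same modes as cv2.INTER_*. A minimal sketch comparing
# them (reuses the 'cctv.jpg' path from the commented block; any image works):
import cv2

image = cv2.imread('cctv.jpg')
for name, flag in [('nearest', cv2.INTER_NEAREST),
                   ('linear', cv2.INTER_LINEAR),
                   ('area', cv2.INTER_AREA),
                   ('cubic', cv2.INTER_CUBIC)]:
    cv2.imshow(name, cv2.resize(image, (200, 100), interpolation=flag))
cv2.waitKey(0)
cv2.destroyAllWindows()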
def main():
    """
    This is the main program. It detects motion on the camera and alarms
    if motion is detected. The original form of this was obtained from:
    http://www.pyimagesearch.com/2015/05/25/basic-motion-detection-and-tracking-with-python-and-opencv/
    :return:
    """
    alarm = False
    ap = argparse.ArgumentParser()
    # "-v" was misspelled "--v", which changes the argparse dest from
    # "video" to "v" and breaks args.get("video") below
    ap.add_argument("-v", "--video", help="path to the video file")
    ap.add_argument("-a", "--min-area", type=int, default=250, help="minimum area size")
    args = vars(ap.parse_args())
    if args.get("video", None) is None:
        camera = cv2.VideoCapture(0)
        time.sleep(0.25)
    else:
        camera = cv2.VideoCapture(args["video"])
    firstFrame = None
    while True:
        (grabbed, frame) = camera.read()
        text = "NO MOTION DETECTED"
        if not grabbed:
            break
        frame = imutils.resize(frame, width=500)
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        gray = cv2.GaussianBlur(gray, (21, 21), 0)
        if firstFrame is None:
            firstFrame = gray
            continue
        frameDelta = cv2.absdiff(firstFrame, gray)
        thresh = cv2.threshold(frameDelta, 25, 255, cv2.THRESH_BINARY)[1]
        thresh = cv2.dilate(thresh, None, iterations=2)
        # OpenCV 2.4-style unpacking; other versions return different tuples
        (cnts, _) = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,
                                     cv2.CHAIN_APPROX_SIMPLE)
        for c in cnts:
            if cv2.contourArea(c) < args["min_area"]:
                continue
            (x, y, w, h) = cv2.boundingRect(c)
            cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
            text = "MOTION DETECTED"
        cv2.putText(frame, "Room Status: {}".format(text), (10, 20),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)
        cv2.putText(frame, datetime.datetime.now().strftime("%A %d %B %Y %I:%M:%S%p"),
                    (10, frame.shape[0] - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.35, (0, 0, 255), 1)
        cv2.imshow("Security Feed", frame)
        if text == "MOTION DETECTED" and not alarm:
            send_message()
            winsound.Beep(2500, 1000)
            alarm = True
        key = cv2.waitKey(1) & 0xFF
        if key == ord("q"):
            break
    camera.release()
    cv2.destroyAllWindows()
def method():
    # define the upper and lower boundaries for a color to be considered "blue"
    blueLower = np.array([14, 65, 183], dtype="uint8")
    blueUpper = np.array([81, 116, 233], dtype="uint8")

    # load camera
    camera = cv2.VideoCapture(0)

    # keep looping
    while True:
        # grab the current frame
        (grabbed, frame) = camera.read()

        # check to see if we have reached the end of the video
        if not grabbed:
            break

        # determine which pixels fall within the blue boundaries
        # and then blur the binary image
        blue = cv2.inRange(frame, blueLower, blueUpper)
        blue = cv2.GaussianBlur(blue, (3, 3), 0)

        # find contours in the image (OpenCV 2.4-style unpacking)
        (cnts, _) = cv2.findContours(blue.copy(), cv2.RETR_EXTERNAL,
                                     cv2.CHAIN_APPROX_SIMPLE)

        # check to see if any contours were found
        if len(cnts) > 0:
            # sort the contours and keep the largest ones -- we assume these
            # contours correspond to the area of the object
            sorted_cnts = sorted(cnts, key=cv2.contourArea, reverse=True)
            displayed_rectangles = 0
            for cnt in sorted_cnts:
                # compute the (rotated) bounding box around the contour and
                # draw it (cv2.cv.BoxPoints is the OpenCV 2.4 name; it became
                # cv2.boxPoints in OpenCV 3+)
                rect = np.int32(cv2.cv.BoxPoints(cv2.minAreaRect(cnt)))
                cv2.drawContours(frame, [rect], -1, (0, 255, 0), 2)
                displayed_rectangles += 1
                if displayed_rectangles >= 2:
                    break

        # show the frame and the binary image
        cv2.imshow("Tracking", frame)
        cv2.imshow("Binary", blue)

        # on a fast machine more than 32 frames per second would be shown,
        # which looks like 'fast forward'; sleeping briefly between frames is
        # a simple hack -- comment this out on a slow Raspberry Pi
        time.sleep(0.025)

        # if the 'q' key is pressed, stop the loop
        if cv2.waitKey(1) & 0xFF == ord("q"):
            break

    # cleanup the camera and close any open windows
    camera.release()
    cv2.destroyAllWindows()
def dtmove(cap=cv2.VideoCapture(0)):
    # note: the default capture is created once, at function-definition time
    occflag = 0
    motionCounter = 0
    avg = None
    times = 0
    time.sleep(1)
    # let the camera warm up
    for i in range(0, 50):
        ret, frame = cap.read()
    ret, frame = cap.read()
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    avg = cv2.GaussianBlur(gray, (21, 21), 0)
    grabold = avg.copy()
    diflx = avg.copy()
    while True:
        timestamp = datetime.datetime.now()
        ret, frame = cap.read()
        grab = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        gray = cv2.GaussianBlur(grab, (21, 21), 0)
        differ = cv2.absdiff(gray, cv2.convertScaleAbs(avg))
        ret, thresh = cv2.threshold(differ, 50, 255, cv2.THRESH_BINARY)
        thresh = cv2.dilate(thresh, None, iterations=8)
        (cnts, _) = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,
                                     cv2.CHAIN_APPROX_SIMPLE)
        for c in cnts:
            if cv2.contourArea(c) < 500:
                continue
            # compute the contour's bounding box and draw it on the current frame
            (x, y, w, h) = cv2.boundingRect(c)
            cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
            occflag = 1
        # re-acquire the background
        if occflag == 0:
            (difavg, _, _, _) = cv2.mean(differ)
            print("m", difavg)
            if difavg > 2:
                avg = gray.copy()
        else:
            if times >= 30:
                times = 0
                diflx = cv2.absdiff(gray, cv2.convertScaleAbs(grabold))
                (difzx, _, _, _) = cv2.mean(diflx)
                print("d", difzx)
                grabold = gray.copy()
                if difzx < 2:
                    avg = gray.copy()
            times = times + 1
        occflag = 0
        cv2.imshow("frame", frame)  # window name was misspelled "farme"
        cv2.imshow("thresh", thresh)
        cv2.imshow("avg", avg)
        cv2.imshow("differ", differ)
        key = cv2.waitKey(15) & 0xFF
        if key == ord("s"):
            ts = timestamp.strftime("%A %d %B %Y %I:%M:%S%p")
            cv2.imwrite("/home/lucky_d/pyex/images/" + ts + ".jpg", frame)
            print("img saved as " + ts + ".jpg")
        if key == ord("q"):
            break
    cap.release()
    cv2.destroyAllWindows()
    return
def destory(self):
    self._cap.release()
    cv2.destroyAllWindows()