def main_debug(displaying):
    video_file = os.path.join(os.path.dirname(__file__), "video/staircase.mp4")
    vid_stream = VideoStream(video_file, interval=0.03)
    vid_stream.start()

    if debug:  # module-level flag, assumed to be defined elsewhere in this file
        import ptvsd
        ptvsd.enable_attach(('0.0.0.0', 56781))
        ptvsd.wait_for_attach()
        ptvsd.break_into_debugger()

    while True:
        _, frame = vid_stream.get_frame_with_id()
        detections = detector.detect(frame)
        if not displaying:
            logging.info(detections)
            continue
        frame = display(frame, detections)
        # check to see if the output frame should be displayed to our screen
        cv2.imshow("Frame", frame)
        key = cv2.waitKey(1) & 0xFF
        if key == ord('q') or key == 27:  # 'q' or ESC quits
            break

    cv2.destroyAllWindows()
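# --- Usage sketch (assumption): main_debug() depends on module-level
# names (debug, detector, display, logging) defined elsewhere in this
# file. A minimal entry point could look like the following; the
# _NullDetector stub is hypothetical and exists only to keep the sketch
# self-contained.
if __name__ == "__main__":
    debug = False  # set True to block until a ptvsd debugger attaches

    class _NullDetector(object):
        # stand-in detector; the real project constructs its own
        def detect(self, frame):
            return []

    detector = _NullDetector()
    main_debug(displaying=True)  # press 'q' or ESC in the window to quit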
class VideoCamera(object):
    INIT_TIME = 100
    FRAME_WIDTH = 640
    FRAME_HEIGHT = 480
    HISTORY = 25
    THRESHOLD = 18

    def __init__(self, usePiCamera=True):
        # initialize the camera and grab a reference to the raw camera capture
        self.vs = VideoStream(usePiCamera=usePiCamera, src=0,
                              resolution=(self.FRAME_WIDTH, self.FRAME_HEIGHT),
                              framerate=12)
        self.kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))
        self.fgbg = cv2.createBackgroundSubtractorMOG2(history=self.INIT_TIME,
                                                       varThreshold=12,
                                                       detectShadows=False)
        self.face_cascade = cv2.CascadeClassifier(
            'models/haarcascade_frontalface_default.xml')
        self.background1 = cv2.imread('backgrounds/background1.jpeg')
        self.background1 = cv2.resize(self.background1,
                                      (self.FRAME_WIDTH, self.FRAME_HEIGHT))
        self.hats = [
            cv2.imread('./hats/' + file, cv2.IMREAD_UNCHANGED)
            for file in os.listdir('./hats')
        ]
        self.stopped = False
        self.frame = None
        self.state = 0
        self.time = 0
        self.lastReady = 0
        self.lastCapture = 0
        self.history = np.zeros(self.HISTORY)

    def start(self):
        self.vs.start()
        # start the thread to process frames from the video stream
        t = Thread(target=self.update, args=())
        t.daemon = True
        t.start()
        return self

    def stop(self):
        self.vs.stop()
        # indicate that the thread should be stopped
        self.stopped = True

    def update(self):
        # keep looping until the thread is stopped
        while True:
            if self.stopped:
                return

            # grab an image from the video stream
            frame = self.vs.read()

            # wait until frames start to become available
            if frame is None:
                time.sleep(1)
                continue

            # resize the frame to the working dimensions
            frame = cv2.resize(frame, (self.FRAME_WIDTH, self.FRAME_HEIGHT),
                               0, 0, cv2.INTER_CUBIC)
            gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            gray = cv2.equalizeHist(gray)
            # freeze background learning once training (S0) is over
            fgmask = self.fgbg.apply(frame,
                                     learningRate=0 if self.state > 0 else -1)
            fgmask = cv2.morphologyEx(fgmask, cv2.MORPH_OPEN, self.kernel)
            bgmask = 255 - fgmask

            # State machine
            # S0: Training
            if self.state == 0:
                cv2.putText(img=fgmask,
                            text='Training Segmentation Algorithm ('
                            + str(int(100.0 * self.time / self.INIT_TIME)) + ' %)',
                            org=(0, self.FRAME_HEIGHT - 5),
                            fontFace=cv2.FONT_HERSHEY_DUPLEX,
                            fontScale=.7, color=(255, 255, 255), thickness=1)
                # transition to S1
                if self.time >= self.INIT_TIME:
                    self.state = 1
                # show the mask while training
                self.frame = fgmask

            # S1: Ready to capture
            elif self.state == 1:
                # detect and draw faces
                faces = self.face_cascade.detectMultiScale(gray, 1.3, 5)
                for (x, y, w, h) in faces:
                    cv2.rectangle(frame, (x, y), (x + w, y + h), (255, 0, 0), 2)
                # update the detection history
                self.history[self.time % self.HISTORY] = min(len(faces), 1)
                self.frame = frame
                #self.frame = cv2.bitwise_and(self.background1, self.background1, mask=bgmask) + cv2.bitwise_and(frame, frame, mask=fgmask)
                cv2.putText(self.frame, text='Frame ' + str(self.time),
                            org=(0, self.FRAME_HEIGHT - 5),
                            fontFace=cv2.FONT_HERSHEY_DUPLEX,
                            fontScale=.7, color=(255, 255, 255), thickness=1)
                # transition to S2 if faces have been present long enough
                if np.sum(self.history) >= self.THRESHOLD \
                        and self.time > self.lastCapture + self.HISTORY:
                    self.lastReady = self.time
                    self.state = 2
                # transition to S0 if no faces and enough time has passed; reset everything
                elif np.sum(self.history) < self.THRESHOLD \
                        and self.time > 4 * self.INIT_TIME:
                    self.time = 0
                    self.lastReady = 0
                    self.lastCapture = 0
                    self.state = 0

            # S2: Countdown
            elif self.state == 2:
                self.frame = frame
                cv2.putText(self.frame,
                            text=str(3 - int((self.time - self.lastReady) / 10)),
                            org=(int(self.FRAME_WIDTH / 2), int(self.FRAME_HEIGHT / 2)),
                            fontFace=cv2.FONT_HERSHEY_DUPLEX,
                            fontScale=3, color=(255, 255, 255), thickness=2)
                if self.time - self.lastReady >= 29:
                    self.state = 3

            # S3: Capture
            elif self.state == 3:
                #frame = cv2.bitwise_and(self.background1, self.background1, mask=bgmask) + cv2.bitwise_and(frame, frame, mask=fgmask)
                faces = self.face_cascade.detectMultiScale(gray, 1.3, 5)
                for (x, y, w, h) in faces:
                    # select a hat and fit it to the face
                    hat = random.choice(self.hats)
                    x_factor = 1.45
                    y_offset = 0.15
                    hat = cv2.resize(hat,
                                     (int(w * x_factor),
                                      int(w * x_factor / hat.shape[1] * hat.shape[0])),
                                     0, 0, cv2.INTER_CUBIC)
                    # get the fitted hat width/height, and crop to the bounding box
                    h_h, h_w = hat.shape[:2]
                    h_x1 = max(int(x - (h_w - w) / 2), 0)
                    h_x2 = min(int(x + w + (h_w - w) / 2), self.FRAME_WIDTH)
                    h_y1 = max(int(y + h * y_offset - h_h), 0)
                    h_y2 = min(int(y + h * y_offset), self.FRAME_HEIGHT)
                    # recalculate the hat width/height if cropped by the bounding box
                    h_w = h_x2 - h_x1
                    h_h = h_y2 - h_y1
                    # blend the hat into the frame using its alpha channel
                    for c in range(0, 3):
                        alpha = hat[-h_h:, :h_w, 3] / 255.0
                        color = hat[-h_h:, :h_w, c] * alpha
                        beta = frame[h_y1:h_y2, h_x1:h_x2, c] * (1.0 - alpha)
                        frame[h_y1:h_y2, h_x1:h_x2, c] = color + beta

                # save the image to disk
                filename = 'images/' + datetime.datetime.now().strftime(
                    "%Y%m%d-%H%M%S") + '.png'
                print('Saving image: ', filename)
                cv2.imwrite(filename, frame)
                self.frame = frame
                time.sleep(10)
                # transition back to S1
                self.lastCapture = self.time
                self.state = 1

            self.time = self.time + 1
            time.sleep(1 / 16)

    def get_frame(self):
        # wait until frames start to become available
        while self.frame is None:
            time.sleep(0)
        # We serve Motion JPEG, but OpenCV captures raw images, so each
        # frame must be JPEG-encoded before it is streamed.
        ret, jpeg = cv2.imencode('.jpg', self.frame)
        return jpeg.tobytes()
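# --- Usage sketch (assumption): get_frame() returns JPEG bytes, which
# suggests this class feeds a Motion-JPEG HTTP stream. The class itself
# never names a web framework; the Flask wiring below is one common way
# to serve it and is an assumption, not part of this file. It also
# assumes the models/, backgrounds/, and hats/ assets are present.
from flask import Flask, Response

app = Flask(__name__)
camera = VideoCamera(usePiCamera=False).start()

def mjpeg_generator(cam):
    # yield a multipart stream, one JPEG frame per part
    while True:
        jpg = cam.get_frame()
        yield (b'--frame\r\n'
               b'Content-Type: image/jpeg\r\n\r\n' + jpg + b'\r\n')

@app.route('/video_feed')
def video_feed():
    return Response(mjpeg_generator(camera),
                    mimetype='multipart/x-mixed-replace; boundary=frame')

if __name__ == '__main__':
    app.run(host='0.0.0.0', port=5000)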
gallery = None
kairos = Kairos(gallery)
source = 0  # camera index

# load a cascade file for detecting faces
#xmlfile = '../XML/haarcascades/haarcascade_frontalface_alt.xml'
xmlfile = "C:\\Prasad-IoT\\Code\\Python\\CV1-master\\XML\\haarcascades\\haarcascade_frontalface_alt.xml"
if not os.path.isfile(xmlfile):
    print("Could not find cascade training set")
    raise SystemExit(1)
face_cascade = cv2.CascadeClassifier(xmlfile)
fileName = "detected_face.jpg"

stream = VideoStream(src=source)
stream.start()
time.sleep(2.0)

frame = stream.read()
if frame is None:
    print("No camera!")
    raise SystemExit(1)

print("press any key to save file; ESC to quit..")
while True:
    frame = stream.read()
    frame = my_imutils.resize(frame, 640)
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    faces = face_cascade.detectMultiScale(gray, 1.1, 5)
    if len(faces) == 0:
        cv2.imshow("Face", frame)
        if (cv2.waitKey(20) & 0xff) == 27:  # ESC quits
            break
    # NOTE: the branch that handles detected faces (drawing boxes and
    # saving fileName) is missing from this snippet; see the sketch below.
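# --- Sketch (assumption): judging by fileName and the "press any key to
# save file" prompt above, the missing face-handling branch probably
# drew the detections, showed the frame, and saved it on a keypress.
# This helper is illustrative only; none of its names come from the
# original, and any Kairos enrollment step is unknown and omitted.
def show_and_maybe_save(frame, faces, fileName="detected_face.jpg"):
    for (x, y, w, h) in faces:
        cv2.rectangle(frame, (x, y), (x + w, y + h), (255, 0, 0), 2)
    cv2.imshow("Face", frame)
    key = cv2.waitKey(20) & 0xff
    if key == 27:       # ESC: tell the caller to quit
        return False
    if key != 255:      # any other key: save the annotated frame
        cv2.imwrite(fileName, frame)
        print("Saved", fileName)
    return True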
class PeekabooController(threading.Thread):
    def __init__(self):
        threading.Thread.__init__(self)
        self.running = False
        self.soundCtrlr = SoundController()
        self.video = None  # set by initVideo(); stays None if the camera is missing
        self.initVideo()
        self.whereIsEveryoneFlag = False
        self.iSeeSomeoneFlag = False
        self.failure = False
        self.record = False

    def initVideo(self):
        try:
            self.video = VideoStream(src=0)
        except Exception:
            print("video stream not found")
        if self.video is None:
            print("video stream was not initialized")
            return
        try:
            self.video.start()
        except Exception:
            print("video failed to start")
        # construct the face detector and allow the camera to warm up
        try:
            face = "cascades/haarcascade_frontalface_default.xml"
            self.faceDetector = FaceDetector(face)
            sleep(0.1)
        except Exception:
            print("face detector init failed")
        # choose the XVID codec
        try:
            self.fourcc = cv2.VideoWriter_fourcc(*'XVID')
        except Exception:
            print("video writer not found")
        sleep(0.1)

    # called by the thread
    def run(self):
        self._start()

    def toggleRecord(self):
        if self.record:
            self.record = False
            print("video recording stopped")
        else:
            self.record = True
            print("video recording started")

    # start looking
    def _start(self):
        self.running = True
        previousX = 0
        direction = "NONE"
        self.writer = None
        (h, w) = (None, None)
        # run until the controller is stopped
        while True:
            # capture frames from the camera
            if self.running:
                frame = self.video.read()
                if frame is None:
                    print("ERROR: cannot read frame from video, stopping Peekaboo. "
                          "If you want Peekaboo to work, connect camera and restart R2.py")
                    self.failure = True
                    self.stop()
                    break

                # resize the frame and convert it to grayscale
                frame = self.resizeImage(frame, width=500)
                gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

                # detect faces in the image, then clone the frame so we can draw on it
                faceRects = self.faceDetector.detect(gray, scaleFactor=1.1,
                                                     minNeighbors=5, minSize=(30, 30))
                frameClone = frame.copy()

                # where is everyone?
                if len(faceRects) <= 0:
                    cv2.putText(frameClone, "WHERE IS EVERYONE?", (20, 20),
                                cv2.FONT_HERSHEY_SIMPLEX, 0.55, (0, 255, 0), 2)
                    self.whereIsEveryone()
                else:
                    # peekaboo! R2 is happy to see someone
                    self.iSeeSomeone()
                    # loop over the face bounding boxes and draw them
                    for (fX, fY, fW, fH) in faceRects:
                        cv2.rectangle(frameClone, (fX, fY),
                                      (fX + fW, fY + fH), (255, 0, 0), 2)
                        # only turn the head if the face drifts far from center
                        if (previousX - 10) < fX < (previousX + 10):
                            direction = "NONE"
                        elif fX <= (previousX - 10):
                            direction = "LEFT"
                        else:  # fX >= previousX + 10
                            direction = "RIGHT"
                        # turn R2's head to keep the face centered
                        # if direction == "LEFT":
                        #     self.mainCtrlr.rightThumbX(self.mainCntlr, self.mainCtrlr.xValueRight - 10)
                        # elif direction == "RIGHT":
                        #     self.mainCtrlr.rightThumbX(self.mainCntlr, self.mainCtrlr.xValueRight + 10)
                        cv2.putText(frameClone, "PEEKABOO!", (fX, fY - 10),
                                    cv2.FONT_HERSHEY_SIMPLEX, 0.55, (0, 255, 0), 2)
                        if direction != "NONE":
                            cv2.putText(frameClone, "<Turn {}>".format(direction),
                                        (fX, fY - 0),
                                        cv2.FONT_HERSHEY_SIMPLEX, 0.55, (0, 255, 0), 2)
                        previousX = fX

                # show our detected faces, then clear the frame in preparation for the next frame
                # NOTE: comment this out if you don't want the video stream window to show in terminal
                cv2.imshow("Face", frameClone)

                # write video to file
                if self.record:
                    if self.writer is None:
                        # store the image dimensions and initialize the video writer
                        (h, w) = frameClone.shape[:2]
                        self.writer = cv2.VideoWriter("r2_recording.avi",
                                                      self.fourcc, 4, (w, h), True)
                    # construct the zeros array and copy the frame into it
                    output = np.zeros((h, w, 3), dtype="uint8")
                    output[0:h, 0:w] = frameClone
                    self.writer.write(output)

                # NOTE: comment this out if you don't want the video stream window to show in terminal
                # if the 'q' key is pressed, stop the loop
                if cv2.waitKey(1) & 0xFF == ord("q"):
                    print("keypress 'q', stopping Peekaboo")
                    self.stop()
                    break
            else:
                sleep(0.1)  # paused; sleep briefly instead of busy-waiting

    def resume(self):
        print("starting PeekabooController")
        if self.failure:
            print("ERROR: the video had failed to load. If you want Peekaboo to work, "
                  "you will need to connect the camera and restart R2.py")
            return
        self.running = True

    def stop(self):
        print("stopping PeekabooController")
        self.running = False
        if self.writer is not None:
            self.writer.release()

    def stopVideo(self):
        if self.video is not None:
            self.video.stop()
        try:
            cv2.destroyAllWindows()
        except Exception:
            pass

    def whereIsEveryone(self):
        if not self.whereIsEveryoneFlag:
            SoundController.worried(self.soundCtrlr)
            self.whereIsEveryoneFlag = True
            self.iSeeSomeoneFlag = False

    def iSeeSomeone(self):
        if not self.iSeeSomeoneFlag:
            SoundController.whistle(self.soundCtrlr)
            self.whereIsEveryoneFlag = False
            self.iSeeSomeoneFlag = True

    def resizeImage(self, image, width=None, height=None, inter=cv2.INTER_AREA):
        # initialize the dimensions of the image to be resized and grab the image size
        dim = None
        (h, w) = image.shape[:2]

        # if both the width and height are None, return the original image
        if width is None and height is None:
            return image

        if width is None:
            # calculate the ratio of the height and construct the dimensions
            r = height / float(h)
            dim = (int(w * r), height)
        else:
            # calculate the ratio of the width and construct the dimensions
            r = width / float(w)
            dim = (width, int(h * r))

        # resize the image and return it
        return cv2.resize(image, dim, interpolation=inter)
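# --- Usage sketch (assumption): PeekabooController subclasses Thread,
# and its error messages point at an R2.py caller. Something like the
# following would exercise it; the 30-second recording window is
# illustrative only.
from time import sleep

controller = PeekabooController()
controller.start()           # Thread.start() -> run() -> the _start() loop
controller.toggleRecord()    # begin writing r2_recording.avi
sleep(30)
controller.toggleRecord()    # stop appending frames
controller.stop()            # end the loop and release the writer
controller.stopVideo()       # release the camera and close windows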
# -*- coding: utf-8 -*-
'''
### Example 1: open a USB camera
'''
import cv2
from videostream import VideoStream
from imutils.video import FPS

camera_id = 0
vs = VideoStream(camera_id)
vs.start()
fps = FPS().start()

while True:
    # this local VideoStream returns an (ok, frame) pair
    res, frame = vs.read()
    fps.update()
    if not res:
        print("Camera Read Failed")
        break
    cv2.imshow("test", frame)
    key = cv2.waitKey(1) & 0xFF
    if key == ord('q'):
        break

fps.stop()
print("[INFO] elapsed time: {:.2f}".format(fps.elapsed()))
print("[INFO] approx. FPS: {:.2f}".format(fps.fps()))