Example #1
def blocking_video_test():
    # grab a pointer to the video stream and initialize the FPS counter
    print("[INFO] sampling frames from webcam...")
    stream = cv2.VideoCapture(SRC)
    fps = FPS().start()

    # loop over some frames
    while fps._numFrames < 1000:
        # grab the frame from the stream and resize it to have a maximum
        # width of 400 pixels
        (grabbed, frame) = stream.read()
        frame = imutils.resize(frame, width=VID_WIDTH)

        # check to see if the frame should be displayed to our screen
        if DISPLAY:
            cv2.imshow("Frame", frame)
            key = cv2.waitKey(1) & 0xFF

        # update the FPS counter
        fps.update()

    # stop the timer and display FPS information
    fps.stop()
    print("[INFO] elasped time: {:.2f}".format(fps.elapsed()))
    print("[INFO] approx. FPS: {:.2f}".format(fps.fps()))

    # do a bit of cleanup
    stream.release()
    cv2.destroyAllWindows()
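
Every snippet on this page drives the same small FPS helper, apparently imutils.video.FPS, through its start/stop/update/elapsed/fps methods. For reference, a minimal sketch of such a counter (the class name and internals below are illustrative, not the library's actual source):

import datetime

class SimpleFPS:
    # Illustrative FPS counter with the interface the examples rely on.
    def __init__(self):
        self._start = None   # set by start()
        self._end = None     # set by stop()
        self._numFrames = 0  # incremented by update()

    def start(self):
        self._start = datetime.datetime.now()
        return self

    def stop(self):
        self._end = datetime.datetime.now()

    def update(self):
        # call once per processed frame
        self._numFrames += 1

    def elapsed(self):
        # seconds between start() and stop()
        return (self._end - self._start).total_seconds()

    def fps(self):
        # average frames per second over the measured interval
        return self._numFrames / self.elapsed()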
Example #2
def from_stream():
    # start the FPS counter and a threaded webcam stream
    fps = FPS().start()
    cam = WebcamVideoStream().start()

    max_frames = 50
    i = 0

    while True:
        frame = cam.read()

        # after max_frames frames, report timing and stop
        if i > max_frames:
            fps.stop()
            print(fps.elapsed())
            print(fps.fps())
            break

        i += 1

        testcone(frame, stream=True)
        fps.update()
        cv2.imshow('', frame)
        cv2.waitKey(1)
Example #3
def threaded_video_test():
    # create a *threaded* video stream, allow the camera sensor to warm up,
    # and start the FPS counter
    print("[INFO] sampling THREADED frames from webcam...")
    vs = WebcamVideoStream(src=SRC).start()
    fps = FPS().start()

    # loop over some frames...this time using the threaded stream
    while fps._numFrames < NUM_FRAMES:
        # grab the frame from the threaded video stream and resize it
        # to have a maximum width of 400 pixels
        frame = vs.read()
        frame = imutils.resize(frame, width=VID_WIDTH)

        # check to see if the frame should be displayed to our screen
        if DISPLAY:
            cv2.imshow("Frame", frame)
            key = cv2.waitKey(1) & 0xFF

        # update the FPS counter
        fps.update()

    # stop the timer and display FPS information
    fps.stop()
    print("[INFO] elasped time: {:.2f}".format(fps.elapsed()))
    print("[INFO] approx. FPS: {:.2f}".format(fps.fps()))

    # do a bit of cleanup
    cv2.destroyAllWindows()
    vs.stop()
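
The speedup in Example #3 comes entirely from reading frames on a background thread so the main loop never blocks on camera I/O. A minimal sketch of a WebcamVideoStream-style grabber, assuming only the start/read/stop interface used above (imutils ships a similar class; this is an illustration, not its source):

import cv2
from threading import Thread

class ThreadedVideoStream:
    def __init__(self, src=0):
        self.stream = cv2.VideoCapture(src)
        (self.grabbed, self.frame) = self.stream.read()
        self.stopped = False

    def start(self):
        # daemon thread keeps polling the camera so read() never blocks
        Thread(target=self._update, daemon=True).start()
        return self

    def _update(self):
        while not self.stopped:
            (self.grabbed, self.frame) = self.stream.read()
        self.stream.release()

    def read(self):
        # return the most recently grabbed frame
        return self.frame

    def stop(self):
        self.stopped = True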
Example #4
    def test01():
        """can we instantiate? """
        fps = FPS().start()
        pacer = Pacer(DESIRED_FPS).start()

        while fps.n_frames < N_TEST_FRAMES:
            print(datetime.datetime.now())
            fps.update()
            pacer.update()

        fps.stop()
        print("[INFO] elasped time: {:.2f}".format(fps.elapsed()))
        print("[INFO] approx. FPS: {:.2f}".format(fps.fps()))
        print("[INFO] n_frames: %i" % fps.n_frames)
Example #5
        k = ord('h')
    elif cv2.getWindowProperty(window_title, 0) < 0:  # window closed
        break

    if k >= 0:
        k = chr(k).lower()
        if k == '+':
            tolerance += 1
            caption(["Tolerance: %s" % tolerance], 1)
        elif k == '-':
            tolerance -= 1
            caption(["Tolerance: %s" % tolerance], 1)
        elif k == 'c':  # capture
            countdown = int(3 * fps.fps())
        elif k == 'e':
            caption([fps.elapsed()])
        elif k == 'h':  # help
            for i, line in enumerate(__doc__.splitlines()):
                caption([line, 2, 55 + i * 14], 5)
        elif k == 'r':  # record raw video
            if cap.video:
                cap.record_stop()
            else:
                path = 'cam_' + time.strftime("%Y-%m-%d_%H.%M.%S") + '.avi'
                caption(["Recording to %s @ %s FPS" % (path, cap.fps)])
                cap.record(path)
        elif k == 'v':  # video
            if video:
                video.release()
                video = None
            else:
Example #6
            #cv.circle(frame,(int(xe[n]),int(ye[n])),int(uncertainty), (255, 255, 0),1)
            cv.circle(frame, (int(xe[n]), int(ye[n])), 3, (0, 255, 255), -1)
        # Draw the predicted path
        for n in range(len(xp)):
            uncertaintyP = (xpu[n] + ypu[n]) / 2
            # Draw prediction (circles), with uncertainty as radius
            cv.circle(frame, (int(xp[n]), int(yp[n])), int(uncertaintyP),
                      (0, 0, 255))
            cv.circle(frame, (int(xp[n]), int(yp[n])), 3, (255, 255, 255), -1)

    ########## DISPLAY ##########
    # check to see if the frame should be displayed to our screen
    cv.imshow('Frame', frame)
    cv.imshow('Vision', mask)
    out.write(frame)

    # update the FPS counter
    streamfps.update()
    lastframe = frame_read

################### CLEARING UP ###################
# stop the timer and display information

streamfps.stop()
print("[INFO] elasped time: {:.2f}".format(streamfps.elapsed()))
print("[INFO] approx. FPS: {:.2f}".format(streamfps.fps_tot()))
print("Measurements: " + str(listPoints))

out.release()
stream.release()
cv.destroyAllWindows()
Example #7
    def run(self):
        #Activate Detector module
        self.detector = VideoInferencePage()

        # Create a VideoCapture object and read from input file
        # If the input is the camera, pass 0 instead of the video file name
        cap = cv2.VideoCapture(self.video_address)
        fps = None
        new_detected=[]
        # Check if camera opened successfully
        if not cap.isOpened():
            print("Error opening video stream or file")
         
        # Read until video is completed
        while cap.isOpened() or shouldrun:
            # Capture frame-by-frame
            ret, frame = cap.read()
            global baseimageupd
            if ret:
                if not self.detector.isready():
                    continue
                if not fps:
                    fps = FPS().start()
                elif fps.elapsed()>60:
                    fps = FPS().start()


                if state=="take_off" and float(var_altitude) >= 10 and baseimageupd==False:
                    #print ("hahah2")
                    object_image = frame
                    baseimageupd = True
                    #cv2.imwrite("/media/ibrahim/Data/faster-rcnn/tools/img/baseimage.jpg",object_image)
                    image = self.image_resize(object_image, height=300)
                    retval, buffer = cv2.imencode('.png', image)
                    #print ("hahah22")
                    image_base64 = base64.b64encode(buffer)
                    self.newdetected.emit(image_base64)
                    #print ("hahah23")

                #feed the detector and wait for true result
                self.detector.send_frame(frame)
                result=self.detector.get_result()
                
                #Uncomment this if want to bypass the detector
                #result=cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)

                if not isinstance(result, np.ndarray):
                    continue

                # Display the resulting frame
                convertToQtFormat = QtGui.QImage(result.data, result.shape[1], result.shape[0], QtGui.QImage.Format_RGB888)
                p = convertToQtFormat.scaled(1260, 720, QtCore.Qt.KeepAspectRatio)
                self.newimage.emit(p)
                
                #self.emit(QtCore.SIGNAL('newFPS(int)'), int(fps.fps()))

                passobject = self.detector.get_passingobject()
                #passobject = []
                if len(new_detected)<len(passobject):
                    for objectID in passobject.keys():
                        if not objectID in new_detected:
                            new_detected.append(objectID)
                            #image parsing to base64
                            #print (passobject[objectID]['image'])

                            try:
                                image = self.image_resize(passobject[objectID]['image'], height=300)
                                label = (passobject[objectID]['label'])
                                retval, buffer = cv2.imencode('.png', image)
                                image_base64 = base64.b64encode(buffer)
                                self.newinv.emit(image_base64, label)
                            except Exception as e:
                                print ("\n*************\nMissing Image\n***************\n")
                                continue

                            '''    
                            if passobject[objectID]['image'] != []:
                                image = self.image_resize(passobject[objectID]['image'], height=300)
                                label = (passobject[objectID]['label'])
                                retval, buffer = cv2.imencode('.png', image)
                                image_base64 = base64.b64encode(buffer)
                                self.newinv.emit(image_base64, label)
                            else:
                                print ("\n*************\nMissing Image\n***************\n")
                                continue
                            '''

                fps.update()
                self.new_fps.emit(int(fps.fps()))
                if self.detector.isobjectsupdated:
                    objects = self.detector.get_objects()

                    
                # Press Q on keyboard to  exit
                if not shouldrun:
                    fps.stop()
                    self.detector.exit_detection()
                    break
         
            # restart stream
            else: 
                print ("ret is false")
                if fps:
                    fps.stop()
                time.sleep(3)
                cap.release()
                cap = cv2.VideoCapture(self.video_address)
                if cap.isOpened() and fps:
                    fps.start()
         
        # When everything done, release the video capture object
        cap.release()
         
        # Closes all the frames
        cv2.destroyAllWindows()
Example #8
    # hence frame1 is very similar to frame2 and no difference is seen
    # thus a sleep is introduced to consume time between frame grabs
    # this can be replaced by any required computation
    time.sleep(0.02)

    # draw rectangle around each detected object
    for obj in objects:
        cv2.rectangle(display, (obj[0], obj[1]), (obj[2], obj[3]), (0, 255, 0),
                      1)

    # display result
    cv2.imshow('Video', display)

    # get 2nd frame after some time has passed
    ret2, frame2 = capture.read()

    # update FPS data
    fps.update()

    # press q to exit loop
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

# stop recording FPS data and log final values
fps.stop()
print("[INFO] elapsed time: {:.2f}".format(fps.elapsed()))
print("[INFO] approx. FPS: {:.2f}".format(fps.fps()))

# release video stream and close any opened cv2 windows
capture.release()
cv2.destroyAllWindows()
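
Example #8 assumes an `objects` list of (x1, y1, x2, y2) boxes computed elsewhere from the difference between the two frame grabs. One plausible way to produce such boxes by frame differencing (a sketch; the threshold, minimum area, and the OpenCV 4 findContours signature are assumptions):

import cv2

def diff_boxes(frame1, frame2, min_area=500):
    # difference two frames, threshold, and box the moving blobs
    g1 = cv2.cvtColor(frame1, cv2.COLOR_BGR2GRAY)
    g2 = cv2.cvtColor(frame2, cv2.COLOR_BGR2GRAY)
    delta = cv2.absdiff(g1, g2)
    _, thresh = cv2.threshold(delta, 25, 255, cv2.THRESH_BINARY)
    thresh = cv2.dilate(thresh, None, iterations=2)
    contours, _ = cv2.findContours(thresh, cv2.RETR_EXTERNAL,
                                   cv2.CHAIN_APPROX_SIMPLE)
    boxes = []
    for c in contours:
        if cv2.contourArea(c) < min_area:
            continue  # skip small noise blobs
        x, y, w, h = cv2.boundingRect(c)
        boxes.append((x, y, x + w, y + h))
    return boxes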
Example #9
fps.start()

timeOut = 0

while frames < 300:
  #print "Main Thread Queue - > ",frame_queue.qsize() 
  with queue_lock:
    if not frame_queue.empty():
      timeOut = 0
      frame = frame_queue.get()
      sal = MPSaliency(frame)
      sal_map = sal.get_saliency_map()
      #sal_frame = (sal_map*255).round().astype(np.uint8)
      #frame = cv2.cvtColor(sal_frame, cv2.COLOR_GRAY2BGR)
      #out.write(frame)
      frames = frames + 1
      fps.update()
    else:
      pass
  

fps.stop()
stream.terminate()
cv2.destroyAllWindows()
#out.release()

print "FPS :: ", fps.fps()
print "DUR :: ", fps.elapsed()
Example #10
	# grab the frame from the threaded video stream and resize it
	# to have a maximum width of 400 pixels
	frame = vs.read()
	frame = imutils.resize(frame, width=400)
	print(vs.readxOffset())
	if (vs.readxOffset() > 30):
		ser.write(" rt > ".encode("utf-8"))
	elif ((vs.readxOffset() <= 30) and (vs.readxOffset() >= -30)):
		ser.write(" st > ".encode("utf-8"))
	else:
		ser.write(" lt > ".encode("utf-8"))


 
	# check to see if the frame should be displayed to our screen
	if args["display"] > 0:
		cv2.imshow("Frame", frame)
		key = cv2.waitKey(1) & 0xFF
 
	# update the FPS counter
	fps.update()
 
# stop the timer and display FPS information
fps.stop()
print("[INFO] elasped time: {:.2f}".format(fps.elapsed()))
print("[INFO] approx. FPS: {:.2f}".format(fps.fps()))
 
# do a bit of cleanup
cv2.destroyAllWindows()
vs.stop()
Example #11
def recognize_video(detector,
                    embedder: Embedder,
                    recognizer: Recognizer,
                    detector_params='default',
                    source=0):

    # initialize the video stream
    print('Starting video stream...')
    vs = VideoStream(src=source).start()

    if not is_detector(detector):
        raise TypeError('Incorrect type of detector')

    # let the camera warm up
    time.sleep(0.5)

    # start the FPS throughput estimator
    fps = FPS().start()

    # loop over the frames from the video
    while True:

        frame = vs.read()

        if detector_params == 'default':
            faces_roi, boxes = detector.calc_image(frame, return_mode='both')

        elif type(detector) == DetectorSSD:
            confidence = detector_params[0]
            faces_roi, boxes = detector.calc_image(frame,
                                                   confidence=confidence,
                                                   return_mode='both')

        elif type(detector) == DetectorVJ or type(detector) == DetectorLBP:
            [scale_factor, min_neighbors] = detector_params
            faces_roi, boxes = detector.calc_image(frame,
                                                   scale_factor=scale_factor,
                                                   min_neighbors=min_neighbors,
                                                   return_mode='both')

        elif type(detector) == DetectorHOG or type(detector) == DetectorMMOD:
            upsampling_times = detector_params[0]
            faces_roi, boxes = detector.calc_image(
                frame, upsampling_times=upsampling_times, return_mode='both')

        for i in range(len(faces_roi)):

            embeddings = embedder.calc_face(faces_roi[i])
            name = recognizer.recognize(embeddings)
            start_x, start_y, end_x, end_y = boxes[i]

            text = '{}'.format(name)
            y = start_y - 10 if start_y - 10 > 10 else start_y + 10
            cv2.rectangle(frame, (start_x, start_y), (end_x, end_y),
                          (0, 0, 255), 2)
            cv2.putText(frame, text, (start_x, y), cv2.FONT_HERSHEY_SIMPLEX,
                        0.45, (0, 0, 255), 2)

        # update the FPS counter
        fps.update()

        # show the output frame
        cv2.imshow('Frame', frame)
        key = cv2.waitKey(1) & 0xFF

        # quit when 'q' is pressed
        if key == ord("q"):
            break

    fps.stop()
    print('Elapsed time: {:.2f}'.format(fps.elapsed()))
    print('Approx. FPS: {:.2f}'.format(fps.fps()))

    cv2.destroyAllWindows()
    vs.stop()
Example #12
class Interface(QWidget):
    def __init__(self, path, config):
        QWidget.__init__(self)

        self.path = path
        self.config = config

        self.setWindowTitle('AR4maps')
        self.move(0, 0)
        self.video_size = QSize(VIDEO.WIDTH, VIDEO.HEIGHT)
        self.setup_ui()

        self.markerImg = cv.imread(self.path + self.config['target'])
        # cv.imshow("target",targetImg)
        self._cam = Camera().start()
        self._track = Tracking(self.markerImg)
        self._rendering = Rendering(self.markerImg, self.config['coords'])
        self._fps = FPS()

        self.setup_render()

    def setup_ui(self):
        self.main_layout = QHBoxLayout()
        self.main_layout.setContentsMargins(0, 0, 0, 0)

        ### CENTER LAYOUT
        self.center_layout = QVBoxLayout()
        self.main_layout.addLayout(self.center_layout)

        # AR
        self.pixmap = QLabel()
        self.pixmap.setFixedSize(self.video_size)
        self.pixmap.mousePressEvent = self.click_pixmap
        self.center_layout.addWidget(self.pixmap)

        ## SOUTH LAYOUT
        self.south_layout = QVBoxLayout()
        self.south_layout.setContentsMargins(20, 10, 20, 20)
        self.center_layout.addLayout(self.south_layout)
        # Feature Description
        #   Title
        self.feature_title = QLabel('<br/>')
        self.feature_title.setFont(QFont('Helvetica', 18))
        self.south_layout.addWidget(self.feature_title)
        #   Description
        self.feature_description = QLabel('<br/><br/><br/><br/><br/>')
        self.feature_description.setWordWrap(True)
        self.south_layout.addWidget(self.feature_description)
        self.south_layout.addStretch()
        #   Buttons
        self.south_btns_layout = QHBoxLayout()
        self.south_layout.addLayout(self.south_btns_layout)
        self.feature_website_btn = QPushButton('Website')
        self.feature_website_btn.hide()
        self.south_btns_layout.addWidget(self.feature_website_btn)
        self.feature_photos_btn = QPushButton('Photos')
        self.feature_photos_btn.hide()
        self.south_btns_layout.addWidget(self.feature_photos_btn)
        self.feature_video_btn = QPushButton('Video')
        self.feature_video_btn.hide()
        self.south_btns_layout.addWidget(self.feature_video_btn)
        self.south_btns_layout.addStretch()

        ### EAST LAYOUT
        self.east_layout = QVBoxLayout()
        self.east_layout.setContentsMargins(0, 10, 20, 20)
        self.main_layout.addLayout(self.east_layout)
        # Logo
        self.logo = QSvgWidget(self.path + self.config['logo'])
        self.logo.setMinimumSize(252, 129)
        self.logo.setMaximumSize(252, 129)
        self.east_layout.addWidget(self.logo)
        # Buttons
        for layer in self.config['layers']:
            btn = QPushButton(layer['name'])
            btn.clicked.connect(lambda state, x=layer: self.load_layer(x))
            self.east_layout.addWidget(btn)
        # Layer Description
        sep = QFrame()
        sep.setFrameShape(QFrame.HLine)
        self.east_layout.addWidget(sep)
        self.layer_title = QLabel('Select a layer...')
        self.layer_title.setFont(QFont('Helvetica', 18))
        self.east_layout.addWidget(self.layer_title)
        self.layer_description = QLabel('')
        self.layer_description.setWordWrap(True)
        self.east_layout.addWidget(self.layer_description)
        # FPS
        self.east_layout.addStretch()
        self.fps_label = QLabel()
        self.fps_label.setAlignment(Qt.AlignRight)
        self.east_layout.addWidget(self.fps_label)

        self.setLayout(self.main_layout)

        self.web = QWebEngineView()
        self.web.resize(VIDEO.WIDTH, VIDEO.HEIGHT)
        self.web.move(0, 0)
        self.web.hide()

    def load_layer(self, layer):
        self.layer_title.setText(layer['name'])
        self.layer_description.setText(layer['description'])
        self.feature_title.setText('Select an item on the screen...')
        self.feature_description.setText('')
        self._rendering.setHighlighted(None)
        self.feature_website_btn.hide()
        self.feature_photos_btn.hide()
        self.feature_video_btn.hide()
        with open(self.path + layer['file']) as json_file:
            data = json.load(json_file)
            self._rendering.setGeoJSON(data['features'])

    def click_pixmap(self, event):
        pos = (event.x(), event.y())
        feature = self._rendering.getClickedFeature(pos)
        self.feature_website_btn.hide()
        self.feature_photos_btn.hide()
        self.feature_video_btn.hide()
        if feature is not None:
            props = feature['properties']
            self.feature_title.setText(props['title'] if 'title' in
                                       props else 'NO TITLE')
            self.feature_description.setText(
                props['description'] if 'description' in props else '')
            self._rendering.setHighlighted(feature['uuid'])
            if 'website' in props:
                self.feature_website_btn.show()
                try:
                    self.feature_website_btn.clicked.disconnect()
                except Exception:
                    pass
                self.feature_website_btn.clicked.connect(
                    lambda state, x=props['website']: webbrowser.open(x))
            if 'photos' in props:
                self.feature_photos_btn.show()
                try:
                    self.feature_photos_btn.clicked.disconnect()
                except Exception:
                    pass
                self.feature_photos_btn.clicked.connect(
                    lambda state, x=props['photos']: self.display_photos(x))
            if 'video' in props:
                self.feature_video_btn.show()
                try:
                    self.feature_video_btn.clicked.disconnect()
                except Exception:
                    pass
                self.feature_video_btn.clicked.connect(
                    lambda state, x=props['video']: self.display_video(x))
        else:
            self.feature_title.setText('')
            self.feature_description.setText('')
            self._rendering.setHighlighted(None)

    def display_photos(self, photos):
        photos = list(map(lambda x: self.path + x, photos))
        self.slideshow = SlideShow(photos)
        self.slideshow.show()

    def display_video(self, url):
        self.web.load(QUrl(url))
        self.web.show()

    def setup_render(self):
        self._fps.start()
        self.timer = QTimer()
        self.timer.timeout.connect(self.render)
        self.timer.start(int(1000 / VIDEO.FPS))

    def render(self):
        _, frameImg = self._cam.read()
        frameImg = cv.cvtColor(frameImg, cv.COLOR_BGR2RGB)
        H = self._track.update(frameImg)
        self._rendering.update(H, frameImg)
        if H is not None:
            # self._rendering.drawBorder()
            self._rendering.renderGeoJSON()
            # self._rendering.renderObj()

        image = QImage(frameImg, frameImg.shape[1], frameImg.shape[0],
                       frameImg.strides[0], QImage.Format_RGB888)
        self.pixmap.setPixmap(QPixmap.fromImage(image))
        self.fps_label.setText("{:.2f} FPS".format(self._fps.update()))

    def closeEvent(self, event):
        self._cam.stop()
        self._fps.stop()
        print("\033[0;30;102m[INFO]\033[0m {:.2f} seconds".format(
            self._fps.elapsed()))
        print("\033[0;30;102m[INFO]\033[0m {:.2f} FPS".format(self._fps.fps()))
Example #13
        # Convert to grayscale, apply a Gaussian filter, and binarize
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        blur = cv2.GaussianBlur(gray, (args['gaussian'], args['gaussian']), 0)
        T, bin = cv2.threshold(blur, args['threshold'], 255, cv2.THRESH_BINARY)

        # Sum the white pixel values in the image (each white pixel is 255)
        soma = bin.sum()
        # Compute the percentage of white pixels in the image
        area = bin.shape[0] * bin.shape[1] * 255
        percent = soma * 100 / area

        # Check whether the white percentage exceeds the limit
        if percent > args['limite']:

            write(bin, 'Plataforma Localizada', pos=(130, 40))

        # Write the white percentage on the image and show the images
        write(bin, str(round(percent)))
        cv2.imshow('Gray', bin)
        cv2.imshow('Original', frame)
        fps.update()

        if cv2.waitKey(1) == ord('q'):

            fps.stop()
            camera.stop()
            print(fps.elapsed())
            print(fps.fps())
            cv2.destroyAllWindows()
            break
Example #14
                                             crop=False)
            embedder.setInput(faceBlob)
            vec = embedder.forward()
            preds = recognizer.predict_proba(vec)[0]
            j = np.argmax(preds)
            proba = preds[j]
            name = le.classes_[j]
            text = "{}: {:.2f}%".format(name, proba * 100)
            y = startY - 10 if startY - 10 > 10 else startY + 10
            # cv2.rectangle(frame, (startX, startY), (endX, endY), (255, 0, 0), 2)
            # cv2.putText(frame, text, (startX, y), cv2.FONT_HERSHEY_SIMPLEX, 0.45, (255, 0, 0), 2)

    fps.update()
    for (frameW, nameW) in zip(frames, ("Left Eye", "Right Eye")):
        cv2.imshow(nameW, frameW)
    key = cv2.waitKey(1) & 0xFF
    if key == 27:
        break

    if name == 'vladimir' and proba > 0.8 and block:
        Thread(target=voice, name='voice').start()
        block = False

fps.stop()
print("[INFO] пройденное время: {:.2f}".format(fps.elapsed()))
print("[INFO] приблизительно FPS: {:.2f}".format(fps.fps()))

cv2.destroyAllWindows()
left_eye.stop()
right_eye.stop()
Example #15
    def run(self):
        #Activate Detector module
        self.detector = VideoInferencePage()

        # Create a VideoCapture object and read from input file
        # If the input is the camera, pass 0 instead of the video file name
        cap = cv2.VideoCapture(self.video_address)
        fps = None
        new_detected=[]
        # Check if camera opened successfully
        if not cap.isOpened():
            print("Error opening video stream or file")
         
        # Read until video is completed
        while cap.isOpened() or shouldrun:
            # Capture frame-by-frame
            ret, frame = cap.read()
            if ret:
                if not self.detector.isready():
                    continue
                if not fps:
                    fps = FPS().start()
                elif fps.elapsed()>60:
                    fps = FPS().start()
                #feed the detector and wait for true result
                self.detector.send_frame(frame)
                result=self.detector.get_result()
                
                #Uncomment this if want to bypass the detector
                #result=cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)

                if not isinstance(result, np.ndarray):
                    continue

                # Display the resulting frame
                convertToQtFormat = QtGui.QImage(result.data, result.shape[1], result.shape[0], QtGui.QImage.Format_RGB888)
                p = convertToQtFormat.scaled(1260, 720, QtCore.Qt.KeepAspectRatio)
                self.emit(QtCore.SIGNAL('newImage(QImage)'), p)
                fps.update()
                self.emit(QtCore.SIGNAL('newFPS(int)'), int(fps.fps()))

                passobject = self.detector.get_passingobject()
                passobject = []
                if len(new_detected)<len(passobject):
                    for objectID in passobject.keys():
                        if not objectID in new_detected:
                            new_detected.append(objectID)
                            #image parsing to base64
                            image = self.image_resize(passobject[objectID]['image'], height=300)
                            retval, buffer = cv2.imencode('.png', image)
                            image_base64 = base64.b64encode(buffer)
                            self.newdetected.emit(image_base64)

         
                # Press Q on keyboard to  exit
                if not shouldrun:
                    fps.stop()
                    self.detector.exit_detection()
                    break
         
            # restart stream
            else: 
                print "ret is false"
                if fps:
                    fps.stop()
                time.sleep(3)
                cap.release()
                cap = cv2.VideoCapture(self.video_address)
                if cap.isOpened() and fps:
                    fps.start()
         
        # When everything done, release the video capture object
        cap.release()
         
        # Closes all the frames
        cv2.destroyAllWindows()
Example #16
        os.path.join(dst_dir, filename), fourcc, 30.0,
        (int(config['stream']['width']), int(config['stream']['height'])))

    # Start streaming
    vs.start()
    fps = FPS()
    fps.start()

    # TODO: Apply object dectection and face recognition here
    while vs.size() or not vs.stopped:
        frame = vs.read()
        out.write(frame)
        cv2.imshow("Streaming", frame)

        key = cv2.waitKey(1) & 0xFF
        if key == ord('q'):
            vs.stop()

        fps.update()

    # Stop streaming
    fps.stop()
    vs.stop()

    # Release Resource
    out.release()
    cv2.destroyAllWindows()

    print("Elasped time: %s" % str(fps.elapsed()))
    print("Approximate FPS: %s" % str(fps.fps()))