Example #1
# Assumes OpenCV and the FPS helper from imutils.video, which matches the
# start()/stop()/update()/fps() calls below.
import cv2
from imutils.video import FPS


class VideoPlayer:
    def __init__(self, source=0, dest=None):
        self._source = source
        self._dest = dest
        self._frame = None
        self._playing = False
        self._fps = FPS()

    def start(self):
        self._cap = cv2.VideoCapture(self._source)
        self._cap.set(cv2.CAP_PROP_FRAME_WIDTH, 640)
        self._cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 640)
        if self._dest is not None:
            width = int(self._cap.get(cv2.CAP_PROP_FRAME_WIDTH) + 0.5)
            height = int(self._cap.get(cv2.CAP_PROP_FRAME_HEIGHT) + 0.5)
            fps = int(self._cap.get(cv2.CAP_PROP_FPS))
            fourcc = cv2.VideoWriter_fourcc(*'mp4v')
            self._out = cv2.VideoWriter(self._dest, fourcc, fps,
                                        (width, height))
        self._playing = True
        self._fps.start()
        while self._playing:
            self.read_frame()
            self.process_frame()
            self.write_frame()
        self._fps.stop()
        print(self._fps.fps())

    def stop(self):
        self._playing = False
        self._cap.release()
        if self._dest is not None:  # writer only exists when a dest was given
            self._out.release()
        cv2.destroyAllWindows()

    def read_frame(self):
        ret, frame = self._cap.read()
        if not ret:  # end of stream or read failure
            self._playing = False
            return
        self._frame = frame
        self._fps.update()

    def process_frame(self):
        pass

    def write_frame(self):
        if self._frame is None:
            return
        self.show_frame()
        if self._dest is not None:
            self.save_frame()

    def show_frame(self):
        cv2.imshow('Video', self._frame)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            self.stop()

    def save_frame(self):
        self._out.write(self._frame)
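
Typical use of this player (a sketch; the source index and output path are illustrative, and show_frame already handles the 'q' hotkey):

player = VideoPlayer(source=0, dest="out.mp4")
player.start()  # blocks until the stream ends or 'q' is pressed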
Example #2
from imutils.video import FPS  # assumed source of the FPS helper


class VideoStreamer(object):
    def __init__(self):
        self.fps = FPS().start()

    def get_fps(self):
        self.fps.stop()
        fps = self.fps.fps()
        self.fps.start()
        return fps

    def get_frame(self):
        raise NotImplementedError("Choose a video streamer from the available ones "
                                  "e.g., CV2VideoStreamer or ROSVideoStreamer")
Example #3
# Assumes OpenCV and an imutils-style FPS helper.
from threading import Thread

import cv2
from imutils.video import FPS


class WebcamVideoStream:
    def __init__(self, src=0, resolution=(320, 240), framerate=32):
        # initialize the video camera stream and read the first frame
        # from the stream
        self.stream = cv2.VideoCapture(src)
        self.fps = FPS()
        if self.stream and self.stream.isOpened():
            self.stream.set(cv2.CAP_PROP_FRAME_WIDTH, resolution[0])
            self.stream.set(cv2.CAP_PROP_FRAME_HEIGHT, resolution[1])
            self.stream.set(cv2.CAP_PROP_FPS, framerate)
        (self.grabbed, self.frame) = self.stream.read()

        # initialize the variable used to indicate if the thread should
        # be stopped
        self.stopped = False

    def start(self):
        # start the thread to read frames from the video stream
        t = Thread(target=self.update, args=())
        t.daemon = True
        t.start()
        return self

    def update(self):
        # keep looping infinitely until the thread is stopped
        self.fps.start()
        while True:
            # if the thread indicator variable is set, stop the thread
            if self.stopped:
                return

            # otherwise, read the next frame from the stream
            (self.grabbed, self.frame) = self.stream.read()
            self.fps.update()

    def read(self):
        # return the frame most recently read
        return self.frame

    def stop(self):
        # indicate that the thread should be stopped
        self.fps.stop()
        self.stopped = True
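
Typical use of this threaded reader (a sketch; the display loop and the 'q' hotkey are assumptions, not part of the class):

stream = WebcamVideoStream(src=0).start()
while True:
    frame = stream.read()
    if frame is None:
        break
    cv2.imshow("Frame", frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
stream.stop()
cv2.destroyAllWindows()
print("approx. FPS: {:.2f}".format(stream.fps.fps()))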
Example #4
import time

# FPS and PiVideoStream here match the imutils.video helpers (an assumption);
# mw is a project-local motor-wrapper module.
from imutils.video import FPS, PiVideoStream

# DEBUG values
SET_GUI = False  # Whether to show the GUI
DEBUG_MOTORSPEED = False  # Whether to log motor speed commands to the console
DEBUG_TIMING = False  # Whether to log how long each processing step takes
DEBUG_CIRCLEPOS = True  # Whether to log the detected circle position

# Initialize the motor object
motor = mw.MyMotor("/dev/ttyACM0", 115200)
motor.pwm = 50

# initialize the camera
width = 320
height = 240
camera = PiVideoStream((width, height), 30).start()
counter = FPS()
counter.start()
# allow the camera to warm up before capturing frames
time.sleep(0.5)

# detection variables
posX = None  # X position
posX_prev = 0  # X position in the previous iteration
posY = None  # Y position
posX_exp_filter_coeff = 0.8  # The amount of how much the current measurement changes the position. [0,1]. current = alpha * measurement + (1-alpha) * previous
radius = None  # Circle radius
radius_prev = 0  # Previous circle radius
rad_exp_filter_coeff = 0.8  # The amount of how much the current measurement changes the radius. [0,1]. current = alpha * measurement + (1-alpha) * previous
speed = 0  # Speed to send to the motor controller
angleCorr = 0  # The difference between the two tracks so the robot turns
roi = None  # Part of the image where we expect to find the ball
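
The two *_exp_filter_coeff values drive the exponential filter described in the comments above. The update step in isolation (the function name is illustrative):

def exp_filter(measurement, previous, alpha):
    # current = alpha * measurement + (1 - alpha) * previous
    return alpha * measurement + (1 - alpha) * previous

# e.g. posX = exp_filter(measured_x, posX_prev, posX_exp_filter_coeff)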
Example #5
# Camera and random_color are project-local helpers; FPS here is a custom
# rate-limited counter (see fps.limit below), not the imutils class.
import cv2
import numpy as np

fps = FPS(60)
cap = Camera(mirror=True).start()
#TODO: bg1 = cv2.BackgroundSubtractorMOG2(history=3, nmixtures=5, backgroundRatio=0.0001)
w = cap.width or 640
h = cap.height or 480
ball_color = random_color()
r = 20
x = w // 2 - r // 2
y = h // 2 - r // 2
dx = dy = w // fps.limit // 2  # Ball crosses screen in about 2 seconds.

tolerance = 34  # Bounce at this amount of color change.
window_title = 'Video Pong'
countdown = old_frame_count = 0
img = new_img = diff = video = None
fps.start()
while True:
    old_img = new_img  # Use raw feed without overlay.
    ok, new_img = cap.read()
    if new_img is not None:
        img = new_img.copy()  # No motion blur.
        #fg1 = bg1.apply(img)
        #cv2.imshow('BG', fg1)
        #cv2.BackgroundSubtractor.apply(img)
    else:
        img = new_img = np.zeros((h, w, 3), np.uint8)

    fps.update()

    if old_img is not None and img is not None:
        diff = cv2.absdiff(old_img, img) > tolerance
Example #6
    def run(self):
        #Activate Detector module
        self.detector = VideoInferencePage()

        # Create a VideoCapture object and read from input file
        # If the input is the camera, pass 0 instead of the video file name
        cap = cv2.VideoCapture(self.video_address)
        fps = None
        new_detected=[]
        # Check if camera opened successfully
        if not cap.isOpened():
            print("Error opening video stream or file")

        # Read until video is completed
        while cap.isOpened() or shouldrun:
            # Capture frame-by-frame
            ret, frame = cap.read()
            global baseimageupd
            if ret:
                if not self.detector.isready():
                    continue
                if not fps:
                    fps = FPS().start()
                elif fps.elapsed() > 60:
                    # restart the counter every minute for a rolling average
                    fps = FPS().start()

                if state=="take_off" and float(var_altitude) >= 10 and baseimageupd==False:
                    #print ("hahah2")
                    object_image = frame
                    baseimageupd = True
                    #cv2.imwrite("/media/ibrahim/Data/faster-rcnn/tools/img/baseimage.jpg",object_image)
                    image = self.image_resize(object_image, height=300)
                    retval, buffer = cv2.imencode('.png', image)
                    #print ("hahah22")
                    image_base64 = base64.b64encode(buffer)
                    self.newdetected.emit(image_base64)
                    #print ("hahah23")

                #feed the detector and wait for true result
                self.detector.send_frame(frame)
                result=self.detector.get_result()
                
                #Uncomment this if want to bypass the detector
                #result=cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)

                if not isinstance(result, np.ndarray):
                    continue

                # Display the resulting frame
                convertToQtFormat = QtGui.QImage(result.data, result.shape[1], result.shape[0], QtGui.QImage.Format_RGB888)
                p = convertToQtFormat.scaled(1260, 720, QtCore.Qt.KeepAspectRatio)
                self.newimage.emit(p)
                
                #self.emit(QtCore.SIGNAL('newFPS(int)'), int(fps.fps()))

                passobject = self.detector.get_passingobject()
                #passobject = []
                if len(new_detected) < len(passobject):
                    for objectID in passobject.keys():
                        if objectID not in new_detected:
                            new_detected.append(objectID)
                            #image parsing to base64
                            #print (passobject[objectID]['image'])

                            try:
                                image = self.image_resize(passobject[objectID]['image'], height=300)
                                label = (passobject[objectID]['label'])
                                retval, buffer = cv2.imencode('.png', image)
                                image_base64 = base64.b64encode(buffer)
                                self.newinv.emit(image_base64, label)
                            except Exception as e:
                                print ("\n*************\nMissing Image\n***************\n")
                                continue

                            '''    
                            if passobject[objectID]['image'] != []:
                                image = self.image_resize(passobject[objectID]['image'], height=300)
                                label = (passobject[objectID]['label'])
                                retval, buffer = cv2.imencode('.png', image)
                                image_base64 = base64.b64encode(buffer)
                                self.newinv.emit(image_base64, label)
                            else:
                                print ("\n*************\nMissing Image\n***************\n")
                                continue
                            '''

                fps.update()
                self.new_fps.emit(int(fps.fps()))
                if self.detector.isobjectsupdated:
                    objects = self.detector.get_objects()

                    
                # Press Q on keyboard to  exit
                if not shouldrun:
                    fps.stop()
                    self.detector.exit_detection()
                    break
         
            # restart stream
            else:
                print("ret is false")
                if fps:
                    fps.stop()
                time.sleep(3)
                cap.release()
                cap = cv2.VideoCapture(self.video_address)
                if cap.isOpened() and fps:
                    fps.start()
         
        # When everything done, release the video capture object
        cap.release()
         
        # Closes all the frames
        cv2.destroyAllWindows()
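
Frames in this pipeline are shipped to the UI as base64-encoded PNGs. The same round trip in isolation (a sketch; the zero frame is a stand-in):

import base64

import cv2
import numpy as np

frame = np.zeros((300, 400, 3), np.uint8)      # stand-in frame
ok, buffer = cv2.imencode('.png', frame)
image_base64 = base64.b64encode(buffer)        # bytes, safe to emit to the UI

png_bytes = base64.b64decode(image_base64)     # decoding side
decoded = cv2.imdecode(np.frombuffer(png_bytes, np.uint8), cv2.IMREAD_COLOR)
assert decoded.shape == frame.shape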
Example #7
          #print "waiting for frame queue"
  print "import timeout"


queue_lock = Lock()
frame_queue = Queue()

stream = Process(target=import_image, args=(frame_queue, queue_lock))
time.sleep(1)
stream.start()

frames = 0

# loop over some frames
fps = FPS()
fps.start()

timeOut = 0



while frames < 300:
  #print "Main Thread Queue - > ",frame_queue.qsize() 
  with queue_lock:
    if not frame_queue.empty():
      timeOut = 0
      frame = frame_queue.get()
      sal = MPSaliency(frame)
      sal_map = sal.get_saliency_map()
      #sal_frame = (sal_map*255).round().astype(np.uint8)
      #frame = cv2.cvtColor(sal_frame, cv2.COLOR_GRAY2BGR)
Example #8
# Standard-library, OpenCV and NumPy imports used below; CaptureDevice,
# CaptureDeviceState, VideoStream, ImageServer, YoloInference and FPS are
# project-local modules.
import json
import logging
import os
import subprocess
import sys
import time
from threading import Event
from urllib.parse import urlparse

import cv2
import numpy as np


class VideoCapture(object):
    def __init__(self,
                 videoPath="",
                 verbose=True,
                 displayW=1920,
                 displayH=1080,
                 fontScale=1.0,
                 inference=True,
                 confidenceLevel=0.5):

        self.verbose = verbose
        self._debug = False

        self.videoPath = videoPath
        self._videoSourceType = CaptureDevice.Unknown
        self._videoSourceState = CaptureDeviceState.Unknown
        self.videoStream = None

        self._videoReadyEvent = Event()

        self._capture_in_progress = False

        # Display Resolution
        # Will try to set camera's resolution to the specified resolution
        self._displayW = displayW
        self._displayH = displayH

        self._cameraW = 0
        self._cameraH = 0

        # Camera's FPS
        self._cameraFPS = 30

        # Font Scale for putText
        self._fontScale = float(fontScale)

        # turn inference on/off
        self.runInference = inference

        # confidence level threshold
        self.confidenceLevel = confidenceLevel

        # various frame data

        # frame data for UI
        self._displayFrame = None

        # wallpapers for UI
        self._frame_wp_init_system = cv2.imread(
            "./www/WP-InitializingSystem.png")
        self._frame_wp_no_video = cv2.imread("./www/WP-NoVideoData.png")
        self._frame_wp_init_iothub = cv2.imread(
            "./www/WP-InitializeIotHub.png")

        if self.verbose:
            logging.info('>> ' + self.__class__.__name__ + "." +
                         sys._getframe().f_code.co_name + '()')

        logging.info(
            '===============================================================')
        logging.info(
            'Initializing Video Capture with the following parameters:')
        logging.info('   - OpenCV Version     : {}'.format(cv2.__version__))
        logging.info('   - Video path         : {}'.format(self.videoPath))
        logging.info('   - Display Resolution : {} x {}'.format(
            self._displayW, self._displayH))
        logging.info('   - Font Scale         : {}'.format(self._fontScale))
        logging.info('   - Inference?         : {}'.format(self.runInference))
        logging.info('   - ConfidenceLevel    : {}'.format(
            self.confidenceLevel))
        logging.info(
            '===============================================================')

        # set wallpaper
        self.set_Wallpaper(self._frame_wp_init_system)

        # set FPS
        self.fps = FPS()

        self.imageStreamHandler = None

        # Start Web Server for View
        self.imageServer = ImageServer(80, self)
        self.imageServer.start()

        # Set Video Source
        self.set_Video_Source(self.videoPath)

        self.set_Wallpaper(cv2.imread("./www/WP-InitializeAIEngine.png"))
        # logging.info('Yolo Inference Initializing\r\n')
        self.yoloInference = YoloInference(self._fontScale, sendMessage=False)
        # logging.info('Yolo Inference Initialized\r\n')

    def __enter__(self):

        if self.verbose:
            logging.info('>> ' + self.__class__.__name__ + "." +
                         sys._getframe().f_code.co_name + '()')

        # self.set_Video_Source(self.videoPath)

        return self

    def videoStreamReadTimeoutHandler(self, signum, frame):
        raise Exception("VideoStream Read Timeout")

    #
    # Video Source Management
    #
    def _set_Video_Source_Type(self, videoPath):

        if self.verbose:
            logging.info('>> ' + self.__class__.__name__ + "." +
                         sys._getframe().f_code.co_name +
                         '() : {}'.format(videoPath))

        self._reset_Video_Source()

        if '/dev/video' in videoPath.lower():
            self._videoSourceType = CaptureDevice.Webcam

        elif 'rtsp:' in videoPath.lower():
            self._videoSourceType = CaptureDevice.Rtsp

        elif '/api/holographic/stream' in videoPath.lower():
            self._videoSourceType = CaptureDevice.Hololens

        if self.verbose:
            logging.info('<< ' + self.__class__.__name__ + "." +
                         sys._getframe().f_code.co_name +
                         '() : {}'.format(self._videoSourceType))

    def _get_Video_Source_Type(self, videoPath):

        videoType = CaptureDevice.Unknown

        if self.verbose:
            logging.info('>> ' + self.__class__.__name__ + "." +
                         sys._getframe().f_code.co_name +
                         '() : {}'.format(videoPath))

        if '/dev/video' in videoPath.lower():
            videoType = CaptureDevice.Webcam

        elif 'rtsp:' in videoPath.lower():
            videoType = CaptureDevice.Rtsp

        elif '/api/holographic/stream' in videoPath.lower():
            videoType = CaptureDevice.Hololens

        return videoType

    #
    # Resets video capture/stream settings
    #
    def _reset_Video_Source(self):

        if self.verbose:
            logging.info('>> ' + self.__class__.__name__ + "." +
                         sys._getframe().f_code.co_name + '()')

        if self.videoStream:
            self.videoStream.stop()
        #    self.videoStream.close()
        #     self.videoStream = None

        self._videoSourceType = CaptureDevice.Unknown
        self._videoSourceState = CaptureDeviceState.Unknown

    def set_Video_Source(self, newVideoPath):

        if self.verbose:
            logging.info('>> ' + self.__class__.__name__ + "." +
                         sys._getframe().f_code.co_name + '()')

        retVal = False
        realVideoPath = newVideoPath

        if self.videoPath == newVideoPath and self._videoSourceState == CaptureDeviceState.Running:
            return True

        if self.imageStreamHandler is not None:
            statusMsg = '{{\"DeviceStatus\":\"Connecting to {}\",\"isSuccess\":{}}}'.format(
                self._remove_credential(newVideoPath), 1)
            self.imageStreamHandler.submit_write(statusMsg)

        self._videoSourceState = CaptureDeviceState.Stop

        if self._capture_in_progress:
            # wait for queue to drain and loop to exit
            time.sleep(1.0)

        self._capture_in_progress = False

        self._set_Video_Source_Type(realVideoPath)

        if self._videoSourceType == CaptureDevice.Unknown:
            self._videoSourceState = CaptureDeviceState.ErrorNotSupported
            logging.error('>> ' + self.__class__.__name__ + "." +
                          sys._getframe().f_code.co_name +
                          '() : Unsupported Video Source {}'.format(
                              self._videoSourceType))
        else:
            self._videoSourceState = CaptureDeviceState.Init

            if self._videoSourceType == CaptureDevice.Hololens:
                strHololens = realVideoPath.split('?')
                # disable audio
                realVideoPath = '{}?holo=true&pv=true&mic=false&loopback=false'.format(
                    strHololens[0])

            self.videoStream = VideoStream(videoCapture=self,
                                           path=realVideoPath)

            fps_override = 30

            if self.videoStream.videoCapture is not None:

                # get resolution
                cameraH1 = int(
                    self.videoStream.videoCapture.get(
                        cv2.CAP_PROP_FRAME_HEIGHT))
                cameraW1 = int(
                    self.videoStream.videoCapture.get(
                        cv2.CAP_PROP_FRAME_WIDTH))
                cameraFPS1 = int(
                    self.videoStream.videoCapture.get(cv2.CAP_PROP_FPS))

                if self._videoSourceType in (CaptureDevice.Webcam,
                                             CaptureDevice.Rtsp):

                    if cameraH1 != self._displayH:
                        self.videoStream.videoCapture.set(
                            cv2.CAP_PROP_FRAME_HEIGHT, self._displayH)
                    if cameraW1 != self._displayW:
                        self.videoStream.videoCapture.set(
                            cv2.CAP_PROP_FRAME_WIDTH, self._displayW)

                elif self._videoSourceType == CaptureDevice.Hololens:

                    holo_w = 1280
                    holo_h = 720

                    if 'live_med.mp4' in realVideoPath:
                        holo_w = 854
                        holo_h = 480
                    elif 'live_low.mp4' in realVideoPath:
                        holo_w = 428
                        holo_h = 240
                        fps_override = 15

                    self.videoStream.videoCapture.set(
                        cv2.CAP_PROP_FRAME_HEIGHT, holo_h)
                    self.videoStream.videoCapture.set(cv2.CAP_PROP_FRAME_WIDTH,
                                                      holo_w)

                self.videoStream.videoCapture.set(cv2.CAP_PROP_FPS,
                                                  fps_override)

                self._cameraH = int(
                    self.videoStream.videoCapture.get(
                        cv2.CAP_PROP_FRAME_HEIGHT))
                self._cameraW = int(
                    self.videoStream.videoCapture.get(
                        cv2.CAP_PROP_FRAME_WIDTH))
                self._cameraFPS = int(
                    self.videoStream.videoCapture.get(cv2.CAP_PROP_FPS))

                logging.info(
                    '==============================================================='
                )
                logging.info(
                    'Setting Video Capture with the following parameters:')
                logging.info('   - Video Source Type  : {}'.format(
                    self._videoSourceType))
                logging.info('   - Display Resolution : {} x {}'.format(
                    self._displayW, self._displayH))
                logging.info('   Original             : {} x {} @ {}'.format(
                    cameraW1, cameraH1, cameraFPS1))
                logging.info('   New                  : {} x {} @ {}'.format(
                    self._cameraW, self._cameraH, self._cameraFPS))
                logging.info(
                    '==============================================================='
                )

                if self.videoStream.start():
                    self._videoSourceState = CaptureDeviceState.Running
                    retVal = True
                else:
                    self._videoSourceState = CaptureDeviceState.ErrorRead
            else:

                if self._videoSourceType in (CaptureDevice.Hololens, CaptureDevice.Rtsp):
                    url_parsed = urlparse(realVideoPath)

                    if url_parsed.password is not None or url_parsed.username is not None:
                        url_parsed = url_parsed._replace(
                            netloc="{}".format(url_parsed.hostname))

                    ipAddress = url_parsed.netloc

                    ping_ret = subprocess.call(
                        ['ping', '-c', '5', '-W', '3', ipAddress],
                        stdout=open(os.devnull, 'w'),
                        stderr=open(os.devnull, 'w'))

                    if ping_ret == 0:
                        self._videoSourceState = CaptureDeviceState.ErrorOpen
                    else:
                        self._videoSourceState = CaptureDeviceState.ErrorNoNetwork

                logging.error('>> ' + self.__class__.__name__ + "." +
                              sys._getframe().f_code.co_name +
                              '() : Failed to open Video Capture')

        self.videoPath = realVideoPath

        if not retVal:
            self.set_Wallpaper(self._frame_wp_no_video)
        else:
            self._videoReadyEvent.set()

        self.sendCurrentVideoPath(realVideoPath)

        return retVal

    def get_display_frame(self):
        return self.displayFrame

    def set_status(self, device_status):
        self._videoSourceState = device_status

        if self._videoSourceState != CaptureDeviceState.Running:
            self.sendCurrentVideoPath("")

    def sendCurrentVideoPath(self, videoPath):

        if videoPath == "":
            video_path = self._remove_credential(self.videoPath)
        else:
            video_path = self._remove_credential(videoPath)

        logging.info('>> Current Video Status {}'.format(
            self._videoSourceState))

        if self.imageStreamHandler is not None:
            if self._videoSourceState == CaptureDeviceState.Running:
                strUserName = ""
                strPassword = ""

                videoType = self._get_Video_Source_Type(videoPath)

                if videoType in (CaptureDevice.Rtsp, CaptureDevice.Hololens):
                    url_parsed = urlparse(videoPath)

                    if url_parsed.password is not None:
                        strPassword = url_parsed.password
                    if url_parsed.username is not None:
                        strUserName = url_parsed.username

                statusMsg = '{{\"DevicePath\":\"{}\",\"isSuccess\":{},\"UserName\":\"{}\",\"Password\":\"{}\"}}'.format(
                    video_path, 1, strUserName, strPassword)
            else:
                statusMsg = '{{\"DeviceStatus\":\"Error ({}): {}\",\"isSuccess\":{},\"UserName\":\"\",\"Password\":\"\"}}'.format(
                    self._videoSourceState, video_path, 0)
            self.imageStreamHandler.submit_write(statusMsg)

    def setVideoPathFromUI(self, json_Data):

        videoPath = ""
        json_Data = json.loads(json_Data)
        logging.info('>> ' + self.__class__.__name__ + "." +
                     sys._getframe().f_code.co_name +
                     '() : {}'.format(json_Data["VideoPath"]))
        logging.info('>> {}'.format(json_Data["VideoPath"]))
        logging.info('>> {}'.format(json_Data["UserName"]))
        logging.info('>> {}'.format(json_Data["Password"]))

        videoType = self._get_Video_Source_Type(json_Data["VideoPath"])

        if videoType == CaptureDevice.Webcam:
            videoPath = json_Data["VideoPath"].strip()
        elif videoType in (CaptureDevice.Rtsp, CaptureDevice.Hololens):
            url_parsed = urlparse(json_Data["VideoPath"].strip())

            if '@' in url_parsed.netloc or len(json_Data["UserName"]) == 0:
                # already contains password or user name not specified
                videoPath = json_Data["VideoPath"]
            else:
                url_parsed = url_parsed._replace(netloc='{}:{}@{}'.format(
                    json_Data["UserName"], json_Data["Password"],
                    url_parsed.netloc))
                videoPath = url_parsed.geturl()

        self.set_Video_Source(videoPath)

    def _remove_credential(self, videoPath):

        logging.info('>> ' + self.__class__.__name__ + "." +
                     sys._getframe().f_code.co_name + '()')

        ret_Path = ""
        videoType = self._get_Video_Source_Type(videoPath)

        if videoType == CaptureDevice.Webcam:
            ret_Path = videoPath
        elif videoType in (CaptureDevice.Rtsp, CaptureDevice.Hololens):

            url_parsed = urlparse(videoPath)

            if url_parsed.password is not None or url_parsed.username is not None:
                url_parsed = url_parsed._replace(
                    netloc="{}".format(url_parsed.hostname))

            ret_Path = url_parsed.geturl()

        return ret_Path

    def set_Wallpaper(self, image):

        if self.verbose:
            logging.info('>> ' + self.__class__.__name__ + "." +
                         sys._getframe().f_code.co_name + '()')

        self.displayFrame = cv2.imencode('.jpg', image)[1].tobytes()

    def start(self):

        if self.verbose:
            logging.info('>> ' + self.__class__.__name__ + "." +
                         sys._getframe().f_code.co_name + '()')

        while True:
            if self._videoSourceState == CaptureDeviceState.Running:
                self._capture_in_progress = True
                self.__Run__()
                self._capture_in_progress = False
            else:

                if self._videoSourceState in (CaptureDeviceState.ErrorOpen, CaptureDeviceState.ErrorRead):
                    self.set_Wallpaper(self._frame_wp_no_video)

                if self._videoSourceType == CaptureDevice.Unknown:
                    if self._debug:
                        logging.info('>> ' + self.__class__.__name__ + "." +
                                     sys._getframe().f_code.co_name +
                                     '() : Unknown Device')
                    time.sleep(1.0)
                else:
                    if self._debug:
                        logging.info('>> ' + self.__class__.__name__ + "." +
                                     sys._getframe().f_code.co_name +
                                     '() : Device Not Running')
                    # time.sleep(1.0)
                    logging.info('>> Video Ready Event Enter ---------------')
                    self._videoReadyEvent.wait()
                    logging.info('<< Video Ready Event Exit  ---------------')
                    self._videoReadyEvent.clear()

    def __Run__(self):

        if self.verbose:
            logging.info(
                '==============================================================='
            )
            logging.info('>> ' + self.__class__.__name__ + "." +
                         sys._getframe().f_code.co_name + '()')

        # Check camera's FPS
        if self._cameraFPS == 0:
            logging.error('Error : Could not read FPS')
            # raise Exception("Unable to acquire FPS for Video Source")
            return

        logging.info('>> Frame rate (FPS)     : {}'.format(self._cameraFPS))
        logging.info('>> Run Inference {}'.format(self.runInference))

        perFrameTimeInMs = 1000 / self._cameraFPS

        self.fps.start()
        self.fps.reset()

        while True:

            # Get current time before we capture a frame
            tFrameStart = time.time()
            frame = np.array([])
            captureRet = False

            if self._videoSourceState != CaptureDeviceState.Running:
                break

            captureRet, frame = self.videoStream.read()

            if not captureRet:
                self._videoSourceState = CaptureDeviceState.ErrorRead
                logging.error("ERROR : Failed to read from video source")
                break

            if frame.size > 0:

                # Run Object Detection
                if self.runInference:
                    self.yoloInference.runInference(frame, self._cameraW,
                                                    self._cameraH,
                                                    self.confidenceLevel)

                # Calculate FPS (display value cannot exceed the camera's FPS)
                currentFPS = min(self.fps.fps(), self._cameraFPS)

                # Add FPS Text to the frame
                cv2.putText(frame, "FPS " + str(currentFPS),
                            (10, int(30 * self._fontScale)),
                            cv2.FONT_HERSHEY_SIMPLEX, self._fontScale,
                            (0, 0, 255), 2)

                self.displayFrame = cv2.imencode('.jpg', frame)[1].tobytes()

            timeElapsedInMs = (time.time() - tFrameStart) * 1000

            if perFrameTimeInMs > timeElapsedInMs:
                # This is faster than image source (e.g. camera) can feed.
                waitTimeBetweenFrames = perFrameTimeInMs - timeElapsedInMs
                time.sleep(waitTimeBetweenFrames / 1000.0)

    def __exit__(self, exception_type, exception_value, traceback):

        self.imageServer.close()
        cv2.destroyAllWindows()
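
The tail of __Run__ paces the loop so it never outruns the source frame rate. The same pattern in isolation (a sketch; grab_and_process stands in for the capture/inference body):

import time

def paced_loop(camera_fps, grab_and_process, n_frames=100):
    """Call grab_and_process at most camera_fps times per second."""
    per_frame_ms = 1000.0 / camera_fps
    for _ in range(n_frames):
        t0 = time.time()
        grab_and_process()
        elapsed_ms = (time.time() - t0) * 1000.0
        if per_frame_ms > elapsed_ms:
            # Faster than the source can feed; sleep off the difference.
            time.sleep((per_frame_ms - elapsed_ms) / 1000.0)

# e.g. paced_loop(30, lambda: None)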
Example #9
import json
import webbrowser

import cv2 as cv
from PyQt5.QtCore import QSize, Qt, QTimer, QUrl
from PyQt5.QtGui import QFont, QImage, QPixmap
from PyQt5.QtSvg import QSvgWidget
from PyQt5.QtWebEngineWidgets import QWebEngineView
from PyQt5.QtWidgets import (QFrame, QHBoxLayout, QLabel, QPushButton,
                             QVBoxLayout, QWidget)

# Assumes PyQt5; VIDEO, Camera, Tracking, Rendering, FPS and SlideShow are
# project-local modules.


class Interface(QWidget):
    def __init__(self, path, config):
        QWidget.__init__(self)

        self.path = path
        self.config = config

        self.setWindowTitle('AR4maps')
        self.move(0, 0)
        self.video_size = QSize(VIDEO.WIDTH, VIDEO.HEIGHT)
        self.setup_ui()

        self.markerImg = cv.imread(self.path + self.config['target'])
        # cv.imshow("target", self.markerImg)
        self._cam = Camera().start()
        self._track = Tracking(self.markerImg)
        self._rendering = Rendering(self.markerImg, self.config['coords'])
        self._fps = FPS()

        self.setup_render()

    def setup_ui(self):
        self.main_layout = QHBoxLayout()
        self.main_layout.setContentsMargins(0, 0, 0, 0)

        ### CENTER LAYOUT
        self.center_layout = QVBoxLayout()
        self.main_layout.addLayout(self.center_layout)

        # AR
        self.pixmap = QLabel()
        self.pixmap.setFixedSize(self.video_size)
        self.pixmap.mousePressEvent = self.click_pixmap
        self.center_layout.addWidget(self.pixmap)

        ## SOUTH LAYOUT
        self.south_layout = QVBoxLayout()
        self.south_layout.setContentsMargins(20, 10, 20, 20)
        self.center_layout.addLayout(self.south_layout)
        # Feature Description
        #   Title
        self.feature_title = QLabel('<br/>')
        self.feature_title.setFont(QFont('Helvetica', 18))
        self.south_layout.addWidget(self.feature_title)
        #   Description
        self.feature_description = QLabel('<br/><br/><br/><br/><br/>')
        self.feature_description.setWordWrap(True)
        self.south_layout.addWidget(self.feature_description)
        self.south_layout.addStretch()
        #   Buttons
        self.south_btns_layout = QHBoxLayout()
        self.south_layout.addLayout(self.south_btns_layout)
        self.feature_website_btn = QPushButton('Website')
        self.feature_website_btn.hide()
        self.south_btns_layout.addWidget(self.feature_website_btn)
        self.feature_photos_btn = QPushButton('Photos')
        self.feature_photos_btn.hide()
        self.south_btns_layout.addWidget(self.feature_photos_btn)
        self.feature_video_btn = QPushButton('Video')
        self.feature_video_btn.hide()
        self.south_btns_layout.addWidget(self.feature_video_btn)
        self.south_btns_layout.addStretch()

        ### EAST LAYOUT
        self.east_layout = QVBoxLayout()
        self.east_layout.setContentsMargins(0, 10, 20, 20)
        self.main_layout.addLayout(self.east_layout)
        # Logo
        self.logo = QSvgWidget(self.path + self.config['logo'])
        self.logo.setMinimumSize(252, 129)
        self.logo.setMaximumSize(252, 129)
        self.east_layout.addWidget(self.logo)
        # Buttons
        for layer in self.config['layers']:
            btn = QPushButton(layer['name'])
            btn.clicked.connect(lambda state, x=layer: self.load_layer(x))
            self.east_layout.addWidget(btn)
        # Layer Description
        sep = QFrame()
        sep.setFrameShape(QFrame.HLine)
        self.east_layout.addWidget(sep)
        self.layer_title = QLabel('Select a layer...')
        self.layer_title.setFont(QFont('Helvetica', 18))
        self.east_layout.addWidget(self.layer_title)
        self.layer_description = QLabel('')
        self.layer_description.setWordWrap(True)
        self.east_layout.addWidget(self.layer_description)
        # FPS
        self.east_layout.addStretch()
        self.fps_label = QLabel()
        self.fps_label.setAlignment(Qt.AlignRight)
        self.east_layout.addWidget(self.fps_label)

        self.setLayout(self.main_layout)

        self.web = QWebEngineView()
        self.web.resize(VIDEO.WIDTH, VIDEO.HEIGHT)
        self.web.move(0, 0)
        self.web.hide()

    def load_layer(self, layer):
        self.layer_title.setText(layer['name'])
        self.layer_description.setText(layer['description'])
        self.feature_title.setText('Select an item on the screen...')
        self.feature_description.setText('')
        self._rendering.setHighlighted(None)
        self.feature_website_btn.hide()
        self.feature_photos_btn.hide()
        self.feature_video_btn.hide()
        with open(self.path + layer['file']) as json_file:
            data = json.load(json_file)
            self._rendering.setGeoJSON(data['features'])

    def click_pixmap(self, event):
        pos = (event.x(), event.y())
        feature = self._rendering.getClickedFeature(pos)
        self.feature_website_btn.hide()
        self.feature_photos_btn.hide()
        self.feature_video_btn.hide()
        if feature is not None:
            props = feature['properties']
            self.feature_title.setText(props.get('title', 'NO TITLE'))
            self.feature_description.setText(props.get('description', ''))
            self._rendering.setHighlighted(feature['uuid'])
            if 'website' in props:
                self.feature_website_btn.show()
                try:
                    self.feature_website_btn.clicked.disconnect()
                except Exception:
                    pass
                self.feature_website_btn.clicked.connect(
                    lambda state, x=props['website']: webbrowser.open(x))
            if 'photos' in props:
                self.feature_photos_btn.show()
                try:
                    self.feature_photos_btn.clicked.disconnect()
                except Exception:
                    pass
                self.feature_photos_btn.clicked.connect(
                    lambda state, x=props['photos']: self.display_photos(x))
            if 'video' in props:
                self.feature_video_btn.show()
                try:
                    self.feature_video_btn.clicked.disconnect()
                except Exception:
                    pass
                self.feature_video_btn.clicked.connect(
                    lambda state, x=props['video']: self.display_video(x))
        else:
            self.feature_title.setText('')
            self.feature_description.setText('')
            self._rendering.setHighlighted(None)

    def display_photos(self, photos):
        photos = list(map(lambda x: self.path + x, photos))
        self.slideshow = SlideShow(photos)
        self.slideshow.show()

    def display_video(self, url):
        self.web.load(QUrl(url))
        self.web.show()

    def setup_render(self):
        self._fps.start()
        self.timer = QTimer()
        self.timer.timeout.connect(self.render)
        self.timer.start(int(1000 / VIDEO.FPS))  # QTimer.start() takes integer milliseconds

    def render(self):
        ok, frameImg = self._cam.read()
        if not ok or frameImg is None:
            return
        frameImg = cv.cvtColor(frameImg, cv.COLOR_BGR2RGB)
        H = self._track.update(frameImg)
        self._rendering.update(H, frameImg)
        if H is not None:
            # self._rendering.drawBorder()
            self._rendering.renderGeoJSON()
            # self._rendering.renderObj()

        image = QImage(frameImg, frameImg.shape[1], frameImg.shape[0],
                       frameImg.strides[0], QImage.Format_RGB888)
        self.pixmap.setPixmap(QPixmap.fromImage(image))
        self.fps_label.setText("{:.2f} FPS".format(self._fps.update()))

    def closeEvent(self, event):
        self._cam.stop()
        self._fps.stop()
        print("\033[0;30;102m[INFO]\033[0m {:.2f} seconds".format(
            self._fps.elapsed()))
        print("\033[0;30;102m[INFO]\033[0m {:.2f} FPS".format(self._fps.fps()))
Example #10
    def run(self):
        #Activate Detector module
        self.detector = VideoInferencePage()

        # Create a VideoCapture object and read from input file
        # If the input is the camera, pass 0 instead of the video file name
        cap = cv2.VideoCapture(self.video_address)
        fps = None
        new_detected=[]
        # Check if camera opened successfully
        if not cap.isOpened():
            print("Error opening video stream or file")

        # Read until video is completed
        while cap.isOpened() or shouldrun:
            # Capture frame-by-frame
            ret, frame = cap.read()
            if ret:
                if not self.detector.isready():
                    continue
                if not fps:
                    fps = FPS().start()
                elif fps.elapsed() > 60:
                    # restart the counter every minute for a rolling average
                    fps = FPS().start()
                #feed the detector and wait for true result
                self.detector.send_frame(frame)
                result=self.detector.get_result()
                
                #Uncomment this if want to bypass the detector
                #result=cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)

                if not isinstance(result, np.ndarray):
                    continue

                # Display the resulting frame
                convertToQtFormat = QtGui.QImage(result.data, result.shape[1], result.shape[0], QtGui.QImage.Format_RGB888)
                p = convertToQtFormat.scaled(1260, 720, QtCore.Qt.KeepAspectRatio)
                self.emit(QtCore.SIGNAL('newImage(QImage)'), p)
                fps.update()
                self.emit(QtCore.SIGNAL('newFPS(int)'), int(fps.fps()))

                passobject = self.detector.get_passingobject()
                #passobject = []
                if len(new_detected) < len(passobject):
                    for objectID in passobject.keys():
                        if objectID not in new_detected:
                            new_detected.append(objectID)
                            #image parsing to base64
                            image = self.image_resize(passobject[objectID]['image'], height=300)
                            retval, buffer = cv2.imencode('.png', image)
                            image_base64 = base64.b64encode(buffer)
                            self.newdetected.emit(image_base64)

         
                # Press Q on keyboard to  exit
                if not shouldrun:
                    fps.stop()
                    self.detector.exit_detection()
                    break
         
            # restart stream
            else:
                print("ret is false")
                if fps:
                    fps.stop()
                time.sleep(3)
                cap.release()
                cap = cv2.VideoCapture(self.video_address)
                if cap.isOpened() and fps:
                    fps.start()
         
        # When everything done, release the video capture object
        cap.release()
         
        # Closes all the frames
        cv2.destroyAllWindows()
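
This is an older PyQt4-style variant of Example #6: frames and FPS values are emitted with old-style self.emit(QtCore.SIGNAL(...)) instead of the bound signals used there. A minimal sketch of the new-style equivalent (class and signal names are illustrative):

from PyQt5 import QtCore, QtGui

class DetectorThread(QtCore.QThread):
    # New-style bound signals replace QtCore.SIGNAL('newImage(QImage)') etc.
    newimage = QtCore.pyqtSignal(QtGui.QImage)
    new_fps = QtCore.pyqtSignal(int)

    def run(self):
        image = QtGui.QImage(640, 480, QtGui.QImage.Format_RGB888)
        self.newimage.emit(image)  # instead of self.emit(QtCore.SIGNAL('newImage(QImage)'), image)
        self.new_fps.emit(30)      # instead of self.emit(QtCore.SIGNAL('newFPS(int)'), 30)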