Example #1
def blocking_video_test():
    # grab a pointer to the video stream and initialize the FPS counter
    print("[INFO] sampling frames from webcam...")
    stream = cv2.VideoCapture(SRC)
    fps = FPS().start()

    # loop over some frames
    while fps._numFrames < NUM_FRAMES:
        # grab the frame from the stream and resize it to have a maximum
        # width of 400 pixels
        (grabbed, frame) = stream.read()
        frame = imutils.resize(frame, width=VID_WIDTH)

        # check to see if the frame should be displayed to our screen
        if DISPLAY:
            cv2.imshow("Frame", frame)
            key = cv2.waitKey(1) & 0xFF

        # update the FPS counter
        fps.update()

    # stop the timer and display FPS information
    fps.stop()
    print("[INFO] elasped time: {:.2f}".format(fps.elapsed()))
    print("[INFO] approx. FPS: {:.2f}".format(fps.fps()))

    # do a bit of cleanup
    stream.release()
    cv2.destroyAllWindows()
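
The FPS counter used in these two benchmarks matches the helper from imutils.video (later examples use project-specific variants with a different surface, e.g. an n_frames attribute or an update() that returns a rate). A minimal sketch of the interface exercised above, written from scratch as an illustration:

# Minimal FPS counter sketch, modeled on imutils.video.FPS.
import datetime

class FPS:
    def __init__(self):
        self._start = None    # timestamp set by start()
        self._end = None      # timestamp set by stop()
        self._numFrames = 0   # frames counted between start() and stop()

    def start(self):
        self._start = datetime.datetime.now()
        return self           # enables the FPS().start() chaining used above

    def stop(self):
        self._end = datetime.datetime.now()

    def update(self):
        self._numFrames += 1

    def elapsed(self):
        return (self._end - self._start).total_seconds()

    def fps(self):
        return self._numFrames / self.elapsed()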
Example #2
def threaded_video_test():
    # create a *threaded* video stream, allow the camera sensor to warm up,
    # and start the FPS counter
    print("[INFO] sampling THREADED frames from webcam...")
    vs = WebcamVideoStream(src=SRC).start()
    fps = FPS().start()

    # loop over some frames...this time using the threaded stream
    while fps._numFrames < NUM_FRAMES:
        # grab the frame from the threaded video stream and resize it
        # to have a maximum width of 400 pixels
        frame = vs.read()
        frame = imutils.resize(frame, width=VID_WIDTH)

        # check to see if the frame should be displayed to our screen
        if DISPLAY:
            cv2.imshow("Frame", frame)
            key = cv2.waitKey(1) & 0xFF

        # update the FPS counter
        fps.update()

    # stop the timer and display FPS information
    fps.stop()
    print("[INFO] elasped time: {:.2f}".format(fps.elapsed()))
    print("[INFO] approx. FPS: {:.2f}".format(fps.fps()))

    # do a bit of cleanup
    cv2.destroyAllWindows()
    vs.stop()
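
Both benchmarks reference module-level constants (SRC, NUM_FRAMES, VID_WIDTH, DISPLAY) that the excerpt omits. A hypothetical set of values consistent with the comments above (a 400-pixel resize width, 1000 sampled frames), plus a driver that runs the two tests back to back:

# Hypothetical constants assumed by blocking_video_test/threaded_video_test.
SRC = 0            # default webcam
NUM_FRAMES = 1000  # frames to sample per benchmark
VID_WIDTH = 400    # resize width, per the comments above
DISPLAY = False    # keep False while benchmarking; imshow adds overhead

if __name__ == "__main__":
    blocking_video_test()
    threaded_video_test()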
Example #3
def from_stream():
    # start the FPS counter and the threaded webcam stream
    fps = FPS().start()
    cam = WebcamVideoStream().start()

    max_frames = 50
    i = 0

    while True:
        frame = cam.read()

        # after max_frames frames, report timing and exit
        if i > max_frames:
            fps.stop()
            print(fps.elapsed())
            print(fps.fps())
            cam.stop()  # stop the capture thread as well
            break
        i += 1

        testcone(frame, stream=True)
        fps.update()
        cv2.imshow('', frame)
        cv2.waitKey(1)
Example #4
class ALPRCamera:
    def __init__(self, camera, db_service, alpr_config, alpr_run_time, gui):
        self.stopped = False

        self.camera_name = camera.name
        self.cam = videoStream(src=camera.url)

        self.guiFPS = FPS()
        self.gui = gui
        self.detection_boxes = []
        for search_box in camera.aoi_list:
            for search_box_name in search_box:
                new_box = detectionBox(camera.name, search_box_name,
                                       search_box[search_box_name], self.cam,
                                       alpr_config, alpr_run_time, db_service)
                self.detection_boxes.append(new_box)

    def get_frame(self):
        frame = self.cam.read()
        self.guiFPS.update()

        for box in self.detection_boxes:
            frame = box.draw(frame)

        return frame

    def is_alive(self):
        return not self.stopped

    def start(self):
        Thread(target=self.run, args=()).start()
        return self

    def stop(self):
        self.stopped = True

    def run(self):
        self.cam.start()
        self.guiFPS.start_timer()
        for box in self.detection_boxes:
            box.start()

        # main loop: idle until stop() is called
        while not self.stopped:
            time.sleep(0.01)  # sleep instead of a busy-wait that pins a core

        for box in self.detection_boxes:
            box.stop()

        self.guiFPS.stop()
        if self.gui:
            print(self.camera_name + " gui", self.guiFPS.fps())

        self.cam.stop()

        cv2.destroyAllWindows()

        return
Example #5
class VideoPlayer:
    def __init__(self, source=0, dest=None):
        self._source = source
        self._dest = dest
        self._frame = None
        self._playing = False
        self._fps = FPS()

    def start(self):
        self._cap = cv2.VideoCapture(self._source)
        self._cap.set(cv2.CAP_PROP_FRAME_WIDTH, 640)
        self._cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 640)
        if self._dest is not None:
            width = int(self._cap.get(cv2.CAP_PROP_FRAME_WIDTH) + 0.5)
            height = int(self._cap.get(cv2.CAP_PROP_FRAME_HEIGHT) + 0.5)
            fps = int(self._cap.get(cv2.CAP_PROP_FPS))
            fourcc = cv2.VideoWriter_fourcc(*'mp4v')
            self._out = cv2.VideoWriter(self._dest, fourcc, fps,
                                        (width, height))
        self._playing = True
        self._fps.start()
        while self._playing:
            self.read_frame()
            self.process_frame()
            self.write_frame()
        self._fps.stop()
        print(self._fps.fps())

    def stop(self):
        self._playing = False
        self._cap.release()
        if self._dest is not None:  # writer exists only when recording
            self._out.release()
        cv2.destroyAllWindows()

    def read_frame(self):
        ret, frame = self._cap.read()
        if ret:  # keep the previous frame if the grab failed
            self._frame = frame
            self._fps.update()

    def process_frame(self):
        pass

    def write_frame(self):
        self.show_frame()
        if self._dest is not None:
            self.save_frame()

    def show_frame(self):
        cv2.imshow('Video', self._frame)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            self.stop()

    def save_frame(self):
        self._out.write(self._frame)
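
A possible driver for the VideoPlayer above; the source and destination are placeholders. start() blocks in the read/process/write loop until 'q' is pressed in the video window.

# Hypothetical usage: play the default webcam and mirror it to an MP4 file.
player = VideoPlayer(source=0, dest="output.mp4")
player.start()  # loops until 'q' is pressed, then prints the measured FPS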
Example #6
    def test01():
        """can we instantiate? """
        fps = FPS().start()
        pacer = Pacer(DESIRED_FPS).start()

        while fps.n_frames < N_TEST_FRAMES:
            print(datetime.datetime.now())
            fps.update()
            pacer.update()

        fps.stop()
        print("[INFO] elasped time: {:.2f}".format(fps.elapsed()))
        print("[INFO] approx. FPS: {:.2f}".format(fps.fps()))
        print("[INFO] n_frames: %i" % fps.n_frames)
Example #7
class PyForce:
    def __init__(self):
        pygame.init()
        self.screen = pygame.display.set_mode(RESOLUTION)
        pygame.display.set_caption(CAPTION)
        self.clock = pygame.time.Clock()
        self.sfx = Sfx()
        self.rand = Random()
        self.battlefield = BattleField(self.screen, self.sfx, self.rand)
        if FPS_ENABLED:
            self.fps = FPS(self.screen, self.clock)
        self.status = 'game'
        if DEBUG:
            print('init : main')
            import sys
            print(sys.version[:6])

    def run(self):
        while self.status != 'quit':
            # check quit
            for event in pygame.event.get():
                if event.type == QUIT:
                    self.status = 'quit'
                elif event.type == KEYDOWN:
                    if event.key == K_ESCAPE:
                        self.status = 'quit'

            # select status
            if self.status == 'intro':
                self.intro()
            elif self.status == 'game':
                self.game()
            elif self.status == 'win':
                img = pygame.image.load('res/img/end.png').convert()
                self.screen.blit(img, (0, 0))

            pygame.display.update()
            self.clock.tick(FPS_LIMIT)
        else:
            if DEBUG:
                print('user prompt quit')

    def game(self):
        self.battlefield.update()
        if self.battlefield.status == 'win':
            self.status = 'win'
        if FPS_ENABLED:
            self.fps.update()
Example #8
class WebcamVideoStream:
    def __init__(self, src=0, resolution=(320, 240), framerate=32):
        # initialize the video camera stream and read the first frame
        # from the stream
        self.stream = cv2.VideoCapture(src)
        self.fps = FPS()
        if self.stream and self.stream.isOpened():
            self.stream.set(cv2.CAP_PROP_FRAME_WIDTH, resolution[0])
            self.stream.set(cv2.CAP_PROP_FRAME_HEIGHT, resolution[1])
            self.stream.set(cv2.CAP_PROP_FPS, framerate)
        (self.grabbed, self.frame) = self.stream.read()

        # initialize the variable used to indicate if the thread should
        # be stopped
        self.stopped = False

    def start(self):
        # start the thread to read frames from the video stream
        t = Thread(target=self.update, args=())
        t.daemon = True
        t.start()
        return self

    def update(self):
        # keep looping infinitely until the thread is stopped
        self.fps.start()
        while True:
            # if the thread indicator variable is set, stop the thread
            if self.stopped:
                return

            # otherwise, read the next frame from the stream
            (self.grabbed, self.frame) = self.stream.read()
            self.fps.update()

    def read(self):
        # return the frame most recently read
        return self.frame

    def stop(self):
        # indicate that the thread should be stopped
        self.fps.stop()
        self.stopped = True
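
A short sketch of driving the class above: frames are grabbed on a daemon thread, so read() returns the most recent cached frame without blocking on camera I/O. The src=0 webcam and the frame count are assumptions.

# Hypothetical driver for WebcamVideoStream; read() never blocks on I/O.
if __name__ == "__main__":
    vs = WebcamVideoStream(src=0).start()
    for _ in range(100):
        frame = vs.read()  # latest frame; may repeat if we outpace the camera
    vs.stop()
    print("approx. capture FPS: {:.2f}".format(vs.fps.fps()))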
Example #9
class CV2VideoStreamer(VideoStreamer):
    def __init__(self, resource=None):
        super(CV2VideoStreamer, self).__init__()
        if resource is None:
            print "You must give an argument to open a video stream."
            print "  It can be a number as video device,  e.g.: 0 would be /dev/video0"
            print "  It can be a url of a stream,         e.g.: rtsp://wowzaec2demo.streamlock.net/vod/mp4:BigBuckBunny_115k.mov"
            print "  It can be a video file,              e.g.: samples/moon.avi"
            print "  It can be a class generating images, e.g.: TimeStampVideo"
            exit(0)

        # If given a number interpret it as a video device
        if isinstance(resource, int) or len(resource) < 3:
            self.resource_name = "/dev/video" + str(resource)
            resource = int(resource)
            self.vidfile = False
        else:
            self.resource_name = str(resource)
            self.vidfile = True
        print "Trying to open resource: " + self.resource_name

        if isinstance(resource, VideoSource):
            self.cap = resource
        else:
            self.cap = cv2.VideoCapture(resource)

        if not self.cap.isOpened():
            print "Error opening resource: " + str(resource)
            exit(0)
        self.fps = FPS().start()

    def __del__(self):
        self.cap.release()

    def get_frame(self):
        success, image = self.cap.read()
        # TODO (fabawi): resizing the image takes some time. make it multi-threaded
        # image = imutils.resize(image, width=VID_WIDTH)

        ret, jpeg = cv2.imencode('.jpg', image)
        jpeg_bytes = jpeg.tobytes()
        self.fps.update()
        return jpeg_bytes
Example #10
class World:
    
    def __init__(self, fps):
        
        self.display = pygame.display
        self.options = Options()
        self._create_screen()
        pygame.display.set_caption("Shaolin Fury - beta version")
        pygame.mouse.set_visible(False)
        self.quit = False
        self.fps = FPS(fps)
        self.font = Font()
        self.change_state(Game(self, DATADIR, CONFIGDIR))

    def _create_screen(self):
        """Genera la ventana principal del videojuego"""

        if self.options.fullscreen:
            flags = pygame.FULLSCREEN
        else:
            flags = 0

        if self.options.reduce_mode:
            size = (320, 240)
        else:
            size = (640, 480)

        self.screen = pygame.display.set_mode(size, flags)

    def change_state(self, new_state):
        self.state = new_state

    def loop(self):
        
        while not self.quit:
            n = self.fps.update()

            for t in range(n):
                e = pygame.event.poll()

                if e:
                    if e.type == pygame.QUIT:
                        self.quit = True
                    else:
                        if e.type == pygame.KEYDOWN:
                            
                            if e.key == pygame.K_F3:
                                pygame.display.toggle_fullscreen()
                            elif e.key == pygame.K_q:
                                self.quit = True

                self.state.update()

            if n > 0:
                self.state.draw()
Example #11
def main(yolo):

    # Definition of the parameters
    max_cosine_distance = 0.1  # changed
    nn_budget = None
    nms_max_overlap = 1.0

    # deep_sort
    model_filename = 'model_data/mars.pb'
    encoder = gdet.create_box_encoder(model_filename, batch_size=1)

    metric = nn_matching.NearestNeighborDistanceMetric("cosine",
                                                       max_cosine_distance,
                                                       nn_budget)
    tracker = Tracker(metric)

    ############
    writeVideo_flag = True

    video_capture = cv2.VideoCapture('DJI_0034.mp4')
    ##################################### added to accelerate
    fps_ = FPS().start()
    #####################################

    if writeVideo_flag:
        # Define the codec and create VideoWriter object
        w = 720  #int(video_capture.get(3))
        h = 480  #int(video_capture.get(4))
        fourcc = cv2.VideoWriter_fourcc(*'MJPG')
        out = cv2.VideoWriter('output.avi', fourcc, 15, (w, h))
        list_file = open('detection.txt', 'w')
        frame_index = -1

    fps = 0.0
    while True:
        ret, frame = video_capture.read()  # frame shape 640*480*3
        if not ret:  # frame is None at end of stream; check before resizing
            break
        frame = cv2.resize(frame, (720, 480))

        cv2.namedWindow("", 0)
        cv2.resizeWindow("", 1980, 1080)

        t1 = time.time()

        # image = Image.fromarray(frame)
        image = Image.fromarray(frame[..., ::-1])  #bgr to rgb
        boxs = yolo.detect_image(image)  #image)
        features = encoder(frame, boxs)

        # detection scores are fixed to 1.0 here
        detections = [
            Detection(bbox, 1.0, feature)
            for bbox, feature in zip(boxs, features)
        ]

        # Run non-maxima suppression.
        boxes = np.array([d.tlwh for d in detections])
        scores = np.array([d.confidence for d in detections])
        indices = preprocessing.non_max_suppression(boxes, nms_max_overlap,
                                                    scores)
        detections = [detections[i] for i in indices]

        # Call the tracker
        # DeepSort
        tracker.predict()
        tracker.update(detections)

        # DeepSort white rectangle
        for track in tracker.tracks:
            if not track.is_confirmed() or track.time_since_update > 1:
                continue
            bbox = track.to_tlbr()
            cv2.rectangle(frame, (int(bbox[0]), int(bbox[1])),
                          (int(bbox[2]), int(bbox[3])), (255, 255, 255), 2)
            cv2.putText(frame, str(track.track_id),
                        (int(bbox[0]), int(bbox[1])), 0, 5e-3 * 200,
                        (0, 255, 0), 2)

        # YOLO: draw blue rectangles around the raw detections
        for det in detections:
            bbox = det.to_tlbr()
            cv2.rectangle(frame, (int(bbox[0]), int(bbox[1])),
                          (int(bbox[2]), int(bbox[3])), (255, 0, 0), 2)


        # OpenPose: run pose estimation on the cropped region
        if len(boxs) != 0:  # check for yolo
            # NOTE: bbox here is the last box assigned in the loops above
            image_copy_crop = image.copy()
            cropped_img = image_copy_crop.crop(
                (int(bbox[0]), int(bbox[1]), int(bbox[2]), int(bbox[3])))
            cropped_image = np.array(cropped_img)
        else:
            cropped_image = frame
        datum = op.Datum()
        datum.cvInputData = cropped_image
        opWrapper.emplaceAndPop([datum])

        if len(boxs) != 0:  # check for yolo
            image_copy_paste = Image.fromarray(frame).copy()
            image_copy_paste.paste(
                Image.fromarray(datum.cvOutputData),
                (int(bbox[0]), int(bbox[1]), int(bbox[2]), int(bbox[3])))
            datum.cvOutputData = np.array(image_copy_paste)
            cv2.imshow('', datum.cvOutputData)
            #print("Body keypoints: \n" + str(datum.poseKeypoints))
        else:
            cv2.imshow('', frame)
        fps_.update()
        ##############

        if writeVideo_flag:
            # save a frame
            if len(boxs) != 0:  # check for yolo
                out.write(datum.cvOutputData)
            else:
                out.write(frame)
            frame_index = frame_index + 1
            list_file.write(str(frame_index) + ' ')
            if len(boxs) != 0:
                for i in range(0, len(boxs)):
                    list_file.write(
                        str(boxs[i][0]) + ' ' + str(boxs[i][1]) + ' ' +
                        str(boxs[i][2]) + ' ' + str(boxs[i][3]) + ' ')
            list_file.write('\n')

        fps = (fps + (1. / (time.time() - t1))) / 2
        print("fps= %f" % (fps))

        # Press Q to stop!
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    video_capture.release()
    if writeVideo_flag:
        out.release()
        list_file.close()
    fps_.stop()
    cv2.destroyAllWindows()
Example #12
window_title = 'Video Pong'
countdown = old_frame_count = 0
img = new_img = diff = video = None
fps.start()
while True:
    old_img = new_img  # Use raw feed without overlay.
    ok, new_img = cap.read()
    if new_img is not None:
        img = new_img.copy()  # No motion blur.
        #fg1 = bg1.apply(img)
        #cv2.imshow('BG', fg1)
        #cv2.BackgroundSubtractor.apply(img)
    else:
        img = new_img = np.zeros((480, 640, 3), np.uint8)

    fps.update()

    if old_img is not None and img is not None:
        diff = cv2.absdiff(old_img, img) > tolerance

    # Handle ball.
    x += dx
    y += dy
    if not (r < x < 640 - r) or diff is not None and diff[y][x].any():
        dx *= -1
        ball_color = random_color()
    if not (r < y < 480 - r) or diff is not None and diff[y][x].any():
        dy *= -1
        ball_color = random_color()
    cv2.circle(img, (x, y), r, ball_color, 2)
Example #13
    def run(self):
        #Activate Detector module
        self.detector = VideoInferencePage()

        # Create a VideoCapture object and read from input file
        # If the input is the camera, pass 0 instead of the video file name
        cap = cv2.VideoCapture(self.video_address)
        fps = None
        new_detected=[]
        # Check if camera opened successfully
        if not cap.isOpened():
            print("Error opening video stream or file")
         
        # Read until video is completed
        while cap.isOpened() or shouldrun:
            # Capture frame-by-frame
            ret, frame = cap.read()
            if ret:
                if not self.detector.isready():
                    continue
                if not fps:
                    fps = FPS().start()
                elif fps.elapsed()>60:
                    fps = FPS().start()
                #feed the detector and wait for true result
                self.detector.send_frame(frame)
                result=self.detector.get_result()
                
                #Uncomment this if want to bypass the detector
                #result=cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)

                if not isinstance(result, np.ndarray):
                    continue

                # Display the resulting frame
                convertToQtFormat = QtGui.QImage(result.data, result.shape[1], result.shape[0], QtGui.QImage.Format_RGB888)
                p = convertToQtFormat.scaled(1260, 720, QtCore.Qt.KeepAspectRatio)
                self.emit(QtCore.SIGNAL('newImage(QImage)'), p)
                fps.update()
                self.emit(QtCore.SIGNAL('newFPS(int)'), int(fps.fps()))

                passobject = self.detector.get_passingobject()
                if len(new_detected)<len(passobject):
                    for objectID in passobject.keys():
                        if not objectID in new_detected:
                            new_detected.append(objectID)
                            #image parsing to base64
                            image = self.image_resize(passobject[objectID]['image'], height=300)
                            retval, buffer = cv2.imencode('.png', image)
                            image_base64 = base64.b64encode(buffer)
                            self.newdetected.emit(image_base64)

         
                # Press Q on keyboard to  exit
                if not shouldrun:
                    fps.stop()
                    self.detector.exit_detection()
                    break
         
            # restart stream
            else:
                print("ret is false")
                if fps:
                    fps.stop()
                time.sleep(3)
                cap.release()
                cap = cv2.VideoCapture(self.video_address)
                if cap.isOpened() and fps:
                    fps.start()
         
        # When everything done, release the video capture object
        cap.release()
         
        # Closes all the frames
        cv2.destroyAllWindows()
Example #14
class Bike():
    def __init__(self, log=False):
        if str(log) == "True":
            dir_path = os.getcwd() + "/"
            file_name = time.strftime("%Y.%m.%d.%H.%M.%S") + ".log"
            log = dir_path + file_name
        self.log = log

        self.mtr = BigBoy()
        self.imu = IMU()
        self.fps = FPS(1.0)

        self.t0 = time.time()
        self.steps = 5
        self.max_steps = 60
        self.pos = 0
        self.threshold = 1.5
        self.tilt_constant = -25.0

        # record parameters at the top of the log

    def balance(self):
        x, y, self.tilt = self.imu.get_acceleration()
        self.goal = int(self.tilt_constant * self.tilt)
        self.response = 'Goal: ' + str(self.goal)
        self.try_step(self.goal - self.pos)
        time.sleep(0.005)

    def balance_initial(self):
        t = time.time() - self.t0
        x, y, self.tilt = self.imu.get_acceleration()

        if self.tilt < -self.threshold:
            self.response = 'leaning left'
            self.try_step(self.steps)
        elif self.tilt > self.threshold:
            self.response = 'leaning right'
            self.try_step(-self.steps)
        elif self.pos > 0:
            self.response = 'recentering right'
            self.try_step(-self.steps)
        elif self.pos < 0:
            self.response = 'recentering left'
            self.try_step(self.steps)

        if self.log:
            data = [
                t,
                x,
                y,
                self.pos,
                self.tilt,
                self.response,
            ]
            with open(self.log, 'a') as f:
                f.write(",".join(data) + "\n")

    def try_step(self, steps):
        if abs(self.pos + steps) <= self.max_steps:
            self.pos += steps
            self.mtr.step(steps)

    def updateFPS(self):
        self.fps.update(verbose=True)

    def cleanup(self):
        self.mtr.cleanup()
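
A hypothetical control loop for the Bike class above, using only the methods it exposes; the KeyboardInterrupt handling is an assumption about how the script is meant to be stopped.

# Hypothetical main loop: balance until interrupted, then release the motor.
bike = Bike(log=True)
try:
    while True:
        bike.balance()
        bike.updateFPS()
except KeyboardInterrupt:
    bike.cleanup()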
Example #15
def recognize_video(detector,
                    embedder: Embedder,
                    recognizer: Recognizer,
                    detector_params='default',
                    source=0):

    # initialize the video stream
    print('Starting video stream...')
    vs = VideoStream(src=source).start()

    if not is_detector(detector):
        raise TypeError('Incorrect type of detector')

    # let the camera sensor warm up
    time.sleep(0.5)

    # start the FPS throughput estimator
    fps = FPS().start()

    # loop over frames from the video stream
    while True:

        frame = vs.read()

        if detector_params == 'default':
            faces_roi, boxes = detector.calc_image(frame, return_mode='both')

        elif type(detector) == DetectorSSD:
            confidence = detector_params[0]
            faces_roi, boxes = detector.calc_image(frame,
                                                   confidence=confidence,
                                                   return_mode='both')

        elif type(detector) == DetectorVJ or type(detector) == DetectorLBP:
            [scale_factor, min_neighbors] = detector_params
            faces_roi, boxes = detector.calc_image(frame,
                                                   scale_factor=scale_factor,
                                                   min_neighbors=min_neighbors,
                                                   return_mode='both')

        elif type(detector) == DetectorHOG or type(detector) == DetectorMMOD:
            upsampling_times = detector_params[0]
            faces_roi, boxes = detector.calc_image(
                frame, upsampling_times=upsampling_times, return_mode='both')

        for i in range(len(faces_roi)):

            embeddings = embedder.calc_face(faces_roi[i])
            name = recognizer.recognize(embeddings)
            start_x, start_y, end_x, end_y = boxes[i]

            text = '{}'.format(name)
            y = start_y - 10 if start_y - 10 > 10 else start_y + 10
            cv2.rectangle(frame, (start_x, start_y), (end_x, end_y),
                          (0, 0, 255), 2)
            cv2.putText(frame, text, (start_x, y), cv2.FONT_HERSHEY_SIMPLEX,
                        0.45, (0, 0, 255), 2)

        # update the FPS counter
        fps.update()

        # show the output frame
        cv2.imshow('Frame', frame)
        key = cv2.waitKey(1) & 0xFF

        # quit when 'q' is pressed
        if key == ord("q"):
            break

    fps.stop()
    print('Elapsed time: {:.2f}'.format(fps.elapsed()))
    print('Approx. FPS: {:.2f}'.format(fps.fps()))

    cv2.destroyAllWindows()
    vs.stop()
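
A hypothetical invocation of recognize_video; the constructors and their arguments are assumptions, and the single-element detector_params list matches the confidence lookup in the DetectorSSD branch above.

# Hypothetical call: SSD face detector with a 0.5 confidence threshold.
detector = DetectorSSD()
embedder = Embedder()
recognizer = Recognizer()
recognize_video(detector, embedder, recognizer,
                detector_params=[0.5], source=0)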
Example #16
class Interface(QWidget):
    def __init__(self, path, config):
        QWidget.__init__(self)

        self.path = path
        self.config = config

        self.setWindowTitle('AR4maps')
        self.move(0, 0)
        self.video_size = QSize(VIDEO.WIDTH, VIDEO.HEIGHT)
        self.setup_ui()

        self.markerImg = cv.imread(self.path + self.config['target'])
        # cv.imshow("target",targetImg)
        self._cam = Camera().start()
        self._track = Tracking(self.markerImg)
        self._rendering = Rendering(self.markerImg, self.config['coords'])
        self._fps = FPS()

        self.setup_render()

    def setup_ui(self):
        self.main_layout = QHBoxLayout()
        self.main_layout.setContentsMargins(0, 0, 0, 0)

        ### CENTER LAYOUT
        self.center_layout = QVBoxLayout()
        self.main_layout.addLayout(self.center_layout)

        # AR
        self.pixmap = QLabel()
        self.pixmap.setFixedSize(self.video_size)
        self.pixmap.mousePressEvent = self.click_pixmap
        self.center_layout.addWidget(self.pixmap)

        ## SOUTH LAYOUT
        self.south_layout = QVBoxLayout()
        self.south_layout.setContentsMargins(20, 10, 20, 20)
        self.center_layout.addLayout(self.south_layout)
        # Feature Description
        #   Title
        self.feature_title = QLabel('<br/>')
        self.feature_title.setFont(QFont('Helvetica', 18))
        self.south_layout.addWidget(self.feature_title)
        #   Description
        self.feature_description = QLabel('<br/><br/><br/><br/><br/>')
        self.feature_description.setWordWrap(True)
        self.south_layout.addWidget(self.feature_description)
        self.south_layout.addStretch()
        #   Buttons
        self.south_btns_layout = QHBoxLayout()
        self.south_layout.addLayout(self.south_btns_layout)
        self.feature_website_btn = QPushButton('Website')
        self.feature_website_btn.hide()
        self.south_btns_layout.addWidget(self.feature_website_btn)
        self.feature_photos_btn = QPushButton('Photos')
        self.feature_photos_btn.hide()
        self.south_btns_layout.addWidget(self.feature_photos_btn)
        self.feature_video_btn = QPushButton('Video')
        self.feature_video_btn.hide()
        self.south_btns_layout.addWidget(self.feature_video_btn)
        self.south_btns_layout.addStretch()

        ### EAST LAYOUT
        self.east_layout = QVBoxLayout()
        self.east_layout.setContentsMargins(0, 10, 20, 20)
        self.main_layout.addLayout(self.east_layout)
        # Logo
        self.logo = QSvgWidget(self.path + self.config['logo'])
        self.logo.setMinimumSize(252, 129)
        self.logo.setMaximumSize(252, 129)
        self.east_layout.addWidget(self.logo)
        # Buttons
        for layer in self.config['layers']:
            btn = QPushButton(layer['name'])
            btn.clicked.connect(lambda state, x=layer: self.load_layer(x))
            self.east_layout.addWidget(btn)
        # Layer Description
        sep = QFrame()
        sep.setFrameShape(QFrame.HLine)
        self.east_layout.addWidget(sep)
        self.layer_title = QLabel('Select a layer...')
        self.layer_title.setFont(QFont('Helvetica', 18))
        self.east_layout.addWidget(self.layer_title)
        self.layer_description = QLabel('')
        self.layer_description.setWordWrap(True)
        self.east_layout.addWidget(self.layer_description)
        # FPS
        self.east_layout.addStretch()
        self.fps_label = QLabel()
        self.fps_label.setAlignment(Qt.AlignRight)
        self.east_layout.addWidget(self.fps_label)

        self.setLayout(self.main_layout)

        self.web = QWebEngineView()
        self.web.resize(VIDEO.WIDTH, VIDEO.HEIGHT)
        self.web.move(0, 0)
        self.web.hide()

    def load_layer(self, layer):
        self.layer_title.setText(layer['name'])
        self.layer_description.setText(layer['description'])
        self.feature_title.setText('Select an item on the screen...')
        self.feature_description.setText('')
        self._rendering.setHighlighted(None)
        self.feature_website_btn.hide()
        self.feature_photos_btn.hide()
        self.feature_video_btn.hide()
        with open(self.path + layer['file']) as json_file:
            data = json.load(json_file)
            self._rendering.setGeoJSON(data['features'])

    def click_pixmap(self, event):
        pos = (event.x(), event.y())
        feature = self._rendering.getClickedFeature(pos)
        self.feature_website_btn.hide()
        self.feature_photos_btn.hide()
        self.feature_video_btn.hide()
        if feature is not None:
            props = feature['properties']
            self.feature_title.setText(props.get('title', 'NO TITLE'))
            self.feature_description.setText(props.get('description', ''))
            self._rendering.setHighlighted(feature['uuid'])
            if 'website' in props:
                self.feature_website_btn.show()
                try:
                    self.feature_website_btn.clicked.disconnect()
                except Exception:
                    pass
                self.feature_website_btn.clicked.connect(
                    lambda state, x=props['website']: webbrowser.open(x))
            if 'photos' in props:
                self.feature_photos_btn.show()
                try:
                    self.feature_photos_btn.clicked.disconnect()
                except Exception:
                    pass
                self.feature_photos_btn.clicked.connect(
                    lambda state, x=props['photos']: self.display_photos(x))
            if 'video' in props:
                self.feature_video_btn.show()
                try:
                    self.feature_video_btn.clicked.disconnect()
                except Exception:
                    pass
                self.feature_video_btn.clicked.connect(
                    lambda state, x=props['video']: self.display_video(x))
        else:
            self.feature_title.setText('')
            self.feature_description.setText('')
            self._rendering.setHighlighted(None)

    def display_photos(self, photos):
        photos = list(map(lambda x: self.path + x, photos))
        self.slideshow = SlideShow(photos)
        self.slideshow.show()

    def display_video(self, url):
        self.web.load(QUrl(url))
        self.web.show()

    def setup_render(self):
        self._fps.start()
        self.timer = QTimer()
        self.timer.timeout.connect(self.render)
        self.timer.start(int(1000 / VIDEO.FPS))  # QTimer.start() takes integer milliseconds

    def render(self):
        _, frameImg = self._cam.read()
        frameImg = cv.cvtColor(frameImg, cv.COLOR_BGR2RGB)
        H = self._track.update(frameImg)
        self._rendering.update(H, frameImg)
        if H is not None:
            # self._rendering.drawBorder()
            self._rendering.renderGeoJSON()
            # self._rendering.renderObj()

        image = QImage(frameImg, frameImg.shape[1], frameImg.shape[0],
                       frameImg.strides[0], QImage.Format_RGB888)
        self.pixmap.setPixmap(QPixmap.fromImage(image))
        self.fps_label.setText("{:.2f} FPS".format(self._fps.update()))

    def closeEvent(self, event):
        self._cam.stop()
        self._fps.stop()
        print("\033[0;30;102m[INFO]\033[0m {:.2f} seconds".format(
            self._fps.elapsed()))
        print("\033[0;30;102m[INFO]\033[0m {:.2f} FPS".format(self._fps.fps()))
Example #17
    def run(self):
        #Activate Detector module
        self.detector = VideoInferencePage()

        # Create a VideoCapture object and read from input file
        # If the input is the camera, pass 0 instead of the video file name
        cap = cv2.VideoCapture(self.video_address)
        fps = None
        new_detected=[]
        # Check if camera opened successfully
        if not cap.isOpened():
            print("Error opening video stream or file")
         
        # Read until video is completed
        while cap.isOpened() or shouldrun:
            # Capture frame-by-frame
            ret, frame = cap.read()
            global baseimageupd
            if ret:
                if not self.detector.isready():
                    continue
                if not fps:
                    fps = FPS().start()
                elif fps.elapsed()>60:
                    fps = FPS().start()


                if state=="take_off" and float(var_altitude) >= 10 and baseimageupd==False:
                    #print ("hahah2")
                    object_image = frame
                    baseimageupd = True
                    #cv2.imwrite("/media/ibrahim/Data/faster-rcnn/tools/img/baseimage.jpg",object_image)
                    image = self.image_resize(object_image, height=300)
                    retval, buffer = cv2.imencode('.png', image)
                    #print ("hahah22")
                    image_base64 = base64.b64encode(buffer)
                    self.newdetected.emit(image_base64)
                    #print ("hahah23")

                #feed the detector and wait for true result
                self.detector.send_frame(frame)
                result=self.detector.get_result()
                
                #Uncomment this if want to bypass the detector
                #result=cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)

                if not isinstance(result, np.ndarray):
                    continue

                # Display the resulting frame
                convertToQtFormat = QtGui.QImage(result.data, result.shape[1], result.shape[0], QtGui.QImage.Format_RGB888)
                p = convertToQtFormat.scaled(1260, 720, QtCore.Qt.KeepAspectRatio)
                self.newimage.emit(p)
                
                #self.emit(QtCore.SIGNAL('newFPS(int)'), int(fps.fps()))

                passobject = self.detector.get_passingobject()
                #passobject = []
                if len(new_detected)<len(passobject):
                    for objectID in passobject.keys():
                        if not objectID in new_detected:
                            new_detected.append(objectID)
                            #image parsing to base64
                            #print (passobject[objectID]['image'])

                            try:
                                image = self.image_resize(passobject[objectID]['image'], height=300)
                                label = (passobject[objectID]['label'])
                                retval, buffer = cv2.imencode('.png', image)
                                image_base64 = base64.b64encode(buffer)
                                self.newinv.emit(image_base64, label)
                            except Exception as e:
                                print ("\n*************\nMissing Image\n***************\n")
                                continue

                            '''    
                            if passobject[objectID]['image'] != []:
                                image = self.image_resize(passobject[objectID]['image'], height=300)
                                label = (passobject[objectID]['label'])
                                retval, buffer = cv2.imencode('.png', image)
                                image_base64 = base64.b64encode(buffer)
                                self.newinv.emit(image_base64, label)
                            else:
                                print ("\n*************\nMissing Image\n***************\n")
                                continue
                            '''

                fps.update()
                self.new_fps.emit(int(fps.fps()))
                if self.detector.isobjectsupdated:
                    objects = self.detector.get_objects()

                    
                # Press Q on keyboard to  exit
                if not shouldrun:
                    fps.stop()
                    self.detector.exit_detection()
                    break
         
            # restart stream
            else: 
                print ("ret is false")
                if fps:
                    fps.stop()
                time.sleep(3)
                cap.release()
                cap = cv2.VideoCapture(self.video_address)
                if cap.isOpened() and fps:
                    fps.start()
         
        # When everything done, release the video capture object
        cap.release()
         
        # Closes all the frames
        cv2.destroyAllWindows()
Example #18
class PiVideoStream(QThread):
    """
    Thread that produces frames for further processing as a PyQtSignal.
    Picamera is set-up according to sensormode and splitter_port 0 is used for capturing image data.
    A video stream is set-up, using picamera splitter port 1 and resized to frameSize.
    Splitter_port 2 is used for capturing video at videoFrameSize.
    """
    image = None
    finished = pyqtSignal()
    postMessage = pyqtSignal(str)
    frame = pyqtSignal(np.ndarray)
    progress = pyqtSignal(int)
    captured = pyqtSignal()

    camera = PiCamera()
    videoStream = BytesIO()

    storagePath = None
    cropRect = [0] * 4

    ## @param ins is the number of instances created. This may not exceed 1.
    ins = 0

    def __init__(self):
        super().__init__()

        ## Instance limiter. Checks if an instance exists already. If so, it deletes the current instance.
        if PiVideoStream.ins >= 1:
            del self
            self.postMessage.emit(
                "{}: error; multiple instances created, while only 1 instance is allowed"
                .format(__class__.__name__))
            return
        try:
            PiVideoStream.ins += 1
        except Exception as err:
            self.postMessage.emit("{}: error; type: {}, args: {}".format(
                self.__class__.__name__, type(err), err.args))
        else:
            warnings.filterwarnings('default', category=DeprecationWarning)
            self.settings = QSettings("settings.ini", QSettings.IniFormat)
            self.loadSettings()
##            self.initStream()

    def loadSettings(self):
        self.postMessage.emit(
            "{}: info; loading camera settings from {}".format(
                self.__class__.__name__, self.settings.fileName()))

        # load
        self.monochrome = self.settings.value('camera/monochrome',
                                              False,
                                              type=bool)
        self.sensorMode = int(self.settings.value('camera/sensor_mode'))
        self.frameRate = int(self.settings.value('camera/frame_rate'))

        # set frame sizes
        self.frameSize = frame_size_from_string(
            self.settings.value('frame_size'))
        self.captureFrameSize = frame_size_from_sensor_mode(self.sensorMode)
        self.videoFrameSize = frame_size_from_string(
            self.settings.value('camera/video_frame_size'))

        if not self.monochrome:
            self.frameSize = self.frameSize + (3, )

    @pyqtSlot()
    def initStream(self):
        if self.isRunning():
            self.requestInterruption()
            wait_signal(self.finished, 10000)
        # Set camera parameters
        self.camera.exposure_mode = 'backlight'  # 'auto'
        self.camera.awb_mode = 'flash'  # 'auto'
        self.camera.meter_mode = 'backlit'  # 'average'
        self.camera.sensor_mode = self.sensorMode
        self.camera.resolution = self.captureFrameSize
        self.camera.framerate = self.frameRate
        self.camera.image_effect = self.settings.value('camera/effect')
        self.camera.iso = int(self.settings.value(
            'camera/iso'))  # should force unity analog gain
        self.camera.video_denoise = self.settings.value('camera/video_denoise',
                                                        False,
                                                        type=bool)

        # Wait for the automatic gain control to settle
        wait_ms(3000)

        #         # Now fix the values
        #         self.camera.shutter_speed = self.camera.exposure_speed
        #         self.camera.exposure_mode = 'off'
        #         g = self.camera.awb_gains
        #         self.camera.awb_mode = 'off'
        #         self.camera.awb_gains = g

        ##            # Setup video port, GPU resizes frames, and compresses to mjpeg stream
        ##            self.camera.start_recording(self.videoStream, format='mjpeg', splitter_port=1, resize=self.frameSize)

        # Setup capture from video port 1
        if self.monochrome:
            self.rawCapture = PiYArray(self.camera, size=self.frameSize)
            self.captureStream = self.camera.capture_continuous(
                self.rawCapture,
                'yuv',
                use_video_port=True,
                splitter_port=1,
                resize=self.frameSize)
        else:
            self.rawCapture = PiRGBArray(self.camera, size=self.frameSize)
            self.captureStream = self.camera.capture_continuous(
                self.rawCapture,
                'bgr',
                use_video_port=True,
                splitter_port=1,
                resize=self.frameSize)
        # init crop rectangle
        if self.cropRect[2] == 0:
            self.cropRect[2] = self.camera.resolution[1]
        if self.cropRect[3] == 0:
            self.cropRect[3] = self.camera.resolution[0]

        # start the thread
        self.start(QThread.HighPriority)
        msg = "{}: info; video stream initialized with frame size = {} and {:d} channels".format(\
            __class__.__name__, str(self.camera.resolution), 1 if self.monochrome else 3)
        self.postMessage.emit(msg)

    @pyqtSlot()
    def run(self):
        try:
            self.fps = FPS().start()
            for f in self.captureStream:
                if self.isInterruptionRequested():
                    break
                self.rawCapture.seek(0)
                img = f.array  # grab the frame from the stream
                self.frame.emit(img)  #cv2.resize(img, self.frameSize[:2]))
                self.fps.update()

##                # Grab jpeg from an mpeg video stream
##                self.videoStream.seek(0)
##                buf = self.videoStream.read()
##                if buf.startswith(b'\xff\xd8'):
##                    # jpeg magic number is detected
##                    flag = cv2.IMREAD_GRAYSCALE if self.monochrome else cv2.IMREAD_COLOR
##                    img = cv2.imdecode(np.frombuffer(buf, dtype=np.uint8), flag)
##                    self.frame.emit(img)
##                    self.fps.update()
##                    self.videoStream.truncate(0)

        except Exception as err:
            self.postMessage.emit("{}: error; type: {}, args: {}".format(
                self.__class__.__name__, type(err), err.args))
        finally:
            self.fps.stop()
            img = np.zeros(shape=(self.frameSize[1], self.frameSize[0]),
                           dtype=np.uint8)
            cv2.putText(
                img, 'Camera suspended',
                (int(self.frameSize[0] / 2) - 150, int(self.frameSize[1] / 2)),
                cv2.FONT_HERSHEY_SIMPLEX, 1, (255), 1)
            for i in range(5):
                wait_ms(100)
                self.frame.emit(img)
            msg = "{}: info; finished, approx. processing speed: {:.2f} fps".format(
                self.__class__.__name__, self.fps.fps())
            self.postMessage.emit(msg)
            self.finished.emit()

    @pyqtSlot()
    def stop(self):
        self.postMessage.emit("{}: info; stopping".format(__class__.__name__))
        try:
            if self.isRunning():
                self.requestInterruption()
                wait_signal(self.finished, 10000)
        except Exception as err:
            msg = "{}: error; stopping method".format(self.__class__.__name__)
            print(msg)
        finally:
            # Note that thread quit is required, otherwise strange things happen.
            self.quit()

    @pyqtSlot(str)
    def takeImage(self, filename_prefix=None):
        if filename_prefix is not None:
            (head, tail) = os.path.split(filename_prefix)
            if not os.path.exists(head):
                os.makedirs(head)
            filename = os.path.sep.join([
                head,
                '{:016d}_'.format(round(time.time() * 1000)) + tail + '.png'
            ])
        else:
            filename = '{:016d}'.format(round(time.time() * 1000)) + '.png'
            # open path
            if self.storagePath is not None:
                filename = os.path.sep.join([self.storagePath, filename])
        try:
            self.camera.capture(filename,
                                use_video_port=False,
                                splitter_port=0,
                                format='png')
        except Exception as err:
            self.postMessage.emit("{}: error; type: {}, args: {}".format(
                self.__class__.__name__, type(err), err.args))

        self.captured.emit()
        self.postMessage.emit("{}: info; image written to {}".format(
            __class__.__name__, filename))

    @pyqtSlot(str, int)
    def recordClip(self, filename_prefix=None, duration=10):
        """
        Captures a videoclip of duration at resolution videoFrameSize.
        The GPU resizes the captured video to the intended resolution.
        Note that while it seems possble to change the sensormode, reverting to the original mode fails when capturing an image.
        In many cases, the intended framerate is not achieved. For that reason, ffprobe counts
        the total number of frames that were actually taken.        
        Next, the h264 video file is boxed using MP4box.
        For some reason, changing the framerate with MP4Box did not work out.        
        """
        if filename_prefix is not None:
            (head, tail) = os.path.split(filename_prefix)
            if not os.path.exists(head):
                os.makedirs(head)
            filename = os.path.sep.join([
                head, '{:016d}_{}s'.format(round(time.time() * 1000),
                                           round(duration)) + tail
            ])
        else:
            filename = '{:016d}_{}s'.format(round(time.time() * 1000),
                                            round(duration))
            # open path
            if self.storagePath is not None:
                filename = os.path.sep.join([self.storagePath, filename])

        # stop current video stream, maybe mpeg and h264 compression cannot run simultaneously?
        self.postMessage.emit("{}: info; starting recording for {} s".format(
            __class__.__name__, duration))

        try:
            # GPU resizes frames, and compresses to h264 stream
            self.camera.start_recording(filename + '.h264',
                                        format='h264',
                                        splitter_port=2,
                                        resize=self.videoFrameSize,
                                        sps_timing=True)
            wait_ms(duration * 1000)
            self.camera.stop_recording(splitter_port=2)

            # Wrap an MP4 box around the video
            nr_of_frames = check_output([
                "ffprobe", "-v", "error", "-count_frames", "-select_streams",
                "v:0", "-show_entries", "stream=nb_read_frames", "-of",
                "default=nokey=1:noprint_wrappers=1", filename + '.h264'
            ])
            real_fps = float(nr_of_frames) / duration  # frames per second, not seconds per frame
            self.postMessage.emit(
                "{}: info; video clip captured with real framerate: {} fps".
                format(__class__.__name__, real_fps))
            ##            run(["MP4Box", "-fps", str(self.frameRate), "-add", filename + '.h264:fps=' + str(real_fps), "-new", filename + '.mp4'])
            run([
                "MP4Box", "-fps",
                str(self.frameRate), "-add", filename + ".h264", "-new",
                filename + "_{}fr.mp4".format(int(nr_of_frames))
            ])
            run(["rm", filename + '.h264'])
        except Exception as err:
            self.postMessage.emit("{}: error; type: {}, args: {}".format(
                self.__class__.__name__, type(err), err.args))

        self.captured.emit()
        self.postMessage.emit("{}: info; video written to {}".format(
            __class__.__name__, filename))

    @pyqtSlot(str)
    def setStoragePath(self, path):
        self.storagePath = path

    @pyqtSlot(int)
    def setCropXp1(self, val):
        if 0 <= val <= self.cropRect[3]:
            self.cropRect[1] = val
        else:
            raise ValueError('crop x1')

    @pyqtSlot(int)
    def setCropXp2(self, val):
        if self.cropRect[1] < val < self.camera.resolution[1]:
            self.cropRect[3] = val
        else:
            raise ValueError('crop x2')

    @pyqtSlot(int)
    def setCropYp1(self, val):
        if 0 <= val <= self.cropRect[2]:
            self.cropRect[0] = val
        else:
            raise ValueError('crop y1')

    @pyqtSlot(int)
    def setCropYp2(self, val):
        if self.cropRect[0] < val < self.camera.resolution[0]:
            self.cropRect[2] = val
        else:
            raise ValueError('crop y2')
Example #19
class PiVideoStream(QThread):
    signals = ObjectSignals()
    snapshotTaken = pyqtSignal()
    clipRecorded = pyqtSignal()
    camera = PiCamera()

    ## @param ins is the number of instances created. This may not exceed 1.
    ins = 0

    def __init__(self):
        super().__init__()

        ## Instance limiter. Checks if an instance exists already. If so, it deletes the current instance.
        if PiVideoStream.ins >= 1:
            del self
            self.msg(
                "error; multiple instances of {:s} created, while only 1 instance is allowed"
                .format(__class__.__name__))
            return
        try:
            PiVideoStream.ins += 1
        except Exception as err:
            traceback.print_exc()
            self.signals.error.emit(
                (type(err), err.args, traceback.format_exc()))

        warnings.filterwarnings('default', category=DeprecationWarning)
        self.settings = QSettings("settings.ini", QSettings.IniFormat)
        self.loadSettings()
        self.initStream()

    def loadSettings(self):
        self.msg("info; loading camera settings from " +
                 self.settings.fileName())
        frame_size_str = self.settings.value('camera/frame_size')
        (width, height) = frame_size_str.split('x')
        self.camera.resolution = raw_frame_size((int(width), int(height)))
        self.camera.framerate = int(self.settings.value('camera/frame_rate'))
        self.camera.image_effect = self.settings.value('camera/effect')
        self.camera.shutter_speed = int(
            self.settings.value('camera/shutter_speed'))
        self.camera.iso = int(self.settings.value(
            'camera/iso'))  # should force unity analog gain
        # set parameters for speed
        frame_size_str = self.settings.value('image_frame_size')
        (width, height) = frame_size_str.split('x')
        self.image_size = (int(width), int(height))
        self.camera.video_denoise = False
        self.monochrome = True
        self.use_video_port = True
        # dunno if setting awb mode manually is really useful
##        self.camera.awb_mode = 'off'
##        self.camera.awb_gains = 5.0
##        self.camera.meter_mode = 'average'
##        self.camera.exposure_mode = 'auto'  # 'sports' to reduce motion blur, 'off'after init to freeze settings

    @pyqtSlot()
    def initStream(self):
        # Initialize the camera stream
        if self.isRunning():
            # in case init gets called, while thread is running
            self.msg("info; video stream is running already")
        else:
            # init camera and open stream
            if self.monochrome:
                ##            self.camera.color_effects = (128,128) # return monochrome image, not required if we take Y frame only.
                self.rawCapture = PiYArray(self.camera,
                                           size=self.camera.resolution)
                self.stream = self.camera.capture_continuous(
                    self.rawCapture, 'yuv', self.use_video_port)
            else:
                self.rawCapture = PiRGBArray(self.camera,
                                             size=self.camera.resolution)
                self.stream = self.camera.capture_continuous(
                    self.rawCapture, 'bgr', self.use_video_port)
            # allocate memory
            self.frame = np.empty(self.camera.resolution +
                                  (1 if self.monochrome else 3, ),
                                  dtype=np.uint8)
            # restart thread
            self.start()
            self.wait_ms(1000)
            self.msg("info; video stream initialized with frame size = " +
                     str(self.camera.resolution))

    @pyqtSlot()
    def run(self):
        try:
            self.fps = FPS().start()
            for f in self.stream:
                if self.isInterruptionRequested():
                    self.signals.finished.emit()
                    return


##                self.rawCapture.truncate(0)  # Deprecated: clear the stream in preparation for the next frame
                self.rawCapture.seek(0)
                self.frame = f.array  # grab the frame from the stream
                self.signals.ready.emit()
                self.signals.result.emit(
                    cv2.resize(
                        self.frame,
                        self.image_size))  # resize to speed up processing
                self.fps.update()
        except Exception as err:
            traceback.print_exc()
            self.signals.error.emit(
                (type(err), err.args, traceback.format_exc()))

    @pyqtSlot()
    def stop(self):
        self.msg("info; stopping")
        if self.isRunning():
            self.requestInterruption()
            self.wait_signal(self.signals.finished, 2000)
        self.fps.stop()
        self.quit()
        ##        self.frame.fill(0) # clear frame, not allowed since frame is read-only?
        ##        self.signals.ready.emit()
        ##        self.signals.result.emit(np.zeros(self.image_size)) # could produce an information image here
        self.msg(
            "info; video stream stopped, approx. acquisition speed: {:.2f} fps"
            .format(self.fps.fps()))

    def msg(self, text):
        if text:
            text = self.__class__.__name__ + ";" + str(text)
            print(text)
            self.signals.message.emit(text)

    @pyqtSlot()
    def changeCameraSettings(self,
                             frame_size=(640, 480),
                             frame_rate=24,
                             format='bgr',
                             effect='none',
                             use_video_port=False,
                             monochrome=True):
        '''
        The use_video_port parameter controls whether the camera's image port or video port is used to capture images.
        It defaults to False, which means the image port is used; this port is slower but produces better-quality pictures.
        '''
        self.stop()
        self.camera.resolution = raw_frame_size(frame_size)
        self.camera.framerate = frame_rate
        self.camera.image_effect = effect
        self.use_video_port = use_video_port
        self.monochrome = monochrome
        self.initStream()
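
    # Hedged usage sketch (not in the original source): assuming 'vs' is an
    # instance of this class with an open stream, switching to the video port
    # trades picture quality for capture speed:
    #
    #   vs.changeCameraSettings(frame_size=(640, 480), frame_rate=30,
    #                           use_video_port=True, monochrome=True)
    #
    # The slot stops the stream, applies the new camera parameters, and then
    # reopens the stream via initStream().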

    def wait_ms(self, timeout):
        ''' Block loop until timeout (ms) elapses.
        '''
        loop = QEventLoop()
        QTimer.singleShot(timeout, loop.exit)
        loop.exec_()

    def wait_signal(self, signal, timeout=1000):
        ''' Block loop until signal emitted, or timeout (ms) elapses.
        '''
        loop = QEventLoop()
        signal.connect(loop.quit)  # only quit is a slot of QEventLoop
        QTimer.singleShot(timeout, loop.exit)
        loop.exec_()
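
    # Hedged usage sketch (not in the original source): these helpers block the
    # caller while keeping the Qt event loop spinning, e.g. waiting for the next
    # frame before writing it, as snapshot() below does with a 5 s timeout:
    #
    #   self.wait_signal(self.signals.ready, 5000)
    #   cv2.imwrite(filename, self.frame)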

    @pyqtSlot(str)
    def snapshot(self, filename_prefix=None):
        # open path
        (head, tail) = os.path.split(filename_prefix)
        if not os.path.exists(head):
            os.makedirs(head)
        filename = os.path.sep.join([
            head, '{:016d}_'.format(round(time.time() * 1000)) + tail + '.png'
        ])

        # write image
        self.wait_signal(self.signals.ready,
                         5000)  # wait for first frame to be shot
        cv2.imwrite(filename, self.frame)
        self.msg("info; image written to " + filename)

    @pyqtSlot(str, int)
    def recordClip(self, filename_prefix=None, duration=10):
        # open path
        (head, tail) = os.path.split(filename_prefix)
        if not os.path.exists(head):
            os.makedirs(head)
        filename = os.path.sep.join([
            head, '{:016d}_'.format(round(time.time() * 1000)) + tail + '.avi'
        ])

        self.msg(
            "TODO; changing camera settings may get the process killed after several hours; "
            "it is probably better to open the stream at video resolution from the start "
            "if video recording is required!")

        # set video clip parameters
        frame_size_str = self.settings.value('camera/clip_frame_size')
        (width, height) = frame_size_str.split('x')
        frame_size = raw_frame_size((int(width), int(height)))
        frame_rate = int(self.settings.value('camera/clip_frame_rate'))
        self.changeCameraSettings(frame_size=frame_size,
                                  frame_rate=frame_rate,
                                  use_video_port=True,
                                  monochrome=False)

        # define the codec and create VideoWriter object
        fourcc = cv2.VideoWriter_fourcc(*'XVID')
        out = cv2.VideoWriter(filename, fourcc, frame_rate, frame_size)
        self.msg("info; start recording video to " + filename)

        # write file
        for i in range(0, duration * frame_rate):
            self.signals.progress.emit(
                int(100 * i / (duration * frame_rate - 1)))
            self.wait_signal(self.signals.ready, 1000)
            if self.frame is not None:
                out.write(self.frame)

        # close
        out.release()
        self.msg("info; recording done")

        ##        self.camera.start_recording(filename)
        ##        self.camera.wait_recording(duration)
        ##        self.camera.stop_recording()

        # revert to original parameters
        self.loadSettings()
        self.initStream()
        self.clipRecorded.emit()
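
# Hedged usage sketch (not part of the original example): wiring the stream into
# a Qt application. The class name is assumed (the definition opens above this
# excerpt) and 'main_window.update_image' is a hypothetical receiving slot:
#
#   vs = PiVideoStream()
#   vs.initStream()
#   vs.signals.result.connect(main_window.update_image)  # hypothetical slot
#   vs.snapshot('captures/test')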
Example #20
0
     if tas is None:
         pressed = pygame.key.get_pressed()
         controller = emulator.controllers[0]
         if pressed[pygame.K_RETURN]:
             controller.set_key(Controller.BUTTON_START)
         if pressed[pygame.K_RSHIFT]:
             controller.set_key(Controller.BUTTON_SELECT)
         if pressed[pygame.K_a]:
             controller.set_key(Controller.BUTTON_LEFT)
         if pressed[pygame.K_d]:
             controller.set_key(Controller.BUTTON_RIGHT)
         if pressed[pygame.K_w]:
             controller.set_key(Controller.BUTTON_UP)
         if pressed[pygame.K_s]:
             controller.set_key(Controller.BUTTON_DOWN)
         if pressed[pygame.K_j]:
             controller.set_key(Controller.BUTTON_B)
         if pressed[pygame.K_k]:
             controller.set_key(Controller.BUTTON_A)
     if tas is not None:
         tas.update_controller(emulator.controllers[0])

     emulator.tick(screen)
     keep_going = screen.render()
     
     fps.update(target_fps)
     
     
 except Exception as e:
     print('Exception occurred: ' + str(e))
     keep_going = False
Example #21
0
class PiVideoStream(QThread):
    image = None
    finished = pyqtSignal()
    postMessage = pyqtSignal(str)
    frame = pyqtSignal(np.ndarray)
    progress = pyqtSignal(int)
    captured = pyqtSignal()

    camera = PiCamera()
    storagePath = None
    cropRect = [0] * 4

    ## @param ins is the number of instances created. This may not exceed 1.
    ins = 0

    def __init__(self):
        super().__init__()

        ## Instance limiter. Checks if an instance exists already; if so, it
        ## refuses to initialize this one and returns early.
        if PiVideoStream.ins >= 1:
            self.postMessage.emit(
                "{}: error; multiple instances created, while only 1 instance is allowed"
                .format(__class__.__name__))
            return
        try:
            PiVideoStream.ins += 1
        except Exception as err:
            self.postMessage.emit("{}: error; type: {}, args: {}".format(
                self.__class__.__name__, type(err), err.args))
        else:
            warnings.filterwarnings('default', category=DeprecationWarning)
            self.settings = QSettings("settings.ini", QSettings.IniFormat)
            self.loadSettings()
##            self.initStream()

    def loadSettings(self):
        self.postMessage.emit(
            "{}: info; loading camera settings from {}".format(
                self.__class__.__name__, self.settings.fileName()))

        # load
        self.monochrome = self.settings.value('camera/monochrome',
                                              False,
                                              type=bool)
        self.use_video_port = self.settings.value('camera/use_video_port',
                                                  False,
                                                  type=bool)
        self.sensor_mode = int(self.settings.value('camera/sensor_mode'))

        # set frame sizes
        if self.sensor_mode == 0:
            self.frame_size = (4056, 3040)
        elif self.sensor_mode == 1:
            self.frame_size = (1920, 1080)
        elif self.sensor_mode == 2 or self.sensor_mode == 3:
            self.frame_size = (3280, 2464)
        elif self.sensor_mode == 4:
            self.frame_size = (1640, 1232)
        elif self.sensor_mode == 5:
            self.frame_size = (1640, 922)
        elif self.sensor_mode == 6:
            self.frame_size = (1280, 720)
        elif self.sensor_mode == 7:
            self.frame_size = (640, 480)
        else:
            raise ValueError('unsupported camera/sensor_mode: {}'.format(
                self.sensor_mode))

        frame_size_str = self.settings.value('display_frame_size')
        (width, height) = frame_size_str.split('x')
        self.display_frame_size = (int(width), int(height))
        if not self.monochrome:
            self.display_frame_size = self.display_frame_size + (3, )

        # set more camera parameters
        self.camera.resolution = self.frame_size
        self.camera.sensor_mode = self.sensor_mode
        self.camera.framerate = int(self.settings.value('camera/frame_rate'))
        self.camera.image_effect = self.settings.value('camera/effect')
        self.camera.shutter_speed = int(
            self.settings.value('camera/shutter_speed'))
        self.camera.iso = int(self.settings.value(
            'camera/iso'))  # should force unity analog gain
        self.camera.video_denoise = self.settings.value('camera/video_denoise',
                                                        False,
                                                        type=bool)

        # dunno if setting awb mode manually is really useful
##        self.camera.awb_mode = 'off'
##        self.camera.awb_gains = 5.0
##        self.camera.meter_mode = 'average'
##        self.camera.exposure_mode = 'auto'  # 'sports' to reduce motion blur, 'off' after init to freeze settings

    @pyqtSlot()
    def initStream(self):
        # Initialize the camera stream
        if self.isRunning():
            # in case init gets called while the thread is already running
            self.postMessage.emit(
                "{}: error; video stream is already running".format(
                    __class__.__name__))
        else:
            # init camera and open stream
            if self.monochrome:
                ##            self.camera.color_effects = (128,128) # return monochrome image, not required if we take Y frame only.
                self.rawCapture = PiYArray(self.camera,
                                           size=self.camera.resolution)
                self.stream = self.camera.capture_continuous(
                    self.rawCapture, 'yuv', self.use_video_port)
            else:
                self.rawCapture = PiRGBArray(self.camera,
                                             size=self.camera.resolution)
                self.stream = self.camera.capture_continuous(
                    self.rawCapture, 'bgr', self.use_video_port)
            # allocate memory
            self.image = np.empty(self.camera.resolution +
                                  (1 if self.monochrome else 3, ),
                                  dtype=np.uint8)
            # init crop rectangle
            if self.cropRect[2] == 0:
                self.cropRect[2] = self.image.shape[1]
            if self.cropRect[3] == 0:
                self.cropRect[3] = self.image.shape[0]
            # restart thread
            self.start()
            wait_ms(1000)
            msg = "{}: info; video stream initialized with frame size = {} and {:d} channels".format(\
                __class__.__name__, str(self.camera.resolution), 1 if self.monochrome else 3)
            self.postMessage.emit(msg)

    @pyqtSlot()
    def run(self):
        try:
            self.fps = FPS().start()
            for f in self.stream:
                if self.isInterruptionRequested():
                    self.finished.emit()
                    return
                self.rawCapture.seek(0)
                self.image = f.array  # grab the frame from the stream
                ##                # Crop
                ##                if (self.cropRect[2] > self.cropRect[0]) and (self.cropRect[3] > self.cropRect[1]):
                ##                    self.frame.emit(self.image[self.cropRect[0]:self.cropRect[2], self.cropRect[1]:self.cropRect[3]])
                # Emit resized frame for speed
                self.frame.emit(
                    cv2.resize(self.image, self.display_frame_size[:2]))
                self.fps.update()
        except Exception as err:
            self.postMessage.emit("{}: error; type: {}, args: {}".format(
                self.__class__.__name__, type(err), err.args))

    @pyqtSlot()
    def stop(self):
        self.postMessage.emit("{}: info; stopping".format(__class__.__name__))
        if self.isRunning():
            self.requestInterruption()
            wait_signal(self.finished, 2000)
        self.fps.stop()
        msg = "{}: info; approx. processing speed: {:.2f} fps".format(
            self.__class__.__name__, self.fps.fps())
        self.postMessage.emit(msg)
        print(msg)
        self.quit()

    @pyqtSlot(str)
    def takeImage(self, filename_prefix=None):
        if filename_prefix is not None:
            (head, tail) = os.path.split(filename_prefix)
            if not os.path.exists(head):
                os.makedirs(head)
            filename = os.path.sep.join([
                head,
                '{:016d}_'.format(round(time.time() * 1000)) + tail + '.png'
            ])
        else:
            filename = '{:016d}'.format(round(time.time() * 1000)) + '.png'
            # open path
            if self.storagePath is not None:
                filename = os.path.sep.join([self.storagePath, filename])

        # write image
        wait_signal(self.frame, 5000)  # wait for first frame to be shot
        cv2.imwrite(filename, self.image)
        self.captured.emit()
        self.postMessage.emit("{}: info; image written to {}".format(
            __class__.__name__, filename))

    @pyqtSlot(str, int)
    def recordClip(self, filename_prefix=None, duration=10):
        # open path
        (head, tail) = os.path.split(filename_prefix)
        if not os.path.exists(head):
            os.makedirs(head)
        filename = os.path.sep.join([
            head, '{:016d}_'.format(round(time.time() * 1000)) + tail + '.avi'
        ])

        ##"TODO; changing camera settings may get the process killed after several hours, probably better to open the stream in video resolution from the start if the videorecording is required!")

        # set video clip parameters
        self.stop()
        frame_size_str = self.settings.value('camera/clip_frame_size')
        (width, height) = frame_size_str.split('x')
        frame_size = (int(width), int(height))
        frame_rate = int(self.settings.value('camera/clip_frame_rate'))
        self.camera.resolution = frame_size
        self.camera.sensor_mode = int(
            self.settings.value('camera/clip_sensor_mode'))
        self.camera.framerate = frame_rate
        self.camera.image_effect = self.settings.value('camera/effect')
        self.use_video_port = True
        self.monochrome = False  # color frames, matching the VideoWriter default
        self.initStream()

        # define the codec and create VideoWriter object
        fourcc = cv2.VideoWriter_fourcc(*'XVID')
        out = cv2.VideoWriter(filename, fourcc, frame_rate, frame_size)
        self.postMessage.emit("{}: info; start recording video to {}".format(
            __class__.__name__, filename))

        # write file
        for i in range(0, duration * frame_rate):
            self.progress.emit(int(100 * i / (duration * frame_rate - 1)))
            wait_signal(self.frame, 1000)
            if self.image is not None:
                out.write(self.image)

        # close
        out.release()
        self.postMessage.emit("{}: info; recording done".format(
            __class__.__name__))

        ##        self.camera.start_recording(filename)
        ##        self.camera.wait_recording(duration)
        ##        self.camera.stop_recording()

        # revert to original parameters
        self.loadSettings()
        self.initStream()
        self.captured.emit()  # signal that the clip has been recorded

    @pyqtSlot(str)
    def setStoragePath(self, path):
        self.storagePath = path

    @pyqtSlot(int)
    def setCropXp1(self, val):
        if 0 <= val <= self.cropRect[3]:
            self.cropRect[1] = val
        else:
            raise ValueError('crop x1')

    @pyqtSlot(int)
    def setCropXp2(self, val):
        if self.cropRect[1] < val < self.camera.resolution[1]:
            self.cropRect[3] = val
        else:
            raise ValueError('crop x2')

    @pyqtSlot(int)
    def setCropYp1(self, val):
        if 0 <= val <= self.cropRect[2]:
            self.cropRect[0] = val
        else:
            raise ValueError('crop y1')

    @pyqtSlot(int)
    def setCropYp2(self, val):
        if self.cropRect[0] < val < self.camera.resolution[0]:
            self.cropRect[2] = val
        else:
            raise ValueError('crop y2')
Example #22
0
            #cv.circle(frame,(int(xe[n]),int(ye[n])),int(uncertainty), (255, 255, 0),1)
            cv.circle(frame, (int(xe[n]), int(ye[n])), 3, (0, 255, 255), -1)
        # Draw the predicted path
        for n in range(len(xp)):
            uncertaintyP = (xpu[n] + ypu[n]) / 2
            # Draw prediction (circles), with uncertainty as radius
            cv.circle(frame, (int(xp[n]), int(yp[n])), int(uncertaintyP),
                      (0, 0, 255))
            cv.circle(frame, (int(xp[n]), int(yp[n])), 3, (255, 255, 255), -1)

    ########## DISPLAY ##########
    # check to see if the frame should be displayed to our screen
    cv.imshow('Frame', frame)
    cv.imshow('Vision', mask)
    out.write(frame)

    # update the FPS counter
    streamfps.update()
    lastframe = frame_read

################### CLEARING UP ###################
# stop the timer and display information

streamfps.stop()
print("[INFO] elasped time: {:.2f}".format(streamfps.elapsed()))
print("[INFO] approx. FPS: {:.2f}".format(streamfps.fps_tot()))
print("Measurements: " + str(listPoints))

out.release()
stream.release()
cv.destroyAllWindows()
Example #23
0
fps.start()

timeOut = 0



while frames < 300:
  #print "Main Thread Queue - > ",frame_queue.qsize() 
  with queue_lock:
    if not frame_queue.empty():
      timeOut = 0
      frame = frame_queue.get()
      sal = MPSaliency(frame)
      sal_map = sal.get_saliency_map()
      #sal_frame = (sal_map*255).round().astype(np.uint8)
      #frame = cv2.cvtColor(sal_frame, cv2.COLOR_GRAY2BGR)
      #out.write(frame)
      frames = frames + 1
      fps.update()
    else:
        pass
  

fps.stop()
stream.terminate()
cv2.destroyAllWindows()
#out.release()

print "FPS :: ", fps.fps()
print "DUR :: ", fps.elapsed()
Example #24
0
rotate = 0


def signal_handler(sig, frame):
    print('You pressed Ctrl+C!')
    global running
    running = False


signal.signal(signal.SIGINT, signal_handler)

while running:

    # get frame
    frame = cam.get()
    fpss.update(verbose=True)

    # features
    mask = c_filter.apply_small(frame)
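    # The nested, progressively narrower slices below weight pixels nearer the
    # row center more heavily (center columns are counted multiple times); this
    # reading of the intent is an assumption, not stated in the original source.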
    reward = (mask[2, 0:20].sum() + mask[2, 1:19].sum() + mask[2, 2:18].sum() +
              mask[2, 3:17].sum() + mask[2, 4:16].sum() + mask[2, 5:15].sum() +
              mask[2, 6:14].sum() + mask[2, 7:13].sum() + mask[2, 8:12].sum() +
              mask[2, 9:11].sum())
    row_pos = c_filter.row_pos(mask)

    # set twist
    rotate = row_pos[3] * -0.45

    # reverse if no line
    b = mask.sum() < min_mask_sum
    if rev.update(b):
Example #25
0
class detectionBox:
    def __init__(self, camera_name, name, area, video_stream_reference, config,
                 runtime, db_reference):
        self.camera_name = camera_name
        self.name = name
        self.area = area  # bounding box for the search
        self.stream = video_stream_reference  # reference to the video feed
        self.old_detected_rect = []

        # threads cannot share alpr object, needs its own
        self.alpr = Alpr("us", config, runtime)
        self.alpr.set_top_n(1)

        self.fps = FPS().start_timer()

        self.stopped = False

        # stores license plate objects
        self.license_plate_list = []

        self.licensePlateService = licensePlateService(self,
                                                       db_reference).start()

    def start(self):
        Thread(target=self.update, args=()).start()
        return self

    def update(self):  # main method, do processing here
        while True:
            if self.stopped:
                return

            self.fps.update()

            # grab the most recent frame from the video thread
            frame = self.stream.read()
            frame_copy = copy.deepcopy(frame)
            cropped_frame = frame[int(self.area[1]):int(self.area[1] +
                                                        self.area[3]),
                                  int(self.area[0]):int(self.area[0] +
                                                        self.area[2])]

            # run the detector
            results = self.alpr.recognize_ndarray(cropped_frame)

            detected_rect = []
            # finds all rects in frame and stores in detectedRect
            if results['results']:
                for plate in results['results']:

                    # offset so points are relative to the frame, not cropped frame, use to find bounding rect
                    left_bottom = (plate['coordinates'][3]['x'] + self.area[0],
                                   plate['coordinates'][3]['y'] + self.area[1])
                    right_bottom = (plate['coordinates'][2]['x'] +
                                    self.area[0],
                                    plate['coordinates'][2]['y'] +
                                    self.area[1])
                    right_top = (plate['coordinates'][1]['x'] + self.area[0],
                                 plate['coordinates'][1]['y'] + self.area[1])
                    left_top = (plate['coordinates'][0]['x'] + self.area[0],
                                plate['coordinates'][0]['y'] + self.area[1])

                    all_points = np.array(
                        [left_bottom, right_bottom, left_top, right_top])
                    bounding_rect = cv2.boundingRect(all_points)  # X, Y, W, H

                    detected_rect.append(bounding_rect)

                    # convert lpr results into a license plate object and store in license_plate_list
                    plate_number = plate['plate']
                    #plate_image = frame[int(bounding_rect[1]):int(bounding_rect[1] + bounding_rect[3]), int(bounding_rect[0]):int(bounding_rect[0] + bounding_rect[2])]
                    plate_image = cv2.resize(frame_copy, (720, 480),
                                             interpolation=cv2.INTER_AREA)
                    plate_datetime = datetime.datetime.now()
                    plate_time = get_system_uptime()
                    plate_confidence = plate['confidence']
                    new_plate = licensePlate(plate_number, plate_image,
                                             plate_time, self.camera_name,
                                             self.name, plate_datetime,
                                             plate_confidence)
                    self.license_plate_list.append(new_plate)
                    self.licensePlateService.notify()  # help out the poor thread

                self.old_detected_rect = detected_rect  # detected_rect is rebuilt every frame, while old_detected_rect always keeps the latest detections

            else:
                # no results
                self.old_detected_rect = []

    def draw(self, frame):
        # Return the frame with detections and the search box drawn on it.
        # Note: the bounding boxes of the search areas should not overlap;
        # checking for overlaps could be done in the constructor (a sketch of
        # such a check follows this class).

        # draw plates detected
        # print(len(self.old_detected_rect))
        for rect in self.old_detected_rect:
            cv2.rectangle(frame, rect, (0, 255, 0), 2)

        # draw search box and name
        cv2.putText(frame, self.name, (self.area[0], self.area[1] - 5),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.7, (15, 15, 255), 2)
        cv2.rectangle(frame, self.area, (0, 0, 255), 2)

        # return drawn frame
        return frame

    def stop(self):
        self.fps.stop()
        print(self.name, "FPS: ", self.fps.fps())
        self.licensePlateService.stop()
        self.stopped = True
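
# Hedged sketch (not in the original source): a constructor-time check that the
# search areas (X, Y, W, H rectangles) do not overlap, as suggested by the
# comments in draw(); 'areas' is a hypothetical list of such rectangles.
def rects_overlap(a, b):
    # two rectangles overlap unless one lies fully left of or above the other
    return not (a[0] + a[2] <= b[0] or b[0] + b[2] <= a[0] or
                a[1] + a[3] <= b[1] or b[1] + b[3] <= a[1])

def assert_no_overlaps(areas):
    for i in range(len(areas)):
        for j in range(i + 1, len(areas)):
            if rects_overlap(areas[i], areas[j]):
                raise ValueError('search areas {} and {} overlap'.format(i, j))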