Example #1
def from_stream():

    fps = FPS().start()
    cam = WebcamVideoStream().start()

    max_frames = 50
    i = 0

    while True:

        frame = cam.read()

        if i > max_frames:

            fps.stop()
            print(fps.elapsed())
            print(fps.fps())
            break

        i += 1

        testcone(frame, stream=True)
        fps.update()
        cv2.imshow('', frame)
        cv2.waitKey(1)
Example #2
    def run(self):
        """
			run() must be called by the user to start, draw and refresh everything on the screen.

		"""

        # Initialize the window's FPS limiter.
        self.fps = FPS(1.0 / self.fps_limit)

        # Check if the window should close
        while not glfw.window_should_close(self.window):

            # Set the current glfw context to self.window.
            glfw.make_context_current(self.window)

            # Set the background color (black).
            glClearColor(self.background[0], self.background[1],
                         self.background[2], self.background[3])

            # Update/refresh the window.
            glfw.poll_events()
            glClear(GL_COLOR_BUFFER_BIT)

            # Check the fps counter to see if things should be drawn to the screen.
            if self.fps.tick(glfw.get_time()):
                self.on_run()
                glfw.swap_buffers(self.window)

        glfw.terminate()
Example #3
    def __init__(self):
        # self.cam = cv2.VideoCapture(0)
        # self.cam.set(3, 320)
        # self.cam.set(4, 240)
        self.cam = WebcamVideoStream(src=0, resolution=(640, 480)).start()
        self.fps = FPS().start()

        ret, self.frame = self.cam.read()

        self.suspend_tracking = SuspendTracking(teta=3)

        self.height, self.width = self.frame.shape[:2]
        self.kernel_erode = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,
                                                      (3, 3))
        self.kernel_dilate = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,
                                                       (7, 7))

        cv2.namedWindow('camshift')
        self.obj_select = RectSelector('camshift', self.onmouse)

        radius = 3
        n_point = 8 * radius
        self.lbpDesc = LBP(n_point, radius)

        self.HSV_CHANNELS = (
            (24, [0, 180], "hue"),  # Hue
            (8, [0, 256], "sat"),  # Saturation
            (8, [0, 256], "val")  # Value
        )

        self.show_backproj = False
        self.track_window = None
        self.histHSV = []
        self.track_box = None
Example #4
    def __init__(self, resource=None):
        super(CV2VideoStreamer, self).__init__()
        if resource is None:
            print "You must give an argument to open a video stream."
            print "  It can be a number as video device,  e.g.: 0 would be /dev/video0"
            print "  It can be a url of a stream,         e.g.: rtsp://wowzaec2demo.streamlock.net/vod/mp4:BigBuckBunny_115k.mov"
            print "  It can be a video file,              e.g.: samples/moon.avi"
            print "  It can be a class generating images, e.g.: TimeStampVideo"
            exit(0)

        # If given a number interpret it as a video device
        if isinstance(resource, int) or len(resource) < 3:
            self.resource_name = "/dev/video" + str(resource)
            resource = int(resource)
            self.vidfile = False
        else:
            self.resource_name = str(resource)
            self.vidfile = True
        print "Trying to open resource: " + self.resource_name

        if isinstance(resource, VideoSource):
            self.cap = resource
        else:
            self.cap = cv2.VideoCapture(resource)

        if not self.cap.isOpened():
            print "Error opening resource: " + str(resource)
            exit(0)
        self.fps = FPS().start()
Example #5
    def __init__(self, video_src):
        # Raspberry Pi IP
        #self.server_address='rtmp://localhost/dji/stream.h264'
        #self.server_address='rtmp://127.0.0.1:1935/dji'
        self.server_address = 'http://192.168.40.146:8000/stream.mjpg'
        #self.server_address='rtsp://:192.168.40.118/1'
        #self.server_address=0
        #self.server_address='udp://@:8000 --demux=h264'
        #self.cam = video.create_capture(self.server_address)
        self.cam = WebcamVideoStream(self.server_address).start()
        ret, self.frame = self.cam.read()
        self.fish_cali = fish_calibration(self.frame)
        self.drag_start = None
        self.list_camshift = []
        self.show_backproj = False
        self.newcamshift = None
        self.selection = None
        self.lock = False
        self.mdp = MyUdp()
        #self.count=0
        self.light = self.get_light()

        self.swicht = False
        #self.list_camshift.append(self.get_car('red.jpg',0))
        #self.list_camshift.append(self.get_car('green.jpg',1))

        self.fps = FPS().start()

        # WiFi module IP
        self.mdp.client_address = ('192.168.40.31', 8899)
        cv2.namedWindow('TUCanshift')
        cv2.setMouseCallback('TUCanshift', self.onmouse)
Example #6
def blocking_video_test():
    # grab a pointer to the video stream and initialize the FPS counter
    print("[INFO] sampling frames from webcam...")
    stream = cv2.VideoCapture(SRC)
    fps = FPS().start()

    # loop over some frames
    while fps._numFrames < 1000:
        # grab the frame from the stream and resize it to the
        # configured display width (VID_WIDTH)
        (grabbed, frame) = stream.read()
        frame = imutils.resize(frame, width=VID_WIDTH)

        # check to see if the frame should be displayed to our screen
        if DISPLAY:
            cv2.imshow("Frame", frame)
            key = cv2.waitKey(1) & 0xFF

        # update the FPS counter
        fps.update()

    # stop the timer and display FPS information
    fps.stop()
    print("[INFO] elasped time: {:.2f}".format(fps.elapsed()))
    print("[INFO] approx. FPS: {:.2f}".format(fps.fps()))

    # do a bit of cleanup
    stream.release()
    cv2.destroyAllWindows()
Example #7
def threaded_video_test():
    # create a *threaded* video stream, allow the camera sensor to warm up,
    # and start the FPS counter
    print("[INFO] sampling THREADED frames from webcam...")
    vs = WebcamVideoStream(src=SRC).start()
    fps = FPS().start()

    # loop over some frames...this time using the threaded stream
    while fps._numFrames < NUM_FRAMES:
        # grab the frame from the threaded video stream and resize it
        # to the configured display width (VID_WIDTH)
        frame = vs.read()
        frame = imutils.resize(frame, width=VID_WIDTH)

        # check to see if the frame should be displayed to our screen
        if DISPLAY:
            cv2.imshow("Frame", frame)
            key = cv2.waitKey(1) & 0xFF

        # update the FPS counter
        fps.update()

    # stop the timer and display FPS information
    fps.stop()
    print("[INFO] elasped time: {:.2f}".format(fps.elapsed()))
    print("[INFO] approx. FPS: {:.2f}".format(fps.fps()))

    # do a bit of cleanup
    cv2.destroyAllWindows()
    vs.stop()
Example #8
    def __init__(self, pace=None, **kwargs):
        super().__init__(**kwargs)
        # self._stop_event: set it to stop the thread
        self._stop_event = Event()
        self.fps = FPS()
        if pace:
            self.pacer = Pacer(pace)
        else:
            self.pacer = None
Example #9
class GUIFaceFinder:
    fps = FPS()
    state = IdleState()
    resized = False

    @staticmethod
    def css_roi_for_frame_shape(frame_shape):
        w_percent = 0.30
        h_percent = 0.15
        frame_h, frame_w, _ = frame_shape
        roi_top = int(frame_h * h_percent)
        roi_right = int(frame_w * (1 - w_percent))
        roi_bottom = int(frame_h * (1 - h_percent))
        roi_left = int(frame_w * w_percent)
        return (roi_top, roi_right, roi_bottom, roi_left)

    def __update_state(self, people, percent):
        if percent < 0.8:
            if self.state.__class__ != IdleState:
                self.state = IdleState()
        else:
            in_the_frame = [p for p in people if p.in_the_frame]

            if len(in_the_frame) == 0:
                if percent > 0.8:
                    if self.state.__class__ != SearchingState:
                        self.state = SearchingState()
            elif in_the_frame[0].id is None:
                if self.state.__class__ != NoMatchState:
                    self.state = NoMatchState()
            else:
                if self.state.__class__ != MatchState:
                    self.state = MatchState()

    def draw(self, frame, face, people, driver_license_face, percent):
        if not self.resized:
            (roi_top, _roi_right, roi_bottom, _roi_left) = GUIFaceFinder.css_roi_for_frame_shape(frame.shape)
            padding = 32
            banners_height = roi_top-padding
            rects_height = roi_bottom-roi_top

            IdleState.banner = imutils.resize(IdleState.banner, height=banners_height)
            SearchingState.banner = imutils.resize(SearchingState.banner, height=banners_height)
            MatchState.banner = imutils.resize(MatchState.banner, height=banners_height)
            NoMatchState.banner = imutils.resize(NoMatchState.banner, height=banners_height)

            SearchingState.rects = imutils.resize(driver_license_face, height=rects_height)
            MatchState.rects = imutils.resize(MatchState.rects, height=rects_height)
            NoMatchState.rects = imutils.resize(NoMatchState.rects, height=rects_height)
            self.resized = True

        self.__update_state(people, percent)
        self.fps.draw(frame)
        TimeAverageRenderer.draw_for_percentage(frame, percent)
        self.state.update(people)
        self.state.draw(frame, face)
Example #10
def load():
    random.seed()
    pygame.init()
    pygame.display.set_caption('Unnamed')
    G.screen_w      = SCREEN_W
    G.screen_h      = SCREEN_H
    G.screen_mode   = SCREEN_MODE
    G.screen        = pygame.display.set_mode([G.screen_w, G.screen_h], G.screen_mode)
    G.screen.fill(BG_COLOR)
    pygame.display.flip()
    G.background    = G.screen.copy()
    G.dirty_rects   = []
    G.fps           = FPS()
Example #11
    def __init__(self):
        # self.cam = cv2.VideoCapture(0)
        # self.cam.set(3, 320)
        # self.cam.set(4, 240)
        self.cam = WebcamVideoStream(src=0, resolution=(640, 480)).start()
        self.fps = FPS().start()

        ret, self.frame = self.cam.read()

        self.conf = {
            'ColorFrameNum': 7,
            'LBPFrameNum': 7,
            'MaxFrameDiffClr': 15,
            'MaxLBPFrameUpdate': 30,
            'L_Weight': 0.3,
            'A_Weight': 0.7,
            'B_Weight': 0.7
        }

        self.ColorCheck = AdaptiveThreshold(teta=3, max_lost_cnt=1)
        self.LBPCheck = AdaptiveThreshold(teta=2, max_lost_cnt=1)

        self.ColorDistance = LABDistance()
        self.LBPDistance = LocalBinaryPatterns(
            numPoints=8,
            radius=2,
            update_prev_hist=self.conf['MaxLBPFrameUpdate'])

        self.isLost = False
        self.isLBPLost = False

        self.height, self.width = self.frame.shape[:2]

        self.kernel_e = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))
        self.kernel_d = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (7, 7))
        self.kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5, 5))

        cv2.namedWindow('camshift')
        self.obj_select = RectSelector('camshift', self.onmouse)

        self.LAB_CHANNELS = (
            (24, [0, 256], "light"),  # L
            (24, [0, 256], "a"),  # a
            (24, [0, 256], "b")  # b
        )

        self.show_backproj = False
        self.track_window = None
        self.histLAB = []
        self.track_box = None
Example #12
    def test01():
        """can we instantiate? """
        fps = FPS().start()
        pacer = Pacer(DESIRED_FPS).start()

        while fps.n_frames < N_TEST_FRAMES:
            print(datetime.datetime.now())
            fps.update()
            pacer.update()

        fps.stop()
        print("[INFO] elasped time: {:.2f}".format(fps.elapsed()))
        print("[INFO] approx. FPS: {:.2f}".format(fps.fps()))
        print("[INFO] n_frames: %i" % fps.n_frames)
Example #13
    def __init__(self, src=0, resolution=(320, 240), framerate=32):
        # initialize the video camera stream and read the first frame
        # from the stream
        self.stream = cv2.VideoCapture(src)
        self.fps = FPS()
        if self.stream and self.stream.isOpened():
            self.stream.set(cv2.CAP_PROP_FRAME_WIDTH, resolution[0])
            self.stream.set(cv2.CAP_PROP_FRAME_HEIGHT, resolution[1])
            self.stream.set(cv2.CAP_PROP_FPS, framerate)
        (self.grabbed, self.frame) = self.stream.read()

        # initialize the variable used to indicate if the thread should
        # be stopped
        self.stopped = False
Example #14
    def __init__(self, camera, db_service, alpr_config, alpr_run_time, gui):
        self.stopped = False

        self.camera_name = camera.name
        self.cam = videoStream(src=camera.url)

        self.guiFPS = FPS()
        self.gui = gui
        self.detection_boxes = []
        for search_box in camera.aoi_list:
            for search_box_name in search_box:
                new_box = detectionBox(camera.name, search_box_name,
                                       search_box[search_box_name], self.cam,
                                       alpr_config, alpr_run_time, db_service)
                self.detection_boxes.append(new_box)
Example #15
	def begin(self):
		self.enemy = Entity(
			Vec2D(Xudia.renderer.width-10, Xudia.renderer.height/2),
			Xudia.Graphic('<<==\n|===\n<<=='),
			None
		)
		hitbox = Hitbox(self.enemy, 4, 3)
		self.enemy.hitbox = hitbox
		
		self.add_entity(self.enemy)

		self.add_entity(Ship(Xudia.renderer.width/2, Xudia.renderer.height/2))
		
		self.add_entity(FPS())

		Xudia.input.add_listener('q', Xudia.engine.stop)
Example #16
    def __init__(self):
        self.env = Environment()
        self.bot = Robot()
        self.fps = FPS()

        self.start = [18, 1]
        self.targetCoverage = 100
        self.timeAvailable = 360
        self.startTime = -1.0
        self.goalVisited = False

        self.executeEndSequence = False
        self.endSequence = []
        self.endSequenceIndex = 1

        self.realignASAP = False
        self.forwardcount = 0
Example #17
    def __init__(self, log=False):
        if str(log) == "True":
            dir_path = os.getcwd() + "/"
            file_name = time.strftime("%Y.%m.%d.%H.%M.%S") + ".log"
            log = dir_path + file_name
        self.log = log

        self.mtr = BigBoy()
        self.imu = IMU()
        self.fps = FPS(1.0)

        self.t0 = time.time()
        self.steps = 5
        self.max_steps = 60
        self.pos = 0
        self.threshold = 1.5
        self.tilt_constant = -25.0
Example #18
    def run(self):
        try:
            self.fps = FPS().start()
            for f in self.stream:
                if self.isInterruptionRequested():
                    self.finished.emit()
                    return
                self.rawCapture.seek(0)
                self.image = f.array  # grab the frame from the stream
                ## Crop (disabled):
                ## if (self.cropRect[2] > self.cropRect[0]) and (self.cropRect[3] > self.cropRect[1]):
                ##     self.frame.emit(self.image[self.cropRect[0]:self.cropRect[2], self.cropRect[1]:self.cropRect[3]])
                # Emit a resized frame for speed
                self.frame.emit(
                    cv2.resize(self.image, self.display_frame_size[:2]))
                self.fps.update()
        except Exception as err:
            self.postMessage.emit("{}: error; type: {}, args: {}".format(
                self.__class__.__name__, type(err), err.args))
Example #19
    def __init__(self, path, config):
        QWidget.__init__(self)

        self.path = path
        self.config = config

        self.setWindowTitle('AR4maps')
        self.move(0, 0)
        self.video_size = QSize(VIDEO.WIDTH, VIDEO.HEIGHT)
        self.setup_ui()

        self.markerImg = cv.imread(self.path + self.config['target'])
        # cv.imshow("target",targetImg)
        self._cam = Camera().start()
        self._track = Tracking(self.markerImg)
        self._rendering = Rendering(self.markerImg, self.config['coords'])
        self._fps = FPS()

        self.setup_render()
Example #20
    def __init__(self, video_src):
        # Raspberry Pi IP
        self.mdp = MyUdp()
        #self.server_address='http://%s:8000/stream.mjpg' % MyUdp.get_piIP('raspberrypi')
        self.server_address='http://192.168.56.240:8000/stream.mjpg'
        #self.server_address='rtmp://127.0.0.1/live/stream'
        #self.server_address='rtmp://127.0.0.1:1935/dji'
        #self.server_address='http://192.168.56.146:8000/stream.mjpg'
        #self.server_address='http://192.168.191.3:8000/stream.mjpg'

        #self.server_address='rtsp://:192.168.40.118/1'
        #self.server_address=0
        #self.server_address='udp://@:8000 --demux=h264'
        #self.cam = video.create_capture(self.server_address)
        self.cam = WebcamVideoStream(self.server_address).start()
        ret, self.frame = self.cam.read()
        self.fish_cali = fish_calibration(self.frame)
        self.drag_start = None
        self.list_camshift = []
        self.show_backproj = False
        self.newcamshift = None
        self.selection = None
        self.lock = False

        #self.count=0
        self.light = self.get_light()

        self.swicht = False
        #self.list_camshift.append(self.get_car('red.jpg',0))
        #self.list_camshift.append(self.get_car('yellow.jpg',1))
        #H,S
        self.BACKGROUND_PARAM = App.calc_HS(cv2.cvtColor(self.frame, cv2.COLOR_BGR2HSV))
        

        self.fps = FPS().start()

        # WiFi module IP
        #self.mdp.client_address=('192.168.56.31', 8899)
        # new car
        self.mdp.client_address = ('192.168.56.207', 8899)
        cv2.namedWindow('TUCanshift')
        cv2.setMouseCallback('TUCanshift', self.onmouse)
Example #21
def run_main():
    size = width, height = 1280, 720
    speed = [2, 2]
    black = 0, 0, 0

    screen = pygame.display.set_mode(size)

    sf = StarField(size)
    fps = FPS()

    while True:
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                sys.exit()

        screen.fill(black)

        sf.do_things(screen)
        fps.show_fps(screen)

        pygame.display.flip()
Example #22
    def __init__(self, camera_name, name, area, video_stream_reference, config,
                 runtime, db_reference):
        self.camera_name = camera_name
        self.name = name
        self.area = area  # bounding box for the search
        self.stream = video_stream_reference  # reference to the video feed
        self.old_detected_rect = []

        # threads cannot share alpr object, needs its own
        self.alpr = Alpr("us", config, runtime)
        self.alpr.set_top_n(1)

        self.fps = FPS().start_timer()

        self.stopped = False

        # stores license plate objects
        self.license_plate_list = []

        self.licensePlateService = licensePlateService(self,
                                                       db_reference).start()
Example #23
    def run(self):
        try:
            self.fps = FPS().start()
            for f in self.captureStream:
                if self.isInterruptionRequested():
                    break
                self.rawCapture.seek(0)
                img = f.array  # grab the frame from the stream
                self.frame.emit(img)  #cv2.resize(img, self.frameSize[:2]))
                self.fps.update()

##                # Grab jpeg from an mpeg video stream
##                self.videoStream.seek(0)
##                buf = self.videoStream.read()
##                if buf.startswith(b'\xff\xd8'):
##                    # jpeg magic number is detected
##                    flag = cv2.IMREAD_GRAYSCALE if self.monochrome else cv2.IMREAD_COLOR
##                    img = cv2.imdecode(np.frombuffer(buf, dtype=np.uint8), flag)
##                    self.frame.emit(img)
##                    self.fps.update()
##                    self.videoStream.truncate(0)

        except Exception as err:
            self.postMessage.emit("{}: error; type: {}, args: {}".format(
                self.__class__.__name__, type(err), err.args))
        finally:
            self.fps.stop()
            img = np.zeros(shape=(self.frameSize[1], self.frameSize[0]),
                           dtype=np.uint8)
            cv2.putText(
                img, 'Camera suspended',
                (int(self.frameSize[0] / 2) - 150, int(self.frameSize[1] / 2)),
                cv2.FONT_HERSHEY_SIMPLEX, 1, (255), 1)
            for i in range(5):
                wait_ms(100)
                self.frame.emit(img)
            msg = "{}: info; finished, approx. processing speed: {:.2f} fps".format(
                self.__class__.__name__, self.fps.fps())
            self.postMessage.emit(msg)
            self.finished.emit()
Example #24
    def run(self):
        try:
            self.fps = FPS().start()
            for f in self.stream:
                if self.isInterruptionRequested():
                    self.signals.finished.emit()
                    return


##                self.rawCapture.truncate(0)  # Deprecated: clear the stream in preparation for the next frame
                self.rawCapture.seek(0)
                self.frame = f.array  # grab the frame from the stream
                self.signals.ready.emit()
                self.signals.result.emit(
                    cv2.resize(
                        self.frame,
                        self.image_size))  # resize to speed up processing
                self.fps.update()
        except Exception as err:
            traceback.print_exc()
            self.signals.error.emit(
                (type(err), err.args, traceback.format_exc()))
Example #25
# DEBUG values
SET_GUI = False  # Do or do not show GUI
DEBUG_MOTORSPEED = False  # Do or do not write motor speed commands on console
DEBUG_TIMING = False  # Do or do not write how much time each processing step takes on console
DEBUG_CIRCLEPOS = True  # Do or do not write detected circle position on console

# Initialize the motor object
motor = mw.MyMotor("/dev/ttyACM0", 115200)
motor.pwm = 50

# initialize the camera
width = 320
height = 240
camera = PiVideoStream((width, height), 30).start()
counter = FPS()
counter.start()
# allow the camera to warm up before capturing frames
time.sleep(0.5)

# detection variables
posX = None  # X position
posX_prev = 0  # X position in the previous iteration
posY = None  # Y position
posX_exp_filter_coeff = 0.8  # The amount of how much the current measurement changes the position. [0,1]. current = alpha * measurement + (1-alpha) * previous
radius = None  # Circle radius
radius_prev = 0  # Previous circle radius
rad_exp_filter_coeff = 0.8  # The amount of how much the current measurement changes the radius. [0,1]. current = alpha * measurement + (1-alpha) * previous
speed = 0  # Speed to send to the motor controller
angleCorr = 0  # The difference between the two tracks so the robot turns
roi = None  # Part of the image where we expect to find the ball
Example #26
if run_on_pi:
    show_frame = False
    filename = None

KEY_ESC = 27

# initialize objects
cam = Camera(filename)
disp = Display('Barry', show_frame)
disp_mask = Display('Barry2', show_frame)
rec = Recorder('./barry.avi', 20, (320, 240), sparse=1)
twist = Twist(forward=0.5)
rev = Reverse(30, 60)
ada_drive = AdaDrive()
fpss = FPS(1.0)
c_filter = Filter(filter_values[time_of_day])
#rl = RLStateAction((5, 20), mimic_param, state_action_param)
rl = RLStateAction((5, ), mimic_param, state_action_param)

# initialize variables
running = True
key = -1
frame_count = 0
rotate = 0


def signal_handler(sig, frame):
    print('You pressed Ctrl+C!')
    global running
    running = False
Example #27
    if window_title: cv2.imshow(window_title, img)


captions = []


def caption(write_args, duration=1):
    global captions, fps
    frames = int(duration * fps.fps())
    captions.append([frames, *write_args])
    print(write_args[0])


print(__doc__)

fps = FPS(60)
cap = Camera(mirror=True).start()
#TODO: bg1 = cv2.BackgroundSubtractorMOG2(history=3, nmixtures=5, backgroundRatio=0.0001)
w = cap.width or 640
h = cap.height or 480
ball_color = random_color()
r = 20
x = w // 2 - r // 2
y = h // 2 - r // 2
dx = dy = w // fps.limit // 2  # Ball crosses screen in about 2 seconds.

tolerance = 34  # Bounce at this amount of color change.
window_title = 'Video Pong'
countdown = old_frame_count = 0
img = new_img = diff = video = None
fps.start()
Example #28
    def follow(self):

        # Find OpenCV version
        (major_ver, minor_ver, subminor_ver) = (cv2.__version__).split('.')

        if int(major_ver) == 3 and int(minor_ver) < 3:
            tracker = cv2.Tracker_create(self.tracker_algorithm.upper())

        else:
            # Initialize a dictionary that maps strings to their corresponding
            # OpenCV object tracker implementations
            OPENCV_OBJECT_TRACKERS = {
                "csrt": cv2.TrackerCSRT_create,
                "kcf": cv2.TrackerKCF_create,
                "boosting": cv2.TrackerBoosting_create,
                "mil": cv2.TrackerMIL_create,
                "tld": cv2.TrackerTLD_create,
                "medianflow": cv2.TrackerMedianFlow_create,
                "mosse": cv2.TrackerMOSSE_create
            }

            # Grab the appropriate object tracker using our dictionary of
            # OpenCV object tracker objects
            tracker = OPENCV_OBJECT_TRACKERS[self.tracker_algorithm]()

        cap = cv2.VideoCapture(self.video)

        if not cap.isOpened():
            print("Error opening video stream or file")
            exit()

        # Loop over frames from the video stream
        while cap.isOpened():
            retval, frame = cap.read()
            if not retval:
                break
            # Resize the frame (so we can process it faster)
            frame = cv2.resize(frame, (640, 480))
            # Grab the frame dimensions
            (H, W) = frame.shape[:2]

            # Check to see if we are currently tracking an object
            if self.roi is not None:
                # Grab the new bounding box coordinates of the object
                (success, box) = tracker.update(frame)

                # Check to see if the tracking was a success
                if success:
                    (x, y, w, h) = [int(v) for v in box]
                    cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0),
                                  2)

                # Update the FPS counter
                self.fps.update()
                self.fps.stop()

                # Initialize the set of information we'll be displaying on the frame
                info = [
                    ("Tracking Algorithm", self.tracker_algorithm),
                    ("Success", "Yes" if success else "No"),
                    ("FPS", "{:.2f}".format(self.fps.fps())),
                ]

                # Loop over the info tuples and draw them on our frame
                for (i, (k, v)) in enumerate(info):
                    text = "{}: {}".format(k, v)
                    cv2.putText(frame, text, (10, H - ((i * 20) + 20)),
                                cv2.FONT_HERSHEY_SIMPLEX, 0.75, (0, 0, 255), 1)

            # Display the resulting frame
            cv2.imshow('Frame', frame)

            key = cv2.waitKey(1) & 0xFF

            if key == ord('r'):
                self.roi = None
                tracker.update(frame)

            # If the 's' key is selected, we are going to "select" a bounding box to track
            if key == ord('s'):
                # Select the bounding box of the object we want to track (make
                # sure you press ENTER or SPACE after selecting the ROI)
                self.roi = cv2.selectROI('Frame',
                                         frame,
                                         fromCenter=False,
                                         showCrosshair=True)
                # Start OpenCV object tracker using the supplied bounding box
                # coordinates, then start the FPS throughput estimator as well
                tracker.init(frame, self.roi)
                self.fps = FPS().start()

            # If the 'q' key was pressed, break from the loop
            elif key == ord('q'):
                break

        cap.release()
        cv2.destroyAllWindows()
Example #29
    #cap = cv2.VideoCapture('http://192.168.40.146:8000/stream.mjpg')
    #cap = cv2.VideoCapture(source)
    cap = cv2.VideoCapture(source)
    time.sleep(2)

    cap.set(cv2.CAP_PROP_SETTINGS, 1)
    #AWC=cap.get(cv2.CAP_PROP_AUTOFOCUS)

    #AWC=cap.get(cv2.CAP_PROP_IOS_DEVICE_WHITEBALANCE)
    #print AWC
    return cap


if __name__ == '__main__':
    from fps import FPS
    fps = FPS().start()
    cap = cv2.VideoCapture(1)
    #cap.set(cv2.CAP_PROP_FRAME_WIDTH, 640);
    #cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 480);
    time.sleep(2)
    #print cap.set(cv2.CAP_PROP_WHITE_BALANCE_BLUE_U,4000)

    #AWC=cap.set(cv2.CAP_PROP_FPS,0)
    while True:
        _, frame = cap.read()
        #print cap.get(cv2.CAP_PROP_XI_AUTO_WB)
        f = fps.approx_compute()
        cv2.putText(frame, 'FPS {:.3f}'.format(f), (10, 10),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.3, (255, 255, 255), 1,
                    cv2.LINE_AA)
        cv2.imshow('test', frame)
        if cv2.waitKey(1) & 0xFF == ord('q'):  # waitKey is needed so imshow actually refreshes
            break
Example #30
ye = None

lastframe = None

################### START VIDEOSTREAM ###################
print("[INFO] sampling frames from webcam...")

# Check if we have a video or a webcam
if video_file is not None:
    stream = cv.VideoCapture(video_file)
    fps = stream.get(cv.CAP_PROP_FPS)
# otherwise, we are reading from a webcam
else:
    stream = WebcamVideoStream(src=0).start()
# measurements
streamfps = FPS().start()

out = cv.VideoWriter('outframe.avi', cv.VideoWriter_fourcc('M', 'J', 'P', 'G'),
                     15, (frame_w, frame_h))

################### START CAPTURING ###################
# loop over every frame
while True:
    key = cv.waitKey(40) & 0xFF
    #if key== ord("c"): crop = True # Crop only to the region of interest
    if key == ord("p"):
        P = np.diag([100, 100, 100, 100])**2  # Make the filter uncertain again
    if key == ord("q") or key == 27:
        break  # quitting when ESCAPE or q is pressed
    if key == ord(" "):
        pause = not pause  # pause when spacebar is pressed, unpause when pressed again