Example #1
    def __init__(self, name, src, width, height, exposure, set_fps=30):

        # Default fps to 30

        print("Creating Capture for " + name)

        self._lock = Lock()
        self._condition = Condition()
        self.fps = Rate()
        self.set_fps = set_fps
        self.duration = Duration()
        self.name = name
        self.exposure = exposure
        self.iso = 800
        self.brightness = 1
        self.src = src
        self.width = width
        self.height = height

        # initialize the variable used to indicate if the thread should
        # be stopped
        self._stop = False
        self.stopped = True

        self.grabbed = False
        self.frame = None
        self.timestamp = "timestamp_goes_here"
        self.outFrame = None
        self.count = 0
        self.outCount = self.count

        self.monochrome = False

        print("Capture created for " + self.name)
Example #2
 def __init__(self, src = 0):
     self.src = src
     self.cam = cv2.VideoCapture(f'rtsp://*****:*****@192.168.0.15:554//h264Preview_0{self.src}_sub')
     self.img = None
     self.count = 0
     self.fps = Rate()
     self.timestamp = 0
     self.event = Event()
     self.running = False
Example #3
 def __init__(self, process):
     self._process = process
     self.running = False
     self.stopped = True
     self._count = 0
     self.count = self._count
     self._meta = []
     self.meta = self._meta.copy()
     self.event = Event()
     self.fps = Rate()
Example #4
 def __init__(self, src=0):
     self.src = src
     self.cam = cv2.VideoCapture(src)
     self.cam.set(cv2.CAP_PROP_FRAME_WIDTH, 640)  #1280)
     self.cam.set(cv2.CAP_PROP_FRAME_HEIGHT, 480)  #720)
     self.img = None
     self.count = 0
     self.fps = Rate()
     self.timestamp = 0
     self.event = Event()
     self.running = False
Example #5
    def __init__(self, address, port, cam_num, verbose=False):

        self.src = int(cam_num)
        self.count = 0
        self.fps = Rate()
        self.timestamp = 0
        self.address = address
        self.port = port
        self.verbose = verbose

        self.camProcess = None
        self.cam_queue = None
        self.stopbit = None
        self.camlink = f'rtsp://{rtsp_user}:{password}@{rtsp_ip}//h264Preview_0{self.src}_sub'
        self.framerate = 7
Example #6
    def __init__(self, src=0):
        self.src = src
        self.cam = cv2.VideoCapture(
            f'rtsp://{rtsp_user}:{password}@{rtsp_ip}//h264Preview_0{self.src}_sub'
        )

        if not self.cam.isOpened():
            print("\n\nVideoCapture Failed!\n\n")
        else:
            print('\n\nVideoCapture SUCCESS!\n\n')
        self.img = None
        self.count = 0
        self.fps = Rate()
        self.timestamp = 0
        self.event = Event()
        self.running = False
        self.stopped = False
Example #7
    def __init__(self, mode, cams, procs):
        print("Creating Display")
        self.fps = Rate()
        self.duration = Duration()
        self.mode = mode
        self.cams = cams
        self.procs = procs

        self._frame = None
        self.frame = None
        self.count = 0
        self.isNew = False

        # initialize the variable used to indicate if the thread should
        # be stopped
        self._stop = False
        self.stopped = True

        print("Display created")
Example #8
def process(address, port, cam_num, verbose=False):
    """Image processing loop

    Args:
        address (string): ip address to send stream frames
        port (string): ip port to send stream frames
        cam_num (integer): camera number to stream
        verbose (bool, optional): [description]. Defaults to False.
    """
    print(f'PROCESS = {address}, {port}, {cam_num}')
    with ImageCapture(int(cam_num)) as cam:
        cam.start()

        context = zmq.Context()
        socket = context.socket(zmq.PUB)
        socket.connect('tcp://' + address + ':' + port)
        socket.set_hwm(1)

        stream = True

        running = True

        fps = Rate()
        fps.start()

        lastframecount = 0

        frame = Frame()
        frame.srcid = cam.src

        next_time = time.perf_counter() + 1.0

        while running:
            frame.count, frame.img, frame.timestamp = cam.read()
            frame.camfps = cam.fps.fps()

            if time.perf_counter() > next_time:
                next_time += 1.0
                if verbose:
                    print(f'FPS = {frame.camfps}')

            if verbose and frame.img is not None:
                cv2.imshow(f'CAM {frame.srcid}', frame.img)
                cv2.waitKey(1)

            if frame.count is not None:
                if frame.count != lastframecount:
                    lastframecount = frame.count
                    if stream:
                        socket.send_pyobj(frame)

                    fps.update()
                    frame.streamfps = fps.fps()
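The loop above only shows the publishing side. A minimal receiving sketch under the same assumptions is shown below; the bind address and port are placeholders that must match the sender, and the Frame class must be importable so recv_pyobj() can unpickle the objects.

import cv2
import zmq

# Minimal SUB-side sketch for the publisher above (endpoint is a placeholder).
context = zmq.Context()
socket = context.socket(zmq.SUB)
socket.setsockopt(zmq.SUBSCRIBE, b'')   # no topic filtering; take every message
socket.set_hwm(1)                       # keep only the newest frame, like the sender
socket.bind('tcp://*:5555')

while True:
    frame = socket.recv_pyobj()         # Frame object sent with send_pyobj()
    if frame.img is not None:
        cv2.imshow(f'CAM {frame.srcid}', frame.img)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break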
Example #9
    def __init__(self, stream, ipdictionary, ipselection):
        print("Creating Processor for " + stream.name)
        self._lock = Lock()
        self._condition = Condition()
        self.fps = Rate()
        self.duration = Duration()
        self.stream = stream
        self.name = self.stream.name
        self.ipdictionary = ipdictionary
        self.ipselection = ipselection
        self.ip = self.ipdictionary[ipselection]

        self._frame = None
        self.frame = None
        self.count = 0
        self.isNew = False
        
        # initialize the variable used to indicate if the thread should
        # be stopped
        self._stop = False
        self.stopped = True

        print("Processor created for " + self.name)
Example #10
class ImageProcessor:
    def __init__(self, process):
        self._process = process
        self.running = False
        self.stopped = True
        self._count = 0
        self.count = self._count
        self._meta = []
        self.meta = self._meta.copy()
        self.event = Event()
        self.fps = Rate()

    def start(self, wait=True, timeout=5.0):
        # start the thread to read frames from the video stream
        print("STARTING ImageProcess...")
        t = Thread(target=self.update, args=())
        t.daemon = True
        t.start()
        start = time.time()
        if wait:
            while not self.isRunning() and ((time.time() - start) <= timeout):
                time.sleep(0.1)

        if not self.isRunning():
            print("WARNING: ImageProcess may not have started!!!")

        return self

    def stop(self, wait=True, timeout=5.0):
        self.running = False
        start = time.time()
        while not self.stopped and ((time.time() - start) <= timeout):
            time.sleep(0.1)

        if self.isRunning():
            print("WARNING: ImageProcess may not have stopped!!!")

    def isRunning(self):
        return self.running

    def process(self, source, count):
        if not self.event.is_set():
            #print(f"Triggering on {count}")
            # copy previous meta data and start a new processing cycle
            self.count = self._count
            self.meta = self._meta.copy()
            self.img = source
            self._count = count
            self.event.set()

        return (self.count, self.meta)

    def update(self):
        print("ImageProcessor STARTED!")
        self.fps.start()
        self.stopped = False
        self.running = True
        while (self.running):
            if self.event.wait(0.250):
                #print(f"IMAGE PROCESSING frame {self._count}")
                self._meta = self._process.process(source0=self.img,
                                                   overlay=False)
                #print(f"Frame {self._count} Processed")
                self.fps.update()
                self.event.clear()

        self.stopped = True
        print("ImageProcessor STOPPED")

    def overlay(self, meta, source):
        self._process.overlay(meta, source)
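The class above hands frames to the wrapped detector asynchronously: process() never blocks, it returns the count and metadata from the most recently completed cycle and only queues a new frame when the worker thread is idle. Below is a minimal usage sketch under that assumption; cam and yolo are placeholders (an ImageCapture-style source whose read() returns (count, img, timestamp), and a detector with process(source0=..., overlay=...) and overlay(meta, source) methods).

import cv2

# Hypothetical wiring: 'yolo' and 'cam' are placeholders, not defined on this page.
processor = ImageProcessor(yolo).start()

try:
    while True:
        count, img, timestamp = cam.read()
        if img is None:
            continue

        # Non-blocking hand-off: get results of the last finished cycle and,
        # if the worker is idle, submit this frame for the next cycle.
        procCount, meta = processor.process(img, count)

        # Draw the (possibly one-cycle-old) results onto the live frame.
        processor.overlay(meta, img)
        cv2.imshow('annotated', img)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
finally:
    processor.stop()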
Example #11
runTime = 0
nextTime = time.time() + 1

thismodname = os.path.splitext(os.path.basename(__file__))[0]
videofile = cv2.VideoWriter(thismodname + '.avi',
                            cv2.VideoWriter_fourcc('M', 'J', 'P', 'G'),
                            framerate, (displayWidth, displayHeight))

count = 0
record = False
stream = False

running = True

camfps = Rate()

camfps.start()

processor = ImageProcessor(yolo)
processor.start()

procCount = 0
meta = []

while (running):

    if (time.time() > nextTime):
        nextTime = nextTime + 1
        runTime = runTime + 1
Example #12
    def startMain(self):

        #set  queue size
        self.cam_queue = mp.Queue(maxsize=7)

        self.stopbit = mp.Event()
        self.camProcess = vs.StreamCapture(self.camlink, self.stopbit,
                                           self.cam_queue, self.framerate)
        self.t = Thread(target=self.camProcess.run)
        self.t.daemon = True
        self.t.start()

        context = zmq.Context()
        socket = context.socket(zmq.PUB)
        socket.connect('tcp://' + self.address + ':' + self.port)
        socket.set_hwm(1)

        fps = Rate()
        fps.start()

        lastframecount = 0

        frame = Frame()
        frame.srcid = self.src

        next_time = time.perf_counter() + 1.0

        try:
            self.fps.start()
            while True:

                if not self.cam_queue.empty():
                    # print('Got frame')
                    cmd, val = self.cam_queue.get()
                    self.timestamp = datetime.datetime.now()
                    self.fps.update()

                    # if cmd == vs.StreamCommands.RESOLUTION:
                    #     pass #print(val)

                    if cmd == vs.StreamCommands.FRAME:
                        frame.count += 1
                        frame.img = val
                        frame.timestamp = self.timestamp
                        frame.camfps = self.fps.fps()

                        if time.perf_counter() > next_time:
                            next_time += 1.0
                            if self.verbose:
                                print(
                                    f'FPS = {frame.camfps:.2f}  {frame.streamfps:.2f}'
                                )

                        if self.verbose and frame.img is not None:
                            cv2.imshow(f'CAM {frame.srcid}', frame.img)
                            cv2.waitKey(1)

                        if frame.count is not None:
                            if frame.count != lastframecount:
                                lastframecount = frame.count
                                socket.send_pyobj(frame)

                                fps.update()
                                frame.streamfps = fps.fps()
                else:
                    time.sleep(1 / self.framerate)

        except KeyboardInterrupt:
            print('Caught Keyboard interrupt')

        except Exception as e:
            print('Caught Main Exception')
            print(e)

        self.stopCamStream()
        cv2.destroyAllWindows()
Example #13
class mainStreamClass:
    def __init__(self, address, port, cam_num, verbose=False):

        self.src = int(cam_num)
        self.count = 0
        self.fps = Rate()
        self.timestamp = 0
        self.address = address
        self.port = port
        self.verbose = verbose

        self.camProcess = None
        self.cam_queue = None
        self.stopbit = None
        self.camlink = f'rtsp://{rtsp_user}:{password}@{rtsp_ip}//h264Preview_0{self.src}_sub'
        self.framerate = 7

    def startMain(self):

        #set  queue size
        self.cam_queue = mp.Queue(maxsize=7)

        self.stopbit = mp.Event()
        self.camProcess = vs.StreamCapture(self.camlink, self.stopbit,
                                           self.cam_queue, self.framerate)
        self.t = Thread(target=self.camProcess.run)
        self.t.daemon = True
        self.t.start()

        context = zmq.Context()
        socket = context.socket(zmq.PUB)
        socket.connect('tcp://' + self.address + ':' + self.port)
        socket.set_hwm(1)

        fps = Rate()
        fps.start()

        lastframecount = 0

        frame = Frame()
        frame.srcid = self.src

        next_time = time.perf_counter() + 1.0

        try:
            self.fps.start()
            while True:

                if not self.cam_queue.empty():
                    # print('Got frame')
                    cmd, val = self.cam_queue.get()
                    self.timestamp = datetime.datetime.now()
                    self.fps.update()

                    # if cmd == vs.StreamCommands.RESOLUTION:
                    #     pass #print(val)

                    if cmd == vs.StreamCommands.FRAME:
                        frame.count += 1
                        frame.img = val
                        frame.timestamp = self.timestamp
                        frame.camfps = self.fps.fps()

                        if time.perf_counter() > next_time:
                            next_time += 1.0
                            if self.verbose:
                                print(
                                    f'FPS = {frame.camfps:.2f}  {frame.streamfps:.2f}'
                                )

                        if self.verbose and frame.img is not None:
                            cv2.imshow(f'CAM {frame.srcid}', frame.img)
                            cv2.waitKey(1)

                        if frame.count is not None:
                            if frame.count != lastframecount:
                                lastframecount = frame.count
                                socket.send_pyobj(frame)

                                fps.update()
                                frame.streamfps = fps.fps()
                else:
                    time.sleep(1 / self.framerate)

        except KeyboardInterrupt:
            print('Caught Keyboard interrupt')

        except Exception as e:
            print('Caught Main Exception')
            print(e)

        self.stopCamStream()
        cv2.destroyAllWindows()

    def stopCamStream(self):
        print('in stopCamStream')

        if self.stopbit is not None:
            self.stopbit.set()
            # drain any frames still queued, then close the queue
            while not self.cam_queue.empty():
                try:
                    _ = self.cam_queue.get()
                except Exception:
                    break
            self.cam_queue.close()

            self.camProcess.join()

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        print(f'\n\n\n\nReleasing Camera {self.src}')
        try:
            self.stopCamStream()
        except:
            pass
Example #14
class Processor:
    def __init__(self, stream, ipdictionary, ipselection):
        print("Creating Processor for " + stream.name)
        self._lock = Lock()
        self._condition = Condition()
        self.fps = Rate()
        self.duration = Duration()
        self.stream = stream
        self.name = self.stream.name
        self.ipdictionary = ipdictionary
        self.ipselection = ipselection
        self.ip = self.ipdictionary[ipselection]

        self._frame = None
        self.frame = None
        self.count = 0
        self.isNew = False
        
        # initialize the variable used to indicate if the thread should
        # be stopped
        self._stop = False
        self.stopped = True

        print("Processor created for " + self.name)
        
    def start(self):
        print("STARTING Processor for " + self.name)
        t = Thread(target=self.update, args=())
        t.daemon = True
        t.start()
        return self

    def update(self):
        print("Processor for " + self.name + " RUNNING")
        # keep looping infinitely until the thread is stopped
        self.stopped = False
        self.fps.start()

        lastIpSelection = self.ipselection
        
        while True:
            # if the thread indicator variable is set, stop the thread
            if (self._stop == True):
                self._stop = False
                self.stopped = True
                return

            # otherwise, read the next frame from the stream
            # grab the frame from the threaded video stream
            (self._frame, count, timestamp, isNew) = self.stream.read()
            self.duration.start()
            self.fps.update()

            if (lastIpSelection != self.ipselection):
                self.ip = self.ipdictionary[self.ipselection]
                lastIpSelection = self.ipselection

            if (isNew == True):
                # TODO: Insert processing code then forward display changes
                self.ip.process(self._frame)
                
                # Now that image processing is complete, place results
                # into an outgoing buffer to be grabbed at the convenience
                # of the reader
                self._condition.acquire()
                self._lock.acquire()
                self.count = count
                self.isNew = isNew
                self.frame = self._frame.copy()
                self.timestamp = timestamp
                self._lock.release()
                self._condition.notify_all()
                self._condition.release()

            self.duration.update()
                
        print("Processor for " + self.name + " STOPPING")

    def updateSelection(self, ipselection):
        self.ipselection = ipselection

    def read(self):
        # return the frame most recently processed if the frame
        # is not being updated at this exact moment
        self._condition.acquire()
        self._condition.wait()
        self._condition.release()
        if self._lock.acquire(blocking=False):
            self.outFrame = self.frame
            self.outCount = self.count
            self.outTimestamp = self.timestamp
            self._lock.release()
            return (self.outFrame, self.outCount, self.outTimestamp, True)
        else:
            return (self.outFrame, self.outCount, "No Time Stamp", False)
          
    def stop(self):
        # indicate that the thread should be stopped
        self._stop = True
        self._condition.acquire()
        self._condition.notify_all()
        self._condition.release()

    def isStopped(self):
        return self.stopped
		
Example #15
class ImageCapture:
    def __init__(self, src = 0):
        self.src = src
        self.cam = cv2.VideoCapture(f'rtsp://*****:*****@192.168.0.15:554//h264Preview_0{self.src}_sub')
        self.img = None
        self.count = 0
        self.fps = Rate()
        self.timestamp = 0
        self.event = Event()
        self.running = False
        
    def start(self, wait = True, timeout = 5.0):        
        # start the thread to read frames from the video stream
        print("STARTING ImageCapture...")
        t = Thread(target=self.update, args=())
        t.daemon = True
        t.start()
        start = time.time()
        if wait:
            while not self.isRunning() and ((time.time() - start) <= timeout):
                time.sleep(0.1)

        if not self.isRunning():
            print("WARNING: ImageCapture may not have started!!!")

        return self

    def stop(self, wait = True, timeout = 5.0):
        self.running = False
        start = time.time()
        while not self.stopped and ((time.time() - start) <= timeout):
            time.sleep(0.1)

        if self.isRunning():
            print("WARNING: ImageProcess may not have stopped!!!")
        

    def isRunning(self):
        return self.running
    
    def read(self):
        if self.event.wait(0.250):
            return (self.count, self.img, self.timestamp)
        else:
            return (None, None, None)

    def update(self):
        print("ImageCapture STARTED!")
        self.fps.start()
        self.stopped = False
        self.running = True
        while (self.running):
            ret,self.img = self.cam.read()
            self.timestamp = datetime.datetime.now()
            if (ret):
                self.count += 1
                self.fps.update()
                self.event.set()
                

        self.stopped = True
        print("ImageCapture STOPPED")    
Example #16

cam = ImageCapture(int(args.n))
time.sleep(5)
cam.start()

import zmq
context = zmq.Context()
socket = context.socket(zmq.PUB)
socket.connect('tcp://'+args.address+':'+args.port)

stream = True

running = True

fps = Rate()
fps.start()

lastframecount = 0

class Frame:
    def __init__(self):
        self.timestamp = 0
        self.count = 0
        self.img = 0
        self.camfps = 0
        self.streamfps = 0
        self.srcid = 0

frame = Frame()
frame.srcid = cam.src
Example #17
class Display:
    def __init__(self, mode, cams, procs):
        print("Creating Display")
        self.fps = Rate()
        self.duration = Duration()
        self.mode = mode
        self.cams = cams
        self.procs = procs

        self._frame = None
        self.frame = None
        self.count = 0
        self.isNew = False

        # initialize the variable used to indicate if the thread should
        # be stopped
        self._stop = False
        self.stopped = True

        print("Display created")

    def start(self):
        print("STARTING Display")
        t = Thread(target=self.update, args=())
        t.daemon = True
        t.start()
        return self

    def setmode(self, mode):
        self.mode = mode

    def update(self):
        print("Display RUNNING")
        # keep looping infinitely until the thread is stopped
        self.stopped = False
        self.fps.start()

        while True:
            # if the thread indicator variable is set, stop the thread
            if (self._stop == True):
                self._stop = False
                self.stopped = True
                return

            try:
                camModeValue = self.mode
                cameraSelection = self.cams[camModeValue]
                processorSelection = self.procs[camModeValue]
            except KeyError:
                camModeValue = 'Default'
                cameraSelection = self.cams[list(self.cams.keys())[0]]
                processorSelection = self.procs[list(self.procs.keys())[0]]

            # otherwise, read the next frame from the stream
            # grab the frame from the threaded video stream

            (img, count, timestamp, isNew) = processorSelection.read()
            self.duration.start()
            self.fps.update()

            if (count != self.count):
                self.count = count
                camFps = cameraSelection.fps.fps()
                procFps = processorSelection.fps.fps()
                procDuration = processorSelection.duration.duration()

                cv2.putText(img, timestamp, (0, 20), cv2.FONT_HERSHEY_PLAIN, 1,
                            (0, 255, 0), 1)

                cv2.putText(
                    img, "CamFPS: {:.1f}".format(camFps) + " Exp: " +
                    str(cameraSelection.exposure) + " Frame: " +
                    str(self.count), (0, 40), cv2.FONT_HERSHEY_PLAIN, 1,
                    (0, 255, 0), 1)
                if (procFps != 0.0):
                    cv2.putText(
                        img, "ProcFPS: {:.1f}".format(procFps) +
                        " : {:.0f}".format(100 * procDuration * procFps) + "%",
                        (0, 60), cv2.FONT_HERSHEY_PLAIN, 1, (0, 255, 0), 1)

                cv2.putText(img, "Cam: " + camModeValue, (0, 80),
                            cv2.FONT_HERSHEY_PLAIN, 1, (0, 255, 0), 1)
                cv2.putText(img, "Proc: " + processorSelection.ipselection,
                            (0, 100), cv2.FONT_HERSHEY_PLAIN, 1, (0, 255, 0),
                            1)

                self.frame = img.copy()

            self.duration.update()

        print("Display for " + self.name + " STOPPING")

    def stop(self):
        # indicate that the thread should be stopped
        self._stop = True

    def isStopped(self):
        return self.stopped
Example #18
class Capture:
    def __init__(self, name, src, width, height, exposure, set_fps=30):

        # Default fps to 30

        print("Creating Capture for " + name)

        self._lock = Lock()
        self._condition = Condition()
        self.fps = Rate()
        self.set_fps = set_fps
        self.duration = Duration()
        self.name = name
        self.exposure = exposure
        self.iso = 800
        self.brightness = 1
        self.src = src
        self.width = width
        self.height = height

        # initialize the variable used to indicate if the thread should
        # be stopped
        self._stop = False
        self.stopped = True

        self.grabbed = False
        self.frame = None
        self.timestamp = "timestamp_goes_here"
        self.outFrame = None
        self.count = 0
        self.outCount = self.count

        self.monochrome = False

        print("Capture created for " + self.name)

    def start(self):

        # start the thread to read frames from the video stream
        print("STARTING Capture for " + self.name)
        t = Thread(target=self.update, args=())
        t.daemon = True
        t.start()
        return self

    def update(self):
        print("Capture for " + self.name + " RUNNING")

        # keep looping infinitely until the thread is stopped
        self.stopped = False
        self.fps.start()

        lastExposure = self.exposure

        # if platform.system() == "Linux":
        #     cmd = ['v4l2-ctl', '--device='+str(self.src),'--list-formats-ext']
        #     returned_output = subprocess.check_output(cmd)
        #     print(returned_output.decode("utf-8"))

        #     cmd = ['v4l2-ctl', '--list-ctrls']
        #     returned_output = subprocess.check_output(cmd)
        #     print(returned_output.decode("utf-8"))

        self.camera = cv2.VideoCapture(self.src, apiPreference=cv2.CAP_ANY)

        # OpenCV VideoCapture properties that can be set()
        # CV_CAP_PROP_POS_MSEC Current position of the video file in milliseconds.
        # CV_CAP_PROP_POS_FRAMES 0-based index of the frame to be decoded/captured next.
        # CV_CAP_PROP_POS_AVI_RATIO Relative position of the video file: 0 - start of the film, 1 - end of the film.
        # CV_CAP_PROP_FRAME_WIDTH Width of the frames in the video stream.
        # CV_CAP_PROP_FRAME_HEIGHT Height of the frames in the video stream.
        # CV_CAP_PROP_FPS Frame rate.
        # CV_CAP_PROP_FOURCC 4-character code of codec.
        # CV_CAP_PROP_FRAME_COUNT Number of frames in the video file.
        # CV_CAP_PROP_FORMAT Format of the Mat objects returned by retrieve() .
        # CV_CAP_PROP_MODE Backend-specific value indicating the current capture mode.
        # CV_CAP_PROP_BRIGHTNESS Brightness of the image (only for cameras).
        # CV_CAP_PROP_CONTRAST Contrast of the image (only for cameras).
        # CV_CAP_PROP_SATURATION Saturation of the image (only for cameras).
        # CV_CAP_PROP_HUE Hue of the image (only for cameras).
        # CV_CAP_PROP_GAIN Gain of the image (only for cameras).
        # CV_CAP_PROP_EXPOSURE Exposure (only for cameras).
        # CV_CAP_PROP_CONVERT_RGB Boolean flags indicating whether images should be converted to RGB.
        # CV_CAP_PROP_WHITE_BALANCE_U The U value of the whitebalance setting (note: only supported by DC1394 v 2.x backend currently)
        # CV_CAP_PROP_WHITE_BALANCE_V The V value of the whitebalance setting (note: only supported by DC1394 v 2.x backend currently)
        # CV_CAP_PROP_RECTIFICATION Rectification flag for stereo cameras (note: only supported by DC1394 v 2.x backend currently)
        # CV_CAP_PROP_ISO_SPEED The ISO speed of the camera (note: only supported by DC1394 v 2.x backend currently)
        # CV_CAP_PROP_BUFFERSIZE Amount of frames stored in internal buffer memory (note: only supported by DC1394 v 2.x backend currently)

        # print("SETTINGS: ",self.camera.get(cv2.CAP_PROP_SETTINGS))
        # print("FORMAT: ",self.camera.get(cv2.CAP_PROP_FORMAT))
        # print("MODE:", self.camera.get(cv2.CAP_PROP_MODE))
        # print("CHANNEL:", self.camera.get(cv2.CAP_PROP_CHANNEL))
        # print("AUTOFOCUS:", self.camera.get(cv2.CAP_PROP_AUTOFOCUS))
        # print("AUTOEXP:", self.camera.get(cv2.CAP_PROP_AUTO_EXPOSURE))
        # self.exposure = self.camera.get(cv2.CAP_PROP_EXPOSURE)
        # print("EXPOSURE:", self.exposure)
        #print("PIXFMT:",self.camera.get(cv2.CAP_PROP_CODEC_PIXEL_FORMAT))

        # if platform.system() == "Linux":
        #     cmd = ['v4l2-ctl', '-V']
        #     returned_output = subprocess.check_output(cmd)
        #     print(returned_output.decode("utf-8"))

        # print("----------------------")
        # self.camera.set(cv2.CAP_PROP_CHANNEL,1)
        # self.camera.set(cv2.CAP_PROP_AUTOFOCUS, 1)
        # self.camera.set(cv2.CAP_PROP_AUTO_EXPOSURE, 1)
        # print("CHANNEL:", self.camera.get(cv2.CAP_PROP_CHANNEL))
        # print("AUTOFOCUS:", self.camera.get(cv2.CAP_PROP_AUTOFOCUS))
        # print("AUTOEXP:", self.camera.get(cv2.CAP_PROP_AUTO_EXPOSURE))

        # self.camera.set(cv2.CAP_PROP_FRAME_WIDTH, self.width)
        # self.camera.set(cv2.CAP_PROP_FRAME_HEIGHT, self.height)
        # print(self.camera.get(cv2.CAP_PROP_FRAME_WIDTH), self.camera.get(cv2.CAP_PROP_FRAME_HEIGHT))

        # cmd = ['v4l2-ctl', '--set-fmt-video=pixelformat=MJPG']
        # returned_output = subprocess.check_output(cmd)
        # print(returned_output.decode("utf-8"))
        # if platform.system() == "Linux":
        #     cmd = ['v4l2-ctl', '-V']
        #     returned_output = subprocess.check_output(cmd)
        #     print(returned_output.decode("utf-8"))
        #     print(self.camera.get(cv2.CAP_PROP_FRAME_WIDTH), self.camera.get(cv2.CAP_PROP_FRAME_HEIGHT))

        # self.camera.setPixelFormat(VideoMode.PixelFormat.kYUYV)

        # self.camera.set(cv2.CAP_PROP_FPS, self.set_fps)

        # self.camera.setBrightness(1)
        #self.camera.set(cv2.CAP_PROP_BRIGHTNESS, self.brightness)

        # p = self.camera.enumerateVideoModes()
        # for pi in p:
        #     print(pi.fps, pi.height, pi.width, pi.pixelFormat)

        # self.setMonochrome(self.monochrome)

        count = 0
        while True:
            # if the thread indicator variable is set, stop the thread
            if (self._stop == True):
                self._stop = False
                self.stopped = True
                return

            # if (lastExposure != self.exposure):
            #     self.setExposure()
            #     lastExposure = self.exposure

            # Tell the CvSink to grab a frame from the camera and put it
            # in the source image.  If there is an error notify the output.
            #time, img = cvSink.grabFrame(img)
            ret_val, img = self.camera.read()
            timestamp = datetime.datetime.now()  # close but not exact, need to work out better sync

            if ret_val == 0:
                self._grabbed = False
                # Send the output the error.
                #self.outstream.notifyError(cvSink.getError())
                # skip the rest of the current iteration
                continue

            self._grabbed = True

            self.duration.start()
            self.fps.update()

            # if something was grabbed and retreived then lock
            # the outboundw buffer for the update
            # This limits the blocking to just the copy operations
            # later we may consider a queue or double buffer to
            # minimize blocking
            if (self._grabbed == True):

                timestamp_string = datetime.datetime.fromtimestamp(
                    timestamp.timestamp(), datetime.timezone.utc).isoformat()

                self._condition.acquire()
                self._lock.acquire()
                self.count = self.count + 1
                self.grabbed = self._grabbed
                self.frame = img.copy()
                self.timestamp = timestamp_string
                self._lock.release()
                self._condition.notify_all()
                self._condition.release()

            self.duration.update()

        print("Capture for " + self.name + " STOPPING")

    def read(self):
        # return the frame most recently read if the frame
        # is not being updated at this exact moment
        self._condition.acquire()
        self._condition.wait()
        self._condition.release()
        if self._lock.acquire(blocking=False):
            self.outFrame = self.frame
            self.outCount = self.count
            self.outTimestamp = self.timestamp
            self._lock.release()
            return (self.outFrame, self.outCount, self.outTimestamp, True)
        else:
            return (self.outFrame, self.outCount, "NoTimeStamp", False)

    def processUserCommand(self, key):
        # if key == ord('x'):
        #     return True
        # elif key == ord('d'):
        #     self.contrast+=1
        #     self.stream.set(cv2.CAP_PROP_CONTRAST,self.contrast)
        #     print("CONTRAST = " + str(self.contrast))
        # elif key == ord('a'):
        #     self.contrast-=1
        #     self.stream.set(cv2.CAP_PROP_CONTRAST,self.contrast)
        #     print("CONTRAST = " + str(self.contrast))
        # elif key == ord('e'):
        #     self.saturation+=1
        #     self.stream.set(cv2.CAP_PROP_SATURATION,self.saturation)
        #     print("SATURATION = " + str(self.saturation))
        # elif key == ord('q'):
        #     self.saturation-=1
        #     self.stream.set(cv2.CAP_PROP_SATURATION,self.saturation)
        #     print("SATURATION = " + str(self.saturation))
        # el
        # if key == ord('z'):
        #     self.exposure = self.exposure - 1
        #     self.setExposure()
        #     print("EXPOSURE = " + str(self.exposure))
        # elif key == ord('c'):
        #     self.exposure = self.exposure + 1
        #     self.setExposure()
        #     print("EXPOSURE = " + str(self.exposure))
        # elif key == ord('w'):
        #     self.brightness+=1
        #     self.camera.set(cv2.CAP_PROP_BRIGHTNESS,self.brightness)
        #     print("BRIGHT = " + str(self.brightness))
        # elif key == ord('s'):
        #     self.brightness-=1
        #     self.camera.set(cv2.CAP_PROP_BRIGHTNESS,self.brightness)
        #     print("BRIGHT = " + str(self.brightness))
        # elif key == ord('p'):
        #     self.iso = self.iso + 100
        #     self.camera.set(cv2.CAP_PROP_ISO_SPEED, self.iso)
        #     print("ISO = " + str(self.iso))
        # elif key == ord('i'):
        #     self.iso = self.iso - 100
        #     self.camera.set(cv2.CAP_PROP_ISO_SPEED, self.iso)
        #     print("ISO = " + str(self.iso))
        # elif key == ord('m'):
        #     self.setMonochrome(not self.monochrome)
        #     print("MONOCHROME = " + str(self.monochrome))

        return False

    def setMonochrome(self, monochrome):
        self.monochrome = monochrome
        # self.camera.set(cv2.CAP_PROP_MONOCHROME, 1 if self.monochrome else 0)

    def updateExposure(self, exposure):
        self.exposure = exposure

    def setExposure(self):
        # self.camera.set(cv2.CAP_PROP_EXPOSURE, self.exposure)
        pass

    def stop(self):
        # indicate that the thread should be stopped
        self._stop = True
        self._condition.acquire()
        self._condition.notify_all()
        self._condition.release()

    def isStopped(self):
        return self.stopped
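Examples #14, #17, and #18 together form a capture → process → display pipeline. Below is a minimal wiring sketch; the PassThrough image processor and all constructor arguments are placeholders chosen only to satisfy the signatures shown above, not values from the original project.

import time
import cv2

# Placeholder image processor: anything with a process(frame) method fits Processor.
class PassThrough:
    def process(self, frame):
        return frame

cam = Capture("front", src=0, width=640, height=480, exposure=10)
proc = Processor(cam, {"none": PassThrough()}, "none")
disp = Display("front", {"front": cam}, {"front": proc})

cam.start()
proc.start()
disp.start()

try:
    while True:
        # Display.update() keeps disp.frame refreshed with the annotated image.
        if disp.frame is not None:
            cv2.imshow("pipeline", disp.frame)
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break
        time.sleep(0.01)
finally:
    disp.stop()
    proc.stop()
    cam.stop()
    cv2.destroyAllWindows()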
Example #19
class ImageCapture:
    def __init__(self, src=0):
        self.src = src
        self.cam = cv2.VideoCapture(
            f'rtsp://{rtsp_user}:{password}@{rtsp_ip}//h264Preview_0{self.src}_sub'
        )

        if not self.cam.isOpened():
            print("\n\nVideoCapture Failed!\n\n")
        else:
            print('\n\nVideoCapture SUCCESS!\n\n')
        self.img = None
        self.count = 0
        self.fps = Rate()
        self.timestamp = 0
        self.event = Event()
        self.running = False
        self.stopped = False

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        print(f'\n\n\n\nReleasing Camera {self.src}')
        try:
            self.cam.release()
        except:
            pass

    def start(self, wait=True, timeout=5.0):
        # start the thread to read frames from the video stream
        print("STARTING ImageCapture...")
        t = Thread(target=self.update, args=())
        t.daemon = True
        t.start()
        start = time.time()
        if wait:
            while not self.isRunning() and ((time.time() - start) <= timeout):
                time.sleep(0.1)

        if not self.isRunning():
            print("WARNING: ImageCapture may not have started!!!")

        return self

    def stop(self, timeout=5.0):
        self.running = False
        start = time.time()
        while not self.stopped and ((time.time() - start) <= timeout):
            time.sleep(0.1)

        if self.isRunning():
            print("WARNING: ImageProcess may not have stopped!!!")

    def isRunning(self):
        return self.running

    def read(self):
        if self.event.wait(0.250):
            return (self.count, self.img, self.timestamp)
        else:
            return (None, None, None)

    def update(self):
        print("ImageCapture STARTED!")
        self.fps.start()
        self.stopped = False
        self.running = True
        while self.running:
            ret, self.img = self.cam.read()
            self.timestamp = datetime.datetime.now()
            if ret:
                self.count += 1
                self.fps.update()
                self.event.set()

        self.stopped = True
        print("ImageCapture STOPPED")