def __init__(self, name, src, width, height, exposure):
    """Open capture device `src`, configure it, and prepare fan-out state.

    name -- human-readable camera name used in log messages
    src -- device index/path handed to cv2.VideoCapture
    width, height -- requested frame size in pixels
    exposure -- initial exposure, applied through setExposure()
    """
    print("Creating Camera " + name)
    self.name = name
    self.src = src
    self.stream = cv2.VideoCapture(src)
    self.stream.set(cv2.CAP_PROP_FRAME_WIDTH, width)
    self.stream.set(cv2.CAP_PROP_FRAME_HEIGHT, height)
    # Start at None so setExposure() sees a change and actually applies it.
    self.exposure = None
    self.setExposure(exposure)
    self.fps = FrameRate()
    self.running = False
    # Dict maps user (client) to the Cubbyhole instance used to pass it frames
    self.userDict = {}
    self.userDictLock = Lock()  # Protects shared access to userDict
    # Read back the device's reported properties for logging/tuning.
    self.rate = self.stream.get(cv2.CAP_PROP_FPS)
    print("RATE = " + str(self.rate))
    self.brightness = self.stream.get(cv2.CAP_PROP_BRIGHTNESS)
    print("BRIGHT = " + str(self.brightness))
    self.contrast = self.stream.get(cv2.CAP_PROP_CONTRAST)
    print("CONTRAST = " + str(self.contrast))
    self.saturation = self.stream.get(cv2.CAP_PROP_SATURATION)
    print("SATURATION = " + str(self.saturation))
    print("EXPOSURE = " + str(self.exposure))
class Processor:
    """Runs a vision pipeline over frames from a Camera on a daemon thread.

    Each processed frame is published through a Cubbyhole; read() blocks
    until the next one is available. The active pipeline can be swapped at
    runtime with setPipeline().
    """

    def __init__(self, name, camera, pipeline):
        print("Creating Processor: camera=" + camera.name + " pipeline=" + pipeline.name)
        self.name = name
        self.camera = camera
        # Lock to protect access to pipeline member var
        self.lock = Lock()
        self.pipeline = pipeline
        self.cubby = Cubbyhole()
        self.fps = FrameRate()
        self.running = False

    def start(self):
        # Launch run() on a daemon thread; returns self so calls can chain.
        print("Processor " + self.name + " STARTING")
        t = Thread(target=self.run, args=())
        t.daemon = True
        t.start()
        return self

    def run(self):
        # Worker loop: fetch a frame, run the current pipeline on it,
        # publish the frame, and track the processing rate.
        print("Processor " + self.name + " RUNNING")
        self.running = True
        while True:
            frame = self.camera.read(self)
            self.fps.start()
            # Snapshot the pipeline under the lock; setPipeline() may swap
            # it from another thread at any time.
            self.lock.acquire()
            pipeline = self.pipeline
            self.lock.release()
            pipeline.process(frame)
            self.cubby.put(frame)
            self.fps.stop()

    def setPipeline(self, pipeline):
        # Swap the active pipeline; no-op when unchanged.
        if pipeline == self.pipeline:
            return
        self.lock.acquire()
        self.pipeline = pipeline
        self.lock.release()
        print("Processor " + self.name + " pipeline now=" + pipeline.name)

    def read(self):
        # Block until the next processed frame is available.
        return self.cubby.get()

    def isRunning(self):
        return self.running
def __init__(self, name, src, width, height, exposure, set_fps=30):  # Default fps to 30
    """Record capture settings; the device itself is opened later (in the
    capture thread), so this only prepares synchronization and state.

    name -- human-readable capture name used in log messages
    src -- camera device index
    width, height -- requested frame size in pixels
    exposure -- manual exposure value
    set_fps -- requested capture rate (default 30)
    """
    print("Creating BucketCapture for " + name)
    self._lock = Lock()
    self._condition = Condition()
    self.fps = FrameRate()
    self.set_fps = set_fps
    self.duration = FrameDuration()
    self.name = name
    self.exposure = exposure
    self.src = src
    self.width = width
    self.height = height
    # initialize the variable used to indicate if the thread should
    # be stopped
    self._stop = False
    self.stopped = True
    # Frame hand-off state: no frame has been grabbed yet.
    self.grabbed = False
    self.frame = None
    self.outFrame = None
    self.count = 0
    self.outCount = self.count
    print("BucketCapture created for " + self.name)
def __init__(self, name, src, width, height, exposure):
    """Open the cv2 capture device, apply settings, and grab a priming frame.

    name -- human-readable capture name used in log messages
    src -- device index/path handed to cv2.VideoCapture
    width, height -- requested frame size in pixels
    exposure -- manual exposure value, applied via setExposure()
    """
    print("Creating BucketCapture for " + name)
    self._lock = Lock()
    self._condition = Condition()
    self.fps = FrameRate()
    self.duration = FrameDuration()
    self.name = name
    self.src = src
    # initialize the video camera stream and read the first frame
    # from the stream
    self.stream = cv2.VideoCapture(src)
    self.stream.set(cv2.CAP_PROP_FRAME_WIDTH, width)
    self.stream.set(cv2.CAP_PROP_FRAME_HEIGHT, height)
    self.exposure = exposure
    self.setExposure()
    # Read back the device's reported properties for logging/tuning.
    self.rate = self.stream.get(cv2.CAP_PROP_FPS)
    print("RATE = " + str(self.rate))
    self.brightness = self.stream.get(cv2.CAP_PROP_BRIGHTNESS)
    print("BRIGHT = " + str(self.brightness))
    self.contrast = self.stream.get(cv2.CAP_PROP_CONTRAST)
    print("CONTRAST = " + str(self.contrast))
    self.saturation = self.stream.get(cv2.CAP_PROP_SATURATION)
    print("SATURATION = " + str(self.saturation))
    print("EXPOSURE = " + str(self.exposure))
##    self.iso = self.stream.get(cv2.CAP_PROP_ISO_SPEED)
##    print("ISO = " + str(self.iso))
    # Prime the frame buffers with one synchronous read so read() has
    # something to return before the capture thread spins up.
    (self._grabbed, self._frame) = self.stream.read()
    if (self._grabbed == True):
        self.grabbed = self._grabbed
        self.frame = self._frame
        self.outFrame = self.frame
        self.count = 1
        self.outCount = self.count
    else:
        self.grabbed = False
        self.frame = None
        self.outFrame = None
        self.count = 0
        self.outCount = self.count
    # initialize the variable used to indicate if the thread should
    # be stopped
    self._stop = False
    self.stopped = True
    print("BucketCapture created for " + self.name)
def __init__(self, name, camera, pipeline):
    """Bind a Camera source to a vision pipeline.

    name -- processor name used in log messages
    camera -- frame source exposing read(user)
    pipeline -- object exposing process(frame); swappable at runtime
    """
    print("Creating Processor: camera=" + camera.name + " pipeline=" + pipeline.name)
    self.name = name
    self.camera = camera
    # Lock to protect access to pipeline member var
    self.lock = Lock()
    self.pipeline = pipeline
    # Cubbyhole hands processed frames to whoever calls read().
    self.cubby = Cubbyhole()
    self.fps = FrameRate()
    self.running = False
class ImgSink:
    """Annotates frames from the selected processor, shows them in a local
    window, and caches a jpeg-compressed copy for the web server.

    Relies on module-level globals `processors` (dict of Processor) and
    `currentCam` (holds the active selection in .value).
    """

    def __init__(self):
        self.fps = FrameRate()
        self.bitrate = BitRate()
        self.cubby = Cubbyhole()

    # Gets frames from selected processor,
    # displays vid in local window,
    # compresses frame to jpg buffer & hands buffer to web server.
    # Called from main thread - note, imshow can only be called from main thread or big crash!
    def show(self):
        theProcessor = processors[currentCam.value]
        img = theProcessor.read()
        self.fps.start()
        # Write some useful info on the frame: each overlay line is
        # "<fps> : <utilization%>" for camera, processor, and this sink.
        camFps, camUtil = theProcessor.camera.fps.get()
        procFps, procUtil = theProcessor.fps.get()
        srvFps, srvUtil = self.fps.get()
        srvBitrate = self.bitrate.get()
        cv2.putText(img, "{:.1f} : {:.0f}%".format(camFps, 100*camUtil), (0, 20),
                    cv2.FONT_HERSHEY_PLAIN, 1, (0, 255, 0), 1)
        cv2.putText(img, "{:.1f} : {:.0f}%".format(procFps, 100*procUtil), (0, 40),
                    cv2.FONT_HERSHEY_PLAIN, 1, (0, 255, 0), 1)
        cv2.putText(img, "{:.1f} : {:.0f}% : {:.2f}".format(srvFps, 100*srvUtil, srvBitrate), (0, 60),
                    cv2.FONT_HERSHEY_PLAIN, 1, (0, 255, 0), 1)
        cv2.putText(img, currentCam.value, (0, 80),
                    cv2.FONT_HERSHEY_PLAIN, 1, (0, 255, 0), 1)
        cv2.putText(img, theProcessor.pipeline.name, (0, 100),
                    cv2.FONT_HERSHEY_PLAIN, 1, (0, 255, 0), 1)
        # Compress image to jpeg and stash in cubbyhole for webserver to grab
        _, jpg = cv2.imencode(".jpg", img, (cv2.IMWRITE_JPEG_QUALITY, 80))
        buf = bytearray(jpg)
        self.cubby.put(buf)
        self.bitrate.update(len(buf))
        self.fps.stop()
        # Show the final image in local window and watch for keypresses.
        cv2.imshow("Image", img)
        key = cv2.waitKey(1)
        return key

    # Web Server calls this to get jpeg to send
    def get(self):
        return self.cubby.get()
def __init__(self, mode, cams, procs):
    """Prepare the display thread's state.

    mode -- key selecting which camera/processor pair to display
    cams -- dict mapping mode key -> capture object
    procs -- dict mapping mode key -> processor object
    """
    print("Creating BucketDisplay")
    self.fps = FrameRate()
    self.duration = FrameDuration()
    self.mode = mode
    self.cams = cams
    self.procs = procs
    # Frame hand-off state (no frame displayed yet).
    self._frame = None
    self.frame = None
    self.count = 0
    self.isNew = False
    # initialize the variable used to indicate if the thread should
    # be stopped
    self._stop = False
    self.stopped = True
    print("BucketDisplay created")
def __init__(self, stream, ipdictionary, ipselection):
    """Bind a frame stream to a selectable image pipeline.

    stream -- frame source exposing read() -> (frame, count, isNew)
    ipdictionary -- maps selection key -> image pipeline object
    ipselection -- key of the initially active pipeline
    """
    print("Creating BucketProcessor for " + stream.name)
    self._lock = Lock()
    self._condition = Condition()
    self.fps = FrameRate()
    self.duration = FrameDuration()
    self.stream = stream
    self.name = self.stream.name
    self.ipdictionary = ipdictionary
    self.ipselection = ipselection
    self.ip = self.ipdictionary[ipselection]
    # Frame hand-off state (nothing processed yet).
    self._frame = None
    self.frame = None
    self.count = 0
    self.isNew = False
    # initialize the variable used to indicate if the thread should
    # be stopped
    self._stop = False
    self.stopped = True
    print("BucketProcessor created for " + self.name)
def __init__(self, stream, ip):
    """Bind a frame stream to a fixed image pipeline and prime state from
    the stream's first read.

    stream -- frame source exposing read() -> (frame, count, isNew)
    ip -- image pipeline object (provides process(frame))
    """
    print("Creating ImageProcessor for " + stream.name)
    self._lock = Lock()
    self._condition = Condition()
    self.fps = FrameRate()
    self.duration = FrameDuration()
    self.stream = stream
    self.ip = ip
    (self._frame, self.count, self.isNew) = self.stream.read()
    if (self.isNew == True):
        self.count = 1
        self.frame = self._frame
    else:
        self.frame = None
        self.count = 0
    # initialize the variable used to indicate if the thread should
    # be stopped
    self._stop = False
    self.stopped = True
    print("ImageProcessor created for " + self.stream.name)
class BucketCapture:
    """Threaded camera reader built on cv2.VideoCapture.

    A daemon thread grabs frames in update(); read() blocks on a Condition
    until a fresh frame is published, then copies it out under a Lock.
    """

    def __init__(self, name, src, width, height, exposure):
        print("Creating BucketCapture for " + name)
        self._lock = Lock()
        self._condition = Condition()
        self.fps = FrameRate()
        self.duration = FrameDuration()
        self.name = name
        self.src = src
        # initialize the video camera stream and read the first frame
        # from the stream
        self.stream = cv2.VideoCapture(src)
        self.stream.set(cv2.CAP_PROP_FRAME_WIDTH, width)
        self.stream.set(cv2.CAP_PROP_FRAME_HEIGHT, height)
        self.exposure = exposure
        self.setExposure()
        # Read back the device's reported properties for logging/tuning.
        self.rate = self.stream.get(cv2.CAP_PROP_FPS)
        print("RATE = " + str(self.rate))
        self.brightness = self.stream.get(cv2.CAP_PROP_BRIGHTNESS)
        print("BRIGHT = " + str(self.brightness))
        self.contrast = self.stream.get(cv2.CAP_PROP_CONTRAST)
        print("CONTRAST = " + str(self.contrast))
        self.saturation = self.stream.get(cv2.CAP_PROP_SATURATION)
        print("SATURATION = " + str(self.saturation))
        print("EXPOSURE = " + str(self.exposure))
        # Prime the frame buffers with one synchronous read so read() has
        # something to return before the capture thread spins up.
        (self._grabbed, self._frame) = self.stream.read()
        if self._grabbed:
            self.grabbed = self._grabbed
            self.frame = self._frame
            self.outFrame = self.frame
            self.count = 1
        else:
            self.grabbed = False
            self.frame = None
            self.outFrame = None
            self.count = 0
        self.outCount = self.count
        # initialize the variable used to indicate if the thread should
        # be stopped
        self._stop = False
        self.stopped = True
        print("BucketCapture created for " + self.name)

    def start(self):
        # start the thread to read frames from the video stream
        print("STARTING BucketCapture for " + self.name)
        t = Thread(target=self.update, args=())
        t.daemon = True
        t.start()
        return self

    def update(self):
        """Capture-thread loop: grab frames and publish them to read()."""
        print("BucketCapture for " + self.name + " RUNNING")
        # keep looping infinitely until the thread is stopped
        self.stopped = False
        self.fps.start()
        lastExposure = self.exposure
        while True:
            # if the thread indicator variable is set, stop the thread.
            # BUGFIX: original did "self.stop = False", which clobbered the
            # stop() method with a bool instead of clearing the _stop flag.
            if self._stop:
                self._stop = False
                self.stopped = True
                return

            # Apply exposure changes requested via updateExposure().
            if lastExposure != self.exposure:
                self.setExposure()
                lastExposure = self.exposure

            # otherwise, read the next frame from the stream
            (self._grabbed, self._frame) = self.stream.read()
            self.duration.start()
            self.fps.update()

            # if something was grabbed and retrieved then lock the outbound
            # buffer for the update. This limits the blocking to just the
            # copy operations; later we may consider a queue or double
            # buffer to minimize blocking.
            if self._grabbed:
                self._condition.acquire()
                self._lock.acquire()
                self.count = self.count + 1
                self.grabbed = self._grabbed
                self.frame = self._frame
                self._lock.release()
                self._condition.notifyAll()
                self._condition.release()
            self.duration.update()

    def read(self):
        """Block until a fresh frame is signaled, then return
        (frame, count, valid)."""
        self._condition.acquire()
        self._condition.wait()
        self._condition.release()
        # return the frame most recently read if the frame
        # is not being updated at this exact moment
        if self._lock.acquire():
            self.outFrame = self.frame
            self.outCount = self.count
            self._lock.release()
            return (self.outFrame, self.outCount, True)
        else:
            return (self.outFrame, self.outCount, False)

    def processUserCommand(self, key):
        """Interactive tuning of camera properties from keypresses.

        Returns True when the user asked to exit ('x'), else False.
        """
        if key == ord('x'):
            return True
        elif key == ord('w'):
            self.brightness += 1
            self.stream.set(cv2.CAP_PROP_BRIGHTNESS, self.brightness)
            print("BRIGHT = " + str(self.brightness))
        elif key == ord('s'):
            self.brightness -= 1
            self.stream.set(cv2.CAP_PROP_BRIGHTNESS, self.brightness)
            print("BRIGHT = " + str(self.brightness))
        elif key == ord('d'):
            self.contrast += 1
            self.stream.set(cv2.CAP_PROP_CONTRAST, self.contrast)
            print("CONTRAST = " + str(self.contrast))
        elif key == ord('a'):
            self.contrast -= 1
            self.stream.set(cv2.CAP_PROP_CONTRAST, self.contrast)
            print("CONTRAST = " + str(self.contrast))
        elif key == ord('e'):
            self.saturation += 1
            self.stream.set(cv2.CAP_PROP_SATURATION, self.saturation)
            print("SATURATION = " + str(self.saturation))
        elif key == ord('q'):
            self.saturation -= 1
            self.stream.set(cv2.CAP_PROP_SATURATION, self.saturation)
            print("SATURATION = " + str(self.saturation))
        elif key == ord('z'):
            self.exposure += 1
            # BUGFIX: original called bare "setExposure(self.exposure)" —
            # a NameError; setExposure is a zero-argument method.
            self.setExposure()
            print("EXPOSURE = " + str(self.exposure))
        elif key == ord('c'):
            self.exposure -= 1
            self.setExposure()
            print("EXPOSURE = " + str(self.exposure))
        return False

    def updateExposure(self, exposure):
        # Request a new exposure; applied by the capture thread in update().
        self.exposure = exposure

    def setExposure(self):
        """Apply self.exposure to the device.

        cv2 exposure control DOES NOT WORK ON PI, so on non-Windows
        platforms we also shell out to v4l2-ctl. (Original issued the
        identical cv2 call three times; once is sufficient.)
        """
        self.stream.set(cv2.CAP_PROP_EXPOSURE, self.exposure)
        if platform.system() != 'Windows':
            cmd = ['v4l2-ctl --device=' + str(self.src) +
                   ' -c exposure_auto=1 -c exposure_absolute=' + str(self.exposure)]
            call(cmd, shell=True)

    def stop(self):
        # indicate that the thread should be stopped, and wake any reader
        # blocked in read() so it can observe the shutdown
        self._stop = True
        self._condition.acquire()
        self._condition.notifyAll()
        self._condition.release()

    def isStopped(self):
        return self.stopped
class Camera:
    """Threaded camera source that fans frames out to registered readers.

    Each distinct caller of read() gets its own Cubbyhole; the capture
    thread puts a copy of every grabbed frame into every registered
    Cubbyhole.
    """

    def __init__(self, name, src, width, height, exposure):
        print("Creating Camera " + name)
        self.name = name
        self.src = src
        self.stream = cv2.VideoCapture(src)
        self.stream.set(cv2.CAP_PROP_FRAME_WIDTH, width)
        self.stream.set(cv2.CAP_PROP_FRAME_HEIGHT, height)
        # Start at None so setExposure() sees a change and applies it.
        self.exposure = None
        self.setExposure(exposure)
        self.fps = FrameRate()
        self.running = False
        # Dict maps user (client) to the Cubbyhole instance used to pass it frames
        self.userDict = {}
        self.userDictLock = Lock()  # Protects shared access to userDict
        # Read back the device's reported properties for logging/tuning.
        self.rate = self.stream.get(cv2.CAP_PROP_FPS)
        print("RATE = " + str(self.rate))
        self.brightness = self.stream.get(cv2.CAP_PROP_BRIGHTNESS)
        print("BRIGHT = " + str(self.brightness))
        self.contrast = self.stream.get(cv2.CAP_PROP_CONTRAST)
        print("CONTRAST = " + str(self.contrast))
        self.saturation = self.stream.get(cv2.CAP_PROP_SATURATION)
        print("SATURATION = " + str(self.saturation))
        print("EXPOSURE = " + str(self.exposure))

    def start(self):
        # Launch run() on a daemon thread; returns self for chaining.
        print("Camera " + self.name + " STARTING")
        t = Thread(target=self.run, args=())
        t.daemon = True
        t.start()
        return self

    def run(self):
        """Capture-thread loop: grab frames and fan them out to readers."""
        print("Camera " + self.name + " RUNNING")
        self.running = True
        while True:
            (grabbed, frame) = self.stream.read()
            self.fps.start()
            # grabbed will be false if camera has been disconnected.
            # How to deal with that?? Should probably try to reconnect
            # somehow? Don't know how...
            if grabbed:
                # Snapshot the registered Cubbyholes under the lock.
                # BUGFIX: copy into a list; the original iterated the live
                # dict view after releasing the lock, racing with read()
                # registering a new user (RuntimeError: dictionary changed
                # size during iteration).
                self.userDictLock.acquire()
                values = list(self.userDict.values())
                self.userDictLock.release()
                # Pass a copy of the frame to each user in userDict
                for mb in values:
                    mb.put(frame.copy())
            self.fps.stop()

    def read(self, user):
        # See if this user already registered in userDict.
        # If not, create a new Cubbyhole instance to pass frames to user.
        # If so, just get the user's Cubbyhole instance.
        # Then get the frame from the Cubbyhole & return it.
        self.userDictLock.acquire()
        if user not in self.userDict:
            self.userDict[user] = Cubbyhole()
        mb = self.userDict[user]
        self.userDictLock.release()
        return mb.get()

    def processUserCommand(self, key):
        """Interactive tuning of camera properties from keypresses."""
        if key == ord('w'):
            self.brightness += 1
            self.stream.set(cv2.CAP_PROP_BRIGHTNESS, self.brightness)
            print("BRIGHT = " + str(self.brightness))
        elif key == ord('s'):
            self.brightness -= 1
            self.stream.set(cv2.CAP_PROP_BRIGHTNESS, self.brightness)
            print("BRIGHT = " + str(self.brightness))
        elif key == ord('d'):
            self.contrast += 1
            self.stream.set(cv2.CAP_PROP_CONTRAST, self.contrast)
            print("CONTRAST = " + str(self.contrast))
        elif key == ord('a'):
            self.contrast -= 1
            self.stream.set(cv2.CAP_PROP_CONTRAST, self.contrast)
            print("CONTRAST = " + str(self.contrast))
        elif key == ord('e'):
            self.saturation += 1
            self.stream.set(cv2.CAP_PROP_SATURATION, self.saturation)
            print("SATURATION = " + str(self.saturation))
        elif key == ord('q'):
            self.saturation -= 1
            self.stream.set(cv2.CAP_PROP_SATURATION, self.saturation)
            print("SATURATION = " + str(self.saturation))
        elif key == ord('z'):
            self.setExposure(self.exposure + 1)
            print("EXPOSURE = " + str(self.exposure))
        elif key == ord('c'):
            self.setExposure(self.exposure - 1)
            print("EXPOSURE = " + str(self.exposure))

    def setExposure(self, exposure):
        """Apply a new exposure value; no-op when unchanged.

        cv2 exposure control DOES NOT WORK ON PI, so outside
        Windows/macOS we shell out to v4l2-ctl instead.
        """
        if self.exposure == exposure:
            return
        self.exposure = exposure
        if platform.system() == 'Windows' or platform.system() == 'Darwin':
            self.stream.set(cv2.CAP_PROP_EXPOSURE, self.exposure)
        else:
            cmd = ['v4l2-ctl --device=' + str(self.src) +
                   ' -c exposure_auto=1 -c exposure_absolute=' + str(self.exposure)]
            call(cmd, shell=True)
        return

    def isRunning(self):
        return self.running
class CamHTTPHandler(BaseHTTPRequestHandler):
    """HTTP handler serving an MJPEG stream of the selected processor.

    NOTE: _stop and fps are CLASS attributes, shared across all handler
    instances (the HTTP server creates one instance per request).
    Depends on module globals: frontProcessor, camera, processor, camMode.
    """
    _stop = False
    fps = FrameRate()

    def stop(self):
        # BUGFIX: original did "self._self = True", setting a junk
        # attribute and never actually flagging the handler to stop.
        self._stop = True

    def do_GET(self):
        print(self.path)
        self.fps.start()
        if self.path.endswith('.mjpg'):
            # Multipart stream: push annotated jpeg frames until the
            # front processor stops.
            self.send_response(200)
            self.send_header(
                'Content-type',
                'multipart/x-mixed-replace; boundary=--jpgboundary')
            self.end_headers()
            while frontProcessor.isStopped() == False:
                try:
                    # Fall back to the front camera when the requested
                    # mode is unknown. (Narrowed from a bare except so
                    # KeyboardInterrupt reaches the outer handler.)
                    try:
                        camModeValue = camMode.value
                        cameraSelection = camera[camModeValue]
                        processorSelection = processor[camModeValue]
                    except Exception:
                        camModeValue = 'frontCam'
                        cameraSelection = camera[camModeValue]
                        processorSelection = processor[camModeValue]

                    (img, count, isNew) = processorSelection.read()
                    if isNew == False:
                        continue

                    # Overlay rate/utilization diagnostics on the frame.
                    camFps = cameraSelection.fps.fps()
                    procFps = processorSelection.fps.fps()
                    procDuration = processorSelection.duration.duration()
                    cv2.putText(img, "{:.1f}".format(camFps), (0, 20),
                                cv2.FONT_HERSHEY_PLAIN, 1, (0, 255, 0), 1)
                    if procFps != 0.0:
                        cv2.putText(
                            img,
                            "{:.1f}".format(procFps) + " : {:.0f}".format(100 * procDuration * procFps) + "%",
                            (0, 40), cv2.FONT_HERSHEY_PLAIN, 1, (0, 255, 0), 1)
                    cv2.putText(img, "{:.1f}".format(self.fps.fps()), (0, 60),
                                cv2.FONT_HERSHEY_PLAIN, 1, (0, 255, 0), 1)
                    cv2.putText(img, camModeValue, (0, 80),
                                cv2.FONT_HERSHEY_PLAIN, 1, (0, 255, 0), 1)
                    cv2.putText(img, processorSelection.ipselection, (0, 100),
                                cv2.FONT_HERSHEY_PLAIN, 1, (0, 255, 0), 1)

                    r, buf = cv2.imencode(".jpg", img)
                    # BUGFIX: wfile is a binary stream in Python 3 —
                    # literals must be bytes, not str.
                    self.wfile.write(b"--jpgboundary\r\n")
                    self.send_header('Content-type', 'image/jpeg')
                    self.send_header('Content-length', str(len(buf)))
                    self.end_headers()
                    self.wfile.write(bytearray(buf))
                    self.wfile.write(b'\r\n')
                    self.fps.update()
                except KeyboardInterrupt:
                    break
            return
        if self.path.endswith('.html') or self.path == "/":
            # Simple page that embeds the MJPEG stream.
            self.send_response(200)
            self.send_header('Content-type', 'text/html')
            self.end_headers()
            self.wfile.write(b'<html><head></head><body>')
            self.wfile.write(b'<img src="http://127.0.0.1:8080/cam.mjpg"/>')
            self.wfile.write(b'</body></html>')
            return
class BucketCapture:
    """Threaded camera reader built on the cscore CameraServer.

    The camera itself is opened lazily on the capture thread in update();
    frames are pulled through a CvSink, republished to a dashboard
    CvSource, and handed to read() via a Condition/Lock pair.
    """

    def __init__(self, name, src, width, height, exposure, set_fps=30):  # Default fps to 30
        print("Creating BucketCapture for " + name)
        self._lock = Lock()
        self._condition = Condition()
        self.fps = FrameRate()
        self.set_fps = set_fps
        self.duration = FrameDuration()
        self.name = name
        self.exposure = exposure
        self.src = src
        self.width = width
        self.height = height
        # initialize the variable used to indicate if the thread should
        # be stopped
        self._stop = False
        self.stopped = True
        # Frame hand-off state: nothing grabbed yet.
        self.grabbed = False
        self.frame = None
        self.outFrame = None
        self.count = 0
        self.outCount = self.count
        print("BucketCapture created for " + self.name)

    def start(self):
        # start the thread to read frames from the video stream
        print("STARTING BucketCapture for " + self.name)
        t = Thread(target=self.update, args=())
        t.daemon = True
        t.start()
        return self

    def update(self):
        """Capture-thread loop: open the camera, grab frames, publish them."""
        print("BucketCapture for " + self.name + " RUNNING")
        # keep looping infinitely until the thread is stopped
        self.stopped = False
        self.fps.start()
        lastExposure = self.exposure
        # The camera is configured here, on the capture thread, not in
        # __init__.
        cs = CameraServer.getInstance()
        cs.enableLogging()
        self.camera = cs.startAutomaticCapture(dev=self.src)
        self.camera.setResolution(self.width, self.height)
        self.camera.setPixelFormat(VideoMode.PixelFormat.kYUYV)
        self.camera.setFPS(self.set_fps)
        self.camera.setExposureManual(self.exposure)
        self.camera.setBrightness(1)
        # Log the modes the device supports, for debugging configuration.
        p = self.camera.enumerateVideoModes()
        for pi in p:
            print(pi.fps, pi.height, pi.width, pi.pixelFormat)

        # Get a CvSink. This will capture images from the camera
        cvSink = cs.getVideo()

        # (optional) Setup a CvSource. This will send images back to the Dashboard
        self.outstream = cs.putVideo(self.name, self.width, self.height)

        # Allocating new images is very expensive, always try to preallocate
        img = np.zeros(shape=(self.height, self.width, 3), dtype=np.uint8)

        while True:
            # if the thread indicator variable is set, stop the thread
            if (self._stop == True):
                self._stop = False
                self.stopped = True
                return

            # Apply exposure changes requested via updateExposure().
            if (lastExposure != self.exposure):
                self.setExposure()
                lastExposure = self.exposure

            # Tell the CvSink to grab a frame from the camera and put it
            # in the source image. If there is an error notify the output.
            # NOTE: this local `time` shadows any imported time module in
            # this scope.
            time, img = cvSink.grabFrame(img)
            if time == 0:
                self._grabbed = False
                # Send the output the error.
                self.outstream.notifyError(cvSink.getError());
                # skip the rest of the current iteration
                continue

            self._grabbed = True
            self.duration.start()
            self.fps.update()

            # if something was grabbed and retrieved then lock the
            # outbound buffer for the update.
            # This limits the blocking to just the copy operations;
            # later we may consider a queue or double buffer to
            # minimize blocking
            if (self._grabbed == True):
                self._condition.acquire()
                self._lock.acquire()
                self.count = self.count + 1
                self.grabbed = self._grabbed
                self.frame = img.copy()
                self._lock.release()
                self._condition.notifyAll()
                self._condition.release()
            self.duration.update()

        print("BucketCapture for " + self.name + " STOPPING")

    def read(self):
        """Block until a fresh frame is signaled, then return
        (frame, count, valid)."""
        self._condition.acquire()
        self._condition.wait()
        self._condition.release()
        # return the frame most recently read if the frame
        # is not being updated at this exact moment
        if (self._lock.acquire() == True):
            self.outFrame = self.frame
            self.outCount = self.count
            self._lock.release()
            return (self.outFrame, self.outCount, True)
        else:
            return (self.outFrame, self.outCount, False)

    # (Removed a large block of commented-out legacy processUserCommand
    # code that drove cv2 camera properties from keypresses; it does not
    # apply to this CameraServer-based implementation.)

    def updateExposure(self, exposure):
        # Request a new exposure; applied by the capture thread in update().
        self.exposure = exposure

    def setExposure(self):
        # Applies self.exposure through the CameraServer API.
        self.camera.setExposureManual(self.exposure);
        pass

    def stop(self):
        # indicate that the thread should be stopped, and wake any reader
        # blocked in read()
        self._stop = True
        self._condition.acquire()
        self._condition.notifyAll()
        self._condition.release()

    def isStopped(self):
        return self.stopped
class BucketProcessor:
    """Threaded stage that runs a selectable image pipeline over frames
    from a stream.

    The active pipeline is chosen by key from ipdictionary; the worker
    thread picks up selection changes made via updateSelection() on the
    next frame. Processed frames are handed to read() via a
    Condition/Lock pair.
    """

    def __init__(self, stream, ipdictionary, ipselection):
        print("Creating BucketProcessor for " + stream.name)
        self._lock = Lock()
        self._condition = Condition()
        self.fps = FrameRate()
        self.duration = FrameDuration()
        self.stream = stream
        self.name = self.stream.name
        self.ipdictionary = ipdictionary
        self.ipselection = ipselection
        self.ip = self.ipdictionary[ipselection]
        # Frame hand-off state (nothing processed yet).
        self._frame = None
        self.frame = None
        self.count = 0
        self.isNew = False
        # initialize the variable used to indicate if the thread should
        # be stopped
        self._stop = False
        self.stopped = True
        print("BucketProcessor created for " + self.name)

    def start(self):
        # Launch update() on a daemon thread; returns self for chaining.
        print("STARTING BucketProcessor for " + self.name)
        t = Thread(target=self.update, args=())
        t.daemon = True
        t.start()
        return self

    def update(self):
        """Worker loop: read frames, run the selected pipeline, publish."""
        print("BucketProcessor for " + self.name + " RUNNING")
        # keep looping infinitely until the thread is stopped
        self.stopped = False
        self.fps.start()
        lastIpSelection = self.ipselection
        while True:
            # if the thread indicator variable is set, stop the thread
            if (self._stop == True):
                self._stop = False
                self.stopped = True
                return

            # otherwise, read the next frame from the stream
            # grab the frame from the threaded video stream
            (self._frame, count, isNew) = self.stream.read()
            self.duration.start()
            self.fps.update()

            # Swap pipelines if the selection changed since the last frame.
            if (lastIpSelection != self.ipselection):
                self.ip = self.ipdictionary[self.ipselection]
                lastIpSelection = self.ipselection

            if (isNew == True):
                # TODO: Insert processing code then forward display changes
                self.ip.process(self._frame)

                # Now that image processing is complete, place results
                # into an outgoing buffer to be grabbed at the convenience
                # of the reader
                self._condition.acquire()
                self._lock.acquire()
                self.count = self.count + 1
                self.isNew = isNew
                self.frame = self._frame
                self._lock.release()
                self._condition.notifyAll()
                self._condition.release()
            self.duration.update()

        print("BucketProcessor for " + self.name + " STOPPING")

    def updateSelection(self, ipselection):
        # Request a new pipeline key; applied by the worker thread.
        self.ipselection = ipselection

    def read(self):
        """Block until a fresh processed frame is signaled, then return
        (frame, count, valid)."""
        self._condition.acquire()
        self._condition.wait()
        self._condition.release()
        # return the frame most recently processed if the frame
        # is not being updated at this exact moment
        if (self._lock.acquire() == True):
            self.outFrame = self.frame
            self.outCount = self.count
            self._lock.release()
            return (self.outFrame, self.outCount, True)
        else:
            return (self.outFrame, self.outCount, False)

    def stop(self):
        # indicate that the thread should be stopped, and wake any reader
        # blocked in read()
        self._stop = True
        self._condition.acquire()
        self._condition.notifyAll()
        self._condition.release()

    def isStopped(self):
        return self.stopped
def __init__(self):
    """Set up the sink's rate trackers and the frame hand-off slot."""
    # Cubbyhole passes the latest jpeg buffer to the web server.
    self.cubby = Cubbyhole()
    # Throughput meters for this sink.
    self.bitrate = BitRate()
    self.fps = FrameRate()
# at a time (it can be selectable in the future) and that the same vision # pipeline should NOT be sent to different image processors as this is simply # confusing and can cause some comingling of data (depending on how the vision # pipeline was defined... we can't control the use of object-specific internals # being run from multiple threads... so don't do it!) bucketProcessor = ImageProcessor(bucketCam, faces).start() print("Waiting for ImageProcessors to start...") while (bucketProcessor.isStopped() == True): time.sleep(0.001) print("ImageProcessors appear online!") # Continue feeding display or streams in foreground told to stop fps = FrameRate() # Keep track of display rate TODO: Thread that too! fps.start() # Loop forever displaying the images for initial testing # # NOTE: NOTE: NOTE: NOTE: # cv2.imshow in Linux relies upon X11 binding under the hood. These binding are NOT inherently thread # safe unless you jump through some hoops to tell the interfaces to operate in a multi-threaded # environment (i.e., within the same process). # # For most purposes, here, we don't need to jump through those hoops or create separate processes and # can just show the images at the rate of the slowest pipeline plus the speed of the remaining pipelines. # # LATER we will create display threads that stream the images as requested at their separate rates. # while (True):
class BucketDisplay:
    """Background thread that annotates processed frames and pushes them to
    the selected camera's dashboard output stream, throttled to ~15 fps.

    mode selects which cams/procs entry to display; unknown modes fall
    back to the first registered pair.
    """

    def __init__(self, mode, cams, procs):
        print("Creating BucketDisplay")
        self.fps = FrameRate()
        self.duration = FrameDuration()
        self.mode = mode    # key selecting which camera/processor to show
        self.cams = cams    # dict: mode key -> capture object
        self.procs = procs  # dict: mode key -> processor object
        # Frame hand-off state (no frame displayed yet).
        self._frame = None
        self.frame = None
        self.count = 0
        self.isNew = False
        # initialize the variable used to indicate if the thread should
        # be stopped
        self._stop = False
        self.stopped = True
        print("BucketDisplay created")

    def start(self):
        # Launch update() on a daemon thread; returns self for chaining.
        print("STARTING BucketDisplay")
        t = Thread(target=self.update, args=())
        t.daemon = True
        t.start()
        return self

    def update(self):
        """Display loop: annotate the latest processed frame and push it."""
        print("BucketDisplay RUNNING")
        # keep looping infinitely until the thread is stopped
        self.stopped = False
        self.fps.start()
        while True:
            # if the thread indicator variable is set, stop the thread
            if self._stop:
                self._stop = False
                self.stopped = True
                return

            try:
                camModeValue = self.mode
                cameraSelection = self.cams[camModeValue]
                processorSelection = self.procs[camModeValue]
            except Exception:
                # Unknown mode: fall back to the first registered pair.
                camModeValue = 'Default'
                cameraSelection = self.cams[list(self.cams.keys())[0]]
                processorSelection = self.procs[list(self.procs.keys())[0]]

            # grab the frame from the threaded video stream
            (img, count, isNew) = processorSelection.read()
            self.duration.start()
            self.fps.update()

            if isNew:
                # Overlay rate/utilization diagnostics on the frame.
                camFps = cameraSelection.fps.fps()
                procFps = processorSelection.fps.fps()
                procDuration = processorSelection.duration.duration()
                cv2.putText(img, "{:.1f}".format(camFps), (0, 20),
                            cv2.FONT_HERSHEY_PLAIN, 1, (0, 255, 0), 1)
                if procFps != 0.0:
                    cv2.putText(
                        img,
                        "{:.1f}".format(procFps) + " : {:.0f}".format(100 * procDuration * procFps) + "%",
                        (0, 40), cv2.FONT_HERSHEY_PLAIN, 1, (0, 255, 0), 1)
                cv2.putText(img, "{:.1f}".format(self.fps.fps()), (0, 60),
                            cv2.FONT_HERSHEY_PLAIN, 1, (0, 255, 0), 1)
                cv2.putText(img, camModeValue, (0, 80),
                            cv2.FONT_HERSHEY_PLAIN, 1, (0, 255, 0), 1)
                cv2.putText(img, processorSelection.ipselection, (0, 100),
                            cv2.FONT_HERSHEY_PLAIN, 1, (0, 255, 0), 1)
                cameraSelection.outstream.putFrame(img)

            self.duration.update()
            # Throttle to ~15 fps.
            # BUGFIX: only sleep for a positive remainder — time.sleep()
            # raises ValueError on a negative argument.
            delta = (1.0 / 15.0) - self.duration.elapsed()
            if delta > 0:
                time.sleep(delta)

        # BUGFIX: original printed self.name here, which BucketDisplay
        # never sets (AttributeError).
        print("BucketDisplay STOPPING")

    def stop(self):
        # indicate that the thread should be stopped
        self._stop = True

    def isStopped(self):
        return self.stopped
class CamUdp:
    """Multicasts annotated jpeg frames over UDP, capped at ~20 fps.

    NOTE: _stop and fps are CLASS attributes, shared by all instances.
    Depends on module globals: frontProcessor, camera, processor, camMode,
    IP_ANY, IP_MULTICAST_GROUP, BV_PORT, bvTable.
    """
    _stop = False
    fps = FrameRate()

    def stop(self):
        # BUGFIX: original did "self._self = True", setting a junk
        # attribute and never actually flagging the loop to stop.
        self._stop = True

    def do(self):
        """Main send loop: encode the selected processor's frames and
        multicast them until the front processor stops."""
        # A UDP socket is an object we will use to send data to an address
        mySocket = socket.socket(family=socket.AF_INET,
                                 type=socket.SOCK_DGRAM,
                                 proto=socket.IPPROTO_UDP)
        if mySocket == None:
            # socket.socket() raises on failure rather than returning None,
            # but keep the original defensive log.
            print("Unable to create mySocket socket")

        # Allow the address/port pair to be reused by other processes
        mySocket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)

        # Set the time-to-live for messages to 1 so they do not go past the
        # local network segment. If we need to let the message go further,
        # multicasting to many networks we can set the TTL value as high
        # as 255
        ttl = struct.pack('b', 1)
        mySocket.setsockopt(socket.IPPROTO_IP, socket.IP_MULTICAST_TTL, ttl)

        # 'binding' a socket associates it with an interface address and a
        # port on that address; all traffic to that address/port signals
        # the socket. IP_ANY (or the empty string) means "first interface
        # found". Port 0 lets the OS pick an ephemeral port.
        print("Binding...")
        mySocket.bind((IP_ANY, 0))

        # Now we just start sending
        self.fps.start()
        lastTime = 0
        while frontProcessor.isStopped() == False:
            try:
                # Fall back to the front camera when the requested mode is
                # unknown. (Narrowed from a bare except so
                # KeyboardInterrupt reaches the outer handler.)
                try:
                    camModeValue = camMode.value
                    cameraSelection = camera[camModeValue]
                    processorSelection = processor[camModeValue]
                except Exception:
                    camModeValue = 'frontCam'
                    cameraSelection = camera[camModeValue]
                    processorSelection = processor[camModeValue]

                timeNow = time.time()
                if (timeNow - lastTime) > 0.05:  # maximum of 20 fps for display
                    lastTime = timeNow
                    (img, count, isNew) = processorSelection.read()
                    if isNew == False:
                        continue

                    # Overlay rate/utilization diagnostics on the frame.
                    camFps = cameraSelection.fps.fps()
                    procFps = processorSelection.fps.fps()
                    procDuration = processorSelection.duration.duration()
                    cv2.putText(img, "{:.1f}".format(camFps), (0, 20),
                                cv2.FONT_HERSHEY_PLAIN, 1, (0, 255, 0), 1)
                    if procFps != 0.0:
                        cv2.putText(
                            img,
                            "{:.1f}".format(procFps) + " : {:.0f}".format(100 * procDuration * procFps) + "%",
                            (0, 40), cv2.FONT_HERSHEY_PLAIN, 1, (0, 255, 0), 1)
                    cv2.putText(img, "{:.1f}".format(self.fps.fps()), (0, 60),
                                cv2.FONT_HERSHEY_PLAIN, 1, (0, 255, 0), 1)
                    cv2.putText(img, camModeValue, (0, 80),
                                cv2.FONT_HERSHEY_PLAIN, 1, (0, 255, 0), 1)
                    cv2.putText(img, processorSelection.ipselection, (0, 100),
                                cv2.FONT_HERSHEY_PLAIN, 1, (0, 255, 0), 1)

                    # This encoding is relatively inexpensive (about 6 to
                    # 7 ms on a Pi3) and the compression ratio is generally
                    # really good; most of the time is spent sending the
                    # byte array over the network.
                    startEncode = time.time()
                    r, buf = cv2.imencode(".jpg", img)
                    endEncode = time.time()
                    ba = bytearray(buf)
                    mySocket.sendto(ba, (IP_MULTICAST_GROUP, BV_PORT))
                    endSend = time.time()
                    self.fps.update()
                    # Publish timing/size telemetry to the network table.
                    bvTable.putNumber("Display_FPS", int(self.fps.fps()))
                    bvTable.putNumber("Display_Encode_msec", int((endEncode - startEncode) * 1000))
                    bvTable.putNumber("Display_Send_msec", int((endSend - endEncode) * 1000))
                    bvTable.putNumber("Display_BufSize", len(ba))
                    bi = bytearray(img)
                    bvTable.putNumber("Display_OrigSize", len(bi))
            except KeyboardInterrupt:
                break
        return