def __init__( self, videoPath, imageProcessingEndpoint = "", imageProcessingParams = "", showVideo = False, verbose = False, loopVideo = True, convertToGray = False, resizeWidth = 0, resizeHeight = 0, annotate = False, sendToHubCallback = None): self.videoPath = videoPath if self.__IsInt(videoPath): #case of a usb camera (usually mounted at /dev/video* where * is an int) self.isWebcam = True else: #case of a video file self.isWebcam = False self.imageProcessingEndpoint = imageProcessingEndpoint if imageProcessingParams == "": self.imageProcessingParams = "" else: self.imageProcessingParams = json.loads(imageProcessingParams) self.showVideo = showVideo self.verbose = verbose self.loopVideo = loopVideo self.convertToGray = convertToGray self.resizeWidth = resizeWidth self.resizeHeight = resizeHeight self.annotate = (self.imageProcessingEndpoint != "") and self.showVideo & annotate self.nbOfPreprocessingSteps = 0 self.autoRotate = False self.sendToHubCallback = sendToHubCallback self.vs = None if self.convertToGray: self.nbOfPreprocessingSteps +=1 if self.resizeWidth != 0 or self.resizeHeight != 0: self.nbOfPreprocessingSteps +=1 if self.verbose: print("Initialising the camera capture with the following parameters: ") print(" - Video path: " + self.videoPath) print(" - Image processing endpoint: " + self.imageProcessingEndpoint) print(" - Image processing params: " + json.dumps(self.imageProcessingParams)) print(" - Show video: " + str(self.showVideo)) print(" - Loop video: " + str(self.loopVideo)) print(" - Convert to gray: " + str(self.convertToGray)) print(" - Resize width: " + str(self.resizeWidth)) print(" - Resize height: " + str(self.resizeHeight)) print(" - Annotate: " + str(self.annotate)) print(" - Send processing results to hub: " + str(self.sendToHubCallback is not None)) print() self.displayFrame = None if self.showVideo: self.imageServer = ImageServer(5012, self) self.imageServer.start()
def __init__(self, videoPath="", verbose=True, videoW=0, videoH=0, fontScale=1.0, inference=True, confidenceLevel=0.5, detectionSampleRate=10, imageProcessingEndpoint=""): self.videoPath = videoPath self.verbose = verbose self.videoW = videoW self.videoH = videoH self.inference = inference self.confidenceLevel = confidenceLevel self.useStream = False self.useStreamHttp = False self.useMovieFile = False self.frameCount = 0 self.vStream = None self.vCapture = None self.displayFrame = None self.fontScale = float(fontScale) self.captureInProgress = False self.imageResp = None self.url = "" self.detectionSampleRate = detectionSampleRate self.imageProcessingEndpoint = imageProcessingEndpoint print("VideoCapture::__init__()") print("OpenCV Version : %s" % (cv2.__version__)) print( "===============================================================") print("Initialising Video Capture with the following parameters: ") print(" - Video path : " + self.videoPath) print(" - Video width : " + str(self.videoW)) print(" - Video height : " + str(self.videoH)) print(" - Font Scale : " + str(self.fontScale)) print(" - Inference? : " + str(self.inference)) print(" - ConficenceLevel : " + str(self.confidenceLevel)) print(" - Dct smpl rate : " + str(self.detectionSampleRate)) print(" - Imageproc.Endpt.: " + str(self.imageProcessingEndpoint)) print("") self.imageServer = ImageServer(80, self) self.imageServer.start() self.yoloInference = YoloInference(self.fontScale)
def __init__(self, videoPath, imageProcessingEndpoint="", imageProcessingParams="", showVideo=False, verbose=False, resizeWidth=0, resizeHeight=0, sendToHubCallback=None): self.videoPath = videoPath self.imageProcessingEndpoint = imageProcessingEndpoint if imageProcessingParams == "": self.imageProcessingParams = "" else: self.imageProcessingParams = json.loads(imageProcessingParams) self.showVideo = showVideo self.verbose = verbose self.resizeWidth = resizeWidth self.resizeHeight = resizeHeight self.nbOfPreprocessingSteps = 0 self.autoRotate = False self.sendToHubCallback = sendToHubCallback self.vs = None if self.resizeWidth != 0 or self.resizeHeight != 0: self.nbOfPreprocessingSteps += 1 if self.verbose: print( "Initialising the camera capture with the following parameters: " ) print(" - Video path: ", self.videoPath) print(" - Image processing endpoint: " + self.imageProcessingEndpoint) print(" - Image processing params: " + json.dumps(self.imageProcessingParams)) print(" - Show video: " + str(self.showVideo)) print(" - Resize width: " + str(self.resizeWidth)) print(" - Resize height: " + str(self.resizeHeight)) print(" - Send processing results to hub: " + str(self.sendToHubCallback is not None)) print() self.displayFrame = None if self.showVideo: self.imageServer = ImageServer(5012, self) self.imageServer.start()
class CameraCapture(object):

    def __IsInt(self, string):
        try:
            int(string)
            return True
        except ValueError:
            return False

    def __init__(
            self,
            videoPath,
            imageProcessingEndpoint="",
            imageProcessingParams="",
            showVideo=False,
            verbose=False,
            loopVideo=True,
            convertToGray=False,
            resizeWidth=0,
            resizeHeight=0,
            annotate=False,
            sendToHubCallback=None):
        self.videoPath = videoPath
        if self.__IsInt(videoPath):
            # case of a USB camera (usually mounted at /dev/video* where * is an int)
            self.isWebcam = True
        else:
            # case of a video file
            self.isWebcam = False
        self.imageProcessingEndpoint = imageProcessingEndpoint
        if imageProcessingParams == "":
            self.imageProcessingParams = ""
        else:
            self.imageProcessingParams = json.loads(imageProcessingParams)
        self.showVideo = showVideo
        self.verbose = verbose
        self.loopVideo = loopVideo
        self.convertToGray = convertToGray
        self.resizeWidth = resizeWidth
        self.resizeHeight = resizeHeight
        self.annotate = (self.imageProcessingEndpoint != "") and self.showVideo and annotate
        self.nbOfPreprocessingSteps = 0
        self.autoRotate = False
        self.sendToHubCallback = sendToHubCallback
        self.vs = None

        if self.convertToGray:
            self.nbOfPreprocessingSteps += 1
        if self.resizeWidth != 0 or self.resizeHeight != 0:
            self.nbOfPreprocessingSteps += 1

        if self.verbose:
            print("Initialising the camera capture with the following parameters: ")
            print("   - Video path: " + self.videoPath)
            print("   - Image processing endpoint: " + self.imageProcessingEndpoint)
            print("   - Image processing params: " + json.dumps(self.imageProcessingParams))
            print("   - Show video: " + str(self.showVideo))
            print("   - Loop video: " + str(self.loopVideo))
            print("   - Convert to gray: " + str(self.convertToGray))
            print("   - Resize width: " + str(self.resizeWidth))
            print("   - Resize height: " + str(self.resizeHeight))
            print("   - Annotate: " + str(self.annotate))
            print("   - Send processing results to hub: " + str(self.sendToHubCallback is not None))
            print()

        self.displayFrame = None
        if self.showVideo:
            self.imageServer = ImageServer(5012, self)
            self.imageServer.start()

    def __annotate(self, frame, response):
        AnnotationParserInstance = AnnotationParser()
        # TODO: Make the choice of the service configurable
        listOfRectanglesToDisplay = AnnotationParserInstance.getCV2RectanglesFromProcessingService1(response)
        for rectangle in listOfRectanglesToDisplay:
            cv2.rectangle(frame, (rectangle[0], rectangle[1]), (rectangle[2], rectangle[3]), (0, 0, 255), 4)
        return

    def __sendFrameForProcessing(self, frame):
        headers = {'Content-Type': 'application/octet-stream'}
        try:
            response = requests.post(self.imageProcessingEndpoint, headers=headers, params=self.imageProcessingParams, data=frame)
        except Exception as e:
            print('__sendFrameForProcessing Exception -' + str(e))
            return "[]"

        if self.verbose:
            try:
                print("Response from external processing service: (" + str(response.status_code) + ") " + json.dumps(response.json()))
            except Exception:
                print("Response from external processing service (status code): " + str(response.status_code))
        return json.dumps(response.json())

    def __displayTimeDifferenceInMs(self, endTime, startTime):
        return str(int((endTime - startTime) * 1000)) + " ms"

    def __enter__(self):
        if self.isWebcam:
            # The VideoStream class always gives us the latest frame from the webcam. It uses another thread to read the frames.
            self.vs = VideoStream(int(self.videoPath)).start()
            time.sleep(1.0)  # needed to load at least one frame into the VideoStream class
            # self.capture = cv2.VideoCapture(int(self.videoPath))
        else:
            # In the case of a video file, we want to analyze all the frames of the video, thus we are not using the VideoStream class
            self.capture = cv2.VideoCapture(self.videoPath)
        return self

    def get_display_frame(self):
        return self.displayFrame

    def start(self):
        frameCounter = 0
        perfForOneFrameInMs = None
        while True:
            if self.showVideo or self.verbose:
                startOverall = time.time()
            if self.verbose:
                startCapture = time.time()

            frameCounter += 1
            if self.isWebcam:
                frame = self.vs.read()
            else:
                frame = self.capture.read()[1]
                if frameCounter == 1:
                    if self.capture.get(cv2.CAP_PROP_FRAME_WIDTH) < self.capture.get(cv2.CAP_PROP_FRAME_HEIGHT):
                        self.autoRotate = True
                if self.autoRotate:
                    # The counterclockwise direction is arbitrary; it could well be clockwise. Is there a way to auto-detect it?
                    frame = cv2.rotate(frame, cv2.ROTATE_90_COUNTERCLOCKWISE)

            if self.verbose:
                if frameCounter == 1:
                    if not self.isWebcam:
                        print("Original frame size: " + str(int(self.capture.get(cv2.CAP_PROP_FRAME_WIDTH))) + "x" + str(int(self.capture.get(cv2.CAP_PROP_FRAME_HEIGHT))))
                        print("Frame rate (FPS): " + str(int(self.capture.get(cv2.CAP_PROP_FPS))))
                print("Frame number: " + str(frameCounter))
                print("Time to capture (+ straighten up) a frame: " + self.__displayTimeDifferenceInMs(time.time(), startCapture))
                startPreProcessing = time.time()

            # Loop video
            if not self.isWebcam:
                if frameCounter == self.capture.get(cv2.CAP_PROP_FRAME_COUNT):
                    if self.loopVideo:
                        frameCounter = 0
                        self.capture.set(cv2.CAP_PROP_POS_FRAMES, 0)
                    else:
                        break

            # Pre-process locally
            if self.nbOfPreprocessingSteps == 1 and self.convertToGray:
                preprocessedFrame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            if self.nbOfPreprocessingSteps == 1 and (self.resizeWidth != 0 or self.resizeHeight != 0):
                preprocessedFrame = cv2.resize(frame, (self.resizeWidth, self.resizeHeight))
            if self.nbOfPreprocessingSteps > 1:
                preprocessedFrame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
                preprocessedFrame = cv2.resize(preprocessedFrame, (self.resizeWidth, self.resizeHeight))

            if self.verbose:
                print("Time to pre-process a frame: " + self.__displayTimeDifferenceInMs(time.time(), startPreProcessing))
                startEncodingForProcessing = time.time()

            # Process externally
            if self.imageProcessingEndpoint != "":
                # Encode frame to send over HTTP
                if self.nbOfPreprocessingSteps == 0:
                    encodedFrame = cv2.imencode(".jpg", frame)[1].tobytes()
                else:
                    encodedFrame = cv2.imencode(".jpg", preprocessedFrame)[1].tobytes()

                if self.verbose:
                    print("Time to encode a frame for processing: " + self.__displayTimeDifferenceInMs(time.time(), startEncodingForProcessing))
                    startProcessingExternally = time.time()

                # Send over HTTP for processing
                response = self.__sendFrameForProcessing(encodedFrame)
                if self.verbose:
                    print("Time to process frame externally: " + self.__displayTimeDifferenceInMs(time.time(), startProcessingExternally))
                    startSendingToEdgeHub = time.time()

                # Forward the outcome of the external processing to the EdgeHub
                if response != "[]" and self.sendToHubCallback is not None:
                    self.sendToHubCallback(response)
                    if self.verbose:
                        print("Time to message from processing service to edgeHub: " + self.__displayTimeDifferenceInMs(time.time(), startSendingToEdgeHub))
                        startDisplaying = time.time()

            # Display frames
            if self.showVideo:
                try:
                    if self.nbOfPreprocessingSteps == 0:
                        if self.verbose and (perfForOneFrameInMs is not None):
                            cv2.putText(frame, "FPS " + str(round(1000 / perfForOneFrameInMs, 2)), (10, 35), cv2.FONT_HERSHEY_SIMPLEX, 1.0, (0, 0, 255), 2)
                        if self.annotate:
                            # TODO: fix bug with annotate function
                            self.__annotate(frame, response)
                        self.displayFrame = cv2.imencode('.jpg', frame)[1].tobytes()
                    else:
                        if self.verbose and (perfForOneFrameInMs is not None):
                            cv2.putText(preprocessedFrame, "FPS " + str(round(1000 / perfForOneFrameInMs, 2)), (10, 35), cv2.FONT_HERSHEY_SIMPLEX, 1.0, (0, 0, 255), 2)
                        if self.annotate:
                            # TODO: fix bug with annotate function
                            self.__annotate(preprocessedFrame, response)
                        self.displayFrame = cv2.imencode('.jpg', preprocessedFrame)[1].tobytes()
                except Exception as e:
                    print("Could not display the video to a web browser.")
                    print('Exception -' + str(e))

                if self.verbose:
                    if 'startDisplaying' in locals():
                        print("Time to display frame: " + self.__displayTimeDifferenceInMs(time.time(), startDisplaying))
                    elif 'startSendingToEdgeHub' in locals():
                        print("Time to display frame: " + self.__displayTimeDifferenceInMs(time.time(), startSendingToEdgeHub))
                    else:
                        print("Time to display frame: " + self.__displayTimeDifferenceInMs(time.time(), startEncodingForProcessing))
                perfForOneFrameInMs = int((time.time() - startOverall) * 1000)
                if not self.isWebcam:
                    waitTimeBetweenFrames = max(int(1000 / self.capture.get(cv2.CAP_PROP_FPS)) - perfForOneFrameInMs, 1)
                    print("Wait time between frames: " + str(waitTimeBetweenFrames))
                    if cv2.waitKey(waitTimeBetweenFrames) & 0xFF == ord('q'):
                        break

            if self.verbose:
                perfForOneFrameInMs = int((time.time() - startOverall) * 1000)
                print("Total time for one frame: " + self.__displayTimeDifferenceInMs(time.time(), startOverall))

    def __exit__(self, exception_type, exception_value, traceback):
        if not self.isWebcam:
            self.capture.release()
        if self.showVideo:
            self.imageServer.close()
            cv2.destroyAllWindows()
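# --- Illustrative usage sketch (not part of the original module) ---
# CameraCapture implements __enter__/__exit__, so it is intended to be driven as a
# context manager. The endpoint URL below is a placeholder, not a real service.
if __name__ == '__main__':
    with CameraCapture(videoPath="0",
                       imageProcessingEndpoint="http://image-classifier-service:80/image",
                       showVideo=True,
                       verbose=True) as cameraCapture:
        cameraCapture.start()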
class VideoCapture(object):

    def __init__(self, videoPath="", verbose=True, videoW=0, videoH=0,
                 fontScale=1.0, inference=True, confidenceLevel=0.5):
        self.videoPath = videoPath
        self.verbose = verbose
        self.videoW = videoW
        self.videoH = videoH
        self.inference = inference
        self.confidenceLevel = confidenceLevel
        self.useStream = False
        self.useMovieFile = False
        self.frameCount = 0
        self.vStream = None
        self.vCapture = None
        self.displayFrame = None
        self.fontScale = float(fontScale)
        self.captureInProgress = False

        print("VideoCapture::__init__()")
        print("OpenCV Version : %s" % (cv2.__version__))
        print("===============================================================")
        print("Initialising Video Capture with the following parameters: ")
        print("   - Video path        : " + self.videoPath)
        print("   - Video width       : " + str(self.videoW))
        print("   - Video height      : " + str(self.videoH))
        print("   - Font scale        : " + str(self.fontScale))
        print("   - Inference?        : " + str(self.inference))
        print("   - Confidence level  : " + str(self.confidenceLevel))
        print("")

        self.imageServer = ImageServer(80, self)
        self.imageServer.start()

        self.yoloInference = YoloInference(self.fontScale)

    def __IsCaptureDev(self, videoPath):
        try:
            return '/dev/video' in videoPath.lower()
        except ValueError:
            return False

    def __IsRtsp(self, videoPath):
        try:
            return 'rtsp:' in videoPath.lower()
        except ValueError:
            return False

    def __IsYoutube(self, videoPath):
        try:
            if 'www.youtube.com' in videoPath.lower() or 'youtu.be' in videoPath.lower():
                return True
            else:
                return False
        except ValueError:
            return False

    def __enter__(self):
        if self.verbose:
            print("videoCapture::__enter__()")
        self.setVideoSource(self.videoPath)
        return self

    def setVideoSource(self, newVideoPath):
        if self.captureInProgress:
            self.captureInProgress = False
            time.sleep(1.0)
            if self.vCapture:
                self.vCapture.release()
                self.vCapture = None
            elif self.vStream:
                self.vStream.stop()
                self.vStream = None

        if self.__IsRtsp(newVideoPath):
            print("\r\n===> RTSP Video Source")
            self.useStream = True
            self.useMovieFile = False
            self.videoPath = newVideoPath

            if self.vStream:
                self.vStream.stop()
                self.vStream = None
            if self.vCapture:
                self.vCapture.release()
                self.vCapture = None

            self.vStream = VideoStream(newVideoPath).start()
            # Needed to load at least one frame into the VideoStream class
            time.sleep(1.0)
            self.captureInProgress = True

        elif self.__IsYoutube(newVideoPath):
            print("\r\n===> YouTube Video Source")
            self.useStream = False
            self.useMovieFile = True  # This is a video file
            self.downloadVideo(newVideoPath)
            self.videoPath = newVideoPath
            if self.vCapture.isOpened():
                self.captureInProgress = True
            else:
                print("===========================\r\nWARNING : Failed to Open Video Source\r\n===========================\r\n")

        elif self.__IsCaptureDev(newVideoPath):
            print("===> Webcam Video Source")
            if self.vStream:
                self.vStream.stop()
                self.vStream = None
            if self.vCapture:
                self.vCapture.release()
                self.vCapture = None

            self.videoPath = newVideoPath
            self.useMovieFile = False
            self.useStream = False
            self.vCapture = cv2.VideoCapture(newVideoPath)
            if self.vCapture.isOpened():
                self.captureInProgress = True
            else:
                print("===========================\r\nWARNING : Failed to Open Video Source\r\n===========================\r\n")
        else:
            print("===========================\r\nWARNING : No Video Source\r\n===========================\r\n")
            self.useStream = False
            self.useYouTube = False
            self.vCapture = None
            self.vStream = None
        return self

    def downloadVideo(self, videoUrl):
        if self.captureInProgress:
            bRestartCapture = True
            time.sleep(1.0)
            if self.vCapture:
                print("Release vCapture")
                self.vCapture.release()
                self.vCapture = None
        else:
            bRestartCapture = False

        if os.path.isfile('/app/video.mp4'):
            os.remove("/app/video.mp4")

        print("Start downloading video")
        os.system("youtube-dl -o /app/video.mp4 -f mp4 " + videoUrl)
        print("Download Complete")

        self.vCapture = cv2.VideoCapture("/app/video.mp4")
        time.sleep(1.0)
        self.frameCount = int(self.vCapture.get(cv2.CAP_PROP_FRAME_COUNT))

        if bRestartCapture:
            self.captureInProgress = True

    def get_display_frame(self):
        return self.displayFrame

    def videoStreamReadTimeoutHandler(self, signum, frame):
        raise Exception("VideoStream Read Timeout")

    def start(self):
        while True:
            if self.captureInProgress:
                self.__Run__()
            if not self.captureInProgress:
                time.sleep(1.0)

    def __Run__(self):
        print("===============================================================")
        print("videoCapture::__Run__()")
        print("   - Stream       : " + str(self.useStream))
        print("   - useMovieFile : " + str(self.useMovieFile))

        cameraH = 0
        cameraW = 0
        frameH = 0
        frameW = 0

        if self.useStream and self.vStream:
            cameraH = int(self.vStream.stream.get(cv2.CAP_PROP_FRAME_HEIGHT))
            cameraW = int(self.vStream.stream.get(cv2.CAP_PROP_FRAME_WIDTH))
        elif self.useStream == False and self.vCapture:
            cameraH = int(self.vCapture.get(cv2.CAP_PROP_FRAME_HEIGHT))
            cameraW = int(self.vCapture.get(cv2.CAP_PROP_FRAME_WIDTH))
        else:
            print("Error : No Video Source")
            return

        if self.videoW != 0 and self.videoH != 0 and self.videoH != cameraH and self.videoW != cameraW:
            needResizeFrame = True
            frameH = self.videoH
            frameW = self.videoW
        else:
            needResizeFrame = False
            frameH = cameraH
            frameW = cameraW

        if needResizeFrame:
            print("Original frame size : " + str(cameraW) + "x" + str(cameraH))
            print("     New frame size : " + str(frameW) + "x" + str(frameH))
            print("             Resize : " + str(needResizeFrame))
        else:
            print("  Camera frame size : " + str(cameraW) + "x" + str(cameraH))
            print("         frame size : " + str(frameW) + "x" + str(frameH))

        # Check the camera's FPS
        if self.useStream:
            cameraFPS = int(self.vStream.stream.get(cv2.CAP_PROP_FPS))
        else:
            cameraFPS = int(self.vCapture.get(cv2.CAP_PROP_FPS))

        if cameraFPS == 0:
            print("Error : Could not get FPS")
            raise Exception("Unable to acquire FPS for Video Source")

        print("Frame rate (FPS)    : " + str(cameraFPS))

        currentFPS = cameraFPS
        perFrameTimeInMs = 1000 / cameraFPS

        signal.signal(signal.SIGALRM, self.videoStreamReadTimeoutHandler)

        while True:
            # Get the current time before we capture a frame
            tFrameStart = time.time()

            if not self.captureInProgress:
                break

            if self.useMovieFile:
                currentFrame = int(self.vCapture.get(cv2.CAP_PROP_POS_FRAMES))
                if currentFrame >= self.frameCount:
                    self.vCapture.set(cv2.CAP_PROP_POS_FRAMES, 0)

            try:
                # Read a frame
                if self.useStream:
                    # Timeout after 10 s
                    signal.alarm(10)
                    frame = self.vStream.read()
                    signal.alarm(0)
                else:
                    frame = self.vCapture.read()[1]
            except Exception as e:
                print("ERROR : Exception during capturing")
                raise e

            # Resize frame if flagged
            if needResizeFrame:
                frame = cv2.resize(frame, (self.videoW, self.videoH))

            # Run object detection
            if self.inference:
                self.yoloInference.runInference(frame, frameW, frameH, self.confidenceLevel)

            # Calculate FPS
            timeElapsedInMs = (time.time() - tFrameStart) * 1000
            currentFPS = 1000.0 / timeElapsedInMs

            if (currentFPS > cameraFPS):
                # Cannot go faster than the camera's FPS
                currentFPS = cameraFPS

            # Add FPS text to the frame
            cv2.putText(frame, "FPS " + str(round(currentFPS, 1)), (10, int(30 * self.fontScale)), cv2.FONT_HERSHEY_SIMPLEX, self.fontScale, (0, 0, 255), 2)

            self.displayFrame = cv2.imencode('.jpg', frame)[1].tobytes()

            timeElapsedInMs = (time.time() - tFrameStart) * 1000

            if (1000 / cameraFPS) > timeElapsedInMs:
                # This is faster than the image source (e.g. camera) can feed frames.
                waitTimeBetweenFrames = perFrameTimeInMs - timeElapsedInMs
                # if self.verbose:
                #     print("  Wait time between frames : " + str(int(waitTimeBetweenFrames)))
                time.sleep(waitTimeBetweenFrames / 1000.0)

    def __exit__(self, exception_type, exception_value, traceback):
        if self.vCapture:
            self.vCapture.release()
        self.imageServer.close()
        cv2.destroyAllWindows()
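# --- Illustrative usage sketch (not part of the original module) ---
# VideoCapture also follows the context-manager pattern: setVideoSource() runs from
# __enter__ and start() blocks while frames are read and run through YOLO inference.
# The RTSP URL below is a placeholder, not an endpoint from the original code.
if __name__ == '__main__':
    with VideoCapture(videoPath="rtsp://localhost:8554/stream",
                      videoW=640, videoH=480,
                      inference=True, confidenceLevel=0.5) as videoCapture:
        videoCapture.start()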
class CameraCapture(object):

    def __init__(self,
                 videoPath,
                 imageProcessingEndpoint="",
                 imageProcessingParams="",
                 showVideo=False,
                 verbose=False,
                 resizeWidth=0,
                 resizeHeight=0,
                 sendToHubCallback=None):
        self.videoPath = videoPath
        self.imageProcessingEndpoint = imageProcessingEndpoint
        if imageProcessingParams == "":
            self.imageProcessingParams = ""
        else:
            self.imageProcessingParams = json.loads(imageProcessingParams)
        self.showVideo = showVideo
        self.verbose = verbose
        self.resizeWidth = resizeWidth
        self.resizeHeight = resizeHeight
        self.nbOfPreprocessingSteps = 0
        self.autoRotate = False
        self.sendToHubCallback = sendToHubCallback
        self.vs = None

        if self.resizeWidth != 0 or self.resizeHeight != 0:
            self.nbOfPreprocessingSteps += 1

        if self.verbose:
            print("Initialising the camera capture with the following parameters: ")
            print("   - Video path: ", self.videoPath)
            print("   - Image processing endpoint: " + self.imageProcessingEndpoint)
            print("   - Image processing params: " + json.dumps(self.imageProcessingParams))
            print("   - Show video: " + str(self.showVideo))
            print("   - Resize width: " + str(self.resizeWidth))
            print("   - Resize height: " + str(self.resizeHeight))
            print("   - Send processing results to hub: " + str(self.sendToHubCallback is not None))
            print()

        self.displayFrame = None
        if self.showVideo:
            self.imageServer = ImageServer(5012, self)
            self.imageServer.start()

    def __sendFrameForProcessing(self, frame):
        headers = {'Content-Type': 'application/octet-stream'}
        try:
            response = requests.post(self.imageProcessingEndpoint,
                                     headers=headers,
                                     params=self.imageProcessingParams,
                                     data=frame)
        except Exception as e:
            print('__sendFrameForProcessing Exception -' + str(e))
            return "[]"

        if self.verbose:
            try:
                print("Response from external processing service: (" + str(response.status_code) + ") " + json.dumps(response.json()))
            except Exception:
                print("Response from external processing service (status code): " + str(response.status_code))
        return json.dumps(response.json())

    def __displayTimeDifferenceInMs(self, endTime, startTime):
        return str(int((endTime - startTime) * 1000)) + " ms"

    def __enter__(self):
        self.vs = VideoStream(int(self.videoPath)).start()
        time.sleep(1.0)  # needed to load at least one frame into the VideoStream class
        return self

    def get_display_frame(self):
        return self.displayFrame

    async def start(self):
        frameCounter = 0
        perfForOneFrameInMs = None
        while True:
            time.sleep(5)
            if self.showVideo or self.verbose:
                startOverall = time.time()
            if self.verbose:
                startCapture = time.time()

            frameCounter += 1
            frame = self.vs.read()

            if self.verbose:
                print("Frame number: " + str(frameCounter))
                print("Time to capture (+ straighten up) a frame: " + self.__displayTimeDifferenceInMs(time.time(), startCapture))
                startPreProcessing = time.time()

            # Pre-process locally
            if self.nbOfPreprocessingSteps == 1 and (self.resizeWidth != 0 or self.resizeHeight != 0):
                preprocessedFrame = cv2.resize(frame, (self.resizeWidth, self.resizeHeight))

            if self.verbose:
                print("Time to pre-process a frame: " + self.__displayTimeDifferenceInMs(time.time(), startPreProcessing))
                startEncodingForProcessing = time.time()

            # Process externally
            if self.imageProcessingEndpoint != "":
                # Encode frame to send over HTTP
                if self.nbOfPreprocessingSteps == 0:
                    encodedFrame = cv2.imencode(".jpg", frame)[1].tobytes()
                else:
                    encodedFrame = cv2.imencode(".jpg", preprocessedFrame)[1].tobytes()

                if self.verbose:
                    print("Time to encode a frame for processing: " + self.__displayTimeDifferenceInMs(time.time(), startEncodingForProcessing))
                    startProcessingExternally = time.time()

                # Send over HTTP for processing
                response = self.__sendFrameForProcessing(encodedFrame)
                if self.verbose:
                    print("Time to process frame externally: " + self.__displayTimeDifferenceInMs(time.time(), startProcessingExternally))
                    startSendingToEdgeHub = time.time()

                # Forward the outcome of the external processing to the EdgeHub
                if response != "[]" and self.sendToHubCallback is not None:
                    await self.sendToHubCallback(response)
                    if self.verbose:
                        print("Time to message from processing service to edgeHub: " + self.__displayTimeDifferenceInMs(time.time(), startSendingToEdgeHub))
                        startDisplaying = time.time()

            # Display frames
            if self.showVideo:
                try:
                    if self.nbOfPreprocessingSteps == 0:
                        if self.verbose and (perfForOneFrameInMs is not None):
                            cv2.putText(frame, "FPS " + str(round(1000 / perfForOneFrameInMs, 2)), (10, 35), cv2.FONT_HERSHEY_SIMPLEX, 1.0, (0, 0, 255), 2)
                        # data = json.loads(response)
                        # offset = 35
                        # for item in data["predictions"]:
                        #     offset = offset + 35
                        #     cv2.putText(frame, item["tagName"] + " " + str(item["probability"]), (10, offset), cv2.FONT_HERSHEY_SIMPLEX, 1.0, (0, 0, 255), 2)
                        self.displayFrame = cv2.imencode('.jpg', frame)[1].tobytes()
                    else:
                        if self.verbose and (perfForOneFrameInMs is not None):
                            cv2.putText(preprocessedFrame, "FPS " + str(round(1000 / perfForOneFrameInMs, 2)), (10, 35), cv2.FONT_HERSHEY_SIMPLEX, 1.0, (0, 0, 255), 2)
                        # data = json.loads(response)
                        # offset = 35
                        # for item in data["predictions"]:
                        #     offset = offset + 35
                        #     cv2.putText(preprocessedFrame, item["tagName"] + " " + str(item["probability"]), (10, offset), cv2.FONT_HERSHEY_SIMPLEX, 1.0, (0, 0, 255), 2)
                        self.displayFrame = cv2.imencode('.jpg', preprocessedFrame)[1].tobytes()
                except Exception as e:
                    print("Could not display the video to a web browser.")
                    print('Exception -' + str(e))

                if self.verbose:
                    if 'startDisplaying' in locals():
                        print("Time to display frame: " + self.__displayTimeDifferenceInMs(time.time(), startDisplaying))
                    elif 'startSendingToEdgeHub' in locals():
                        print("Time to display frame: " + self.__displayTimeDifferenceInMs(time.time(), startSendingToEdgeHub))
                    else:
                        print("Time to display frame: " + self.__displayTimeDifferenceInMs(time.time(), startEncodingForProcessing))
                perfForOneFrameInMs = int((time.time() - startOverall) * 1000)

            if self.verbose:
                perfForOneFrameInMs = int((time.time() - startOverall) * 1000)
                print("Total time for one frame: " + self.__displayTimeDifferenceInMs(time.time(), startOverall))

    def __exit__(self, exception_type, exception_value, traceback):
        if self.showVideo:
            self.imageServer.close()
            cv2.destroyAllWindows()
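# --- Illustrative usage sketch (not part of the original module) ---
# In this variant start() is a coroutine and sendToHubCallback is awaited, so the
# capture loop has to be scheduled on an event loop. The callback below is a stub
# standing in for the real IoT Hub forwarding function.
import asyncio

async def send_to_hub_stub(message):
    print("Would forward to IoT Hub: " + message)

def run_capture_loop():
    with CameraCapture(videoPath="0", showVideo=False, verbose=True,
                       sendToHubCallback=send_to_hub_stub) as cameraCapture:
        asyncio.run(cameraCapture.start())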
def __init__(self, videoPath, imageProcessingEndpoint="", imageProcessingParams="", showVideo=False, verbose=False, loopVideo=True, convertToGray=False, resizeWidth=0, resizeHeight=0, annotate=False, sendToHubCallback=None, delay=0, computerVisionEndpoint_FULL=None, computerVisionKey=None): self.delay = delay self.computerVisionEndpoint_FULL = computerVisionEndpoint_FULL self.computerVisionKey = computerVisionKey self.videoPath = videoPath if self.__IsInt(videoPath): #case of a usb camera (usually mounted at /dev/video* where * is an int) self.isWebcam = True else: #case of a video file self.isWebcam = False self.imageProcessingEndpoint = imageProcessingEndpoint if imageProcessingParams == "": self.imageProcessingParams = "" else: # self.imageProcessingParams = json.loads(imageProcessingParams) self.imageProcessingParams = json.loads( '{"visualFeatures":"tags"}') self.showVideo = showVideo self.verbose = verbose self.loopVideo = loopVideo self.convertToGray = convertToGray self.resizeWidth = resizeWidth self.resizeHeight = resizeHeight self.annotate = (self.imageProcessingEndpoint != "") and self.showVideo & annotate self.nbOfPreprocessingSteps = 0 self.autoRotate = False self.sendToHubCallback = sendToHubCallback self.vs = None if self.convertToGray: self.nbOfPreprocessingSteps += 1 if self.resizeWidth != 0 or self.resizeHeight != 0: self.nbOfPreprocessingSteps += 1 if self.verbose: print( "Initialising the camera capture with the following parameters: " ) print(" - Video path: " + self.videoPath) print(" - Image processing endpoint: " + self.imageProcessingEndpoint) print(" - Image processing params: " + json.dumps(self.imageProcessingParams)) print(" - Show video: " + str(self.showVideo)) print(" - Loop video: " + str(self.loopVideo)) print(" - Convert to gray: " + str(self.convertToGray)) print(" - Resize width: " + str(self.resizeWidth)) print(" - Resize height: " + str(self.resizeHeight)) print(" - Annotate: " + str(self.annotate)) print(" - Send processing results to hub: " + str(self.sendToHubCallback is not None)) print() self.displayFrame = None if self.showVideo: self.imageServer = ImageServer(5012, self) self.imageServer.start() if self.computerVisionEndpoint_FULL: # self.credentials = CognitiveServicesCredentials(self.computerVisionKey) # self.client = ComputerVisionClient(self.computerVisionEndpoint_FULL, self.credentials) self.computerVisionHeader = { 'Ocp-Apim-Subscription-Key': self.computerVisionKey, 'Content-Type': 'application/octet-stream' }
def __init__(self,
             videoPath,
             imageProcessingEndpoint="",
             imageProcessingParams="",
             imageStorageEndpoint="",
             storeImage=False,
             showVideo=False,
             verbose=False,
             loopVideo=True,
             convertToGray=False,
             resizeWidth=0,
             resizeHeight=0,
             annotate=False,
             sendToHubCallback=None):
    self.videoPath = videoPath
    if self.__IsInt(videoPath):
        # case of a USB camera (usually mounted at /dev/video* where * is an int)
        self.isWebcam = True
    else:
        # case of a video file
        self.isWebcam = False
    self.imageProcessingEndpoint = imageProcessingEndpoint
    if imageProcessingParams == "":
        self.imageProcessingParams = ""
    else:
        self.imageProcessingParams = json.loads(imageProcessingParams)
    if imageStorageEndpoint == "":
        self.imageStorageEndpoint = ""
    else:
        self.imageStorageEndpoint = json.loads(imageStorageEndpoint)
    self.storeImage = storeImage
    self.showVideo = showVideo
    self.verbose = verbose
    self.loopVideo = loopVideo
    self.convertToGray = convertToGray
    self.resizeWidth = resizeWidth
    self.resizeHeight = resizeHeight
    self.annotate = (self.imageProcessingEndpoint != "") and self.showVideo and annotate
    self.nbOfPreprocessingSteps = 0
    self.autoRotate = False
    self.sendToHubCallback = sendToHubCallback
    self.vs = None

    if self.convertToGray:
        self.nbOfPreprocessingSteps += 1
    if self.resizeWidth != 0 or self.resizeHeight != 0:
        self.nbOfPreprocessingSteps += 1

    if self.verbose:
        print("Initialising the camera capture with the following parameters: ")
        print("   - Video path: " + self.videoPath)
        print("   - Image processing endpoint: " + self.imageProcessingEndpoint)
        print("   - Image processing params: " + json.dumps(self.imageProcessingParams))
        print("   - Image storage endpoint: " + json.dumps(self.imageStorageEndpoint))
        print("   - Store image: " + str(self.storeImage))
        print("   - Show video: " + str(self.showVideo))
        print("   - Loop video: " + str(self.loopVideo))
        print("   - Convert to gray: " + str(self.convertToGray))
        print("   - Resize width: " + str(self.resizeWidth))
        print("   - Resize height: " + str(self.resizeHeight))
        print("   - Annotate: " + str(self.annotate))
        print("   - Send processing results to hub: " + str(self.sendToHubCallback is not None))
        print()

    self.displayFrame = None
    if self.showVideo:
        self.imageServer = ImageServer(5012, self)
        self.imageServer.start()

    if self.storeImage:
        try:
            # Create the BlockBlobService that is used to call the Blob service for the storage account
            self.block_blob_service = BlockBlobService(
                account_name=self.imageStorageEndpoint["accountname"],
                account_key=self.imageStorageEndpoint["accountkey"])
            # Create a container
            self.block_blob_service.create_container(self.imageStorageEndpoint["containername"])
        except Exception as e:
            print("Failed to set up blob container: " + str(e))
            # Allow the module to continue but override storeImage
            self.storeImage = False
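# --- Illustrative sketch (not part of the original module) ---
# With storeImage enabled, a JPEG-encoded frame could be uploaded to the container
# created above via the legacy azure-storage BlockBlobService API. The method name
# and the blob-naming scheme below are assumptions, not taken from the original code.
def __storeFrame(self, encodedFrame, frameCounter):
    try:
        blobName = "frame-" + str(frameCounter) + ".jpg"
        self.block_blob_service.create_blob_from_bytes(
            self.imageStorageEndpoint["containername"], blobName, encodedFrame)
    except Exception as e:
        print("Failed to store frame in blob storage: " + str(e))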
# coding=utf-8
# Test-phase code
import numpy as np
import tensorflow as tf

import ImageServer
from models import DAN

testSet = ImageServer.Load(datasetDir + "challengingSet.npz")


def getLabelsForDataset(imageServer):
    nSamples = imageServer.gtLandmarks.shape[0]
    nLandmarks = imageServer.gtLandmarks.shape[1]

    y = np.zeros((nSamples, nLandmarks, 2), dtype=np.float32)
    y = imageServer.gtLandmarks

    return y.reshape((nSamples, nLandmarks * 2))


nSamples = testSet.gtLandmarks.shape[0]
imageHeight = testSet.imgSize[0]
imageWidth = testSet.imgSize[1]
nChannels = testSet.imgs.shape[1]

Xtest = testSet.imgs
Ytest = getLabelsForDataset(testSet)

meanImg = testSet.meanImg
stdDevImg = testSet.stdDevImg
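# --- Illustrative sketch (not part of the original test script) ---
# The mean/std images loaded above are typically used to normalise the test images
# before they are fed to the network; a minimal version of that step, assuming the
# arrays are broadcast-compatible, could look like this:
def normalizeImages(imgs, meanImg, stdDevImg):
    # Standardise every image with the dataset-wide mean and standard deviation.
    return (imgs.astype(np.float32) - meanImg) / stdDevImg

XtestNorm = normalizeImages(Xtest, meanImg, stdDevImg)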
def main(imageProcessingEndpoint="",
         imageProcessingParams="",
         showVideo=False,
         verbose=False,
         loopVideo=True,
         convertToGray=False,
         resizeWidth=0,
         resizeHeight=0,
         annotate=False):
    '''
    Capture a camera feed, send it to processing and forward outputs to EdgeHub

    :param str imageProcessingEndpoint: service endpoint to send the frames to for processing. Example: "http://face-detect-service:8080". Leave empty when no external processing is needed (Default). Optional.
    :param str imageProcessingParams: query parameters to send to the processing service. Example: "'returnLabels': 'true'". Empty by default. Optional.
    :param bool showVideo: show the video in a window. False by default. Optional.
    :param bool verbose: show detailed logs and perf timers. False by default. Optional.
    :param bool loopVideo: when reading from a video file, it will loop this video. True by default. Optional.
    :param bool convertToGray: convert to gray before sending to external service for processing. False by default. Optional.
    :param int resizeWidth: resize frame width before sending to external service for processing. Does not resize by default (0). Optional.
    :param int resizeHeight: resize frame height before sending to external service for processing. Does not resize by default (0). Optional.
    :param bool annotate: when showing the video in a window, it will annotate the frames with rectangles given by the image processing service. False by default. Optional. Rectangles should be passed in a json blob with a key containing the string rectangle, and a top left corner + bottom right corner or top left corner with width and height.
    '''
    try:
        print("\nPython %s\n" % sys.version)
        print("Camera Capture Azure IoT Edge Module. Press Ctrl-C to exit.")
        try:
            global hubManager
            global twin_flag
            hubManager = HubManager(10000, IoTHubTransportProvider.MQTT, verbose)
            print("waiting for twin props")
            while twin_flag:
                time.sleep(1)
        except IoTHubError as iothub_error:
            print("Unexpected error %s from IoTHub" % iothub_error)
            return

        global camera_capture
        global imageServer
        while True:
            with CameraCapture(video_path, imageProcessingEndpoint,
                               imageProcessingParams, showVideo, verbose,
                               loopVideo, convertToGray, resizeWidth,
                               resizeHeight, annotate,
                               send_to_Hub_callback) as cameraCapture:
                camera_capture = cameraCapture
                if showVideo:
                    if imageServer is None:
                        imageServer = ImageServer(5012, cameraCapture)
                        imageServer.start()
                    else:
                        imageServer.setCameraCapture(cameraCapture)
                cameraCapture.start()
    except KeyboardInterrupt:
        print("Camera capture module stopped")
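# --- Illustrative sketch (not part of the original module) ---
# IoT Edge sample modules like this usually read their settings from the container's
# environment variables before calling main(); the variable names and the string-to-bool
# helper below are assumptions, not taken from the original code.
import os

def convertStringToBool(env):
    if env in ['True', 'TRUE', '1', 'y', 'YES', 'Y', 'Yes']:
        return True
    elif env in ['False', 'FALSE', '0', 'n', 'NO', 'N', 'No']:
        return False
    else:
        raise ValueError('Could not convert string to bool.')

if __name__ == '__main__':
    main(imageProcessingEndpoint=os.environ.get('IMAGE_PROCESSING_ENDPOINT', ''),
         showVideo=convertStringToBool(os.environ.get('SHOW_VIDEO', 'False')),
         verbose=convertStringToBool(os.environ.get('VERBOSE', 'False')))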