def change_sources(self):
    self.source_number += 1
    if self.source_number == len(self.change_list):
        self.source_number = 0
    self.vs.close()
    self.vs = VideoStream(self.change_list[self.source_number])
    self.vs.run()
def getServerInfo(self):
    self.serverInfo = {}
    for file in os.listdir(VIDEO_DIR):
        if file.endswith(VIDEO_FILE_EXT):
            videoStream = VideoStream(file)
            videoStream.countFrame()
            self.serverInfo[file] = videoStream.frameCnt
def __enter__(self):
    if self.isWebcam:
        # The VideoStream class always gives us the latest frame from the webcam.
        # It uses another thread to read the frames.
        self.vs = VideoStream(int(self.videoPath)).start()
        # Needed to load at least one frame into the VideoStream class.
        time.sleep(1.0)
        # self.capture = cv2.VideoCapture(int(self.videoPath))
    else:
        # In the case of a video file, we want to analyze all the frames of the video,
        # so we are not using the VideoStream class.
        self.capture = cv2.VideoCapture(self.videoPath)
    return self
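# A matching __exit__ is not shown in this snippet. A minimal counterpart, assuming the
# attribute names used above and that self.vs exposes the usual stop() method, might
# release whichever capture object was created; this is a sketch, not the original code.
def __exit__(self, exception_type, exception_value, traceback):
    if self.isWebcam:
        self.vs.stop()          # stop the background reader thread
    else:
        self.capture.release()  # release the cv2.VideoCapture handle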
def open_stream(self):
    server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    server.connect((self.ip_address, XiaoYiActionCamera.DEFAULT_PORT))
    server.send(b'{"msg_id":257,"token":0}')

    data = str(server.recv(512))
    while "rval" not in data:
        data = str(server.recv(512))
    token = re.findall('"param": (.+) }', data)[0]

    stop_stream = bytes('{"msg_id":260,"token":%s}' % token, encoding='ascii')
    server.send(stop_stream)
    start_stream = bytes('{"msg_id":259,"token":%s,"param":"none_force"}' % token, encoding='ascii')
    server.send(start_stream)

    def drain_camera_socket():
        ignored = server.recv(512)
        while ignored:
            ignored = server.recv(512)

    drain = threading.Thread(target=drain_camera_socket, args=())
    drain.daemon = True
    drain.start()

    return VideoStream(
        FFmpegVideoCapture(XiaoYiActionCamera.STREAM_URL % self.ip_address,
                           self.stream_width, self.stream_height, 'bgr24'))
def getFrameNum(self, filename):
    video = VideoStream(filename)
    frameNum = -1
    while frameNum != video.frameNbr():
        frameNum = video.frameNbr()
        video.nextFrame()
    return frameNum
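# Several snippets in this section construct VideoStream(filename) and then call
# nextFrame() / frameNbr() without showing the class itself. For reference, here is a
# minimal sketch of such a file-backed stream. It assumes the common streaming-lab MJPEG
# container in which every JPEG frame is preceded by a 5-byte ASCII length field; that
# format is an assumption, and this is not the definitive VideoStream used by these projects.
class VideoStreamSketch:
    def __init__(self, filename):
        self.filename = filename
        self.file = open(filename, 'rb')  # raises IOError/OSError if the file is missing
        self.frameNum = 0

    def nextFrame(self):
        """Return the next JPEG frame, or an empty byte string at end of stream."""
        header = self.file.read(5)        # 5-byte ASCII frame-length prefix (assumed format)
        if not header:
            return b''
        framelength = int(header)
        data = self.file.read(framelength)
        self.frameNum += 1
        return data

    def frameNbr(self):
        """Return the number of the frame most recently read."""
        return self.frameNum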
def __init__(self):
    """Constructor"""
    global SVM_PATH
    self.my_x = 0
    self.my_y = 0
    self.my_theta = 0
    self.other_x = 0
    self.other_y = 0
    self.other_theta = 0
    self.speed = 0
    rospy.init_node('g3')
    self.i2c = I2CHandler()
    self.vs = VideoStream().start()
    self.posData = [[-1, -1], [-1, -1], [-1, -1], [-1, -1], [-1, -1]]  # -1 represents no data --> no data has been received at the start
    self.followingData = [-1, -1, -1, -1, -1]  # -1 represents leader --> at the start everyone is the leader
    self.fanOutData = [-1, -1, -1, -1, -1]  # -1 represents that no data has been received
    self.laneToGoTo = -1
    self.fanOutFlag = False
    self.foundLaneTime = -1
    self.robot_follower = RobotFollower(100, k_p=0.5)
    # self.serial = SerialHandler('/dev/ttyUSB0', 9600)
    self.turnC = TurnCheck([640, 480], "segImage.png")
    # self.gps_other = GpsClient(self.position_callback_other, color='green')
    self.heartbeat = Heartbeat(self.heartbeat_callback, self.platoon_pos_callback,
                               self.fan_out_callback, self.lane_change_callback)
    self.gps = GpsClient(self.position_callback)
    self.image_recognizer = ImageRecognizer(SVM_PATH)
    self.get_ultrasound()
    time.sleep(2)
def processRtspRequest(self, data):
    """Process RTSP request from the client."""
    request = data.split('\n')
    # print(request)
    param = request[0]
    line = param.split(' ')
    seq = request[1].split(' ')[1]
    # addr = request[2]
    # List = param.split(' ')
    # seqList = seq.split(' ')
    # addrList = addr.split(' ')
    requestType = line[0]

    if requestType == 'SETUP':
        self.clientInfo['videoStream'] = VideoStream(line[1])
        self.clientInfo['seqNum'] = 0
        self.clientInfo['rtpPort'] = request[2].split(' ')[3]
        self.state = OK
        self.clientInfo['session'] = randint(100000, 999999)  # generate a random session ID
        self.RtspResponse(seq)
    elif requestType == 'PLAY':
        if self.state == OK:
            self.state = PLAYING
            self.clientInfo['rtpSocket'] = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
            self.RtspResponse(seq)
            self.clientInfo['event'] = threading.Event()
            self.clientInfo['worker'] = threading.Thread(target=self.sendRtp)
            self.clientInfo['worker'].start()  # start playback
    elif requestType == 'PAUSE':
        if self.state == PLAYING:
            self.state = OK
            self.clientInfo['event'].set()  # stop playback
            self.RtspResponse(seq)
    elif requestType == 'TEARDOWN':
        self.clientInfo['event'].set()
        self.RtspResponse(seq)
        self.clientInfo['rtpSocket'].close()
def processRtspRequest(self, data):
    """Process RTSP request sent from the client."""
    # Get the request type
    request = data.split('\n')
    line1 = request[0].split(' ')
    requestType = line1[0]

    # Get the media file DBid
    filename = line1[1]

    # Get the RTSP sequence number
    seq = request[1].split(' ')

    # Process SETUP request
    if requestType == self.SETUP:
        if self.state == self.INIT:
            # Update state
            print("processing SETUP")
            try:
                absName = filename[:filename.__len__() - (filename.split(".")[filename.split(".").__len__() - 1].__len__() + 1)]
                self.clientInfo['videoStream'] = VideoStream("./contents/" + filename)  # it must be a path
                self.waveFile = wave.open('./contents/' + absName + '.wav', 'rb')
                self.state = self.READY
            except IOError:
                self.replyRtsp(self.FILE_NOT_FOUND_404, seq[1])

            # Generate a randomized RTSP session ID
            self.clientInfo['session'] = randint(100000, 999999)

            # Send RTSP reply
            self.replyRtsp(self.OK_200, seq[1])

            # Get the RTP/UDP port from the last line
            self.clientInfo['rtpPort'] = request[2].split(' ')[3]

    # Process PLAY request
    elif requestType == self.PLAY:
        if self.state == self.READY:
            print("processing PLAY")
            self.state = self.PLAYING

            # Create a new socket for RTP/UDP
            self.clientInfo["rtpSocket"] = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
            self.clientInfo["waveSocket"] = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
            self.replyRtsp(self.OK_200, seq[1])

            # Create a new thread and start sending RTP packets
            self.clientInfo['event'] = threading.Event()
            self.clientInfo['worker'] = threading.Thread(target=self.sendRtp)
            self.clientInfo['wave'] = threading.Thread(target=self.sendWave)
            self.clientInfo['wave'].start()
            self.clientInfo['worker'].start()

    # Process PAUSE request
    elif requestType == self.PAUSE:
        if self.state == self.PLAYING:
            print("processing PAUSE")
            self.state = self.READY
            self.clientInfo['event'].set()
            self.replyRtsp(self.OK_200, seq[1])

    # Process TEARDOWN request
    elif requestType == self.TEARDOWN:
        print("processing TEARDOWN")
        self.clientInfo['event'].set()
        self.replyRtsp(self.OK_200, seq[1])

        # Close the RTP socket
        self.clientInfo['rtpSocket'].close()
        self.mainSock.send(pickle.dumps("out," + str(self.agentSeqNum)))
import cv2
import imutils
import numpy as np
from VideoStream import VideoStream
from KeyClipWrite import KeyClipWriter

outputfile = "Ash"
write = None
vs = VideoStream(src=0).start()
fourcc = "MJPG"
fps = 20
(h, w) = (None, None)
# kcw = KeyClipWriter(32, 1.0)
# frame = vs.read()
# frame = imutils.resize(frame, width=600)
# kcw.update(frame)

while True:
    frame = vs.read()
    frame = imutils.resize(frame, width=300)

    if write is None:
        out = "{}.avi".format(outputfile)
        (h, w) = frame.shape[:2]
        write = cv2.VideoWriter(out, cv2.VideoWriter_fourcc(*fourcc), fps, (w * 2, h * 2), True)
        zeros = np.zeros(frame.shape[:2], dtype="uint8")

    (B, G, R) = cv2.split(frame)
    R = cv2.merge([zeros, zeros, R])
    G = cv2.merge([zeros, G, zeros])
    B = cv2.merge([B, zeros, zeros])
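    # The snippet above is cut off before the frames are written out. A plausible
    # continuation, and the reason the VideoWriter was opened at (w * 2, h * 2), is the
    # usual 2x2 montage of the original frame next to its R/G/B channels. The layout below
    # is an assumption, not necessarily what the original script did.
    output = np.zeros((h * 2, w * 2, 3), dtype="uint8")
    output[0:h, 0:w] = frame          # top-left: original frame
    output[0:h, w:w * 2] = R          # top-right: red channel
    output[h:h * 2, w:w * 2] = G      # bottom-right: green channel
    output[h:h * 2, 0:w] = B          # bottom-left: blue channel
    write.write(output)

    cv2.imshow("Output", output)
    if cv2.waitKey(1) & 0xFF == ord("q"):
        break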
def processRtspRequest(self, data):
    """Process RTSP request sent from the client."""
    # Get the request type
    request = data.split('\n')
    line1 = request[0].split(' ')
    requestType = line1[0]

    # Get the media file name
    filename = line1[1]

    # Get the RTSP sequence number
    seq = request[1].split(' ')

    # Process SETUP request
    if requestType == self.SETUP:
        if self.state == self.INIT:
            # Update state
            print("processing SETUP\n")

            # Generate a randomized RTSP session ID
            self.clientInfo['session'] = randint(100000, 999999)

            try:
                self.clientInfo['videoStream'] = VideoStream(filename)
                self.state = self.READY
            except IOError:
                self.replyRtsp(self.FILE_NOT_FOUND_404, seq[1])

            # Send RTSP reply
            self.replyRtsp(self.OK_200, seq[1])

            # Get the RTP/UDP port from the last line
            self.clientInfo['rtpPort'] = request[2].split(' ')[3]
            self.countRTPSent = 0

    # Process PLAY request
    elif requestType == self.PLAY:
        if self.state == self.READY:
            print("processing PLAY\n")
            self.state = self.PLAYING

            # Create a new socket for RTP/UDP
            self.clientInfo["rtpSocket"] = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
            self.replyRtsp(self.OK_200, seq[1])

            # Create a new thread and start sending RTP packets
            self.clientInfo['event'] = threading.Event()
            self.clientInfo['worker'] = threading.Thread(target=self.sendRtp)
            self.clientInfo['worker'].start()

    # Process PAUSE request
    elif requestType == self.PAUSE:
        if self.state == self.PLAYING:
            print("processing PAUSE\n")
            self.state = self.READY
            self.clientInfo['event'].set()
            self.replyRtsp(self.OK_200, seq[1])

            # Report the packet loss rate using the received-count the client sent
            line4 = self.countRTPSent - int(request[3].split(' ')[1])
            print("=========================\nRTP packet loss rate: " + str(line4) + '/' + str(self.countRTPSent) + "\n=========================\n")

    # Process TEARDOWN request
    elif requestType == self.TEARDOWN:
        self.state = self.INIT
        print("processing TEARDOWN\n")
        self.clientInfo['event'].set()
        self.replyRtsp(self.OK_200, seq[1])

        # Close the RTP socket
        self.clientInfo['rtpSocket'].close()

    # Process DESCRIBE request
    elif requestType == self.DESCRIBE:
        print("processing DESCRIBE\n")
        # self.clientInfo['event'].set()
        self.replyRtsp(self.OK_200, seq[1], 'describe')
def processRtspRequest(self, data):
    """Process RTSP request sent from the client."""
    # Get the request type
    request = data.split('\n')
    line1 = request[0].split(' ')
    requestType = line1[0]

    # Get the media file name
    filename = line1[1]

    # Get the RTSP sequence number
    seq = request[1].split(' ')

    # Process SETUP request
    if requestType == self.SETUP:
        if self.state == self.INIT:
            # Update state
            print("processing SETUP\n")
            try:
                self.clientInfo['videoStream'] = VideoStream(filename)
                self.state = self.READY
            except IOError:
                self.replyRtsp(self.FILE_NOT_FOUND_404, seq[0])

            # Generate a randomized RTSP session ID
            self.clientInfo['session'] = randint(100000, 999999)

            # Send RTSP reply
            self.replyRtsp(self.OK_200, seq[0])

            # Get the RTP/UDP port from the last line
            self.clientInfo['rtpPort'] = request[2].split(' ')[3]

    # Process PLAY request
    elif requestType == self.PLAY:
        if self.state == self.READY:
            print("processing PLAY\n")
            self.state = self.PLAYING

            # Create a new socket for RTP/UDP
            self.clientInfo["rtpSocket"] = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
            self.replyRtsp(self.OK_200, seq[0])

            # Create a new thread and start sending RTP packets
            self.clientInfo['event'] = threading.Event()
            self.clientInfo['worker'] = threading.Thread(target=self.sendRtp)
            self.clientInfo['worker'].start()

    # Process PAUSE request
    elif requestType == self.PAUSE:
        if self.state == self.PLAYING:
            print("processing PAUSE\n")
            self.state = self.READY
            try:
                self.clientInfo['event'].set()
            except:
                sys.exit(0)
            self.replyRtsp(self.OK_200, seq[0])

    # Process TEARDOWN request
    elif requestType == self.TEARDOWN:
        print("processing TEARDOWN\n")
        try:
            self.clientInfo['event'].set()
        except:
            pass
        self.replyRtsp(self.OK_200, seq[0])

        # Close the RTP socket
        try:
            self.clientInfo['rtpSocket'].close()
        except:
            pass
from picamera import PiCamera
from WebcamVideoStream import WebcamVideoStream
from VideoStream import VideoStream
import imutils
from threading import Thread
import cv2
# Note: this import shadows the local VideoStream imported above; the imutils
# implementation is the one actually used below.
from imutils.video import VideoStream
import datetime
import argparse
import time
from Stepper import Stepper

ap = argparse.ArgumentParser()
ap.add_argument("-p", "--picamera", type=int, default=-1,
                help="whether the PiCamera being used")
args = vars(ap.parse_args())

vs = VideoStream(usePiCamera=args["picamera"] > 0).start()
time.sleep(2.0)

# Creating a VideoCapture object while also specifying which camera will be used
# for the purpose of capturing the video, using the input parameter 0.
cap = cv2.VideoCapture(0)

while True:
    frame = vs.read()
    frame = imutils.resize(frame, width=400)

    # Converting captured frame to monochrome
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

    # Blurring the image using the GaussianBlur() method of the opencv object
    blur = cv2.GaussianBlur(gray, (9, 9), 0)
def processRtspRequest(self, data): """Process RTSP request sent from the client.""" # Get the request type request = data.split('\n') #line1 = request[0].split(' ') #requestType = line1[0] requestType, url, ver = re.split(r'\s+', request[0].strip()) # Get the media file name #filename = line1[1] hdr = {} for r in request[1:]: if len(r.strip()) == 0: continue arr = r.split(":") if len(arr) == 2: hdr[arr[0].strip().upper()] = arr[1].strip() else: print "not ok:", r, arr # Get the RTSP sequence number #Cseq=hdr['CSEQ'] # Process SETUP request if requestType == self.OPTIONS: if self.state == self.INIT: print "processing OPTIONS\n" self.replyOption(self.OK_200, hdr) elif requestType == self.DESCRIBE: if self.state == self.INIT: print "processing DESCRIBE\n" # Generate a randomized RTSP session ID self.clientInfo['session'] = randint(100000, 999999) v = VideoStream() self.clientInfo['videoStream'] = v sdp = v.getSdp() self.replyDescribe(self.OK_200, hdr, sdp) print "processing DESCRIBE Done\n" elif requestType == self.SETUP: if self.state == self.INIT: # Update state print "processing SETUP\n" self.state = self.READY s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) s.bind(('', 0)) self.udpPort = s.getsockname()[1] self.clientInfo["rtpSocket"] = s self.clientInfo['rtpPort'] = int( hdr['TRANSPORT'].split(';')[2].split('=')[1].split("-")[0]) # Send RTSP reply self.replySetup(self.OK_200, hdr) print "processing SETUP Done\n" # Process PLAY request elif requestType == self.PLAY: if self.state == self.READY: print "processing PLAY\n" self.state = self.PLAYING # Create a new socket for RTP/UDP #self.clientInfo["rtpSocket"] = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) self.replyPlay(self.OK_200, hdr) print "processing PLAY Done\n" # Create a new thread and start sending RTP packets self.clientInfo['event'] = threading.Event() self.clientInfo['worker'] = threading.Thread( target=self.sendRtp) self.clientInfo['worker'].start() # Process PAUSE request elif requestType == self.PAUSE: if self.state == self.PLAYING: print "processing PAUSE\n" self.state = self.READY self.clientInfo['event'].set() self.replyPause(self.OK_200, hdr) print "processing PAUSE Done\n" # Process TEARDOWN request elif requestType == self.TEARDOWN: print "processing TEARDOWN\n" self.clientInfo['event'].set() self.replyTeardown(self.OK_200, hdr) print "processing TEARDOWN Done\n" # Close the RTP socket self.clientInfo['rtpSocket'].close()
from VideoStream import VideoStream
from FaceFilters import FaceFilters
from faceTk import GUIFace
import time

filters = ['glasses.png', 'sunglasses.png', 'sunglasses1.png', 'sunglasses2.png',
           'dog.png', 'rabbit.png', 'moustache.png', 'moustache1.png', 'ironman.png', 'capAmerica.png']

vs = VideoStream(0).start()
fc = FaceFilters(filters)
time.sleep(2.0)

gui = GUIFace(vs, fc, 'output')
class ServerWorker:
    def __init__(self, clientInfo):
        self.clientInfo = clientInfo
        self.csession = 0
        self.videoStream = None
        self.rtpPort = None
        self.rtpSocket = None
        self.sendRtpThread = None
        self.clientAddr = None

    def run(self):
        threading.Thread(target=self.recvRtspRequest).start()

    def recvRtspRequest(self):
        responseCode = RESPONSE_OK
        conn, (address, port) = self.clientInfo['rtspSocket']
        spath = os.path.dirname(os.path.realpath(sys.argv[0]))
        while True:
            try:
                data, tail = conn.recvfrom(RTSPBUFFERSIZE)
                if data:
                    printLogToConsole(data)
                    temp = data.split(MESSAGESEP)
                    eventType = self.getEventTypeFromRTSP(temp)
                    otherData = ''
                    if eventType == ActionEvents.EVSTEPUP:
                        self.csession = ''.join(str(uuid.uuid1()).split('-'))
                        try:
                            vFileName = spath + "\\" + self.getVidoeFileNameFromRTSP(temp)
                            self.videoStream = VideoStream(vFileName)
                            self.rtpPort = self.getRtpPortFromRTSP(temp)
                            self.clientAddr = address
                            otherData = MESSAGESEP + "Vsize: " + str(os.stat(vFileName).st_size)
                        except IOError:
                            responseCode = RESPONSE_NOTFOUND
                    elif eventType == ActionEvents.EVPLAY:
                        if self.rtpSocket is None:
                            self.rtpSocket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
                        self.sendRtpThread = threading.Event()
                        self.sendRtpThread.clear()
                        threading.Thread(target=self.sendRtp).start()
                    elif eventType == ActionEvents.EVPAUSE:
                        if self.sendRtpThread is not None:
                            self.sendRtpThread.set()  # signal to stop the thread
                    elif eventType == ActionEvents.EVTEARDOWN:
                        if self.sendRtpThread is not None:
                            self.sendRtpThread.set()
                    elif eventType == ActionEvents.EVCLOSERTSPSOCKET:
                        conn.close()
                        conn = None
                        break
                    if conn is not None:
                        conn.send(RTSPVERSION + ' ' + responseCode + MESSAGESEP + temp[1] + MESSAGESEP + "Session: " + str(self.csession) + otherData)
            except socket.error:
                print "\n"
                traceback.print_exc(file=sys.stdout)
            except:
                print "\n"
                traceback.print_exc(file=sys.stdout)

    def getEventTypeFromRTSP(self, request):
        dataFrame = request[0].split()
        return dataFrame[0][:len(dataFrame[0]) - 1]

    def getVidoeFileNameFromRTSP(self, request):
        fileName = request[0].split()[1]
        return fileName

    def getRtpPortFromRTSP(self, request):
        dataFrame = request[2].split(';')
        return int(dataFrame[1].split()[1])

    def sendRtp(self):
        while True:
            if self.sendRtpThread.isSet():
                break
            time.sleep(0.05)  # pause for 50 milliseconds
            try:
                vidata = self.videoStream.nextFrame()
                if vidata:
                    rtpp = RtpPacket()
                    rtpp.encode(2, 0, 0, 0, self.videoStream.frameNbr(), 0, 26, 6, vidata)
                    self.rtpSocket.sendto(rtpp.getPacket(), (self.clientAddr, self.rtpPort))
                else:
                    printLogToConsole("end of stream")
                    self.sendRtpThread.set()
                    self.rtpSocket.sendto(RTSPVERSION + ' ' + RESPONSE_OK_END + MESSAGESEP + "TotalFrame: " + str(self.videoStream.frameNbr()) + MESSAGESEP + "Session: " + str(self.csession), (self.clientAddr, self.rtpPort))
                    try:
                        self.videoStream.file.close()
                    except:
                        printLogToConsole("close file error")
                    break
            except:
                print "\n"
                traceback.print_exc(file=sys.stdout)
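# The sendRtp loop above depends on an RtpPacket class that does not appear in this
# section. Its call rtpp.encode(2, 0, 0, 0, frameNbr, 0, 26, 6, vidata) suggests the
# (version, padding, extension, cc, seqnum, marker, pt, ssrc, payload) argument order used
# in the classic streaming lab. The sketch below packs a standard 12-byte RTP header
# (RFC 3550) under that assumption; it is illustrative, not this project's actual RtpPacket.
import time


class RtpPacketSketch:
    HEADER_SIZE = 12

    def encode(self, version, padding, extension, cc, seqnum, marker, pt, ssrc, payload):
        """Pack the RTP header fields and keep the payload for getPacket()."""
        timestamp = int(time.time())
        header = bytearray(self.HEADER_SIZE)
        header[0] = (version << 6) | (padding << 5) | (extension << 4) | cc
        header[1] = (marker << 7) | pt
        header[2] = (seqnum >> 8) & 0xFF       # sequence number, high byte
        header[3] = seqnum & 0xFF              # sequence number, low byte
        header[4] = (timestamp >> 24) & 0xFF   # 32-bit timestamp
        header[5] = (timestamp >> 16) & 0xFF
        header[6] = (timestamp >> 8) & 0xFF
        header[7] = timestamp & 0xFF
        header[8] = (ssrc >> 24) & 0xFF        # 32-bit synchronization source identifier
        header[9] = (ssrc >> 16) & 0xFF
        header[10] = (ssrc >> 8) & 0xFF
        header[11] = ssrc & 0xFF
        self.header = header
        self.payload = payload

    def getPacket(self):
        """Return the encoded header followed by the payload."""
        return bytes(self.header) + self.payload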
class ONNXRuntimeModelDeploy(ObjectDetection, ImageClassification): """Object Detection class for ONNX Runtime """ def __init__(self, manifest_dir, cam_type="video_file", cam_source="/sample_video/video.mp4", tu_flag_=False): # Default system params #ToDo make this twin property self.render = False # Application parameters self.img_width = 0 self.img_height = 0 self.cap_handle = None self.vs = None self.session = None self.cam_type = cam_type self.cam_source = cam_source self.video_handle = None self.twin_update_flag = tu_flag_ self.m_parser(manifest_dir) def m_parser(self, model_dir): m_file = open(model_dir + str("/cvexport.manifest")) data = json.load(m_file) # cvexport manifest prameters self.domain_type = str(data["DomainType"]) print("Domain Type:", self.domain_type) # default anchors if str(self.domain_type) == "ObjectDetection": objdet = ObjectDetection(data, model_dir, None) ret = self.model_inference(objdet, iot_hub_manager, 1) elif str(self.domain_type) == "Classification": imgcls = ImageClassification(data, model_dir) ret = self.model_inference(imgcls, iot_hub_manager, 0) else: print( "Error: No matching DomainType: Object Detection/Image Classificaiton \n" ) print("Exiting.....!!!! \n") sys.exit(0) if ret == 1: print("App finished running Inference...Exiting...!!!") sys.exit(1) #def predict(self, preprocessed_image): # inputs = np.array(preprocessed_image, dtype=np.float32)[np.newaxis,:,:,(2,1,0)] # RGB -> BGR # inputs = np.ascontiguousarray(np.rollaxis(inputs, 3, 1)) # start = time.time() # outputs = self.session.run(None, {self.input_name: inputs}) # end = time.time() # inference_time = end - start # return np.squeeze(outputs).transpose((1,2,0)), inference_time def create_video_handle(self, cam_type="video_file", cam_source="/sample_video/video.mp4"): global stream_handle print("cam_source:: " + cam_source + " cam_type :: " + cam_type) if cam_type == "video_file": video_dir = "sample_video" # By default video file name should be video.mp4/avi if os.path.exists(str(video_dir) + "/video.mp4"): #if cam_source: self.video_handle = str(str(video_dir) + "/video.mp4") elif os.path.exists(str(video_dir) + "/video.avi"): #if cam_source: self.video_handle = str(str(video_dir) + "/video.avi") else: print("\n ERROR: Camera source Not Found...!!!") print("\n Exiting inference...") sys.exit(0) elif cam_type == "rtsp_stream": if cam_source: self.video_handle = str(cam_source) print("settin cam_source to value :: " + cam_source) else: print("\n ERROR: Camera source Not Found...!!!") print("\n Exiting inference...") sys.exit(0) else: web_cam_found = False for i in range(4): if os.path.exists("/dev/video" + str(i)): web_cam_found = True break if web_cam_found: self.video_handle = "/dev/video" + str(i) else: print("\n Error: Input Camera device not found/detected") print("\n Exiting inference...") sys.exit(0) self.vs = VideoStream(self.video_handle).start() # Reading widht and height details self.img_width = int(self.vs.stream.get(cv2.CAP_PROP_FRAME_WIDTH)) self.img_height = int(self.vs.stream.get(cv2.CAP_PROP_FRAME_HEIGHT)) stream_handle = True def model_inference(self, obj, iot_hub_manager, pp_flag): global stream_handle last_time = time.time() # Default video surce to usb_cam @ /dev/video0 # If can change it to video_file in twin updates # ***** Requirments to pass a video_file ***** # Video file should be .mp4/.avi extension with name of file a "video" Ex: video.mp4/avi # Video file should be an url link to a .zip folder self.create_video_handle(cam_type=self.cam_type, 
cam_source=self.cam_source) while self.vs.stream.isOpened(): if iot_hub_manager.setRestartCamera == True: iot_hub_manager.setRestartCamera = False if iot_hub_manager.model_url == None: model_folder = "./default_model" else: model_folder = iot_hub_manager.model_dst_folder #self.cap_handle.release() obj.session = None #RunOptions.terminate = True self.vs.stream.release() if (self.render): cv2.destroyAllWindows() if os.path.exists( str(model_folder) + str('/cvexport.manifest')): print("\n Reading cvexport.config file from model folder") config_file_dir = str(model_folder) #self.create_video_handle(iot_hub_manager.cam_type, iot_hub_manager.cam_source) self.__init__(config_file_dir, iot_hub_manager.cam_type, iot_hub_manager.cam_source, True) elif os.path.exists("./default_model/cvexport.manifest"): config_file_dir = "./default_model" print( "\n Reading cvexport.manifest file from default_model folder" ) #self.create_video_handle(iot_hub_manager.cam_type, iot_hub_manager.cam_source) self.__init__(config_file_dir, iot_hub_manager.cam_type, iot_hub_manager.cam_source, True) else: print( "\n ERROR: cvexport.manifest not found check root/model dir" ) print("\n Exiting inference....") sys.exit(0) #iot_hub_manager.setRestartCamera = False # Caputre frame-by-frame frame = self.vs.read() print(frame) if self.twin_update_flag: predictions, infer_time = obj.predict_image(frame) print(pp_flag) # if Object Detection if pp_flag: for d in predictions: x = int(d['boundingBox']['left'] * self.img_width) y = int(d['boundingBox']['top'] * self.img_height) w = int(d['boundingBox']['width'] * self.img_width) h = int(d['boundingBox']['height'] * self.img_height) x_end = x + w y_end = y + h start = (x, y) end = (x_end, y_end) out_label = str(d['tagName']) score = str(int(d['probability'] * 100)) print("Found label " + out_label + " with probability :: " + score) if 0.50 < d['probability']: frame = cv2.rectangle(frame, start, end, (100, 255, 100), 2) out_label = str(d['tagName']) score = str(int(d['probability'] * 100)) cv2.putText(frame, out_label, (x - 5, y), cv2.FONT_HERSHEY_COMPLEX, 0.5, (255, 255, 255), 2) #cv2.putText(frame, score, (x+w-50, y), cv2.FONT_HERSHEY_COMPLEX, 0.5, (0, 255, 0), 2) message = { "Label": out_label, "Confidence": score, "Position": [x, y, x_end, y_end], "TimeStamp": datetime.datetime.utcnow().strftime( "%Y-%m-%d %H:%M:%S") } # Send message to IoT Hub if iot_hub_manager is not None: last_time = iot_hub_manager.send_message_to_upstream( json.dumps(message), last_time) else: #Postprocessing for Classificaton model res = obj.postprocess(predictions) idx = np.argmax(res) frame = cv2.putText(frame, obj.labels[idx], (15, 15), cv2.FONT_HERSHEY_COMPLEX, 0.5, (0, 255, 0), 2) message = { "Label": obj.labels[idx], "TimeStamp": datetime.datetime.utcnow().strftime( "%Y-%m-%d %H:%M:%S") } # Send message to IoT Hub # ToDo send to module # ToDo set the frequncy from module twin if iot_hub_manager is not None: last_time = iot_hub_manager.send_message_to_upstream( json.dumps(message), last_time) cv2.putText(frame, 'FPS: {}'.format(1.0 / infer_time), (10, 40), cv2.FONT_HERSHEY_COMPLEX, 0.5, (255, 0, 255), 1) if self.render: # Displaying the image cv2.imshow("Inference results", frame) if cv2.waitKey(1) & 0xFF == ord('q'): break cv2.destroyAllWindows() # when everything done, release the capture self.vs.__exit__(None, None, None) return True
def processRtspRequest(self, data): """Process RTSP request sent from the client.""" # Get the request type request = data.split('\n') line1 = request[0].split(' ') self.requestType = line1[0] # Get the media file name self.filename = line1[1] # Get the RTSP sequence number seq = request[1].split(' ') # Process SETUP request if self.requestType == self.SETUP: if self.state == self.INIT or self.state == self.SWITCHING: # Update state print("processing SETUP\n") try: self.clientInfo['videoStream'] = VideoStream(self.filename) self.state = self.READY except IOError: self.replyRtsp(self.FILE_NOT_FOUND_404, seq[1]) # Generate a randomized RTSP session ID self.clientInfo['session'] = randint(100000, 999999) # Send RTSP reply self.replyRtsp(self.OK_200, seq[1]) # Get the RTP/UDP port from the last line self.clientInfo['rtpPort'] = request[2].split(' ')[3] # Initialize a frame counter self.frameCnt = 0 # Process DESCRIBE request elif self.requestType == self.DESCRIBE: print("processing DESCRIBE\n") self.replyRtsp(self.OK_200, seq[1]) # Process PLAY request elif self.requestType == self.PLAY: if self.state == self.READY: print("processing PLAY\n") self.state = self.PLAYING requestedFrame = int(request[3].split(' ')[1]) frameCnt = self.serverInfo[self.filename] if requestedFrame >= frameCnt: requestedFrame = frameCnt - 1 self.clientInfo['requestedFrame'] = requestedFrame # Create a new socket for RTP/UDP self.clientInfo['rtpSocket'] = socket.socket( socket.AF_INET, socket.SOCK_DGRAM) self.replyRtsp(self.OK_200, seq[1]) # Create a new thread and start sending RTP packets self.clientInfo['event'] = threading.Event() self.clientInfo['worker'] = threading.Thread( target=self.sendRtp) self.clientInfo['worker'].start() # Process PAUSE request elif self.requestType == self.PAUSE: if self.state == self.PLAYING: print("processing PAUSE\n") self.state = self.READY self.clientInfo['event'].set() self.replyRtsp(self.OK_200, seq[1]) # Process TEARDOWN request elif self.requestType == self.TEARDOWN: print("processing TEARDOWN\n") self.state = self.INIT self.clientInfo['event'].set() self.replyRtsp(self.OK_200, seq[1]) # Close the RTP socket self.clientInfo['rtpSocket'].close() # Process SWITCH request elif self.requestType == self.SWITCH: print("processing SWITCH\n") self.state = self.SWITCHING self.clientInfo['event'].set() self.replyRtsp(self.OK_200, seq[1]) # Close the RTP socket self.clientInfo['rtpSocket'].close() # Process GET_LIST request elif self.requestType == self.GET_LIST: print("processing GET_LIST\n") self.replyRtsp(self.OK_200, seq[1])
def processRtspRequest(self, data):
    """Process RTSP request sent from the client."""
    # Get the request type
    request = data.split('\n')
    line1 = request[0].split(' ')
    requestType = line1[0]

    # Get the media file name
    filename = line1[1]

    # Get the RTSP sequence number
    seq = request[1].split(' ')

    # Process SETUP request
    if requestType == self.SETUP:
        if self.state == self.INIT:
            # Update state
            print("processing SETUP\n")
            try:
                self.clientInfo['videoStream'] = VideoStream(filename)
                self.state = self.READY
            except IOError:
                self.replyRtsp(self.FILE_NOT_FOUND_404, seq[1])

            # Generate a randomized RTSP session ID
            self.clientInfo['session'] = randint(100000, 999999)

            # Send RTSP reply
            self.replyRtsp(self.OK_200, seq[1])

            # Get the RTP/UDP port from the last line
            self.clientInfo['rtpPort'] = request[2].split(' ')[3]  # Change

    # Process PLAY request
    elif requestType == self.PLAY:
        if self.state == self.READY:
            print("processing PLAY\n")
            self.state = self.PLAYING

            # Create a new socket for RTP/UDP
            self.clientInfo["rtpSocket"] = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
            self.replyRtsp(self.OK_200, seq[1])

            # Create a new thread and start sending RTP packets
            self.clientInfo['event'] = threading.Event()
            self.clientInfo['worker'] = threading.Thread(target=self.sendRtp)
            self.clientInfo['worker'].start()

    # Process PAUSE request
    elif requestType == self.PAUSE:
        if self.state == self.PLAYING:
            print("processing PAUSE\n")
            self.state = self.READY
            self.clientInfo['event'].set()
            self.replyRtsp(self.OK_200, seq[1])

    # Process STOP request
    elif requestType == self.STOP:
        print("processing STOP\n")
        if self.state == self.PLAYING:  # note
            self.clientInfo['event'].set()
        self.replyRtsp(self.OK_200, seq[1])

        # Close the RTP socket
        if self.state == self.PLAYING:  # note
            self.clientInfo['rtpSocket'].close()

    # Process DESCRIBE request
    elif requestType == self.DESCRIBE:
        print("processing DESCRIBE\n")
        file_path = 'information.txt'
        with open(file_path, 'w') as f:
            info = "v=0\n" + \
                   "m=video " + str(int(self.clientInfo['rtpPort'])) + " RTP/AVP " + "26\n" + \
                   "a=control:streamid=" + str(self.clientInfo['session']) + "\n" + \
                   "a=mimetype:string;\"video/MJPEG\"\n"
            general_info = "RTSP/1.0 200 OK\nCSeq: " + str(seq[1]) + "\n" + \
                           "Content-Base: " + filename + "\n" + \
                           "Content-Type: " + "application/sdp\n" + \
                           "Content-Length: " + str(len(info)) + "\n"
            general_info += info
            f.write(general_info)
        with open(file_path, "rb") as f:
            connSocket = self.clientInfo['rtspSocket'][0]
            l = f.read(1024)
            while l:
                connSocket.send(l)
                l = f.read(1024)
        self.replyRtsp(self.OK_200, seq[1])
def processRtspRequest(self, data):
    """Process RTSP request sent from the client."""
    # Get the request type
    request = data.split('\n')
    line1 = request[0].split(' ')
    requestType = line1[0]

    # Get the media file name
    filename = line1[1]

    # Get the RTSP sequence number
    seq = request[1].split(' ')

    # Process SETUP request
    if requestType == self.SETUP:
        if self.state == self.INIT:
            # Update state
            print "processing SETUP\n"
            try:
                self.clientInfo['videoStream'] = VideoStream(filename)
                self.state = self.READY
            except IOError:
                self.replyRtsp(self.FILE_NOT_FOUND_404, seq[1])

            # Generate a randomized RTSP session ID
            self.clientInfo['session'] = randint(100000, 999999)

            # Send RTSP reply
            self.replyRtsp(self.OK_200, seq[1])
            print "RTSP/1.0 200 OK\nCSeq: " + seq[1] + "\nSession: " + str(self.clientInfo['session'])

            # Get the RTP/UDP port from the last line
            self.clientInfo['rtpPort'] = request[2].split(' ')[3]

    # Process PLAY request
    elif requestType == self.PLAY:
        if self.state == self.READY:
            print "\nprocessing PLAY\n"
            self.state = self.PLAYING

            # Create a new socket for RTP/UDP
            self.clientInfo["rtpSocket"] = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
            self.replyRtsp(self.OK_200, seq[1])
            print "\nRTSP/1.0 200 OK\nCSeq: " + seq[1] + "\nSession: " + str(self.clientInfo['session']) + "\n"

            # Create a new thread and start sending RTP packets
            self.clientInfo['event'] = threading.Event()
            self.clientInfo['worker'] = threading.Thread(target=self.sendRtp)
            self.clientInfo['worker'].start()

    # Process PAUSE request
    elif requestType == self.PAUSE:
        if self.state == self.PLAYING:
            self.state = self.READY
            print "\nprocessing PAUSE\n"
            self.clientInfo['event'].set()
            self.replyRtsp(self.OK_200, seq[0])
            print "\nRTSP/1.0 200 OK\nCSeq: " + seq[0] + "\nSession: " + str(self.clientInfo['session']) + "\n"

    # Process TEARDOWN request
    elif requestType == self.TEARDOWN:
        print "\nprocessing TEARDOWN\n"
        self.clientInfo['event'].set()
        self.replyRtsp(self.OK_200, seq[0])
        print "\nRTSP/1.0 200 OK\nCSeq: " + seq[0] + "\nSession: " + str(self.clientInfo['session']) + "\n"

        # Close the RTP socket
        self.clientInfo['rtpSocket'].close()

        # Calculate and display the average jitter
        frameNumber = self.clientInfo['videoStream'].frameNbr()
        rate = float(self.jitcount) / frameNumber
        print "Jitter: %.6f s" % rate
import cv2
import imutils  # needed by imutils.resize() below
import time     # needed by time.sleep() below
from imutils.video import VideoStream
import datetime
import argparse
from Stepper import Stepper
from Servo import Servo

ap = argparse.ArgumentParser()
ap.add_argument("-p", "--picamera", type=int, default=-1,
                help="whether the PiCamera being used")
args = vars(ap.parse_args())

vs = VideoStream(usePiCamera=args["picamera"] > 0).start()
time.sleep(2.0)


def main(argv):
    bright_values = {}  # collects the brightest values found so far
    count = 0
    while True:
        if count == 7:  # looks for 7 different spots
            break
        frame = vs.read()  # reads the frame from the OpenCV object
        frame = imutils.resize(frame, width=400)
class VideoCapture(object): def __init__(self, videoPath="", verbose=True, displayW=1920, displayH=1080, fontScale=1.0, inference=True, confidenceLevel=0.5): self.verbose = verbose self._debug = False self.videoPath = videoPath self._videoSourceType = CaptureDevice.Unknown self._videoSourceState = CaptureDeviceState.Unknown self.videoStream = None self._videoReadyEvent = Event() self._capture_in_progress = False # Display Resolution # Will try to set camera's resolution to the specified resolution self._displayW = displayW self._displayH = displayH self._cameraW = 0 self._cameraH = 0 # Camera's FPS self._cameraFPS = 30 # Font Scale for putText self._fontScale = float(fontScale) # turn inference on/off self.runInference = inference # confidence level threshold self.confidenceLevel = confidenceLevel # various frame data # frame data for UI self._displayFrame = None # wallpapers for UI self._frame_wp_init_system = cv2.imread( "./www/WP-InitializingSystem.png") self._frame_wp_no_video = cv2.imread("./www/WP-NoVideoData.png") self._frame_wp_init_iothub = cv2.imread( "./www/WP-InitializeIotHub.png") if self.verbose: logging.info('>> ' + self.__class__.__name__ + "." + sys._getframe().f_code.co_name + '()') logging.info( '===============================================================') logging.info( 'Initializing Video Capture with the following parameters:') logging.info(' - OpenCV Version : {}'.format(cv2.__version__)) logging.info(' - Video path : {}'.format(self.videoPath)) logging.info(' - Display Resolution : {} x {}'.format( self._displayW, self._displayH)) logging.info(' - Font Scale : {}'.format(self._fontScale)) logging.info(' - Inference? : {}'.format(self.runInference)) logging.info(' - ConfidenceLevel : {}'.format( self.confidenceLevel)) logging.info( '===============================================================') # set wallpaper self.set_Wallpaper(self._frame_wp_init_system) # set FPS self.fps = FPS() self.imageStreamHandler = None # Start Web Server for View self.imageServer = ImageServer(80, self) self.imageServer.start() # Set Video Source self.set_Video_Source(self.videoPath) self.set_Wallpaper(cv2.imread("./www/WP-InitializeAIEngine.png")) # logging.info('Yolo Inference Initializing\r\n') self.yoloInference = YoloInference(self._fontScale, sendMessage=False) # logging.info('Yolo Inference Initialized\r\n') def __enter__(self): if self.verbose: logging.info('>> ' + self.__class__.__name__ + "." + sys._getframe().f_code.co_name + '()') # self.set_Video_Source(self.videoPath) return self def videoStreamReadTimeoutHandler(self, signum, frame): raise Exception("VideoStream Read Timeout") # # Video Source Management # def _set_Video_Source_Type(self, videoPath): if self.verbose: logging.info('>> ' + self.__class__.__name__ + "." + sys._getframe().f_code.co_name + '() : {}'.format(videoPath)) self._reset_Video_Source() if '/dev/video' in videoPath.lower(): self._videoSourceType = CaptureDevice.Webcam elif 'rtsp:' in videoPath.lower(): self._videoSourceType = CaptureDevice.Rtsp elif '/api/holographic/stream' in videoPath.lower(): self._videoSourceType = CaptureDevice.Hololens if self.verbose: logging.info('<< ' + self.__class__.__name__ + "." + sys._getframe().f_code.co_name + '() : {}'.format(self._videoSourceType)) def _get_Video_Source_Type(self, videoPath): videoType = CaptureDevice.Unknown if self.verbose: logging.info('>> ' + self.__class__.__name__ + "." 
+ sys._getframe().f_code.co_name + '() : {}'.format(videoPath)) if '/dev/video' in videoPath.lower(): videoType = CaptureDevice.Webcam elif 'rtsp:' in videoPath.lower(): videoType = CaptureDevice.Rtsp elif '/api/holographic/stream' in videoPath.lower(): videoType = CaptureDevice.Hololens return videoType # # Resets video capture/stream settings # def _reset_Video_Source(self): if self.verbose: logging.info('>> ' + self.__class__.__name__ + "." + sys._getframe().f_code.co_name + '()') if self.videoStream: self.videoStream.stop() # self.videoStream.close() # self.videoStream = None self._videoSourceType = CaptureDevice.Unknown self._videoSourceState = CaptureDeviceState.Unknown def set_Video_Source(self, newVideoPath): if self.verbose: logging.info('>> ' + self.__class__.__name__ + "." + sys._getframe().f_code.co_name + '()') retVal = False realVideoPath = newVideoPath if self.videoPath == newVideoPath and self._videoSourceState == CaptureDeviceState.Running: return True if self.imageStreamHandler != None: statusMsg = '{{\"DeviceStatus\":\"Connecting to {}\",\"isSuccess\":{}}}'.format( self._remove_credential(newVideoPath), 1) self.imageStreamHandler.submit_write(statusMsg) self._videoSourceState = CaptureDeviceState.Stop if self._capture_in_progress: # wait for queue to drain and loop to exit time.sleep(1.0) self._capture_in_progress = False self._set_Video_Source_Type(realVideoPath) if self._videoSourceType == CaptureDevice.Unknown: self._videoSourceState = CaptureDeviceState.ErrorNotSupported logging.error('>> ' + self.__class__.__name__ + "." + sys._getframe().f_code.co_name + '() : Unsupported Video Source {}'.format( self._videoSourceType)) else: self._videoSourceState = CaptureDeviceState.Init if self._videoSourceType == CaptureDevice.Hololens: strHololens = realVideoPath.split('?') # disable audio realVideoPath = '{}?holo=true&pv=true&mic=false&loopback=false'.format( strHololens[0]) self.videoStream = VideoStream(videoCapture=self, path=realVideoPath) fps_override = 30 if not self.videoStream.videoCapture == None: # get resolution cameraH1 = int( self.videoStream.videoCapture.get( cv2.CAP_PROP_FRAME_HEIGHT)) cameraW1 = int( self.videoStream.videoCapture.get( cv2.CAP_PROP_FRAME_WIDTH)) cameraFPS1 = int( self.videoStream.videoCapture.get(cv2.CAP_PROP_FPS)) if self._videoSourceType == CaptureDevice.Webcam: if not cameraH1 == self._displayH: self.videoStream.videoCapture.set( cv2.CAP_PROP_FRAME_HEIGHT, self._displayH) if not cameraW1 == self._displayW: self.videoStream.videoCapture.set( cv2.CAP_PROP_FRAME_WIDTH, self._displayW) elif self._videoSourceType == CaptureDevice.Rtsp: if not cameraH1 == self._displayH: self.videoStream.videoCapture.set( cv2.CAP_PROP_FRAME_HEIGHT, self._displayH) if not cameraW1 == self._displayW: self.videoStream.videoCapture.set( cv2.CAP_PROP_FRAME_WIDTH, self._displayW) elif self._videoSourceType == CaptureDevice.Hololens: holo_w = 1280 holo_h = 720 if 'live_med.mp4' in realVideoPath: holo_w = 854 holo_h = 480 elif 'live_low.mp4' in realVideoPath: holo_w = 428 holo_h = 240 fps_override = 15 self.videoStream.videoCapture.set( cv2.CAP_PROP_FRAME_HEIGHT, holo_h) self.videoStream.videoCapture.set(cv2.CAP_PROP_FRAME_WIDTH, holo_w) self.videoStream.videoCapture.set(cv2.CAP_PROP_FPS, fps_override) self._cameraH = int( self.videoStream.videoCapture.get( cv2.CAP_PROP_FRAME_HEIGHT)) self._cameraW = int( self.videoStream.videoCapture.get( cv2.CAP_PROP_FRAME_WIDTH)) self._cameraFPS = int( self.videoStream.videoCapture.get(cv2.CAP_PROP_FPS)) logging.info( 
'===============================================================' ) logging.info( 'Setting Video Capture with the following parameters:') logging.info(' - Video Source Type : {}'.format( self._videoSourceType)) logging.info(' - Display Resolution : {} x {}'.format( self._displayW, self._displayH)) logging.info(' Original : {} x {} @ {}'.format( cameraW1, cameraH1, cameraFPS1)) logging.info(' New : {} x {} @ {}'.format( self._cameraW, self._cameraH, self._cameraFPS)) logging.info( '===============================================================' ) if self.videoStream.start(): self._videoSourceState = CaptureDeviceState.Running retVal = True else: self._videoSourceState = CaptureDeviceState.ErrorRead else: if self._videoSourceType == CaptureDevice.Hololens or self._videoSourceType == CaptureDevice.Rtsp: url_parsed = urlparse(realVideoPath) if url_parsed.password != None or url_parsed.username != None: url_parsed = url_parsed._replace( netloc="{}".format(url_parsed.hostname)) ipAddress = url_parsed.netloc ping_ret = subprocess.call( ['ping', '-c', '5', '-W', '3', ipAddress], stdout=open(os.devnull, 'w'), stderr=open(os.devnull, 'w')) if ping_ret == 0: self._videoSourceState = CaptureDeviceState.ErrorOpen else: self._videoSourceState = CaptureDeviceState.ErrorNoNetwork logging.error('>> ' + self.__class__.__name__ + "." + sys._getframe().f_code.co_name + '() : Failed to open Video Capture') self.videoPath = realVideoPath if retVal == False: self.set_Wallpaper(self._frame_wp_no_video) else: self._videoReadyEvent.set() self.sendCurrentVideoPath(realVideoPath) return retVal def get_display_frame(self): return self.displayFrame def set_status(self, device_status): self._videoSourceState = device_status if self._videoSourceState != CaptureDeviceState.Running: self.sendCurrentVideoPath("") def sendCurrentVideoPath(self, videoPath): if videoPath == "": video_path = self._remove_credential(self.videoPath) else: video_path = self._remove_credential(videoPath) logging.info('>> Current Video Status {}'.format( self._videoSourceState)) if self.imageStreamHandler != None: if self._videoSourceState == CaptureDeviceState.Running: strUserName = "" strPassword = "" videoType = self._get_Video_Source_Type(videoPath) if videoType == CaptureDevice.Rtsp or videoType == CaptureDevice.Hololens: url_parsed = urlparse(videoPath) if url_parsed.password != None: strPassword = url_parsed.password if url_parsed.username != None: strUserName = url_parsed.username statusMsg = '{{\"DevicePath\":\"{}\",\"isSuccess\":{},\"UserName\":\"{}\",\"Password\":\"{}\"}}'.format( video_path, 1, strUserName, strPassword) else: statusMsg = '{{\"DeviceStatus\":\"Error ({}): {}\",\"isSuccess\":{},\"UserName\":\"\",\"Password\":\"\"}}'.format( self._videoSourceState, video_path, 0) self.imageStreamHandler.submit_write(statusMsg) def setVideoPathFromUI(self, json_Data): videoPath = "" json_Data = json.loads(json_Data) logging.info('>> ' + self.__class__.__name__ + "." 
+ sys._getframe().f_code.co_name + '() : {}'.format(json_Data["VideoPath"])) logging.info('>> {}'.format(json_Data["VideoPath"])) logging.info('>> {}'.format(json_Data["UserName"])) logging.info('>> {}'.format(json_Data["Password"])) videoType = self._get_Video_Source_Type(json_Data["VideoPath"]) if videoType == CaptureDevice.Webcam: videoPath = json_Data["VideoPath"].strip() elif videoType == CaptureDevice.Rtsp or videoType == CaptureDevice.Hololens: url_parsed = urlparse(json_Data["VideoPath"].strip()) if '@' in url_parsed.netloc or len(json_Data["UserName"]) == 0: # already contains password or user name not specified videoPath = json_Data["VideoPath"] else: url_parsed = url_parsed._replace(netloc='{}:{}@{}'.format( json_Data["UserName"], json_Data["Password"], url_parsed.netloc)) videoPath = url_parsed.geturl() self.set_Video_Source(videoPath) def _remove_credential(self, videoPath): logging.info('>> ' + self.__class__.__name__ + "." + sys._getframe().f_code.co_name + '()') ret_Path = "" videoType = self._get_Video_Source_Type(videoPath) if videoType == CaptureDevice.Webcam: ret_Path = videoPath elif videoType == CaptureDevice.Rtsp or videoType == CaptureDevice.Hololens: url_parsed = urlparse(videoPath) if url_parsed.password != None or url_parsed.username != None: url_parsed = url_parsed._replace( netloc="{}".format(url_parsed.hostname)) ret_Path = url_parsed.geturl() return ret_Path def set_Wallpaper(self, image): if self.verbose: logging.info('>> ' + self.__class__.__name__ + "." + sys._getframe().f_code.co_name + '()') self.displayFrame = cv2.imencode('.jpg', image)[1].tobytes() def start(self): if self.verbose: logging.info('>> ' + self.__class__.__name__ + "." + sys._getframe().f_code.co_name + '()') while True: if self._videoSourceState == CaptureDeviceState.Running: self._capture_in_progress = True self.__Run__() self._capture_in_progress = False else: if self._videoSourceState == CaptureDeviceState.ErrorOpen or self._videoSourceState == CaptureDeviceState.ErrorRead: self.set_Wallpaper(self._frame_wp_no_video) if self._videoSourceType == CaptureDevice.Unknown: if self._debug: logging.info('>> ' + self.__class__.__name__ + "." + sys._getframe().f_code.co_name + '() : Unknown Device') time.sleep(1.0) else: if self._debug: logging.info('>> ' + self.__class__.__name__ + "." + sys._getframe().f_code.co_name + '() : Device Not Running') # time.sleep(1.0) logging.info('>> Video Ready Event Enter ---------------') self._videoReadyEvent.wait() logging.info('<< Video Ready Event Exit ---------------') self._videoReadyEvent.clear() def __Run__(self): if self.verbose: logging.info( '===============================================================' ) logging.info('>> ' + self.__class__.__name__ + "." 
+ sys._getframe().f_code.co_name + '()') # Check camera's FPS if self._cameraFPS == 0: logging.error('Error : Could not read FPS') # raise Exception("Unable to acquire FPS for Video Source") return logging.info('>> Frame rate (FPS) : {}'.format(self._cameraFPS)) logging.info('>> Run Inference {}'.format(self.runInference)) perFrameTimeInMs = 1000 / self._cameraFPS self.fps.start() self.fps.reset() while True: # Get current time before we capture a frame tFrameStart = time.time() frame = np.array([]) captureRet = False if not self._videoSourceState == CaptureDeviceState.Running: break captureRet, frame = self.videoStream.read() if captureRet == False: self._videoSourceState = CaptureDeviceState.ErrorRead logging.error("ERROR : Failed to read from video source") break if frame.size > 0: # Run Object Detection if self.runInference: self.yoloInference.runInference(frame, self._cameraW, self._cameraH, self.confidenceLevel) # Calculate FPS currentFPS = self.fps.fps() if (currentFPS > self._cameraFPS): # Cannot go faster than Camera's FPS currentFPS = self._cameraFPS # Add FPS Text to the frame cv2.putText(frame, "FPS " + str(currentFPS), (10, int(30 * self._fontScale)), cv2.FONT_HERSHEY_SIMPLEX, self._fontScale, (0, 0, 255), 2) self.displayFrame = cv2.imencode('.jpg', frame)[1].tobytes() timeElapsedInMs = (time.time() - tFrameStart) * 1000 if perFrameTimeInMs > timeElapsedInMs: # This is faster than image source (e.g. camera) can feed. waitTimeBetweenFrames = perFrameTimeInMs - timeElapsedInMs time.sleep(waitTimeBetweenFrames / 1000.0) def __exit__(self, exception_type, exception_value, traceback): self.imageServer.close() cv2.destroyAllWindows()
class ServerWorker: SETUP = 'SETUP' PLAY = 'PLAY' PAUSE = 'PAUSE' TEARDOWN = 'TEARDOWN' DESCRIBE = 'DESCRIBE' INIT = 0 READY = 1 PLAYING = 2 state = INIT clientInfo = {} """Initialization""" def __init__(self, clientInfo): self.clientInfo = clientInfo print "ServerWorker init done\n" """Create RTSP thread""" def run(self): self.rtspThread = threading.Thread(target=self.receiveRtsp) self.rtspThread.start() print "ServerWorker run done\n" """RTSP/TCP module""" def receiveRtsp(self): """connect rtsp/tcp socket""" rtspSocket = self.clientInfo['rtspSocket'][0] print "ServerWorker RTSP/TCP socket connected\n" while True: receivedRtspDataJSON = rtspSocket.recv(256) if receivedRtspDataJSON: self.processRtsp(receivedRtspDataJSON) def processRtsp(self,receivedRtspDataJSON): """Handle received RTSP request""" # json tp dict receivedRtspData = json.loads(receivedRtspDataJSON) self.action = receivedRtspData['action'] # SETUP module if self.action == self.SETUP and self.state == self.INIT: self.fileName = receivedRtspData['fileName'] self.rtspVersion = receivedRtspData['rtspVersion'] self.CSeq = receivedRtspData['CSeq'] self.transportProtocol = receivedRtspData['transportProtocol'] self.rtpPort = receivedRtspData['rtpPort'] # create a session self.session = randint(0, 65536) try: self.file = VideoStream(self.fileName) # set frame number to 0 self.frameNbr = 0 # set rtpError to 0 self.rtpError = 0 # change state on server self.state = self.READY # send rtsp/tcp reply self.replyRtsp(200) print "ServerWorker SETUP" except IOError: self.replyRtsp(404) # PLAY module elif self.action == self.PLAY and self.state == self.READY: self.fileName = receivedRtspData['fileName'] self.rtspVersion = receivedRtspData['rtspVersion'] self.CSeq = receivedRtspData['CSeq'] self.session = receivedRtspData['session'] # change state on server self.state = self.PLAYING # connect RTP/UDP socket self.rtpScoket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) self.playEvent = threading.Event() self.playEvent.clear() self.rtpThread = threading.Thread(target=self.rtpConnect) self.rtpThread.start() # send rtsp/tcp reply self.replyRtsp(200) print "ServerWorker PLAY" # PAUSE module elif self.action == self.PAUSE and self.state == self.PLAYING: self.fileName = receivedRtspData['fileName'] self.rtspVersion = receivedRtspData['rtspVersion'] self.CSeq = receivedRtspData['CSeq'] self.session = receivedRtspData['session'] # change state on server self.state = self.READY # stop playEvent self.playEvent.set() # send rtsp/tcp reply self.replyRtsp(200) # Added line to prevent ghost sockets self.rtpScoket.close() print "ServerWorker PAUSE" # TEARDOWN module elif self.action == self.TEARDOWN: self.fileName = receivedRtspData['fileName'] self.rtspVersion = receivedRtspData['rtspVersion'] self.CSeq = receivedRtspData['CSeq'] self.session = receivedRtspData['session'] # flag of RTP/UDP socket flag = 1 try: # stop playEvent self.playEvent.set() except: flag = 0 print 'Attention! No playback thread was created before teardown!' 
# send rtsp/tcp reply self.replyRtsp(200) # Close the RTP/UDP socket if flag == 1: self.rtpScoket.close() print "ServerWorker TEARDOWN" # DESCRIBE module elif self.action == self.DESCRIBE: self.fileName = receivedRtspData['fileName'] self.rtspVersion = receivedRtspData['rtspVersion'] self.CSeq = receivedRtspData['CSeq'] self.session = receivedRtspData['session'] # change state on server self.state = self.READY # stop playEvent self.playEvent.set() # send rtsp/tcp reply self.replyDescribeRtsp(200) # Added line to prevent ghost sockets self.rtpScoket.close() print "ServerWorker DESCRIBE" def replyDescribeRtsp(self, replyCode): """Reply RTSP to client""" if replyCode == 200: # create dict dictReply = {} dictReply['rtspVersion'] = self.rtspVersion dictReply['replyCode'] = 200 dictReply['replyAction'] = "OK" dictReply['CSeq'] = self.CSeq dictReply['session'] = self.session dictReply['Describe'] = "Describe" dictReply['stream'] = "Movie" dictReply['encoding'] = self.fileName.split('.')[1] # dict to JSON dictReplyJSON = json.dumps(dictReply) # connect rtsp/tcp socket rtspSocket = self.clientInfo['rtspSocket'][0] # send JSON to clinet rtspSocket.send(dictReplyJSON) #print print dictReply['rtspVersion'] + " " + str(dictReply['replyCode']) + " " + dictReply['replyAction'] + "\nCSeq: " + str(dictReply['CSeq']) + "\nSession: " + str(dictReply['session']) +"\nStream: " + dictReply['stream'] + " Encoding: " + dictReply['encoding'] +"\n" def replyRtsp(self, replyCode): """Reply RTSP to client""" if replyCode == 200: # create dict dictReply = {} dictReply['rtspVersion'] = self.rtspVersion dictReply['replyCode'] = 200 dictReply['replyAction'] = "OK" dictReply['CSeq'] = self.CSeq dictReply['session'] = self.session # dict to JSON dictReplyJSON = json.dumps(dictReply) # connect rtsp/tcp socket rtspSocket = self.clientInfo['rtspSocket'][0] # send JSON to clinet rtspSocket.send(dictReplyJSON) #print print dictReply['rtspVersion'] + " " + str(dictReply['replyCode']) + " " + dictReply['replyAction'] + "\nCSeq: " + str(dictReply['CSeq']) + "\nSession: " + str(dictReply['session']) +"\n" elif replyCode == 404: # create dict dictReply = {} dictReply['rtspVersion'] = self.rtspVersion dictReply['replyCode'] = 404 dictReply['replyAction'] = "FILE NOT FOUND" dictReply['CSeq'] = self.CSeq dictReply['session'] = self.session # dict to JSON dictReplyJSON = json.dumps(dictReply) # connect rtsp/tcp socket rtspSocket = self.clientInfo['rtspSocket'][0] # send JSON to clinet rtspSocket.send(dictReplyJSON) #print print dictReply['rtspVersion'] + " " + str(dictReply['replyCode']) + " " + dictReply['replyAction'] + "\nCSeq: " + str(dictReply['CSeq']) + "\nSession: " + str(dictReply['session']) +"\n" elif replyCode == 500: # create dict dictReply = {} dictReply['rtspVersion'] = self.rtspVersion dictReply['replyCode'] = 500 dictReply['replyAction'] = "CONNECTION ERROR" dictReply['CSeq'] = self.CSeq dictReply['session'] = self.session # dict to JSON dictReplyJSON = json.dumps(dictReply) # connect rtsp/tcp socket rtspSocket = self.clientInfo['rtspSocket'][0] # send JSON to clinet rtspSocket.send(dictReplyJSON) #print print dictReply['rtspVersion'] + " " + str(dictReply['replyCode']) + " " + dictReply['replyAction'] + "\nCSeq: " + str(dictReply['CSeq']) + "\nSession: " + str(dictReply['session']) +"\n" """RTP/UDP module""" def rtpConnect(self): """Send messages over UDP.""" while True: # wait until current rtp has sent data self.playEvent.wait(0.05) # Stop sending if request is PAUSE or TEARDOWN if self.playEvent.isSet(): break # 
RtpPacket arguments version = 2 padding = 0 extension = 0 cc = 0 seqnum = self.file.frameNbr() marker = 0 pt = 26 ssrc = 34 payload = self.file.nextFrame() # if not finished playing if payload: # create RtpPacket instance rtpPacket = RtpPacket() # encode Rtppacket rtpPacket.encode(version, padding, extension, cc, seqnum, marker, pt, ssrc, payload) # getPacket packet = rtpPacket.getPacket() # get the RTP/UDP address of client self.rtpAddress = self.clientInfo['rtspSocket'][1][0] try: self.rtpScoket.sendto(packet,(self.rtpAddress, self.rtpPort)) except: self.rtpError += 1 print "ServerWorker RTP/UDP Sending Error: %d" % self.rtpError self.replyRtsp(500) # if finished playing, inform client else: self.state = self.INIT self.finishPlaying(seqnum-1) break """Finish playing function""" def finishPlaying(self, frameNbr): dictReply = {} dictReply['rtspVersion'] = self.rtspVersion dictReply['replyCode'] = 200 dictReply['replyAction'] = "OK" dictReply['CSeq'] = self.CSeq dictReply['session'] = self.session dictReply['finish'] = frameNbr # dict to JSON dictReplyJSON = json.dumps(dictReply) # connect rtsp/tcp socket rtspSocket = self.clientInfo['rtspSocket'][0] # send JSON to clinet rtspSocket.send(dictReplyJSON) #print print dictReply['rtspVersion'] + " " + str(dictReply['replyCode']) + " " + dictReply['replyAction'] + "\nCSeq: " + str(dictReply['CSeq']) + "\nSession: " + str(dictReply['session']) +"\n" + "Finish Playing Frame Number: " + str(dictReply['finish']) # Task 4 question 1: RTP loss rate rtpLossRate = self.rtpError*100/float(frameNbr) print "RTP packet loss rate is: %.3f %%" %(rtpLossRate)
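# A hedged sketch of what RtpPacket.encode() has to produce for rtpConnect() above: the fixed
# 12-byte RTP header followed by the JPEG frame payload. The actual RtpPacket class is not
# shown in this file, so the packing below is the standard RFC 3550 layout rather than the
# author's exact implementation.
import struct
import time

def pack_rtp(version, padding, extension, cc, seqnum, marker, pt, ssrc, payload,
             timestamp=None):
    if timestamp is None:
        timestamp = int(time.time()) & 0xFFFFFFFF
    byte0 = (version << 6) | (padding << 5) | (extension << 4) | cc
    byte1 = (marker << 7) | pt
    # '!BBHII' -> 1 + 1 + 2 + 4 + 4 = 12 header bytes, network byte order
    header = struct.pack('!BBHII', byte0, byte1, seqnum & 0xFFFF, timestamp, ssrc)
    return header + payload

# Usage mirroring rtpConnect(): pack_rtp(2, 0, 0, 0, seqnum, 0, 26, 34, frame_bytes)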
from KeyClipWrite import KeyClipWriter import cv2 import imutils import numpy as np from VideoStream import VideoStream import datetime import time import argparse vid=VideoStream(0).start() time.sleep(2.0) output="AshirTestVid" fourcc="MJPG" fps=20 i=0 buffersize=64 kwc=KeyClipWriter(buffersize,1.0) green1=(29,86,6) green2=(64,255,255) ap = argparse.ArgumentParser() #ap.add_argument("-o", "--output", required=True, #help="path to output directory") ap.add_argument("-c", "--codec", type=str, default="MJPG", help="codec of output video") args = vars(ap.parse_args()) frame=vid.read() frame=imutils.resize(frame,width=600) kwc.update(frame) while True:
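# The script above is cut off at "while True:"; this is a hedged sketch of what one iteration
# of a green-object key-clip recorder typically looks like, assuming a PyImageSearch-style
# KeyClipWriter API (update()/start()/finish(), .recording flag) and the green1/green2 HSV
# bounds defined above. The output file naming is illustrative.
import cv2
import imutils
import datetime

def process_frame(frame, kwc, green1, green2, output, fourcc, fps):
    frame = imutils.resize(frame, width=600)
    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
    mask = cv2.inRange(hsv, green1, green2)          # keep only "green" pixels
    mask = cv2.erode(mask, None, iterations=2)
    mask = cv2.dilate(mask, None, iterations=2)
    kwc.update(frame)                                # always feed the ring buffer
    if cv2.countNonZero(mask) > 0 and not kwc.recording:
        # hypothetical output path; the original script never builds one
        ts = datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
        kwc.start("{}-{}.avi".format(output, ts),
                  cv2.VideoWriter_fourcc(*fourcc), fps)
    return frame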
def processRtspRequest(self, data): """Process RTSP request sent from the client.""" # Get the request type request = data.split('\n') line1 = request[0].split(' ') requestType = line1[0] # Get the media file name self.filename = line1[1] # Get the RTSP sequence number seq = request[1].split(' ') # Process SETUP request if requestType == self.SETUP: if self.state == self.INIT: # Update state print("processing SETUP\n") try: self.clientInfo['videoStream'] = VideoStream(self.filename) self.state = self.READY # TODO Get FPS, total time, number of frames of the video to send back to the client # # # self.clientInfo['videoStream'].calTotalTime() self.totalTime = self.clientInfo['videoStream'].totalTime self.fps = self.clientInfo['videoStream'].fps self.noFrames = self.clientInfo['videoStream'].numFrames ####################################################### # Find all media files except IOError: self.replyRtsp(self.FILE_NOT_FOUND_404, seq[1]) # Generate a randomized RTSP session ID self.clientInfo['session'] = randint(100000, 999999) # Send RTSP reply self.replyRtsp(self.OK_200, seq[1]) # Get the RTP/UDP port from the last line self.clientInfo['rtpPort'] = request[2].split(' ')[3] # Process PLAY request elif requestType == self.PLAY: if self.state == self.READY: print("processing PLAY\n") self.state = self.PLAYING # Create a new socket for RTP/UDP self.clientInfo["rtpSocket"] = socket.socket( socket.AF_INET, socket.SOCK_DGRAM) self.replyRtsp(self.OK_200, seq[1]) # Create a new thread and start sending RTP packets self.clientInfo['event'] = threading.Event() self.clientInfo['worker'] = threading.Thread( target=self.sendRtp) self.clientInfo['worker'].start() # Process PAUSE request elif requestType == self.PAUSE: if self.state == self.PLAYING: print("processing PAUSE\n") self.state = self.READY self.clientInfo['event'].set() self.replyRtsp(self.OK_200, seq[1]) # TODO: ################################ # # # Process FORWARD request # elif requestType == self.FORWARD: # if self.state == self.PLAYING: # print("processing FORWARD\n") # self.state = self.PLAYING # self.forward = 1 # self.replyRtsp(self.OK_200, seq[1]) # Process BACKWARD request # elif requestType == self.BACKWARD: # if self.state == self.PLAYING: # print("processing BACKWARD\n") # self.state = self.PLAYING # self.backward = 1 # self.replyRtsp(self.OK_200, seq[1]) ######################################## # Process DESCRIBE request elif requestType == self.DESCRIBE: print("processing DESCRIBE\n") self.replyRtsp(self.OK_200, seq[1]) ######################################## # Process SWITCH request elif requestType == self.SWITCH: print("processing SWITCH\n") # If the state is READY if self.state == self.READY: self.clientInfo['videoStream'] = VideoStream(self.filename) # TODO # Get FPS, total time, number of frames of the video to send back to the client # # # self.clientInfo['videoStream'].calTotalTime() self.totalTime = self.clientInfo['videoStream'].totalTime self.fps = self.clientInfo['videoStream'].fps self.noFrames = self.clientInfo['videoStream'].numFrames ####################################################### # If the state is PLAYING switch to READY first # Required the user to pause the video to switch self.replyRtsp(self.OK_200, seq[1]) ######################################## # Process TEARDOWN request elif requestType == self.TEARDOWN: print("processing TEARDOWN\n") self.clientInfo['event'].set() self.replyRtsp(self.OK_200, seq[1]) # Close the RTP socket self.clientInfo['rtpSocket'].close() # process stop request elif 
requestType == self.STOP: print("processing STOP\n") if self.state == self.PLAYING or self.state == self.READY: self.clientInfo['event'].set() self.clientInfo['videoStream'].resetFrame() self.state = self.READY self.replyRtsp(self.OK_200, seq[1])
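# replyRtsp() is not shown for this processRtspRequest() variant; below is a minimal sketch of
# the conventional reply used by this style of RTSP lab server, assuming self.clientInfo holds
# the connected TCP socket and session id exactly as the handlers above use them.
def replyRtsp(self, code, seq):
    if code == self.OK_200:
        reply = 'RTSP/1.0 200 OK\nCSeq: {}\nSession: {}'.format(
            seq, self.clientInfo['session'])
        self.clientInfo['rtspSocket'][0].send(reply.encode())
    elif code == self.FILE_NOT_FOUND_404:
        print("404 NOT FOUND")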
import cv2
import imutils
import time
from imutils.video import VideoStream
import datetime
import argparse
from Stepper import Stepper

# defaults to using the PiCamera
ap = argparse.ArgumentParser()
ap.add_argument("-p", "--picamera", type=int, default=-1,
                help="whether the PiCamera is being used")
args = vars(ap.parse_args())
vs = VideoStream(usePiCamera=args["picamera"] > 0).start()  # starts video capture using the PiCamera
time.sleep(2.0)


def main(argv):
    bright_values = {}  # dict of the brightest values found so far
    count = 0
    while True:
        if count == 7:  # looks for 7 different spots
            break
        frame = vs.read()  # read the latest frame from the video stream
        frame = imutils.resize(frame, width=400)  # resize in case the frame was changed previously
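# A hedged sketch of how each "brightest spot" can be located inside the loop above, using the
# common OpenCV recipe (grayscale + Gaussian blur + cv2.minMaxLoc). The radius and the drawing
# call are illustrative and not taken from the original script.
import cv2

def find_brightest_spot(frame, radius=41):
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    # blurring first makes minMaxLoc robust to single hot pixels
    gray = cv2.GaussianBlur(gray, (radius, radius), 0)
    (minVal, maxVal, minLoc, maxLoc) = cv2.minMaxLoc(gray)
    cv2.circle(frame, maxLoc, radius, (255, 0, 0), 2)
    return maxVal, maxLoc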
def processRtspRequest(self, data): """Process RTSP request sent from the client.""" # Get the request type request = data.split('\n') line1 = request[0].split(' ') requestType = line1[0] # Get the media file name filename = line1[1] # Get the RTSP sequence number seq = request[1].split(' ') # Process SETUP request if requestType == self.SETUP: if self.state == self.INIT: # Update state print("processing SETUP\n") try: self.clientInfo['videoStream'] = VideoStream(filename) self.state = self.READY # set rtpError to 0 self.rtpError = 0 self.frame_Number =0 except IOError: self.replyRtsp(self.FILE_NOT_FOUND_404, seq[1]) # Generate a randomized RTSP session ID self.clientInfo['session'] = randint(100000, 999999) # Send RTSP reply self.replyRtsp(self.OK_200, seq[1]) # Get the RTP/UDP port from the last line self.clientInfo['rtpPort'] = request[2].split(' ')[3] # Process PLAY request elif requestType == self.PLAY: if self.state == self.READY: print("processing PLAY\n") self.state = self.PLAYING # Create a new socket for RTP/UDP self.clientInfo["rtpSocket"] = socket.socket( socket.AF_INET, socket.SOCK_DGRAM) self.replyRtsp(self.OK_200, seq[1]) # Create a new thread and start sending RTP packets self.clientInfo['event'] = threading.Event() self.clientInfo['worker'] = threading.Thread( target=self.sendRtp) self.clientInfo['worker'].start() # Process PAUSE request elif requestType == self.PAUSE: if self.state == self.PLAYING: print("processing PAUSE\n") self.state = self.READY self.clientInfo['event'].set() self.replyRtsp(self.OK_200, seq[1]) # Process TEARDOWN request elif requestType == self.TEARDOWN: print("processing TEARDOWN\n") self.clientInfo['event'].set() self.replyRtsp(self.OK_200, seq[1]) #print statics rtpLossRate = self.rtpError*100/float(self.frame_Number) print("RTP packet loss rate is: %.3f %%" %(rtpLossRate)) # Close the RTP socket self.clientInfo['rtpSocket'].close() # Process TEARDOWN request elif requestType == self.SWITCH: if not self.state == self.PLAYING: print("processing SWITCH\n") self.replyRtsp(self.OK_200, seq[1]) elif requestType == self.CHOOSE: if not self.state == self.PLAYING: print("processing CHOOSE\n") try: if not self.clientInfo['videoStream'].file.closed: self.clientInfo['videoStream'].terminate() self.clientInfo['videoStream'] = VideoStream(filename) except IOError: self.replyRtsp(self.FILE_NOT_FOUND_404, seq[1]) # Send RTSP reply self.replyRtsp(self.OK_200, seq[1]) elif requestType == self.STOP: if self.state == self.PLAYING or self.state == self.READY: print("processing STOP\n") self.state = self.INIT self.clientInfo['event'].set() if not self.clientInfo['videoStream'].file.closed: self.clientInfo['videoStream'].terminate() self.replyRtsp(self.OK_200, seq[1]) self.clientInfo['rtpSocket'].close() elif requestType == self.DESCRIBE: print("processing DESCRIBE\n") # create dict description = {} description['session'] = self.clientInfo['session'] description['encoding'] = filename.split('.')[1] reply = 'RTSP/1.0 200 OK\nCSeq: ' + seq[1] + '\nSession: ' + \ str(self.clientInfo['session']) + \ '\n' + json.dumps(description) self.clientInfo['rtspSocket'][0].send(reply.encode())
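# Note: the TEARDOWN branch above divides by self.frame_Number, which is 0 when the client
# tears down before any frame was sent. A guarded version of the same statistic (sketch):
def rtp_loss_rate(rtp_errors, frames_sent):
    if frames_sent <= 0:
        return 0.0
    return rtp_errors * 100.0 / frames_sent

# e.g. print("RTP packet loss rate is: %.3f %%" % rtp_loss_rate(self.rtpError, self.frame_Number))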
def recvRtspRequest(self):
    connSocket, (clientAddr, clientPort) = self.clientInfo['rtspSocket']
    self.clientAddr = clientAddr
    while True:
        data = connSocket.recv(1024)
        if data:
            print "\nData Received\n" + data
            # get the info from the data
            content = data.split("\n")
            array_1 = content[0].split(" ")
            # request type: SETUP, PLAY, PAUSE, TEARDOWN
            reqType = array_1[0]
            # the filename
            filename = array_1[1]
            # sequence number
            seq = content[1].split(" ")[1]
            if reqType == self.SETUP_REQ:
                if self.state == self.INIT:
                    print "set up the movie"
                    array_3 = content[2].split(" ")
                    # get the RTP port number
                    self.clientrtpPort = int(array_3[3])
                    try:
                        self.clientInfo['videoStream'] = VideoStream(filename)
                    except:
                        self.replyRtsp(self.FILE_NOT_FOUND_404, seq)
                    self.state = self.READY
                    self.session = randint(111, 777)
                    self.replyRtsp(self.OK_200, seq)
            elif reqType == self.PLAY_REQ:
                if self.state == self.READY:
                    self.state = self.PLAYING
                    print "playing the movie"
                    self.replyRtsp(self.OK_200, seq)
                    # once PLAY arrives, create the RTP/UDP socket and get ready to send the video data
                    if self.rtpSocket == None:
                        self.rtpSocket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
                        self.rtpSocket.setsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF, 65535)
                    # start an event to control playing, pausing and teardown
                    self.event_trigger = threading.Event()
                    self.event_trigger.clear()
                    # start a thread to send the video data
                    threading.Thread(target=self.sendRtp).start()
            elif reqType == self.PAUSE_REQ:
                if self.state == self.PLAYING:
                    print "pause the movie"
                    self.replyRtsp(self.OK_200, seq)
                    # stop sending the video to the client
                    self.event_trigger.set()
                    self.state = self.READY
            elif reqType == self.TEARDOWN_REQ:
                print "close the movie"
                self.replyRtsp(self.OK_200, seq)
                # stop sending the video to the client
                self.event_trigger.set()
                self.state = self.INIT
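# sendRtp() is not shown for the handler above; a hedged sketch of the usual sender loop,
# reusing the names created there (self.event_trigger, self.rtpSocket, self.clientAddr,
# self.clientrtpPort, self.clientInfo['videoStream']). The 0.05 s wait gives roughly 20 fps.
def sendRtp(self):
    while True:
        self.event_trigger.wait(0.05)
        # PAUSE or TEARDOWN sets the event and stops the loop
        if self.event_trigger.isSet():
            break
        data = self.clientInfo['videoStream'].nextFrame()
        if data:
            frameNumber = self.clientInfo['videoStream'].frameNbr()
            try:
                packet = RtpPacket()
                packet.encode(2, 0, 0, 0, frameNumber, 0, 26, 0, data)
                self.rtpSocket.sendto(packet.getPacket(),
                                      (self.clientAddr, self.clientrtpPort))
            except:
                print("Connection Error")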
import cv2
import imutils
from VideoStream import VideoStream
from MotionClass import Motion
import time
import numpy as np
import datetime

vs1 = VideoStream(src=0).start()
vs2 = VideoStream(src=1).start()
time.sleep(15.0)
motion1 = Motion()
motion2 = Motion()
image = cv2.imread("MUNA.jpg")
total = 0

while True:
    frames = []
    for (stream, motion) in zip((vs1, vs2), (motion1, motion2)):
        frame = stream.read()
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        gray = cv2.GaussianBlur(gray, (21, 21), 0)
        locs = motion.update(gray)
        if total > 32:
            frames.append(frame)
            if len(locs) > 0:
                (max_x, max_y) = (-np.inf, -np.inf)
                (min_x, min_y) = (np.inf, np.inf)
                for l in locs:
                    # cv2.boundingRect (not cv2.boundingBox) returns the box of a contour;
                    # keep updating the running min/max so the union box grows across contours
                    (x, y, w, h) = cv2.boundingRect(l)
                    (min_x, max_x) = (min(min_x, x), max(max_x, x + w))
                    (min_y, max_y) = (min(min_y, y), max(max_y, y + h))
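# The snippet above is cut off right after the min/max accumulation; a hedged sketch of the
# step that usually follows: draw one union box per camera and show the frame. The window
# name is illustrative.
import cv2

def draw_union_box(frame, min_x, min_y, max_x, max_y, name="Camera"):
    # one rectangle around everything that moved in this camera's frame
    cv2.rectangle(frame, (int(min_x), int(min_y)), (int(max_x), int(max_y)), (0, 0, 255), 3)
    cv2.imshow(name, frame)
    return frame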
def predict_video(self, source=0, sync=False): VideoStream(source=source, fbae=self).start()
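# The VideoStream used above takes project-specific keyword arguments (source=, fbae=), so its
# implementation is not shown here. For reference, a minimal sketch of the pattern the various
# VideoStream wrappers in this file share: a daemon thread that keeps only the most recent
# frame from cv2.VideoCapture.
import threading
import cv2

class MinimalVideoStream:
    def __init__(self, src=0):
        self.stream = cv2.VideoCapture(src)
        self.grabbed, self.frame = self.stream.read()
        self.stopped = False

    def start(self):
        t = threading.Thread(target=self._update, daemon=True)
        t.start()
        return self

    def _update(self):
        # keep overwriting the frame so read() never returns a backlog
        while not self.stopped:
            self.grabbed, self.frame = self.stream.read()

    def read(self):
        return self.frame

    def stop(self):
        self.stopped = True
        self.stream.release()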
def processRtspRequest(self, data): """Process RTSP request sent from the client.""" # Get the request type data = data.decode() request = data.split('\n') line1 = request[0].split(' ') requestType = line1[0] # Get the media file name filename = line1[1] # Get the RTSP sequence number seq = request[1].split(' ') # Process LIST request if requestType == self.LIST: fileList = [] for file in os.listdir("./"): if file.endswith(".mjpeg") or file.endswith(".Mjpeg"): fileList.append(file) reply = "" for file in fileList: reply += file + "," self.replyRtsp(self.LIST_OK_200, reply) # Process SETUP request if requestType == self.SETUP: if self.state == self.INIT: # Update state print("SETUP Request received\n") # fileList = [] # --------------------------------------------------- try: self.clientInfo['totalFrame'] = self.getFrameNum(filename) except Exception as e: print(e) # --------------------------------------------------- # for file in os.listdir("./"): # if file.endswith(".mjpeg") or file.endswith(".Mjpeg"): # fileList.append(file) try: self.clientInfo['videoStream'] = VideoStream(filename) self.state = self.READY file = open(filename, "rb") self.clientInfo["videoWeight"] = str(round(len(file.read()) / 1024)) file.close() except IOError: self.replyRtsp(self.FILE_NOT_FOUND_404, seq[1]) # Generate a randomized RTSP session ID self.clientInfo['session'] = randint(100000, 999999) # Send RTSP reply self.replyRtsp(self.OK_200, seq[0]) # seq[0] the sequenceNum received from Client.py print("sequenceNum is " + seq[0]) # Get the RTP/UDP port from the last line self.clientInfo['rtpPort'] = request[2].split(' ')[3] print('-' * 60 + "\nrtpPort is :" + self.clientInfo['rtpPort'] + "\n" + '-' * 60) print("filename is " + filename) # Process PLAY request elif requestType == self.PLAY: if self.state == self.READY: print('-' * 60 + "\nPLAY Request Received\n" + '-' * 60) self.state = self.PLAYING # Create a new socket for RTP/UDP self.clientInfo["rtpSocket"] = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) self.replyRtsp(self.OK_200, seq[0]) print('-' * 60 + "\nSequence Number (" + seq[0] + ")\nReplied to client\n" + '-' * 60) # Create a new thread and start sending RTP packets self.clientInfo['event'] = threading.Event() self.clientInfo['worker'] = threading.Thread(target=self.sendRtp) self.clientInfo['worker'].start() # Process RESUME request elif self.state == self.PAUSE: print('-' * 60 + "\nRESUME Request Received\n" + '-' * 60) self.state = self.PLAYING # Process PAUSE request elif requestType == self.PAUSE: if self.state == self.PLAYING: print('-' * 60 + "\nPAUSE Request Received\n" + '-' * 60) self.state = self.READY self.clientInfo['event'].set() self.replyRtsp(self.OK_200, seq[0]) # Process TEARDOWN request elif requestType == self.TEARDOWN: print('-' * 60 + "\nTEARDOWN Request Received\n" + '-' * 60) self.clientInfo['event'].set() self.replyRtsp(self.OK_200, seq[0]) # Close the RTP socket self.clientInfo['rtpSocket'].close() # Process DESCRIBE request elif requestType == self.DESCRIBE: print('-' * 60 + "\nDESCRIBE Request Received\n" + '-' * 60) self.replyRtsp(self.OK_200, seq[0]) # Process FORWARD request elif requestType == self.FORWARD: print('-' * 60 + "\nFORWARD Request Received\n" + '-' * 60) self.replyRtsp(self.OK_200, seq[0]) self.clientInfo['skipCounter'] += 10 # Process BACKWARD request elif requestType == self.BACKWARD: print('-' * 60 + "\nBACKWARD Request Received\n" + '-' * 60) self.replyRtsp(self.OK_200, seq[0]) self.clientInfo['skipCounter'] -= 10
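# The LIST branch above sends the available .mjpeg file names as one comma-joined string with
# a trailing comma; a hedged sketch of how the receiving side can turn that back into a list.
def parse_list_reply(reply_text):
    # "movie1.Mjpeg,movie2.mjpeg," -> ["movie1.Mjpeg", "movie2.mjpeg"]
    return [name for name in reply_text.split(',') if name]

# Example: parse_list_reply("movie1.Mjpeg,movie2.mjpeg,") == ["movie1.Mjpeg", "movie2.mjpeg"]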
def processRtspRequest(self, data): """Process RTSP request sent from the client.""" # Get the request type request = data.split('\n') line1 = request[0].split(' ') requestType = line1[0] # Get the media file name filename = line1[1] # Get the RTSP sequence number seq = request[1].split(' ') # Process SETUP request if requestType == self.SETUP: if self.state == self.INIT: # Update state print "SETUP Request received\n" try: self.clientInfo['videoStream'] = VideoStream(filename) self.state = self.READY except IOError: self.replyRtsp(self.FILE_NOT_FOUND_404, seq[1]) # Generate a randomized RTSP session ID self.clientInfo['session'] = randint(100000, 999999) # Send RTSP reply self.replyRtsp(self.OK_200, seq[0]) #seq[0] the sequenceNum received from Client.py print "sequenceNum is " + seq[0] # Get the RTP/UDP port from the last line self.clientInfo['rtpPort'] = request[2].split(' ')[3] print '-'*60 + "\nrtpPort is :" + self.clientInfo['rtpPort'] + "\n" + '-'*60 print "filename is " + filename # Process PLAY request elif requestType == self.PLAY: if self.state == self.READY: print '-'*60 + "\nPLAY Request Received\n" + '-'*60 self.state = self.PLAYING # Create a new socket for RTP/UDP self.clientInfo["rtpSocket"] = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) self.replyRtsp(self.OK_200, seq[0]) print '-'*60 + "\nSequence Number ("+ seq[0] + ")\nReplied to client\n" + '-'*60 # Create a new thread and start sending RTP packets self.clientInfo['event'] = threading.Event() self.clientInfo['worker']= threading.Thread(target=self.sendRtp) self.clientInfo['worker'].start() # Process RESUME request elif self.state == self.PAUSE: print '-'*60 + "\nRESUME Request Received\n" + '-'*60 self.state = self.PLAYING # Process PAUSE request elif requestType == self.PAUSE: if self.state == self.PLAYING: print '-'*60 + "\nPAUSE Request Received\n" + '-'*60 self.state = self.READY self.clientInfo['event'].set() self.replyRtsp(self.OK_200, seq[0]) # Process TEARDOWN request elif requestType == self.TEARDOWN: print '-'*60 + "\nTEARDOWN Request Received\n" + '-'*60 self.clientInfo['event'].set() self.replyRtsp(self.OK_200, seq[0]) # Close the RTP socket self.clientInfo['rtpSocket'].close()
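# Note: the TEARDOWN branch above closes the RTP socket but leaves self.state unchanged, so a
# later SETUP on the same worker would be ignored. A hedged sketch of a fuller teardown:
def teardownSession(self, seq):
    self.clientInfo['event'].set()          # stop the RTP sender thread
    self.replyRtsp(self.OK_200, seq)
    self.clientInfo['rtpSocket'].close()    # release the UDP socket
    self.state = self.INIT                  # allow a fresh SETUP afterwards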
class ServerWorker: SETUP = 'SETUP' PLAY = 'PLAY' PAUSE = 'PAUSE' TEARDOWN = 'TEARDOWN' INIT = 0 READY = 1 PLAYING = 2 state = INIT clientInfo = {} """Initialization""" def __init__(self, clientInfo): self.clientInfo = clientInfo print "ServerWorker init done\n" """Create RTSP thread""" def run(self): self.rtspThread = threading.Thread(target=self.receiveRtsp) self.rtspThread.start() print "ServerWorker run done\n" """RTSP/TCP module""" def receiveRtsp(self): """connect rtsp/tcp socket""" rtspSocket = self.clientInfo['rtspSocket'][0] print "ServerWorker RTSP/TCP socket connected\n" while True: receivedRtspDataJSON = rtspSocket.recv(256) if receivedRtspDataJSON: self.processRtsp(receivedRtspDataJSON) def processRtsp(self,receivedRtspDataJSON): """Handle received RTSP request""" # json tp dict receivedRtspData = json.loads(receivedRtspDataJSON) self.action = receivedRtspData['action'] # SETUP module if self.action == self.SETUP and self.state == self.INIT: self.fileName = receivedRtspData['fileName'] self.rtspVersion = receivedRtspData['rtspVersion'] self.CSeq = receivedRtspData['CSeq'] self.transportProtocol = receivedRtspData['transportProtocol'] self.rtpPort = receivedRtspData['rtpPort'] try: self.file = VideoStream(self.fileName) # create a session self.session = randint(0, 65536) # set frame number to 0 self.frameNbr = 0 # set rtpError to 0 self.rtpError = 0 # change state on server self.state = self.READY except IOError: self.replyRtsp(self.FILE_NOT_FOUND_404, seq[1]) # send rtsp/tcp reply self.replyRtsp(200) print "ServerWorker SETUP" # PLAY module elif self.action == self.PLAY and self.state == self.READY: self.fileName = receivedRtspData['fileName'] self.rtspVersion = receivedRtspData['rtspVersion'] self.CSeq = receivedRtspData['CSeq'] self.session = receivedRtspData['session'] # change state on server self.state = self.PLAYING # connect RTP/UDP socket self.rtpScoket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) self.playEvent = threading.Event() self.playEvent.clear() self.rtpThread = threading.Thread(target=self.rtpConnect) self.rtpThread.start() # send rtsp/tcp reply self.replyRtsp(200) print "ServerWorker PLAY" # PAUSE module elif self.action == self.PAUSE and self.state == self.PLAYING: self.fileName = receivedRtspData['fileName'] self.rtspVersion = receivedRtspData['rtspVersion'] self.CSeq = receivedRtspData['CSeq'] self.session = receivedRtspData['session'] # change state on server self.state = self.READY # stop playEvent self.playEvent.set() # send rtsp/tcp reply self.replyRtsp(200) # Added line to prevent ghost sockets self.rtpScoket.close() print "ServerWorker PAUSE" # TEARDOWN module elif self.action == self.TEARDOWN: self.fileName = receivedRtspData['fileName'] self.rtspVersion = receivedRtspData['rtspVersion'] self.CSeq = receivedRtspData['CSeq'] self.session = receivedRtspData['session'] # flag of RTP/UDP socket flag = 1 try: # stop playEvent self.playEvent.set() except: flag = 0 print 'Attention! No playback thread was created before teardown!' 
# send rtsp/tcp reply self.replyRtsp(200) # Close the RTP/UDP socket if flag == 1: self.rtpScoket.close() print "ServerWorker TEARDOWN" def replyRtsp(self, replyCode): """Reply RTSP to client""" if replyCode == 200: # create dict dictReply = {} dictReply['rtspVersion'] = self.rtspVersion dictReply['replyCode'] = 200 dictReply['replyAction'] = "OK" dictReply['CSeq'] = self.CSeq dictReply['session'] = self.session # dict to JSON dictReplyJSON = json.dumps(dictReply) # connect rtsp/tcp socket rtspSocket = self.clientInfo['rtspSocket'][0] # send JSON to clinet rtspSocket.send(dictReplyJSON) #print print dictReply['rtspVersion'] + " " + str(dictReply['replyCode']) + " " + dictReply['replyAction'] + "\nCSeq: " + str(dictReply['CSeq']) + "\nSession: " + str(dictReply['session']) +"\n" elif replyCode == 404: # create dict dictReply = {} dictReply['rtspVersion'] = self.rtspVersion dictReply['replyCode'] = 404 dictReply['replyAction'] = "FILE NOT FOUND" dictReply['CSeq'] = self.CSeq dictReply['session'] = self.session # dict to JSON dictReplyJSON = json.dumps(dictReply) # connect rtsp/tcp socket rtspSocket = self.clientInfo['rtspSocket'][0] # send JSON to clinet rtspSocket.send(dictReplyJSON) #print print dictReply['rtspVersion'] + " " + str(dictReply['replyCode']) + " " + dictReply['replyAction'] + "\nCSeq: " + str(dictReply['CSeq']) + "\nSession: " + str(dictReply['session']) +"\n" elif replyCode == 500: # create dict dictReply = {} dictReply['rtspVersion'] = self.rtspVersion dictReply['replyCode'] = 500 dictReply['replyAction'] = "CONNECTION ERROR" dictReply['CSeq'] = self.CSeq dictReply['session'] = self.session # dict to JSON dictReplyJSON = json.dumps(dictReply) # connect rtsp/tcp socket rtspSocket = self.clientInfo['rtspSocket'][0] # send JSON to clinet rtspSocket.send(dictReplyJSON) #print print dictReply['rtspVersion'] + " " + str(dictReply['replyCode']) + " " + dictReply['replyAction'] + "\nCSeq: " + str(dictReply['CSeq']) + "\nSession: " + str(dictReply['session']) +"\n" """RTP/UDP module""" def rtpConnect(self): """Send messages over UDP.""" while True: # wait until current rtp has sent data self.playEvent.wait(0.05) # Stop sending if request is PAUSE or TEARDOWN if self.playEvent.isSet(): break # RtpPacket arguments version = 2 padding = 0 extension = 0 cc = 0 seqnum = self.file.frameNbr() marker = 0 pt = 26 ssrc = 34 payload = self.file.nextFrame() # if not finished playing if payload: # create RtpPacket instance rtpPacket = RtpPacket() # encode Rtppacket rtpPacket.encode(version, padding, extension, cc, seqnum, marker, pt, ssrc, payload) # getPacket packet = rtpPacket.getPacket() # get the RTP/UDP address of client self.rtpAddress = self.clientInfo['rtspSocket'][1][0] try: self.rtpScoket.sendto(packet,(self.rtpAddress, self.rtpPort)) except: self.rtpError += 1 print "ServerWorker RTP/UDP Sending Error: %d" % self.rtpError # if finished playing, inform client else: self.state = self.INIT self.finishPlaying(seqnum-1) break """Terminate thread. 
Problem with invalid thread id""" self._async_raise(self.rtpThread.ident, SystemExit) """Finish playing function""" def finishPlaying(self, frameNbr): dictReply = {} dictReply['rtspVersion'] = self.rtspVersion dictReply['replyCode'] = 200 dictReply['replyAction'] = "OK" dictReply['CSeq'] = self.CSeq dictReply['session'] = self.session dictReply['finish'] = frameNbr # dict to JSON dictReplyJSON = json.dumps(dictReply) # connect rtsp/tcp socket rtspSocket = self.clientInfo['rtspSocket'][0] # send JSON to clinet rtspSocket.send(dictReplyJSON) #print print dictReply['rtspVersion'] + " " + str(dictReply['replyCode']) + " " + dictReply['replyAction'] + "\nCSeq: " + str(dictReply['CSeq']) + "\nSession: " + str(dictReply['session']) +"\n" + "Finish Playing Frame Number: " + str(dictReply['finish']) """Raise exception to terminate thread""" def _async_raise(self, tid, exctype): if not inspect.isclass(exctype): exctype = type(exctype) res = ctypes.pythonapi.PyThreadState_SetAsyncExc(tid, ctypes.py_object(exctype)) if res == 0: raise ValueError("invalid thread id") elif res != 1: # if it returns a number greater than one, you're in trouble, # and you should call it again with exc=NULL to revert the effect ctypes.pythonapi.PyThreadState_SetAsyncExc(tid, None) raise SystemError("PyThreadState_SetAsyncExc failed")
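# _async_raise() above injects SystemExit into the sender thread via ctypes; the exception only
# takes effect when that thread next executes Python bytecode, and it can leave sockets or files
# open. A hedged sketch of the cooperative alternative the other workers in this file already
# use: check a threading.Event inside the loop and let the thread unwind normally.
import threading

class StoppableSender:
    def __init__(self):
        self.stop_event = threading.Event()
        self.thread = None

    def start(self, send_one_frame):
        def loop():
            # wait() doubles as pacing (~20 fps) and returns True once stop() is called
            while not self.stop_event.wait(0.05):
                send_one_frame()
        self.thread = threading.Thread(target=loop, daemon=True)
        self.thread.start()

    def stop(self):
        self.stop_event.set()
        if self.thread is not None:
            self.thread.join()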