def test_audio_stop_source(self):
    source = AudioStreamTrack()
    relay = MediaRelay()
    proxy1 = relay.subscribe(source)
    proxy2 = relay.subscribe(source)

    # read some frames
    samples_per_frame = 160
    for pts in range(0, 2 * samples_per_frame, samples_per_frame):
        frame1, frame2 = run(asyncio.gather(proxy1.recv(), proxy2.recv()))
        self.assertEqual(frame1.format.name, "s16")
        self.assertEqual(frame1.layout.name, "mono")
        self.assertEqual(frame1.pts, pts)
        self.assertEqual(frame1.samples, samples_per_frame)
        self.assertEqual(frame2.format.name, "s16")
        self.assertEqual(frame2.layout.name, "mono")
        self.assertEqual(frame2.pts, pts)
        self.assertEqual(frame2.samples, samples_per_frame)

    # stop source track
    source.stop()

    # continue reading
    run(asyncio.gather(proxy1.recv(), proxy2.recv()))
    for i in range(2):
        exc1, exc2 = run(
            asyncio.gather(proxy1.recv(), proxy2.recv(), return_exceptions=True)
        )
        self.assertTrue(isinstance(exc1, MediaStreamError))
        self.assertTrue(isinstance(exc2, MediaStreamError))
async def test_audio_slow_consumer(self):
    source = AudioStreamTrack()
    relay = MediaRelay()
    proxy1 = relay.subscribe(source, buffered=False)
    proxy2 = relay.subscribe(source, buffered=False)

    # read some frames
    samples_per_frame = 160
    for pts in range(0, 2 * samples_per_frame, samples_per_frame):
        frame1, frame2 = await asyncio.gather(proxy1.recv(), proxy2.recv())
        self.assertEqual(frame1.format.name, "s16")
        self.assertEqual(frame1.layout.name, "mono")
        self.assertEqual(frame1.pts, pts)
        self.assertEqual(frame1.samples, samples_per_frame)
        self.assertEqual(frame2.format.name, "s16")
        self.assertEqual(frame2.layout.name, "mono")
        self.assertEqual(frame2.pts, pts)
        self.assertEqual(frame2.samples, samples_per_frame)

    # skip some frames
    timestamp = 5 * samples_per_frame
    await asyncio.sleep(source._start + (timestamp / 8000) - time.time())

    frame1, frame2 = await asyncio.gather(proxy1.recv(), proxy2.recv())
    self.assertEqual(frame1.format.name, "s16")
    self.assertEqual(frame1.layout.name, "mono")
    self.assertEqual(frame1.pts, 5 * samples_per_frame)
    self.assertEqual(frame1.samples, samples_per_frame)
    self.assertEqual(frame2.format.name, "s16")
    self.assertEqual(frame2.layout.name, "mono")
    self.assertEqual(frame2.pts, 5 * samples_per_frame)
    self.assertEqual(frame2.samples, samples_per_frame)

    # stop a consumer
    proxy1.stop()

    # continue reading
    for i in range(2):
        exc1, frame2 = await asyncio.gather(
            proxy1.recv(), proxy2.recv(), return_exceptions=True
        )
        self.assertTrue(isinstance(exc1, MediaStreamError))
        self.assertTrue(isinstance(frame2, av.AudioFrame))

    # stop source track
    source.stop()
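# A minimal sketch (not part of the tests above) of the two subscription modes
# they exercise: buffered=True (the default) queues every frame for each
# consumer, while buffered=False hands a late reader only the most recent
# frame, which is what lets the "slow consumer" test skip ahead.
from aiortc.contrib.media import MediaRelay
from aiortc.mediastreams import AudioStreamTrack

source = AudioStreamTrack()
relay = MediaRelay()
buffered_proxy = relay.subscribe(source)                     # every frame is delivered
latest_only_proxy = relay.subscribe(source, buffered=False)  # skips to the newest frame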
def get_global_relay() -> MediaRelay:
    server = Server.get_current()
    if hasattr(server, _SERVER_GLOBAL_RELAY_ATTR_NAME_):
        return getattr(server, _SERVER_GLOBAL_RELAY_ATTR_NAME_)
    else:
        loop = get_server_event_loop()
        with loop_context(loop):
            relay = MediaRelay()
        setattr(server, _SERVER_GLOBAL_RELAY_ATTR_NAME_, relay)
        return relay
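# A sketch of the intended call pattern, assuming this module's Streamlit
# server context: every session subscribes through the one server-wide relay
# so a single upstream track can feed many peer connections. `source_track` is
# a hypothetical stand-in, not a name used above.
from aiortc.mediastreams import AudioStreamTrack

source_track = AudioStreamTrack()            # hypothetical source for illustration
relay = get_global_relay()                   # reuses the relay cached on the Server object
proxy_track = relay.subscribe(source_track)  # safe to call once per peer connection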
def create_local_tracks(play_from):
    global relay, webcam

    if play_from:
        # player = MediaPlayer(play_from)
        # return (audio, video) so both branches have the same shape
        return None, FlagVideoStreamTrack()
    else:
        options = {"framerate": "30", "video_size": "640x480"}
        if relay is None:
            if platform.system() == "Darwin":
                webcam = MediaPlayer(
                    "default:none", format="avfoundation", options=options
                )
            elif platform.system() == "Windows":
                webcam = MediaPlayer(
                    "video=Integrated Camera", format="dshow", options=options
                )
            else:
                webcam = MediaPlayer("/dev/video0", format="v4l2", options=options)
            relay = MediaRelay()
        return None, relay.subscribe(webcam.video)
def create_local_tracks(play_from, owner=False, camFilter=False):
    global relay, webcam
    global relayFilter, webcamFilter

    if play_from:
        player = MediaPlayer(play_from)
        return player.audio, player.video
    else:
        # options = {"framerate": "30", "video_size": "640x480"}
        if not owner:
            if relay is None:
                webcam = MediaPlayer("/dev/video0", format="v4l2")
                # webcam = MediaPlayer("/dev/video0", format="v4l2", options=options)
                # webcam = MediaPlayer("/dev/video6", format="v4l2", options=options)
                relay = MediaRelay()
            relayTmp = relay.subscribe(webcam.video)
        else:
            if relayFilter is None:
                webcamFilter = MediaPlayer("/dev/video7", format="v4l2")
                # webcamFilter = MediaPlayer("/dev/video7", format="v4l2", options=options)
                relayFilter = MediaRelay()
            relayTmp = relayFilter.subscribe(webcamFilter.video)
            if camFilter:
                proc2.send_signal(40)
            else:
                proc2.send_signal(41)
        return None, relayTmp
def create_local_tracks(play_from=None):
    if play_from:
        player = MediaPlayer(play_from)
        return player.audio, player.video
    else:
        options = {"framerate": "30", "video_size": "1920x1080"}
        # if relay is None:
        #     if platform.system() == "Darwin":
        #         webcam = MediaPlayer(
        #             "default:none", format="avfoundation", options=options
        #         )
        #     elif platform.system() == "Windows":
        #         webcam = MediaPlayer("video.mp4")
        webcam = MediaPlayer(
            "video=FULL HD 1080P Webcam", format="dshow", options=options
        )
        # else:
        #     webcam = MediaPlayer("/dev/video0", format="v4l2", options=options)
        # audio, video = VideoTransformTrack(webcam.video, transform="cv")
        relay = MediaRelay()
        return None, relay.subscribe(webcam.video)
async def on_track(track):
    if track.kind == "audio":
        self.recorder.addTrack(track)
    elif track.kind == "video":
        self.__video = track
        self.recorder.addTrack(FixedPtsTrack(MediaRelay().subscribe(track)))
    logger.info(f"Track {track.kind} added")

    @track.on("ended")
    async def on_ended():
        await self.recorder.stop()
        logger.info(f"Track {track.kind} ended")
async def connect(self, host, port, turn=None):
    ice_servers = [RTCIceServer('stun:stun.l.google.com:19302')]
    if turn:
        ice_servers.append(turn)
    config = RTCConfiguration(ice_servers)
    self.pc = RTCPeerConnection(config)

    if not self.__video:
        self.__video = await self.__get_tracks()
    self.pc.addTrack(MediaRelay().subscribe(self.__video))

    offer = await self.pc.createOffer()
    await self.pc.setLocalDescription(offer)

    self.signaling = WebSocketClient(host, port)

    async def send_offer():
        logger.debug(f"Ice Gathering State: {self.pc.iceGatheringState}")
        if self.pc.iceGatheringState == 'complete':
            logger.debug("Offer sent")
            await self.signaling.send_data({
                "sdp": self.pc.localDescription.sdp,
                "type": self.pc.localDescription.type
            })
        else:
            self.pc.once("icegatheringstatechange", send_offer)

    @self.signaling.on_connected
    async def on_connected(_):
        await send_offer()

    @self.signaling.on_message
    async def on_message(message):
        logger.debug(f"{message.get('type')} received")
        if message.get("type") == "answer":
            answer = RTCSessionDescription(sdp=message["sdp"], type=message["type"])
            await self.pc.setRemoteDescription(answer)

    @self.pc.on("connectionstatechange")
    async def on_connectionstatechange():
        if self.pc.connectionState == "failed":
            await self.pc.close()
        logger.info(f"Connection state: {self.pc.connectionState}")
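# A sketch of the kind of value the `turn` parameter above is presumably meant
# to receive. The host, username and credential are placeholders, not values
# from this project; `client` below is a hypothetical instance of the class.
from aiortc import RTCIceServer

turn_server = RTCIceServer(
    urls="turn:turn.example.com:3478",  # placeholder TURN URI
    username="demo-user",               # placeholder credentials
    credential="demo-pass",
)
# await client.connect(host, port, turn=turn_server)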
async def offer(params: Offer):
    offer = RTCSessionDescription(sdp=params.sdp, type=params.type)

    pc = RTCPeerConnection()
    pcs.add(pc)
    recorder = MediaBlackhole()
    relay = MediaRelay()

    @pc.on("connectionstatechange")
    async def on_connectionstatechange():
        print("Connection state is %s" % pc.connectionState)
        if pc.connectionState == "failed":
            await pc.close()
            pcs.discard(pc)

    # open media source
    # audio, video = create_local_tracks()

    @pc.on("track")
    def on_track(track):
        # if track.kind == "audio":
        #     pc.addTrack(player.audio)
        #     recorder.addTrack(track)
        if track.kind == "video":
            pc.addTrack(
                VideoTransformTrack(
                    relay.subscribe(track), transform=params.video_transform
                )
            )
            # if args.record_to:
            #     recorder.addTrack(relay.subscribe(track))

        @track.on("ended")
        async def on_ended():
            await recorder.stop()

    # handle offer
    await pc.setRemoteDescription(offer)
    await recorder.start()

    # send answer (the remote description is already set above)
    answer = await pc.createAnswer()
    await pc.setLocalDescription(answer)

    return {"sdp": pc.localDescription.sdp, "type": pc.localDescription.type}
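# The endpoint above type-hints its request body as `Offer`; a model along
# these lines would match the fields it reads (sdp, type, video_transform).
# This is a guess at the schema, not the project's actual definition.
from typing import Optional
from pydantic import BaseModel

class Offer(BaseModel):
    sdp: str
    type: str
    video_transform: Optional[str] = None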
def _test():
    # Mock functions that depend on Streamlit global server object
    global get_global_relay, get_server_event_loop

    loop = asyncio.get_event_loop()

    def get_server_event_loop_mock():
        return loop

    get_server_event_loop = get_server_event_loop_mock

    fake_global_relay = MediaRelay()

    def get_global_relay_mock():
        return fake_global_relay

    get_global_relay = get_global_relay_mock

    # Start the test
    client = RTCPeerConnection()
    client.createDataChannel("test")
    offer = loop.run_until_complete(client.createOffer())
    logger.debug("Offer for mock testing: %s", offer)

    def test_thread_fn():
        webrtc_worker = WebRtcWorker(mode=WebRtcMode.SENDRECV)
        localDescription = webrtc_worker.process_offer(offer.sdp, offer.type)
        logger.debug("localDescription:")
        logger.debug(localDescription)
        webrtc_worker.stop()

    test_thread = threading.Thread(target=test_thread_fn)
    test_thread.start()

    # HACK
    for _ in range(100):
        loop.run_until_complete(asyncio.sleep(0.01))
async def offer(request, **kwargs):
    video = kwargs['video']
    if "broadcast" in kwargs and kwargs["broadcast"]:
        video = MediaRelay().subscribe(video)

    params = await request.json()
    offer = RTCSessionDescription(sdp=params["sdp"], type=params["type"])

    pc = RTCPeerConnection()
    pcs.add(pc)

    @pc.on("connectionstatechange")
    async def on_connectionstatechange():
        print("Connection state is %s" % pc.connectionState)
        if pc.connectionState == "failed":
            await pc.close()
            pcs.discard(pc)

    # open media source
    audio = None

    await pc.setRemoteDescription(offer)
    for t in pc.getTransceivers():
        if t.kind == "audio" and audio:
            pc.addTrack(audio)
        elif t.kind == "video" and video:
            pc.addTrack(video)

    answer = await pc.createAnswer()
    await pc.setLocalDescription(answer)

    return web.Response(
        content_type="application/json",
        text=json.dumps({
            "sdp": pc.localDescription.sdp,
            "type": pc.localDescription.type
        }),
    )
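# aiohttp calls handlers with just the request, so the extra keyword arguments
# the handler above expects would have to be bound in advance, e.g. with
# functools.partial. This wiring is an assumption about how the project uses
# it; the MediaPlayer source below is a placeholder file, not theirs.
import functools
from aiohttp import web
from aiortc.contrib.media import MediaPlayer

source = MediaPlayer("demo.mp4")  # placeholder source
app = web.Application()
app.router.add_post(
    "/offer", functools.partial(offer, video=source.video, broadcast=True)
)
# web.run_app(app)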
async def video_track(self):
    if self.__video:
        return MediaRelay().subscribe(self.__video)
    else:
        return None
class LiveRoom:
    """
    This will run in a separate process for every live room.
    It receives new connection requests from the inQ.

    A new connection request is of the format:
    {type, room_id, user_id, sdp}

    It writes the answer to outQ, which is consumed by the RootServer and
    sent to the client. The answer is of the format:
    {type, room_id, user_id, sdp}
    """

    def __init__(self, room_id: int, q: Connection):
        self.room_id = room_id
        self.relay = MediaRelay()
        self.active_users: Dict[int, Tuple[RTCPeerConnection,
                                           MediaStreamTrack]] = dict()
        # outQ is listened to by RootServer
        self.outQ = Producer(TMQ_HOST, TMQ_PORT, TOPIC_TO_ROOT_SERVER)
        self.inQ: Connection = q
        self.logger = logging.getLogger(f"LiveRoom_{self.room_id}")
        self.logger.setLevel(logging.DEBUG)
        self.blackhole = MediaBlackhole()

    async def listen_inq(self):
        """
        This is the main loop for the live rooms
        """
        while True:
            # because inQ.recv is synchronous, run it in a thread so that the
            # main event loop is not blocked
            data = await asyncio.get_running_loop().run_in_executor(
                None, self.inQ.recv)

            if not isinstance(data, dict) or "type" not in data:
                self.logger.debug(
                    "Invalid data read. Message will be discarded.")
                continue

            if data["type"] == "offer":
                if "sdp" not in data:
                    self.logger.debug(
                        "Offer received without sdp. Message will be discarded."
                    )
                    continue
                if "user_id" not in data:
                    self.logger.debug(
                        "Offer received without user_id. Message will be discarded."
                    )
                    continue
                await self.handle_incoming_offer(data["sdp"], data["user_id"])

    async def handle_incoming_offer(self, sdp, user_id):
        player = MediaPlayer("./test.mp3")
        if user_id in self.active_users:
            self.logger.debug(f"User {user_id} already in room_id")
            return

        pc = RTCPeerConnection()

        @pc.on("datachannel")
        def on_datachannel(channel):
            @channel.on("message")
            async def on_message(message):
                if isinstance(message, str):
                    channel.send("pong")

        # Do nothing, but still defining it
        # TODO: Try removing this and see if it still works
        @pc.on("connectionstatechange")
        async def on_connectionstatechange():
            self.logger.debug(
                f"Connection state changed to {pc.connectionState} for user {user_id} in room {self.room_id}"
            )
            if pc.connectionState == "failed" or pc.connectionState == "closed":
                await pc.close()
                if user_id in self.active_users:
                    del self.active_users[user_id]

        @pc.on("track")
        async def on_track(track):
            self.logger.debug(
                f"Got {track.kind} track from user {user_id} in room {self.room_id}"
            )
            if track.kind == "audio":
                self.active_users[user_id] = [pc, track]

            @track.on("ended")
            async def on_ended():
                pass  # IDK what to do here

        for uid, value in self.active_users.items():
            if value[1]:
                pc.addTrack(self.relay.subscribe(value[1]))

        await pc.setRemoteDescription(
            RTCSessionDescription(sdp=sdp, type="offer"))
        ans = await pc.createAnswer()
        await pc.setLocalDescription(ans)

        if user_id not in self.active_users:
            self.active_users[user_id] = [pc, None]

        _ans = {
            "type": "answer",
            "user_id": user_id,
            "room_id": self.room_id,
            "sdp": pc.localDescription.sdp
        }
        await self.outQ.send(json.dumps(_ans), self.room_id)

    async def __run_server(self):
        await self.outQ.init_conn()
        await self.listen_inq()

    def start_room(self):
        self.logger.info(f"Starting room {self.room_id}")
        asyncio.run(self.__run_server())
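# A sketch of the offer message shape described in the LiveRoom docstring,
# pushed to a room over a multiprocessing Pipe (the Connection it reads as
# inQ). The user id and SDP are placeholders, and constructing LiveRoom still
# assumes the module's Producer/queue configuration is available.
import multiprocessing

parent_conn, child_conn = multiprocessing.Pipe()
room = LiveRoom(room_id=1, q=child_conn)

parent_conn.send({
    "type": "offer",
    "room_id": 1,
    "user_id": 42,        # placeholder user id
    "sdp": "v=0\r\n...",  # placeholder SDP offer from the client
})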
class WebGear_RTC:
    """
    WebGear_RTC is similar to the WebGear API in many aspects but utilizes WebRTC technology under the hood instead of
    Motion JPEG, which makes it suitable for building powerful video-streaming solutions for all modern browsers as well
    as native clients available on all major platforms.

    WebGear_RTC is implemented with the help of the aiortc library, which is built on top of an asynchronous I/O framework
    for Web Real-Time Communication (WebRTC) and Object Real-Time Communication (ORTC) and supports many features like SDP
    generation/parsing, Interactive Connectivity Establishment with half-trickle and mDNS support, DTLS key and certificate
    generation, DTLS handshake, etc.

    WebGear_RTC can handle multiple consumers seamlessly and provides native support for ICE (Interactive Connectivity
    Establishment) protocol, STUN (Session Traversal Utilities for NAT), and TURN (Traversal Using Relays around NAT)
    servers that help us to easily establish direct media connection with the remote peers for uninterrupted data flow.
    It also allows us to define our custom Server as a source to transform frames easily before sending them across the
    network (see this doc example).

    WebGear_RTC API works in conjunction with the Starlette ASGI application and can also flexibly interact with
    Starlette's ecosystem of shared middleware, mountable applications, Response classes, Routing tables, Static Files,
    Templating engine (with Jinja2), etc.

    Additionally, WebGear_RTC API also provides an internal wrapper around VideoGear, which itself provides internal
    access to both CamGear and PiGear APIs.
    """

    def __init__(self,
                 enablePiCamera=False,
                 stabilize=False,
                 source=None,
                 camera_num=0,
                 stream_mode=False,
                 backend=0,
                 colorspace=None,
                 resolution=(640, 480),
                 framerate=25,
                 logging=False,
                 time_delay=0,
                 **options):
        """
        This constructor method initializes the object state and attributes of the WebGear_RTC class.

        Parameters:
            enablePiCamera (bool): provide access to PiGear(if True) or CamGear(if False) APIs respectively.
            stabilize (bool): enable access to Stabilizer Class for stabilizing frames.
            camera_num (int): selects the camera module index which will be used as Rpi source.
            resolution (tuple): sets the resolution (i.e. `(width,height)`) of the Rpi source.
            framerate (int/float): sets the framerate of the Rpi source.
            source (based on input): defines the source for the input stream.
            stream_mode (bool): controls the exclusive YouTube Mode.
            backend (int): selects the backend for OpenCV's VideoCapture class.
            colorspace (str): selects the colorspace of the input stream.
            logging (bool): enables/disables logging.
            time_delay (int): time delay (in sec) before it starts reading the frames.
            options (dict): provides the ability to alter Tweak Parameters of WebGear_RTC, CamGear, PiGear & Stabilizer.
        """

        # raise error(s) for critical Class imports
        import_dependency_safe("starlette" if starlette is None else "")
        import_dependency_safe("aiortc" if aiortc is None else "")

        # initialize global params
        self.__logging = logging

        custom_data_location = ""  # path to save data-files to custom location
        data_path = ""  # path to WebGear_RTC data-files
        overwrite_default = False
        self.__relay = None  # acts as broadcaster

        # reformat dictionary
        options = {str(k).strip(): v for k, v in options.items()}

        # assign values to global variables if specified and valid
        if options:
            if "custom_data_location" in options:
                value = options["custom_data_location"]
                if isinstance(value, str):
                    assert os.access(
                        value, os.W_OK
                    ), "[WebGear_RTC:ERROR] :: Permission Denied!, cannot write WebGear_RTC data-files to '{}' directory!".format(
                        value)
                    assert os.path.isdir(
                        os.path.abspath(value)
                    ), "[WebGear_RTC:ERROR] :: `custom_data_location` value must be the path to a directory and not to a file!"
                    custom_data_location = os.path.abspath(value)
                else:
                    logger.warning(
                        "Skipped invalid `custom_data_location` value!")
                del options["custom_data_location"]  # clean

            if "overwrite_default_files" in options:
                value = options["overwrite_default_files"]
                if isinstance(value, bool):
                    overwrite_default = value
                else:
                    logger.warning(
                        "Skipped invalid `overwrite_default_files` value!")
                del options["overwrite_default_files"]  # clean

            if "enable_live_broadcast" in options:
                value = options["enable_live_broadcast"]
                if isinstance(value, bool):
                    if value:
                        self.__relay = MediaRelay()
                        options[
                            "enable_infinite_frames"] = True  # enforce infinite frames
                        logger.critical(
                            "Enabled live broadcasting for Peer connection(s)."
                        )
                else:
                    logger.warning(
                        "Skipped invalid `enable_live_broadcast` value!")
                del options["enable_live_broadcast"]  # clean

        # check if custom certificates path is specified
        if custom_data_location:
            data_path = generate_webdata(
                custom_data_location,
                c_name="webgear_rtc",
                overwrite_default=overwrite_default,
                logging=logging,
            )
        else:
            # otherwise generate suitable path
            data_path = generate_webdata(
                os.path.join(expanduser("~"), ".vidgear"),
                c_name="webgear_rtc",
                overwrite_default=overwrite_default,
                logging=logging,
            )

        # log it
        self.__logging and logger.debug(
            "`{}` is the default location for saving WebGear_RTC data-files.".
            format(data_path))

        # define Jinja2 templates handler
        self.__templates = Jinja2Templates(
            directory="{}/templates".format(data_path))

        # define custom exception handlers
        self.__exception_handlers = {
            404: self.__not_found,
            500: self.__server_error
        }
        # define routing tables
        self.routes = [
            Route("/", endpoint=self.__homepage),
            Route("/offer", self.__offer, methods=["GET", "POST"]),
            Mount(
                "/static",
                app=StaticFiles(directory="{}/static".format(data_path)),
                name="static",
            ),
        ]
        # define middleware support
        self.middleware = []

        # Handle RTC video server
        if source is None:
            self.config = {"server": None}
            self.__default_rtc_server = None
            self.__logging and logger.warning("Given source is of NoneType!")
        else:
            # Handle video source
            self.__default_rtc_server = RTC_VideoServer(
                enablePiCamera=enablePiCamera,
                stabilize=stabilize,
                source=source,
                camera_num=camera_num,
                stream_mode=stream_mode,
                backend=backend,
                colorspace=colorspace,
                resolution=resolution,
                framerate=framerate,
                logging=logging,
                time_delay=time_delay,
                **options)
            # define default frame generator in configuration
            self.config = {"server": self.__default_rtc_server}
            # add exclusive reset connection node
            self.routes.append(
                Route("/close_connection",
                      self.__reset_connections,
                      methods=["POST"]))

        # copying original routing tables for further validation
        self.__rt_org_copy = self.routes[:]
        # collects peer RTC connections
        self.__pcs = set()

    def __call__(self):
        """
        Implements a custom Callable method for WebGear_RTC application.
        """
        # validate routing tables
        assert not (self.routes is None), "Routing tables are NoneType!"
        if not isinstance(self.routes, list) or not all(
                x in self.routes for x in self.__rt_org_copy):
            raise RuntimeError(
                "[WebGear_RTC:ERROR] :: Routing tables are not valid!")

        # validate middlewares
        assert not (self.middleware is None), "Middlewares are NoneType!"
        if self.middleware and (
                not isinstance(self.middleware, list)
                or not all(isinstance(x, Middleware) for x in self.middleware)):
            raise RuntimeError(
                "[WebGear_RTC:ERROR] :: Middlewares are not valid!")

        # validate assigned RTC video-server in WebGear_RTC configuration
        if isinstance(self.config, dict) and "server" in self.config:
            # check if the assigned RTC server class inherits from the `VideoStreamTrack` API
            if self.config["server"] is None or not issubclass(
                    self.config["server"].__class__, VideoStreamTrack):
                # otherwise raise error
                raise ValueError(
                    "[WebGear_RTC:ERROR] :: Invalid configuration. {}. Refer Docs for more information!"
                    .format("Video-Server not assigned"
                            if self.config["server"] is None else
                            "Assigned Video-Server class must inherit from `aiortc.VideoStreamTrack` only"
                            ))
            # check if the assigned server class has a `terminate` function defined and callable
            if not (hasattr(self.config["server"], "terminate")
                    and callable(self.config["server"].terminate)):
                # otherwise raise error
                raise ValueError(
                    "[WebGear_RTC:ERROR] :: Invalid configuration. Assigned Video-Server Class must have a `terminate` method defined. Refer Docs for more information!"
                )
        else:
            # raise error if validation fails
            raise RuntimeError(
                "[WebGear_RTC:ERROR] :: Assigned configuration is invalid!")

        # return Starlette application
        self.__logging and logger.debug("Running Starlette application.")
        return Starlette(
            debug=(True if self.__logging else False),
            routes=self.routes,
            middleware=self.middleware,
            exception_handlers=self.__exception_handlers,
            on_shutdown=[self.__on_shutdown],
        )

    async def __offer(self, request):
        """
        Generates JSON Response with a WebRTC Peer Connection of Video Server.
        """
        # get offer from params
        params = await request.json()
        offer = RTCSessionDescription(sdp=params["sdp"], type=params["type"])

        # initiate stream
        if not (self.__default_rtc_server is None) and not (
                self.__default_rtc_server.is_launched):
            self.__logging and logger.debug("Initiating Video Streaming.")
            self.__default_rtc_server.launch()

        # setup RTC peer connection - interface represents a WebRTC connection
        # between the local computer and a remote peer.
        pc = RTCPeerConnection()
        self.__pcs.add(pc)
        self.__logging and logger.info("Created WebRTC Peer Connection.")

        # track ICE connection state changes
        @pc.on("iceconnectionstatechange")
        async def on_iceconnectionstatechange():
            logger.debug("ICE connection state is %s" % pc.iceConnectionState)
            if pc.iceConnectionState == "failed":
                logger.error("ICE connection state failed.")
                # check if Live Broadcasting is enabled
                if self.__relay is None:
                    # if not, close connection.
                    await pc.close()
                    self.__pcs.discard(pc)

        # Change the remote description associated with the connection.
        await pc.setRemoteDescription(offer)
        # retrieve list of RTCRtpTransceiver objects that are currently attached to the connection
        for t in pc.getTransceivers():
            # Increases performance significantly; IDK why this works as the H265 codec is not even supported :D
            capabilities = RTCRtpSender.getCapabilities("video")
            preferences = list(
                filter(lambda x: x.name == "H265", capabilities.codecs))
            t.setCodecPreferences(preferences)
            # add video server to peer track
            if t.kind == "video":
                pc.addTrack(
                    self.__relay.subscribe(self.config["server"])
                    if not (self.__relay is None) else self.config["server"])

        # Create an SDP answer to an offer received from a remote peer
        answer = await pc.createAnswer()

        # Change the local description for the answer
        await pc.setLocalDescription(answer)

        # return Starlette json response
        return JSONResponse({
            "sdp": pc.localDescription.sdp,
            "type": pc.localDescription.type
        })

    async def __homepage(self, request):
        """
        Return an HTML index page.
        """
        return self.__templates.TemplateResponse("index.html",
                                                 {"request": request})

    async def __not_found(self, request, exc):
        """
        Return an HTML 404 page.
        """
        return self.__templates.TemplateResponse("404.html",
                                                 {"request": request},
                                                 status_code=404)

    async def __server_error(self, request, exc):
        """
        Return an HTML 500 page.
        """
        return self.__templates.TemplateResponse("500.html",
                                                 {"request": request},
                                                 status_code=500)

    async def __reset_connections(self, request):
        """
        Resets all connections and recreates VideoServer timestamps
        """
        # get additional parameter
        parameter = await request.json()
        # check if Live Broadcasting is enabled
        if (self.__relay is None and not (self.__default_rtc_server is None)
                and (self.__default_rtc_server.is_running)):
            logger.critical("Resetting Server")
            # close old peer connections
            if parameter != 0:  # disable if specified explicitly
                coros = [pc.close() for pc in self.__pcs]
                await asyncio.gather(*coros)
                self.__pcs.clear()
            await self.__default_rtc_server.reset()
            return PlainTextResponse("OK")
        else:
            # otherwise do nothing
            return PlainTextResponse("DISABLED")

    async def __on_shutdown(self):
        """
        Implements a Callable to be run on application shutdown
        """
        # close Video Server
        self.shutdown()
        # close all peer RTC connections
        coros = [pc.close() for pc in self.__pcs]
        await asyncio.gather(*coros)
        self.__pcs.clear()

    def shutdown(self):
        """
        Gracefully shuts down the video-server
        """
        if not (self.config["server"] is None):
            self.__logging and logger.debug("Closing Video Server.")
            self.config["server"].terminate()
            self.config["server"] = None
        # terminate internal server as well.
        self.__default_rtc_server = None
async def video_track(self):
    if not self.__video:
        self.__video = await self.__get_tracks()
    return MediaRelay().subscribe(self.__video)
async def _process_offer_coro(
    mode: WebRtcMode,
    pc: RTCPeerConnection,
    offer: RTCSessionDescription,
    relay: MediaRelay,
    source_video_track: Optional[MediaStreamTrack],
    source_audio_track: Optional[MediaStreamTrack],
    in_recorder: Optional[MediaRecorder],
    out_recorder: Optional[MediaRecorder],
    video_processor: Optional[VideoProcessorBase],
    audio_processor: Optional[AudioProcessorBase],
    video_receiver: Optional[VideoReceiver],
    audio_receiver: Optional[AudioReceiver],
    async_processing: bool,
    sendback_video: bool,
    sendback_audio: bool,
    on_track_created: Callable[[TrackType, MediaStreamTrack], None],
):
    if mode == WebRtcMode.SENDRECV:

        @pc.on("track")
        def on_track(input_track):
            logger.info("Track %s received", input_track.kind)

            if input_track.kind == "video":
                on_track_created("input:video", input_track)
            elif input_track.kind == "audio":
                on_track_created("input:audio", input_track)

            output_track = None

            if input_track.kind == "audio":
                if source_audio_track:
                    logger.info("Set %s as an input audio track",
                                source_audio_track)
                    output_track = source_audio_track
                elif audio_processor:
                    AudioTrack = (AsyncAudioProcessTrack
                                  if async_processing else AudioProcessTrack)
                    logger.info(
                        "Set %s as an input audio track with audio_processor %s",
                        input_track,
                        AudioTrack,
                    )
                    output_track = AudioTrack(
                        track=relay.subscribe(input_track),
                        processor=audio_processor,
                    )
                else:
                    output_track = input_track  # passthrough
            elif input_track.kind == "video":
                if source_video_track:
                    logger.info("Set %s as an input video track",
                                source_video_track)
                    output_track = source_video_track
                elif video_processor:
                    VideoTrack = (AsyncVideoProcessTrack
                                  if async_processing else VideoProcessTrack)
                    logger.info(
                        "Set %s as an input video track with video_processor %s",
                        input_track,
                        VideoTrack,
                    )
                    output_track = VideoTrack(
                        track=relay.subscribe(input_track),
                        processor=video_processor,
                    )
                else:
                    output_track = input_track

            if (output_track.kind == "video" and sendback_video) or (
                    output_track.kind == "audio" and sendback_audio):
                logger.info(
                    "Add a track %s of kind %s to %s",
                    output_track,
                    output_track.kind,
                    pc,
                )
                pc.addTrack(relay.subscribe(output_track))
            else:
                logger.info("Block a track %s of kind %s", output_track,
                            output_track.kind)

            if out_recorder:
                logger.info("Track %s is added to out_recorder",
                            output_track.kind)
                out_recorder.addTrack(relay.subscribe(output_track))
            if in_recorder:
                logger.info("Track %s is added to in_recorder",
                            input_track.kind)
                in_recorder.addTrack(relay.subscribe(input_track))

            if output_track.kind == "video":
                on_track_created("output:video", output_track)
            elif output_track.kind == "audio":
                on_track_created("output:audio", output_track)

            @input_track.on("ended")
            async def on_ended():
                logger.info("Track %s ended", input_track.kind)
                if in_recorder:
                    await in_recorder.stop()
                if out_recorder:
                    await out_recorder.stop()

    elif mode == WebRtcMode.SENDONLY:

        @pc.on("track")
        def on_track(input_track):
            logger.info("Track %s received", input_track.kind)

            if input_track.kind == "video":
                on_track_created("input:video", input_track)
            elif input_track.kind == "audio":
                on_track_created("input:audio", input_track)

            if input_track.kind == "audio":
                if audio_receiver:
                    logger.info("Add a track %s to receiver %s", input_track,
                                audio_receiver)
                    audio_receiver.addTrack(input_track)
            elif input_track.kind == "video":
                if video_receiver:
                    logger.info("Add a track %s to receiver %s", input_track,
                                video_receiver)
                    video_receiver.addTrack(input_track)

            if in_recorder:
                logger.info("Track %s is added to in_recorder",
                            input_track.kind)
                in_recorder.addTrack(input_track)

            @input_track.on("ended")
            async def on_ended():
                logger.info("Track %s ended", input_track.kind)
                if video_receiver:
                    video_receiver.stop()
                if audio_receiver:
                    audio_receiver.stop()
                if in_recorder:
                    await in_recorder.stop()

    await pc.setRemoteDescription(offer)

    if mode == WebRtcMode.RECVONLY:
        for t in pc.getTransceivers():
            output_track = None
            if t.kind == "audio":
                if source_audio_track:
                    if audio_processor:
                        AudioTrack = (AsyncAudioProcessTrack if
                                      async_processing else AudioProcessTrack)
                        logger.info(
                            "Set %s as an input audio track with audio_processor %s",
                            source_audio_track,
                            AudioTrack,
                        )
                        output_track = AudioTrack(track=source_audio_track,
                                                  processor=audio_processor)
                    else:
                        output_track = source_audio_track  # passthrough
            elif t.kind == "video":
                if source_video_track:
                    if video_processor:
                        VideoTrack = (AsyncVideoProcessTrack if
                                      async_processing else VideoProcessTrack)
                        logger.info(
                            "Set %s as an input video track with video_processor %s",
                            source_video_track,
                            VideoTrack,
                        )
                        output_track = VideoTrack(track=source_video_track,
                                                  processor=video_processor)
                    else:
                        output_track = source_video_track  # passthrough

            if output_track:
                logger.info("Add a track %s to %s", output_track, pc)
                pc.addTrack(relay.subscribe(output_track))
                # NOTE: Recording is not supported in this mode
                # because connecting player to recorder does not work somehow;
                # it generates unplayable movie files.

                if output_track.kind == "video":
                    on_track_created("output:video", output_track)
                elif output_track.kind == "audio":
                    on_track_created("output:audio", output_track)

    if video_receiver and video_receiver.hasTrack():
        video_receiver.start()
    if audio_receiver and audio_receiver.hasTrack():
        audio_receiver.start()

    if in_recorder:
        await in_recorder.start()
    if out_recorder:
        await out_recorder.start()

    answer = await pc.createAnswer()
    await pc.setLocalDescription(answer)

    return pc.localDescription
import logging
import os

import paho.mqtt.client as mqtt
from aiohttp import web
from aiohttp import ClientSession
from av import VideoFrame
from aiortc import MediaStreamTrack, RTCPeerConnection, RTCSessionDescription, RTCRtpSender
from aiortc.contrib.media import MediaBlackhole, MediaPlayer, MediaRecorder, MediaRelay

ROOT = os.path.dirname(__file__)

# logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger("pc")
pcs = set()
relay = MediaRelay()
broadcast = None


### Publisher
def on_connect(client, userdata, flags, rc):
    print(f"Connected with result code {rc}")
    client.subscribe("Number of connections")


def create_broadcast(track):
    global broadcast
    broadcast = track


def broadcast_ended():
                              format='lavfi',
                              options=audio_options).audio
    if platform.system() == "Windows":
        video_track = MediaPlayer("video=HP TrueVision HD Camera",
                                  format="dshow",
                                  options=video_options).video
    else:
        video_track = MediaPlayer("/dev/video0",
                                  format="v4l2",
                                  options=video_options).video
    return audio_track, video_track


if __name__ == "__main__":
    if len(sys.argv) > 1:
        trans_key = sys.argv[1]
    else:
        print('The following argument is required: translation-key')
        sys.exit()

    relay = MediaRelay()
    recorder = MediaRecorder(f'rtmp://a.rtmp.youtube.com/live2/{trans_key}',
                             format='flv')

    audio, video = get_tracks()
    recorder.addTrack(audio)
    recorder.addTrack(relay.subscribe(video))

    loop = asyncio.get_event_loop()
    loop.create_task(recorder.start())
    loop.run_forever()