def create_local_tracks(play_from, owner=False, camFilter=False):
    global relay, webcam
    global relayFilter, webcamFilter

    if play_from:
        player = MediaPlayer(play_from)
        return player.audio, player.video
    else:
        # options = {"framerate": "30", "video_size": "640x480"}
        if not owner:
            if relay is None:
                webcam = MediaPlayer("/dev/video0", format="v4l2")
                # webcam = MediaPlayer("/dev/video0", format="v4l2", options=options)
                # webcam = MediaPlayer("/dev/video6", format="v4l2", options=options)
                relay = MediaRelay()
            relayTmp = relay.subscribe(webcam.video)
        else:
            if relayFilter is None:
                webcamFilter = MediaPlayer("/dev/video7", format="v4l2")
                # webcamFilter = MediaPlayer("/dev/video7", format="v4l2", options=options)
                relayFilter = MediaRelay()
            relayTmp = relayFilter.subscribe(webcamFilter.video)
            if camFilter:
                proc2.send_signal(40)
            else:
                proc2.send_signal(41)
        return None, relayTmp
def test_audio_stop_source(self):
    source = AudioStreamTrack()
    relay = MediaRelay()
    proxy1 = relay.subscribe(source)
    proxy2 = relay.subscribe(source)

    # read some frames
    samples_per_frame = 160
    for pts in range(0, 2 * samples_per_frame, samples_per_frame):
        frame1, frame2 = run(asyncio.gather(proxy1.recv(), proxy2.recv()))
        self.assertEqual(frame1.format.name, "s16")
        self.assertEqual(frame1.layout.name, "mono")
        self.assertEqual(frame1.pts, pts)
        self.assertEqual(frame1.samples, samples_per_frame)
        self.assertEqual(frame2.format.name, "s16")
        self.assertEqual(frame2.layout.name, "mono")
        self.assertEqual(frame2.pts, pts)
        self.assertEqual(frame2.samples, samples_per_frame)

    # stop source track
    source.stop()

    # continue reading
    run(asyncio.gather(proxy1.recv(), proxy2.recv()))
    for i in range(2):
        exc1, exc2 = run(
            asyncio.gather(proxy1.recv(), proxy2.recv(), return_exceptions=True)
        )
        self.assertTrue(isinstance(exc1, MediaStreamError))
        self.assertTrue(isinstance(exc2, MediaStreamError))
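# A minimal, runnable sketch (my addition, not part of the test suite above) of the
# fan-out pattern the test exercises: one source track, one MediaRelay, several
# independent subscriber proxies that each receive the same frames. It uses
# aiortc's built-in silent AudioStreamTrack as a stand-in source so it runs
# without any capture device.
import asyncio

from aiortc.contrib.media import MediaRelay
from aiortc.mediastreams import AudioStreamTrack


async def relay_fanout_sketch():
    source = AudioStreamTrack()        # silent 8 kHz mono test source
    relay = MediaRelay()
    proxy_a = relay.subscribe(source)  # each subscriber gets its own proxy track
    proxy_b = relay.subscribe(source)

    frame_a, frame_b = await asyncio.gather(proxy_a.recv(), proxy_b.recv())
    print(frame_a.pts, frame_b.pts)    # both proxies observe the same timestamps

    source.stop()                      # subsequent recv() calls raise MediaStreamError


asyncio.run(relay_fanout_sketch())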
def get_global_relay() -> MediaRelay:
    server = Server.get_current()
    if hasattr(server, _SERVER_GLOBAL_RELAY_ATTR_NAME_):
        return getattr(server, _SERVER_GLOBAL_RELAY_ATTR_NAME_)
    else:
        loop = get_server_event_loop()
        with loop_context(loop):
            relay = MediaRelay()
        setattr(server, _SERVER_GLOBAL_RELAY_ATTR_NAME_, relay)
        return relay
def __init__(self, room_id: int, q: Connection):
    self.room_id = room_id
    self.relay = MediaRelay()
    self.active_users: Dict[int, Tuple[RTCPeerConnection, MediaStreamTrack]] = dict()
    # outQ is consumed by the RootServer
    self.outQ = Producer(TMQ_HOST, TMQ_PORT, TOPIC_TO_ROOT_SERVER)
    self.inQ: Connection = q
    self.logger = logging.getLogger(f"LiveRoom_{self.room_id}")
    self.logger.setLevel(logging.DEBUG)
    self.blackhole = MediaBlackhole()
async def on_track(track):
    if track.kind == "audio":
        self.recorder.addTrack(track)
    elif track.kind == "video":
        self.__video = track
        self.recorder.addTrack(FixedPtsTrack(MediaRelay().subscribe(track)))
    logger.info(f"Track {track.kind} added")

    @track.on("ended")
    async def on_ended():
        await self.recorder.stop()
        logger.info(f"Track {track.kind} ended")
async def test_audio_slow_consumer(self):
    source = AudioStreamTrack()
    relay = MediaRelay()
    proxy1 = relay.subscribe(source, buffered=False)
    proxy2 = relay.subscribe(source, buffered=False)

    # read some frames
    samples_per_frame = 160
    for pts in range(0, 2 * samples_per_frame, samples_per_frame):
        frame1, frame2 = await asyncio.gather(proxy1.recv(), proxy2.recv())
        self.assertEqual(frame1.format.name, "s16")
        self.assertEqual(frame1.layout.name, "mono")
        self.assertEqual(frame1.pts, pts)
        self.assertEqual(frame1.samples, samples_per_frame)
        self.assertEqual(frame2.format.name, "s16")
        self.assertEqual(frame2.layout.name, "mono")
        self.assertEqual(frame2.pts, pts)
        self.assertEqual(frame2.samples, samples_per_frame)

    # skip some frames
    timestamp = 5 * samples_per_frame
    await asyncio.sleep(source._start + (timestamp / 8000) - time.time())

    frame1, frame2 = await asyncio.gather(proxy1.recv(), proxy2.recv())
    self.assertEqual(frame1.format.name, "s16")
    self.assertEqual(frame1.layout.name, "mono")
    self.assertEqual(frame1.pts, 5 * samples_per_frame)
    self.assertEqual(frame1.samples, samples_per_frame)
    self.assertEqual(frame2.format.name, "s16")
    self.assertEqual(frame2.layout.name, "mono")
    self.assertEqual(frame2.pts, 5 * samples_per_frame)
    self.assertEqual(frame2.samples, samples_per_frame)

    # stop a consumer
    proxy1.stop()

    # continue reading
    for i in range(2):
        exc1, frame2 = await asyncio.gather(
            proxy1.recv(), proxy2.recv(), return_exceptions=True
        )
        self.assertTrue(isinstance(exc1, MediaStreamError))
        self.assertTrue(isinstance(frame2, av.AudioFrame))

    # stop source track
    source.stop()
async def connect(self, host, port, turn=None):
    ice_servers = [RTCIceServer('stun:stun.l.google.com:19302')]
    if turn:
        ice_servers.append(turn)
    config = RTCConfiguration(ice_servers)
    self.pc = RTCPeerConnection(config)

    if not self.__video:
        self.__video = await self.__get_tracks()
    self.pc.addTrack(MediaRelay().subscribe(self.__video))

    offer = await self.pc.createOffer()
    await self.pc.setLocalDescription(offer)

    self.signaling = WebSocketClient(host, port)

    async def send_offer():
        logger.debug(f"Ice Gathering State: {self.pc.iceGatheringState}")
        if self.pc.iceGatheringState == 'complete':
            logger.debug("Offer sent")
            await self.signaling.send_data({
                "sdp": self.pc.localDescription.sdp,
                "type": self.pc.localDescription.type
            })
        else:
            self.pc.once("icegatheringstatechange", send_offer)

    @self.signaling.on_connected
    async def on_connected(_):
        await send_offer()

    @self.signaling.on_message
    async def on_message(message):
        logger.debug(f"{message.get('type')} received")
        if message.get("type") == "answer":
            answer = RTCSessionDescription(sdp=message["sdp"], type=message["type"])
            await self.pc.setRemoteDescription(answer)

    @self.pc.on("connectionstatechange")
    async def on_connectionstatechange():
        if self.pc.connectionState == "failed":
            await self.pc.close()
        logger.info(f"Connection state: {self.pc.connectionState}")
async def offer(params: Offer):
    offer = RTCSessionDescription(sdp=params.sdp, type=params.type)

    pc = RTCPeerConnection()
    pcs.add(pc)
    recorder = MediaBlackhole()
    relay = MediaRelay()

    @pc.on("connectionstatechange")
    async def on_connectionstatechange():
        print("Connection state is %s" % pc.connectionState)
        if pc.connectionState == "failed":
            await pc.close()
            pcs.discard(pc)

    # open media source
    # audio, video = create_local_tracks()

    @pc.on("track")
    def on_track(track):
        # if track.kind == "audio":
        #     pc.addTrack(player.audio)
        #     recorder.addTrack(track)
        if track.kind == "video":
            pc.addTrack(
                VideoTransformTrack(
                    relay.subscribe(track), transform=params.video_transform
                )
            )
            # if args.record_to:
            #     recorder.addTrack(relay.subscribe(track))

        @track.on("ended")
        async def on_ended():
            await recorder.stop()

    # handle offer
    await pc.setRemoteDescription(offer)
    await recorder.start()

    # send answer (the remote description is already set above, so createAnswer
    # is followed directly by setLocalDescription)
    answer = await pc.createAnswer()
    await pc.setLocalDescription(answer)

    return {"sdp": pc.localDescription.sdp, "type": pc.localDescription.type}
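# A self-contained sketch (my addition, not the application's transport) of the
# offer/answer ordering the handler above depends on: setRemoteDescription, then
# createAnswer, then setLocalDescription. Two in-process peer connections stand
# in for the browser and the server; no HTTP or signaling layer is involved.
import asyncio

from aiortc import RTCPeerConnection


async def handshake_sketch():
    caller, callee = RTCPeerConnection(), RTCPeerConnection()
    caller.createDataChannel("demo")  # something to negotiate

    await caller.setLocalDescription(await caller.createOffer())
    await callee.setRemoteDescription(caller.localDescription)

    await callee.setLocalDescription(await callee.createAnswer())
    await caller.setRemoteDescription(callee.localDescription)

    print(caller.signalingState, callee.signalingState)  # both report "stable"

    await asyncio.gather(caller.close(), callee.close())


asyncio.run(handshake_sketch())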
def create_local_tracks(play_from):
    global relay, webcam

    if play_from:
        # player = MediaPlayer(play_from)
        # return an (audio, video) pair, matching the webcam branch below
        return None, FlagVideoStreamTrack()
    else:
        options = {"framerate": "30", "video_size": "640x480"}
        if relay is None:
            if platform.system() == "Darwin":
                webcam = MediaPlayer(
                    "default:none", format="avfoundation", options=options
                )
            elif platform.system() == "Windows":
                webcam = MediaPlayer(
                    "video=Integrated Camera", format="dshow", options=options
                )
            else:
                webcam = MediaPlayer("/dev/video0", format="v4l2", options=options)
            relay = MediaRelay()
        return None, relay.subscribe(webcam.video)
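# Hypothetical wiring sketch (my addition, not from the original file): how the
# (audio, video) pair returned by a create_local_tracks-style helper is usually
# attached to an RTCPeerConnection. aiortc's built-in AudioStreamTrack and
# VideoStreamTrack placeholders replace the real webcam so the sketch runs anywhere.
import asyncio

from aiortc import RTCPeerConnection
from aiortc.contrib.media import MediaRelay
from aiortc.mediastreams import AudioStreamTrack, VideoStreamTrack


async def local_tracks_sketch():
    relay = MediaRelay()
    audio, video = AudioStreamTrack(), relay.subscribe(VideoStreamTrack())

    pc = RTCPeerConnection()
    if audio:
        pc.addTrack(audio)
    if video:
        pc.addTrack(video)

    offer = await pc.createOffer()  # SDP advertising both tracks
    await pc.setLocalDescription(offer)
    print(pc.localDescription.type)

    await pc.close()


asyncio.run(local_tracks_sketch())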
def _test():
    # Mock functions that depend on the Streamlit global server object
    global get_global_relay, get_server_event_loop

    loop = asyncio.get_event_loop()

    def get_server_event_loop_mock():
        return loop

    get_server_event_loop = get_server_event_loop_mock

    fake_global_relay = MediaRelay()

    def get_global_relay_mock():
        return fake_global_relay

    get_global_relay = get_global_relay_mock

    # Start the test
    client = RTCPeerConnection()
    client.createDataChannel("test")

    offer = loop.run_until_complete(client.createOffer())
    logger.debug("Offer for mock testing: %s", offer)

    def test_thread_fn():
        webrtc_worker = WebRtcWorker(mode=WebRtcMode.SENDRECV)
        localDescription = webrtc_worker.process_offer(offer.sdp, offer.type)
        logger.debug("localDescription:")
        logger.debug(localDescription)
        webrtc_worker.stop()

    test_thread = threading.Thread(target=test_thread_fn)
    test_thread.start()

    # HACK: keep pumping the event loop so the worker thread's coroutines can run
    for _ in range(100):
        loop.run_until_complete(asyncio.sleep(0.01))
def create_local_tracks(play_from=None):
    if play_from:
        player = MediaPlayer(play_from)
        return player.audio, player.video
    else:
        options = {"framerate": "30", "video_size": "1920x1080"}
        # if relay is None:
        #     if platform.system() == "Darwin":
        #         webcam = MediaPlayer(
        #             "default:none", format="avfoundation", options=options
        #         )
        #     elif platform.system() == "Windows":
        #         webcam = MediaPlayer("video.mp4")
        webcam = MediaPlayer(
            "video=FULL HD 1080P Webcam", format="dshow", options=options
        )
        #     else:
        #         webcam = MediaPlayer("/dev/video0", format="v4l2", options=options)
        # audio, video = VideoTransformTrack(webcam.video, transform="cv")
        relay = MediaRelay()
        return None, relay.subscribe(webcam.video)
async def offer(request, **kwargs):
    video = kwargs['video']
    if "broadcast" in kwargs and kwargs["broadcast"]:
        video = MediaRelay().subscribe(video)

    params = await request.json()
    offer = RTCSessionDescription(sdp=params["sdp"], type=params["type"])

    pc = RTCPeerConnection()
    pcs.add(pc)

    @pc.on("connectionstatechange")
    async def on_connectionstatechange():
        print("Connection state is %s" % pc.connectionState)
        if pc.connectionState == "failed":
            await pc.close()
            pcs.discard(pc)

    # open media source
    audio = None

    await pc.setRemoteDescription(offer)
    for t in pc.getTransceivers():
        if t.kind == "audio" and audio:
            pc.addTrack(audio)
        elif t.kind == "video" and video:
            pc.addTrack(video)

    answer = await pc.createAnswer()
    await pc.setLocalDescription(answer)

    return web.Response(
        content_type="application/json",
        text=json.dumps({
            "sdp": pc.localDescription.sdp,
            "type": pc.localDescription.type
        }),
    )
import logging
import os

import paho.mqtt.client as mqtt
from aiohttp import web
from aiohttp import ClientSession
from av import VideoFrame
from aiortc import (
    MediaStreamTrack,
    RTCPeerConnection,
    RTCSessionDescription,
    RTCRtpSender,
)
from aiortc.contrib.media import MediaBlackhole, MediaPlayer, MediaRecorder, MediaRelay

ROOT = os.path.dirname(__file__)

# logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger("pc")
pcs = set()
relay = MediaRelay()
broadcast = None


### Publisher
def on_connect(client, userdata, flags, rc):
    print(f"Connected with result code {rc}")
    client.subscribe("Number of connections")


def create_broadcast(track):
    global broadcast
    broadcast = track


def broadcast_ended():
async def video_track(self):
    if self.__video:
        return MediaRelay().subscribe(self.__video)
    else:
        return None
def __init__(
    self,
    enablePiCamera=False,
    stabilize=False,
    source=None,
    camera_num=0,
    stream_mode=False,
    backend=0,
    colorspace=None,
    resolution=(640, 480),
    framerate=25,
    logging=False,
    time_delay=0,
    **options
):
    """
    This constructor method initializes the object state and attributes of the WebGear_RTC class.

    Parameters:
        enablePiCamera (bool): provide access to PiGear(if True) or CamGear(if False) APIs respectively.
        stabilize (bool): enable access to Stabilizer Class for stabilizing frames.
        camera_num (int): selects the camera module index which will be used as Rpi source.
        resolution (tuple): sets the resolution (i.e. `(width,height)`) of the Rpi source.
        framerate (int/float): sets the framerate of the Rpi source.
        source (based on input): defines the source for the input stream.
        stream_mode (bool): controls the exclusive YouTube Mode.
        backend (int): selects the backend for OpenCV's VideoCapture class.
        colorspace (str): selects the colorspace of the input stream.
        logging (bool): enables/disables logging.
        time_delay (int): time delay (in sec) before start reading the frames.
        options (dict): provides ability to alter Tweak Parameters of WebGear_RTC, CamGear, PiGear & Stabilizer.
    """
    # raise error(s) for critical Class imports
    import_dependency_safe("starlette" if starlette is None else "")
    import_dependency_safe("aiortc" if aiortc is None else "")

    # initialize global params
    self.__logging = logging

    custom_data_location = ""  # path to save data-files to custom location
    data_path = ""  # path to WebGear_RTC data-files
    overwrite_default = False
    self.__relay = None  # act as broadcaster

    # reformat dictionary
    options = {str(k).strip(): v for k, v in options.items()}

    # assign values to global variables if specified and valid
    if options:
        if "custom_data_location" in options:
            value = options["custom_data_location"]
            if isinstance(value, str):
                assert os.access(
                    value, os.W_OK
                ), "[WebGear_RTC:ERROR] :: Permission Denied!, cannot write WebGear_RTC data-files to '{}' directory!".format(
                    value
                )
                assert os.path.isdir(
                    os.path.abspath(value)
                ), "[WebGear_RTC:ERROR] :: `custom_data_location` value must be the path to a directory and not to a file!"
                custom_data_location = os.path.abspath(value)
            else:
                logger.warning("Skipped invalid `custom_data_location` value!")
            del options["custom_data_location"]  # clean

        if "overwrite_default_files" in options:
            value = options["overwrite_default_files"]
            if isinstance(value, bool):
                overwrite_default = value
            else:
                logger.warning("Skipped invalid `overwrite_default_files` value!")
            del options["overwrite_default_files"]  # clean

        if "enable_live_broadcast" in options:
            value = options["enable_live_broadcast"]
            if isinstance(value, bool):
                if value:
                    self.__relay = MediaRelay()
                    options["enable_infinite_frames"] = True  # enforce infinite frames
                    logger.critical(
                        "Enabled live broadcasting for Peer connection(s)."
                    )
            else:
                logger.warning("Skipped invalid `enable_live_broadcast` value!")
            del options["enable_live_broadcast"]  # clean

    # check if custom certificates path is specified
    if custom_data_location:
        data_path = generate_webdata(
            custom_data_location,
            c_name="webgear_rtc",
            overwrite_default=overwrite_default,
            logging=logging,
        )
    else:
        # otherwise generate suitable path
        data_path = generate_webdata(
            os.path.join(expanduser("~"), ".vidgear"),
            c_name="webgear_rtc",
            overwrite_default=overwrite_default,
            logging=logging,
        )

    # log it
    self.__logging and logger.debug(
        "`{}` is the default location for saving WebGear_RTC data-files.".format(
            data_path
        )
    )

    # define Jinja2 templates handler
    self.__templates = Jinja2Templates(directory="{}/templates".format(data_path))

    # define custom exception handlers
    self.__exception_handlers = {404: self.__not_found, 500: self.__server_error}

    # define routing tables
    self.routes = [
        Route("/", endpoint=self.__homepage),
        Route("/offer", self.__offer, methods=["GET", "POST"]),
        Mount(
            "/static",
            app=StaticFiles(directory="{}/static".format(data_path)),
            name="static",
        ),
    ]

    # define middleware support
    self.middleware = []

    # Handle RTC video server
    if source is None:
        self.config = {"server": None}
        self.__default_rtc_server = None
        self.__logging and logger.warning("Given source is of NoneType!")
    else:
        # Handle video source
        self.__default_rtc_server = RTC_VideoServer(
            enablePiCamera=enablePiCamera,
            stabilize=stabilize,
            source=source,
            camera_num=camera_num,
            stream_mode=stream_mode,
            backend=backend,
            colorspace=colorspace,
            resolution=resolution,
            framerate=framerate,
            logging=logging,
            time_delay=time_delay,
            **options
        )
        # define default frame generator in configuration
        self.config = {"server": self.__default_rtc_server}
        # add exclusive reset connection node
        self.routes.append(
            Route("/close_connection", self.__reset_connections, methods=["POST"])
        )

    # copying original routing tables for further validation
    self.__rt_org_copy = self.routes[:]
    # collects peer RTC connections
    self.__pcs = set()
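# Hypothetical launch sketch for the constructor above, following vidgear's
# documented WebGear_RTC pattern. Assumptions: vidgear with asyncio extras and
# uvicorn are installed, and a webcam is available at index 0; calling the
# instance returns its Starlette app.
import uvicorn
from vidgear.gears.asyncio import WebGear_RTC

web = WebGear_RTC(source=0, logging=True)           # serve the local webcam
uvicorn.run(web(), host="localhost", port=8000)     # blocks until interrupted
web.shutdown()                                      # release the source afterwards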
async def video_track(self):
    if not self.__video:
        self.__video = await self.__get_tracks()
    return MediaRelay().subscribe(self.__video)
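# Related sketch (my addition, not from the original class): the helpers above
# create a fresh MediaRelay() per call, which is fine while each track has a
# single consumer. When several consumers need the same track, sharing one relay
# instance lets them all subscribe while the source is read only once. Uses
# aiortc's built-in VideoStreamTrack as a stand-in for self.__video.
import asyncio

from aiortc.contrib.media import MediaRelay
from aiortc.mediastreams import VideoStreamTrack


async def shared_relay_sketch():
    source = VideoStreamTrack()     # stand-in source track
    relay = MediaRelay()            # one relay shared by all consumers

    viewers = [relay.subscribe(source) for _ in range(3)]
    frames = await asyncio.gather(*(v.recv() for v in viewers))
    print([f.pts for f in frames])  # every viewer receives the same frame

    source.stop()


asyncio.run(shared_relay_sketch())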