async def offer(request):
    params = await request.json()
    offer = RTCSessionDescription(sdp=params["sdp"], type=params["type"])

    pc = RTCPeerConnection()
    pcs.add(pc)

    @pc.on("datachannel")
    def on_datachannel(channel):
        @channel.on("message")
        def on_message(message):
            channel.send("pong")

    @pc.on("iceconnectionstatechange")
    async def on_iceconnectionstatechange():
        print("ICE connection state is %s" % pc.iceConnectionState)
        if pc.iceConnectionState == "failed":
            await pc.close()

    cameravideo = VideoSender()
    pc.addTrack(cameravideo)

    # handle offer
    await pc.setRemoteDescription(offer)

    # send answer
    answer = await pc.createAnswer()
    await pc.setLocalDescription(answer)

    return web.Response(
        content_type="application/json",
        text=json.dumps(
            {"sdp": pc.localDescription.sdp, "type": pc.localDescription.type}
        ),
    )
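# The VideoSender class used above is not defined in this snippet. A minimal
# sketch of what such a track could look like, assuming it only needs to
# produce frames for pc.addTrack(); the class name and the blank frame content
# are illustrative stand-ins, not the original implementation.
import av
import numpy
from aiortc import VideoStreamTrack


class VideoSender(VideoStreamTrack):  # hypothetical stand-in for the real sender
    """Emit a solid-color frame; a real sender would read from a camera instead."""

    async def recv(self):
        pts, time_base = await self.next_timestamp()
        frame = av.VideoFrame.from_ndarray(
            numpy.zeros((480, 640, 3), dtype="uint8"), format="bgr24"
        )
        frame.pts = pts
        frame.time_base = time_base
        return frame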
def test_connect(self):
    pc1 = RTCPeerConnection()
    pc1_states = track_states(pc1)

    pc2 = RTCPeerConnection()
    pc2_states = track_states(pc2)

    self.assertEqual(pc1.iceConnectionState, 'new')
    self.assertEqual(pc1.iceGatheringState, 'new')
    self.assertIsNone(pc1.localDescription)
    self.assertIsNone(pc1.remoteDescription)

    self.assertEqual(pc2.iceConnectionState, 'new')
    self.assertEqual(pc2.iceGatheringState, 'new')
    self.assertIsNone(pc2.localDescription)
    self.assertIsNone(pc2.remoteDescription)

    # create offer
    pc1.addTrack(AudioStreamTrack())
    offer = run(pc1.createOffer())
    self.assertEqual(offer.type, 'offer')
    self.assertTrue('m=audio ' in offer.sdp)
    self.assertFalse('a=candidate:' in offer.sdp)

    run(pc1.setLocalDescription(offer))
    self.assertEqual(pc1.iceConnectionState, 'new')
    self.assertEqual(pc1.iceGatheringState, 'complete')
    self.assertTrue('m=audio ' in pc1.localDescription.sdp)
    self.assertTrue('a=candidate:' in pc1.localDescription.sdp)
    self.assertTrue('a=sendrecv' in pc1.localDescription.sdp)
    self.assertTrue('a=fingerprint:sha-256' in pc1.localDescription.sdp)
    self.assertTrue('a=setup:actpass' in pc1.localDescription.sdp)
    self.assertTrue('a=mid:audio' in pc1.localDescription.sdp)

    # handle offer
    run(pc2.setRemoteDescription(pc1.localDescription))
    self.assertEqual(pc2.remoteDescription, pc1.localDescription)
    self.assertEqual(len(pc2.getReceivers()), 1)
    self.assertEqual(len(pc2.getSenders()), 1)
    self.assertEqual(len(pc2.getTransceivers()), 1)

    # create answer
    answer = run(pc2.createAnswer())
    self.assertEqual(answer.type, 'answer')
    self.assertTrue('m=audio ' in answer.sdp)
    self.assertFalse('a=candidate:' in answer.sdp)

    run(pc2.setLocalDescription(answer))
    self.assertEqual(pc2.iceConnectionState, 'checking')
    self.assertEqual(pc2.iceGatheringState, 'complete')
    self.assertTrue('m=audio ' in pc2.localDescription.sdp)
    self.assertTrue('a=candidate:' in pc2.localDescription.sdp)
    self.assertTrue('a=recvonly' in pc2.localDescription.sdp)
    self.assertTrue('a=fingerprint:sha-256' in pc2.localDescription.sdp)
    self.assertTrue('a=setup:active' in pc2.localDescription.sdp)
    self.assertTrue('a=mid:audio' in pc2.localDescription.sdp)

    # handle answer
    run(pc1.setRemoteDescription(pc2.localDescription))
    self.assertEqual(pc1.remoteDescription, pc2.localDescription)
    self.assertEqual(pc1.iceConnectionState, 'checking')

    # check outcome
    run(asyncio.sleep(1))
    self.assertEqual(pc1.iceConnectionState, 'completed')
    self.assertEqual(pc2.iceConnectionState, 'completed')

    # close
    run(pc1.close())
    run(pc2.close())
    self.assertEqual(pc1.iceConnectionState, 'closed')
    self.assertEqual(pc2.iceConnectionState, 'closed')

    # check state changes
    self.assertEqual(pc1_states['iceConnectionState'],
                     ['new', 'checking', 'completed', 'closed'])
    self.assertEqual(pc1_states['iceGatheringState'],
                     ['new', 'gathering', 'complete'])
    self.assertEqual(pc1_states['signalingState'],
                     ['stable', 'have-local-offer', 'stable', 'closed'])
    self.assertEqual(pc2_states['iceConnectionState'],
                     ['new', 'checking', 'completed', 'closed'])
    self.assertEqual(pc2_states['iceGatheringState'],
                     ['new', 'gathering', 'complete'])
    self.assertEqual(pc2_states['signalingState'],
                     ['stable', 'have-remote-offer', 'stable', 'closed'])
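# The test above relies on two helpers that are not shown here: run(), which
# drives a coroutine to completion, and track_states(), which records every
# state transition for the final assertions. A sketch of what they are assumed
# to look like:
import asyncio


def run(coro):
    # drive a coroutine on the default event loop and return its result
    return asyncio.get_event_loop().run_until_complete(coro)


def track_states(pc):
    # collect the peer connection's state transitions for later assertions
    states = {
        'iceConnectionState': [pc.iceConnectionState],
        'iceGatheringState': [pc.iceGatheringState],
        'signalingState': [pc.signalingState],
    }

    @pc.on('iceconnectionstatechange')
    def iceconnectionstatechange():
        states['iceConnectionState'].append(pc.iceConnectionState)

    @pc.on('icegatheringstatechange')
    def icegatheringstatechange():
        states['iceGatheringState'].append(pc.iceGatheringState)

    @pc.on('signalingstatechange')
    def signalingstatechange():
        states['signalingState'].append(pc.signalingState)

    return states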
def test_addTrack_closed(self):
    pc = RTCPeerConnection()
    run(pc.close())

    with self.assertRaises(InvalidStateError) as cm:
        pc.addTrack(AudioStreamTrack())
    self.assertEqual(str(cm.exception), 'RTCPeerConnection is closed')
async def offer(request):
    params = await request.json()
    print(params)
    offer = RTCSessionDescription(sdp=params["sdp"], type=params["type"])

    pc = RTCPeerConnection()
    client_id = client_manager.create_new_client(pc)

    # data = [True]
    # recog_worker_thread = threading.Thread(target=recog_worker, args=(client_manager.get_client(client_id), data,))
    # recog_worker_thread.start()

    @pc.on("iceconnectionstatechange")
    async def on_iceconnectionstatechange():
        print("ICE connection state is %s" % pc.iceConnectionState)
        if pc.iceConnectionState == "failed":
            # removing the client also closes its peer connection
            await client_manager.remove_client(client_id)

    faceregtrack = FacialRecognitionTrack(face_detect, client_manager.get_client(client_id))

    @pc.on("datachannel")
    def on_datachannel(channel):
        @channel.on("message")
        def on_message(message):
            client = client_manager.get_client(client_id)
            if "$register$" in message:  # this is some poor man's message handling :)
                new_subject = message.split("$register$")[1]
                print("Registering for subject: " + new_subject)
                client.toggle_register_mode(new_subject)
                channel.send("$register$")  # ack back
            elif "$recognize$" in message:
                print("Turned on recognition mode")
                client.toggle_recognition_mode()
                channel.send("$recognize$")  # ack back

    @pc.on("close")
    async def on_close(track):
        print("Connection with client: " + str(client_id) + " closed")
        # data[0] = False
        # recog_worker_thread.join()
        await client_manager.remove_client(client_id)

    @pc.on("track")
    def on_track(track):
        print("Track %s received" % track.kind)
        if track.kind == "video":
            faceregtrack.update(track)

    await pc.setRemoteDescription(offer)
    for t in pc.getTransceivers():
        if t.kind == "video":
            pc.addTrack(faceregtrack)

    answer = await pc.createAnswer()
    await pc.setLocalDescription(answer)

    print("Connection with client formed")
    return web.Response(
        content_type="application/json",
        text=json.dumps(
            {"sdp": pc.localDescription.sdp, "type": pc.localDescription.type}
        ),
    )
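# FacialRecognitionTrack and client_manager are project-specific and not shown
# above. A minimal sketch of the track side, assuming update() simply attaches
# the client's incoming video track and recv() pulls frames from it; the
# face_detect call is a placeholder for the real detector, not its actual API.
from aiortc import MediaStreamTrack


class FacialRecognitionTrack(MediaStreamTrack):  # hypothetical sketch
    kind = "video"

    def __init__(self, face_detect, client):
        super().__init__()
        self.face_detect = face_detect
        self.client = client
        self.source = None

    def update(self, track):
        # called from the "track" handler once the client's video track arrives
        self.source = track

    async def recv(self):
        frame = await self.source.recv()
        # a real implementation would run detection/recognition here, e.g.
        # self.face_detect(frame.to_ndarray(format="bgr24")), and annotate the frame
        return frame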
async def _process_offer(
    mode: WebRtcMode,
    pc: RTCPeerConnection,
    offer: RTCSessionDescription,
    player_factory: Optional[MediaPlayerFactory],
    in_recorder_factory: Optional[MediaRecorderFactory],
    out_recorder_factory: Optional[MediaRecorderFactory],
    video_transformer: Optional[VideoTransformerBase],
    video_receiver: Optional[VideoReceiver],
    async_transform: bool,
    callback: Callable[[Union[RTCSessionDescription, Exception]], None],
):
    try:
        player = None
        if player_factory:
            player = player_factory()

        in_recorder = None
        if in_recorder_factory:
            in_recorder = in_recorder_factory()

        out_recorder = None
        if out_recorder_factory:
            out_recorder = out_recorder_factory()

        @pc.on("iceconnectionstatechange")
        async def on_iceconnectionstatechange():
            logger.info("ICE connection state is %s", pc.iceConnectionState)
            if pc.iceConnectionState == "failed":
                await pc.close()

        if mode == WebRtcMode.SENDRECV:

            @pc.on("track")
            def on_track(input_track):
                logger.info("Track %s received", input_track.kind)

                output_track = None
                if input_track.kind == "audio":
                    if player and player.audio:
                        logger.info("Add player to audio track")
                        output_track = player.audio
                    else:
                        # Transforming audio is not supported yet.
                        output_track = input_track  # passthrough
                elif input_track.kind == "video":
                    if player and player.video:
                        logger.info("Add player to video track")
                        output_track = player.video
                    elif video_transformer:
                        VideoTrack = (
                            AsyncVideoTransformTrack
                            if async_transform
                            else VideoTransformTrack
                        )
                        logger.info(
                            "Add an input video track %s to "
                            "output track with video_transformer %s",
                            input_track,
                            VideoTrack,
                        )
                        local_video = VideoTrack(
                            track=input_track, video_transformer=video_transformer
                        )
                        logger.info("Add the video track with transformer to %s", pc)
                        output_track = local_video
                    else:
                        output_track = input_track

                if not output_track:
                    raise Exception(
                        "Neither a player nor a transformer is created. "
                        "Either factory must be set."
                    )

                pc.addTrack(output_track)
                if out_recorder:
                    logger.info("Track %s is added to out_recorder", output_track.kind)
                    out_recorder.addTrack(output_track)
                if in_recorder:
                    logger.info("Track %s is added to in_recorder", input_track.kind)
                    in_recorder.addTrack(input_track)

                @input_track.on("ended")
                async def on_ended():
                    logger.info("Track %s ended", input_track.kind)
                    if in_recorder:
                        await in_recorder.stop()
                    if out_recorder:
                        await out_recorder.stop()

        elif mode == WebRtcMode.SENDONLY:

            @pc.on("track")
            def on_track(input_track):
                logger.info("Track %s received", input_track.kind)

                if input_track.kind == "audio":
                    # Not supported yet
                    pass
                elif input_track.kind == "video":
                    if video_receiver:
                        logger.info(
                            "Add a track %s to receiver %s", input_track, video_receiver
                        )
                        video_receiver.addTrack(input_track)

                if in_recorder:
                    logger.info("Track %s is added to in_recorder", input_track.kind)
                    in_recorder.addTrack(input_track)

                @input_track.on("ended")
                async def on_ended():
                    logger.info("Track %s ended", input_track.kind)
                    if video_receiver:
                        video_receiver.stop()
                    if in_recorder:
                        await in_recorder.stop()

        await pc.setRemoteDescription(offer)

        if mode == WebRtcMode.RECVONLY:
            for t in pc.getTransceivers():
                output_track = None
                if t.kind == "audio":
                    if player and player.audio:
                        output_track = player.audio  # pc.addTrack(player.audio)
                elif t.kind == "video":
                    if player and player.video:
                        # pc.addTrack(player.video)
                        output_track = player.video

                if output_track:
                    pc.addTrack(output_track)
                    # NOTE: Recording is not supported in this mode
                    # because connecting player to recorder does not work somehow;
                    # it generates unplayable movie files.
        if video_receiver and video_receiver.hasTrack():
            video_receiver.start()

        if in_recorder:
            await in_recorder.start()
        if out_recorder:
            await out_recorder.start()

        answer = await pc.createAnswer()
        await pc.setLocalDescription(answer)

        callback(pc.localDescription)
    except Exception as e:
        logger.debug("Error occurred in process_offer")
        logger.debug(e)
        callback(e)
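# _process_offer reports its result through the callback argument instead of
# returning it. A sketch of how a caller might drive it from another thread,
# assuming the surrounding code owns a running asyncio loop; the function name
# process_offer_blocking and the queue-based callback are illustrative only.
import asyncio
import queue


def process_offer_blocking(loop, mode, pc, offer, **kwargs):
    """Schedule _process_offer on `loop` and block until the answer (or an error) arrives."""
    result_queue: "queue.Queue" = queue.Queue()

    def callback(result):
        result_queue.put(result)

    asyncio.run_coroutine_threadsafe(
        _process_offer(mode, pc, offer, callback=callback, **kwargs), loop
    )

    result = result_queue.get()
    if isinstance(result, Exception):
        raise result
    return result  # the RTCSessionDescription answer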
import aiohttp
import asyncio
import json

from aiortc import (
    RTCPeerConnection,
    RTCSessionDescription,
    RTCIceCandidate,
    VideoStreamTrack,
)
from aiortc.contrib.media import MediaRecorder
from aiortc.contrib.signaling import object_from_string, object_to_string

WEBSOCKET_URI = 'ws://localhost:8080/stream/webrtc'

pc = RTCPeerConnection()

# prepare media: addTrack() expects a track instance, not the class itself
pc.addTrack(VideoStreamTrack())


async def websocket_coroutine():
    session = aiohttp.ClientSession()
    async with session.ws_connect(WEBSOCKET_URI) as ws:
        print("websocket connected")

        request = json.dumps({"what": "call"})
        await ws.send_str(request)

        async for msg in ws:
            print(msg)
            if msg.type == aiohttp.WSMsgType.TEXT:
                params = json.loads(msg.data)
                print(params)
                if params["what"] == "offer":
                    print("offer received")
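# The snippet above stops right after the offer arrives. A sketch of what the
# offer handling could look like with aiortc, assuming the server puts the SDP
# in a JSON-encoded "data" field and expects an "answer" message of the same
# shape; those signaling field names are an assumption, not part of the source.
async def handle_offer(ws, params):
    # assumption: the SDP arrives as a JSON-encoded string in params["data"]
    data = json.loads(params["data"])
    offer = RTCSessionDescription(sdp=data["sdp"], type=data["type"])
    await pc.setRemoteDescription(offer)

    answer = await pc.createAnswer()
    await pc.setLocalDescription(answer)

    # reply in the same shape; the exact "what"/"data" fields are an assumption
    await ws.send_str(json.dumps({
        "what": "answer",
        "data": json.dumps({
            "sdp": pc.localDescription.sdp,
            "type": pc.localDescription.type,
        }),
    }))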