def test_audio_ended(self):
    track = AudioStreamTrack()

    recorder = MediaBlackhole()
    recorder.addTrack(track)
    run(recorder.start())
    run(asyncio.sleep(1))
    track.stop()
    run(asyncio.sleep(1))

    run(recorder.stop())
def test_audio_wav_ended(self):
    track = AudioStreamTrack()

    recorder = MediaRecorder(self.temporary_path('test.wav'))
    recorder.addTrack(track)
    run(recorder.start())
    run(asyncio.sleep(1))
    track.stop()
    run(asyncio.sleep(1))

    run(recorder.stop())
def test_audio_and_video(self):
    recorder = MediaBlackhole()
    recorder.addTrack(AudioStreamTrack())
    recorder.addTrack(VideoStreamTrack())
    run(recorder.start())
    run(asyncio.sleep(2))
    run(recorder.stop())
def test_audio_and_video(self):
    recorder = MediaRecorder(self.temporary_path('test.mp4'))
    recorder.addTrack(AudioStreamTrack())
    recorder.addTrack(VideoStreamTrack())
    run(recorder.start())
    run(asyncio.sleep(2))
    run(recorder.stop())
def test_audio_and_video(self):
    recorder = MediaRecorder(path='foo.mp4')
    recorder.addTrack(AudioStreamTrack())
    recorder.addTrack(VideoStreamTrack())
    recorder.start()
    run(asyncio.sleep(2))
    recorder.stop()
def test_audio_remove_track(self):
    recorder = MediaBlackhole()

    track = AudioStreamTrack()
    recorder.addTrack(track)
    recorder.start()
    run(asyncio.sleep(1))

    recorder.removeTrack(track)
    run(asyncio.sleep(1))

    recorder.stop()
def add_tracks():
    if player and player.audio:
        pc.addTrack(player.audio)
    else:
        pc.addTrack(AudioStreamTrack())

    if player and player.video:
        pc.addTrack(player.video)
    else:
        pc.addTrack(VideoImageTrack())
async def join_room(room):
    # fetch room parameters
    async with aiohttp.ClientSession() as session:
        async with session.post('https://appr.tc/join/' + room) as response:
            # we cannot use response.json() due to:
            # https://github.com/webrtc/apprtc/issues/562
            data = json.loads(await response.text())
            assert data['result'] == 'SUCCESS'
            params = data['params']

    # create peer connection
    pc = RTCPeerConnection()
    pc.addTrack(AudioStreamTrack())
    pc.addTrack(VideoImageTrack())

    @pc.on('track')
    def on_track(track):
        print('Track %s received' % track.kind)
        if track.kind == 'audio':
            task = asyncio.ensure_future(consume_audio(track))
        elif track.kind == 'video':
            task = asyncio.ensure_future(consume_video(track))

        @track.on('ended')
        def on_ended():
            print('Track %s ended' % track.kind)
            task.cancel()

    # connect to websocket and join
    signaling = Signaling()
    await signaling.connect(params)
    await signaling.send({
        'clientid': params['client_id'],
        'cmd': 'register',
        'roomid': params['room_id'],
    })

    if params['is_initiator'] == 'true':
        # send offer
        await pc.setLocalDescription(await pc.createOffer())
        await signaling.send_message(description_to_dict(pc.localDescription))
        print('Please point a browser at %s' % params['room_link'])

    # receive 60s of media
    try:
        await asyncio.wait_for(consume_signaling(signaling, pc, params), timeout=60)
    except asyncio.TimeoutError:
        pass

    # shutdown
    print('Shutting down')
    await signaling.send_message({'type': 'bye'})
    await pc.close()
async def join_room(room):
    consumers = []

    # fetch room parameters
    async with aiohttp.ClientSession() as session:
        async with session.post('https://appr.tc/join/' + room) as response:
            # we cannot use response.json() due to:
            # https://github.com/webrtc/apprtc/issues/562
            data = json.loads(await response.text())
            assert data['result'] == 'SUCCESS'
            params = data['params']

    # create peer connection
    pc = RTCPeerConnection()
    pc.addTrack(AudioStreamTrack())
    pc.addTrack(VideoStreamTrack())

    @pc.on('track')
    def on_track(track):
        if track.kind == 'audio':
            consumers.append(asyncio.ensure_future(consume_audio(track)))
        elif track.kind == 'video':
            consumers.append(asyncio.ensure_future(consume_video(track)))

    # connect to websocket and join
    signaling = Signaling()
    await signaling.connect(params)
    await signaling.send({
        'clientid': params['client_id'],
        'cmd': 'register',
        'roomid': params['room_id'],
    })

    if params['is_initiator'] == 'true':
        # send offer
        await pc.setLocalDescription(await pc.createOffer())
        await signaling.send_description(pc.localDescription)
        print('Please point a browser at %s' % params['room_link'])

    asyncio.ensure_future(consume_signaling(signaling, pc, params))

    # receive 60s of media
    await asyncio.sleep(60)

    # shutdown
    print('Shutting down')
    for c in consumers:
        c.cancel()
    await pc.close()
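# The join_room variants above hand received tracks to consume_audio() and
# consume_video(), which are not shown in this section. A minimal sketch
# (assumed, not necessarily the exact helpers from the example): coroutines
# that simply drain incoming frames until they are cancelled.
async def consume_audio(track):
    # pull audio frames as they arrive and discard them
    while True:
        await track.recv()


async def consume_video(track):
    # pull video frames as they arrive and discard them
    while True:
        await track.recv()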
def test_audio_wav(self):
    path = self.temporary_path('test.wav')

    recorder = MediaRecorder(path)
    recorder.addTrack(AudioStreamTrack())
    run(recorder.start())
    run(asyncio.sleep(2))
    run(recorder.stop())

    # check output media
    container = av.open(path, 'r')
    self.assertEqual(len(container.streams), 1)
    self.assertEqual(container.streams[0].codec.name, 'pcm_s16le')
    self.assertGreater(
        float(container.streams[0].duration * container.streams[0].time_base), 0)
def test_audio_mp3(self):
    path = self.temporary_path("test.mp3")

    recorder = MediaRecorder(path)
    recorder.addTrack(AudioStreamTrack())
    run(recorder.start())
    run(asyncio.sleep(2))
    run(recorder.stop())

    # check output media
    container = av.open(path, "r")
    self.assertEqual(len(container.streams), 1)
    self.assertIn(container.streams[0].codec.name, ("mp3", "mp3float"))
    self.assertGreater(
        float(container.streams[0].duration * container.streams[0].time_base), 0)
def test_audio_and_video(self):
    path = self.temporary_path('test.mp4')

    recorder = MediaRecorder(path)
    recorder.addTrack(AudioStreamTrack())
    recorder.addTrack(VideoStreamTrack())
    run(recorder.start())
    run(asyncio.sleep(2))
    run(recorder.stop())

    # check output media
    container = av.open(path, 'r')
    self.assertEqual(len(container.streams), 2)

    self.assertEqual(container.streams[0].codec.name, 'aac')
    self.assertGreater(
        float(container.streams[0].duration * container.streams[0].time_base), 0)

    self.assertEqual(container.streams[1].codec.name, 'h264')
    self.assertEqual(container.streams[1].width, 640)
    self.assertEqual(container.streams[1].height, 480)
    self.assertGreater(
        float(container.streams[1].duration * container.streams[1].time_base), 0)
def test_audio(self):
    track = AudioStreamTrack()
    self.assertEqual(track.kind, "audio")
    # track ids are expected to be UUID strings, hence 36 characters
    self.assertEqual(len(track.id), 36)
def test_audio_wav(self):
    recorder = MediaRecorder(self.temporary_path('test.wav'))
    recorder.addTrack(AudioStreamTrack())
    run(recorder.start())
    run(asyncio.sleep(2))
    run(recorder.stop())
async def join_room(room, play_from, record_to):
    # fetch room parameters
    async with aiohttp.ClientSession() as session:
        async with session.post('https://appr.tc/join/' + room) as response:
            # we cannot use response.json() due to:
            # https://github.com/webrtc/apprtc/issues/562
            data = json.loads(await response.text())
            assert data['result'] == 'SUCCESS'
            params = data['params']

    # create peer connection
    pc = RTCPeerConnection()

    # setup media source
    if play_from:
        player = MediaPlayer(play_from)
    else:
        player = None

    if player and player.audio:
        pc.addTrack(player.audio)
    else:
        pc.addTrack(AudioStreamTrack())

    if player and player.video:
        pc.addTrack(player.video)
    else:
        pc.addTrack(VideoImageTrack())

    # setup media sink
    if record_to:
        recorder = MediaRecorder(record_to)
    else:
        recorder = MediaBlackhole()

    @pc.on('track')
    def on_track(track):
        print('Track %s received' % track.kind)
        recorder.addTrack(track)

        @track.on('ended')
        def on_ended():
            print('Track %s ended' % track.kind)

    # connect to websocket and join
    signaling = Signaling()
    await signaling.connect(params)
    await signaling.send({
        'clientid': params['client_id'],
        'cmd': 'register',
        'roomid': params['room_id'],
    })

    if params['is_initiator'] == 'true':
        # send offer
        await pc.setLocalDescription(await pc.createOffer())
        await signaling.send_message(description_to_dict(pc.localDescription))
        print('Please point a browser at %s' % params['room_link'])

    def start_media():
        if player:
            player.start()
        recorder.start()

    # receive 60s of media
    try:
        await asyncio.wait_for(consume_signaling(signaling, pc, params, start_media),
                               timeout=60)
    except asyncio.TimeoutError:
        pass

    # shutdown
    print('Shutting down')
    if player:
        player.stop()
    recorder.stop()
    await signaling.send_message({'type': 'bye'})
    await pc.close()
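# description_to_dict() is referenced above but not shown in this section. A
# plausible minimal sketch (assumed): serialise an RTCSessionDescription into
# the JSON shape the AppRTC signalling channel expects.
def description_to_dict(description):
    return {
        'sdp': description.sdp,
        'type': description.type,
    }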
def test_audio_wav(self):
    recorder = MediaRecorder(path='foo.wav')
    recorder.addTrack(AudioStreamTrack())
    recorder.start()
    run(asyncio.sleep(2))
    recorder.stop()
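# The synchronous test methods above drive coroutines through a run() helper
# that is not shown in this section. A minimal sketch (assumed; the real test
# utility may differ):
import asyncio


def run(coro):
    # block until the coroutine completes on the default event loop
    return asyncio.get_event_loop().run_until_complete(coro)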