def test_audio_and_video(self):
    """Record an audio and a video track together into an MP4 file.

    Bug fix: ``MediaRecorder.start()`` and ``MediaRecorder.stop()`` are
    coroutines (the sibling tests await them via ``run(...)``); calling
    them without ``run()`` never actually started or stopped recording.
    """
    recorder = MediaRecorder(path='foo.mp4')
    recorder.addTrack(AudioStreamTrack())
    recorder.addTrack(VideoStreamTrack())
    run(recorder.start())
    # let a couple of seconds of media flow through the recorder
    run(asyncio.sleep(2))
    run(recorder.stop())
def test_audio_and_video(self):
    """Record an audio and a video track together into a temporary MP4 file."""
    recorder = MediaRecorder(self.temporary_path('test.mp4'))
    for source in (AudioStreamTrack(), VideoStreamTrack()):
        recorder.addTrack(source)
    run(recorder.start())
    run(asyncio.sleep(2))
    run(recorder.stop())
def test_audio_wav_ended(self):
    """The recorder keeps running when its source track ends mid-recording."""
    source = AudioStreamTrack()
    recorder = MediaRecorder(self.temporary_path("test.wav"))
    recorder.addTrack(source)
    run(recorder.start())
    run(asyncio.sleep(1))
    # end the source track while recording is still in progress
    source.stop()
    run(asyncio.sleep(1))
    run(recorder.stop())
def test_audio_wav(self):
    """Record audio to a WAV file and verify the written container."""
    output = self.temporary_path("test.wav")
    recorder = MediaRecorder(output)
    recorder.addTrack(AudioStreamTrack())
    run(recorder.start())
    run(asyncio.sleep(2))
    run(recorder.stop())

    # check output media
    container = av.open(output, "r")
    self.assertEqual(len(container.streams), 1)
    stream = container.streams[0]
    self.assertEqual(stream.codec.name, "pcm_s16le")
    self.assertGreater(float(stream.duration * stream.time_base), 0)
def test_audio_mp3(self):
    """Record audio to an MP3 file and verify the written container."""
    output = self.temporary_path('test.mp3')
    recorder = MediaRecorder(output)
    recorder.addTrack(AudioStreamTrack())
    run(recorder.start())
    run(asyncio.sleep(2))
    run(recorder.stop())

    # check output media
    container = av.open(output, 'r')
    self.assertEqual(len(container.streams), 1)
    stream = container.streams[0]
    # the decoder name varies between ffmpeg builds
    self.assertIn(stream.codec.name, ('mp3', 'mp3float'))
    self.assertGreater(float(stream.duration * stream.time_base), 0)
async def test_audio_mp3(self):
    """Record audio to an MP3 file and verify the written container (async variant)."""
    output = self.temporary_path("test.mp3")
    recorder = MediaRecorder(output)
    recorder.addTrack(AudioStreamTrack())
    await recorder.start()
    await asyncio.sleep(2)
    await recorder.stop()

    # check output media
    container = av.open(output, "r")
    self.assertEqual(len(container.streams), 1)
    stream = container.streams[0]
    # the decoder name varies between ffmpeg builds
    self.assertIn(stream.codec.name, ("mp3", "mp3float"))
    self.assertGreater(float(stream.duration * stream.time_base), 0)
def test_video_mp4(self):
    """Record video to an MP4 file and verify codec, duration and frame size."""
    output = self.temporary_path("test.mp4")
    recorder = MediaRecorder(output)
    recorder.addTrack(VideoStreamTrack())
    run(recorder.start())
    run(asyncio.sleep(2))
    run(recorder.stop())

    # check output media
    container = av.open(output, "r")
    self.assertEqual(len(container.streams), 1)
    stream = container.streams[0]
    self.assertEqual(stream.codec.name, "h264")
    self.assertGreater(float(stream.duration * stream.time_base), 0)
    self.assertEqual(stream.width, 640)
    self.assertEqual(stream.height, 480)
def test_audio_and_video(self):
    """Record audio + video into one MP4 file and verify both streams."""
    output = self.temporary_path('test.mp4')
    recorder = MediaRecorder(output)
    for source in (AudioStreamTrack(), VideoStreamTrack()):
        recorder.addTrack(source)
    run(recorder.start())
    run(asyncio.sleep(2))
    run(recorder.stop())

    # check output media
    container = av.open(output, 'r')
    self.assertEqual(len(container.streams), 2)

    audio_stream = container.streams[0]
    self.assertEqual(audio_stream.codec.name, 'aac')
    self.assertGreater(
        float(audio_stream.duration * audio_stream.time_base), 0)

    video_stream = container.streams[1]
    self.assertEqual(video_stream.codec.name, 'h264')
    self.assertEqual(video_stream.width, 640)
    self.assertEqual(video_stream.height, 480)
    self.assertGreater(
        float(video_stream.duration * video_stream.time_base), 0)
class WebRTCServer:
    """WebRTC endpoint that records incoming media to time-segmented MKV files.

    Wires together an RTCPeerConnection, a WebSocket signaling channel and a
    MediaRecorder; all of them are created lazily in accept().
    """

    def __init__(self):
        # All collaborators are created in accept(); None until then.
        self.pc = None          # RTCPeerConnection
        self.signaling = None   # WebSocketClient or WebSocketServer
        self.recorder = None    # MediaRecorder writing segmented output
        self.__video = None     # last received video track, for video_track()

    async def accept(self, port, segment_time, server=None, turn=None):
        """Create signaling and the peer connection, and register all handlers.

        port         -- port for the WebSocket signaling endpoint
        segment_time -- segment length forwarded to the recorder's options
        server       -- if given, connect as a WebSocket client to this host;
                        otherwise listen as a WebSocket server on `port`
        turn         -- optional extra RTCIceServer (e.g. a TURN server)
        """
        # Always use a public STUN server; append a TURN server if supplied.
        ice_servers = [RTCIceServer('stun:stun.l.google.com:19302')]
        if turn:
            ice_servers.append(turn)
        config = RTCConfiguration(ice_servers)
        self.pc = RTCPeerConnection(config)
        if server:
            self.signaling = WebSocketClient(server, port)
        else:
            self.signaling = WebSocketServer(port)
        # Options for the "segment" output format: cut a new file every
        # `segment_time`, restart timestamps in each segment, and expand
        # strftime() patterns in the output file name.
        recorder_options = {
            "segment_time": segment_time,
            "reset_timestamps": "1",
            "strftime": "1",
        }
        self.recorder = MediaRecorder('video/%Y-%m-%d_%H-%M-%S.mkv', format="segment", options=recorder_options)

        async def send_answer():
            logger.debug(f"Ice Gathering State: {self.pc.iceGatheringState}")
            # The answer is only sent once all ICE candidates have been gathered.
            if self.pc.iceGatheringState == 'complete':
                logger.debug("Answer sent")
                await self.signaling.send_data({
                    "sdp": self.pc.localDescription.sdp,
                    "type": self.pc.localDescription.type
                })
            else:
                # Candidates not gathered yet: retry once gathering finishes.
                self.pc.once("icegatheringstatechange", send_answer)

        @self.signaling.on_message
        async def on_message(message):
            # Standard offer/answer exchange: apply the remote offer, create
            # and set the local answer, then send it back over signaling.
            logger.debug(f"{message.get('type')} received")
            if message.get("type") == "offer":
                offer = RTCSessionDescription(sdp=message["sdp"], type=message["type"])
                await self.pc.setRemoteDescription(offer)
                answer = await self.pc.createAnswer()
                await self.pc.setLocalDescription(answer)
                await send_answer()

        @self.pc.on("connectionstatechange")
        async def on_connectionstatechange():
            # Recorder lifecycle follows the connection: start on "connected",
            # stop on "closed"; a failed connection is closed outright.
            if self.pc.connectionState == "failed":
                await self.pc.close()
            elif self.pc.connectionState == "connected":
                await self.recorder.start()
            elif self.pc.connectionState == "closed":
                await self.recorder.stop()
                logger.info("Recorder closed")
            logger.info(f"Connection state: {self.pc.connectionState}")

        @self.pc.on("track")
        async def on_track(track):
            if track.kind == "audio":
                self.recorder.addTrack(track)
            elif track.kind == "video":
                # Keep a reference so video_track() can hand out subscriptions;
                # record a relayed copy wrapped in FixedPtsTrack (presumably to
                # normalize presentation timestamps — confirm against its impl).
                self.__video = track
                self.recorder.addTrack(
                    FixedPtsTrack(MediaRelay().subscribe(track)))
            logger.info(f"Track {track.kind} added")

            @track.on("ended")
            async def on_ended():
                # NOTE(review): stopping the recorder when ANY single track
                # ends also stops recording the remaining tracks — confirm
                # this is intended.
                await self.recorder.stop()
                logger.info(f"Track {track.kind} ended")

    async def close_connection(self):
        """Stop recording, then tear down signaling and the peer connection."""
        await self.recorder.stop()
        await self.signaling.close()
        await self.pc.close()

    async def video_track(self):
        """Return a fresh relay subscription to the received video track, or None."""
        if self.__video:
            return MediaRelay().subscribe(self.__video)
        else:
            return None
def test_video_png(self):
    """Record a video track into a numbered sequence of PNG images."""
    png_recorder = MediaRecorder(self.temporary_path('test-%3d.png'))
    png_recorder.addTrack(VideoStreamTrack())
    # coroutines are lazy, so building them up front and running them in
    # order is equivalent to running each call inline
    for step in (png_recorder.start(), asyncio.sleep(2), png_recorder.stop()):
        run(step)
def test_audio_wav(self):
    """Record an audio track into a WAV file without errors."""
    wav_recorder = MediaRecorder(self.temporary_path('test.wav'))
    wav_recorder.addTrack(AudioStreamTrack())
    # coroutines are lazy, so building them up front and running them in
    # order is equivalent to running each call inline
    for step in (wav_recorder.start(), asyncio.sleep(2), wav_recorder.stop()):
        run(step)
def test_video_jpg(self):
    """Record a video track into a numbered sequence of JPEG images.

    Bug fix: ``MediaRecorder.start()`` and ``MediaRecorder.stop()`` are
    coroutines (the sibling tests await them via ``run(...)``); calling
    them without ``run()`` never actually started or stopped recording.
    """
    recorder = MediaRecorder(path='foo-%3d.jpg')
    recorder.addTrack(VideoStreamTrack())
    run(recorder.start())
    # let a couple of seconds of media flow through the recorder
    run(asyncio.sleep(2))
    run(recorder.stop())
def test_audio_wav(self):
    """Record an audio track into a WAV file.

    Bug fix: ``MediaRecorder.start()`` and ``MediaRecorder.stop()`` are
    coroutines (the sibling tests await them via ``run(...)``); calling
    them without ``run()`` never actually started or stopped recording.
    """
    recorder = MediaRecorder(path='foo.wav')
    recorder.addTrack(AudioStreamTrack())
    run(recorder.start())
    # let a couple of seconds of media flow through the recorder
    run(asyncio.sleep(2))
    run(recorder.stop())
format='lavfi', options=audio_options).audio if platform.system() == "Windows": video_track = MediaPlayer("video=HP TrueVision HD Camera", format="dshow", options=video_options).video else: video_track = MediaPlayer("/dev/video0", format="v4l2", options=video_options).video return audio_track, video_track if __name__ == "__main__": if len(sys.argv) > 1: trans_key = sys.argv[1] else: print('The following argument is required: translation-key') sys.exit() relay = MediaRelay() recorder = MediaRecorder(f'rtmp://a.rtmp.youtube.com/live2/{trans_key}', format='flv') audio, video = get_tracks() recorder.addTrack(audio) recorder.addTrack(relay.subscribe(video)) loop = asyncio.get_event_loop() loop.create_task(recorder.start()) loop.run_forever()