Code example #1
File: server.py  Project: notedit/rtc-to-rtmp
async def offer(request):
    params = await request.json()
    offer = RTCSessionDescription(
        sdp=params['sdp'],
        type=params['type'])

    pc = RTCPeerConnection()
    pc._consumers = []
    pcs.append(pc)

    @pc.on('track')
    def on_track(track):
        if track.kind == 'audio':
            pc._consumers.append(asyncio.ensure_future(consume_audio(track)))
        if track.kind == 'video':
            pc._consumers.append(asyncio.ensure_future(consume_video(track)))


    await pc.setRemoteDescription(offer)
    answer = await pc.createAnswer()
    await pc.setLocalDescription(answer)

    return web.Response(
        content_type='application/json',
        text=json.dumps({
            'sdp': pc.localDescription.sdp,
            'type': pc.localDescription.type
        }))
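
Every example on this page hands incoming tracks to consume_audio / consume_video helpers defined elsewhere in the respective server.py files and not shown in these excerpts. As an assumption about what such a helper does, here is a minimal consumer sketch built only on aiortc's MediaStreamTrack.recv():

# Hypothetical sketch of an omitted helper; the real consume_video in each
# project may forward frames to RTMP, a recorder, or an analysis pipeline.
async def consume_video(track):
    while True:
        # recv() yields the next decoded frame and raises aiortc's
        # MediaStreamError once the remote track ends.
        frame = await track.recv()
        # ... do something with frame ...
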
Code example #2
async def offer(request):
    print("Offer triggerred ...")
    params = await request.json()
    offer = RTCSessionDescription(sdp=params['sdp'], type=params['type'])

    local_video = VideoReadTrack(rtsp_path)

    pc = RTCPeerConnection()
    pc._consumers = []
    pcs.append(pc)

    @pc.on('datachannel')
    def on_datachannel(channel):
        print("On datachannel from client ...")

        @channel.on('message')
        def on_message(message):
            channel.send('pong')

    """
    # Original addTrack handler, called when the client sends a media stream
    @pc.on('track')
    def on_track(track):
        print("Video on track from client ...")
        if track.kind == 'video':
            print("Adding local track to client ...")
            pc.addTrack(local_video)
            print("Consuming local video ...")
            pc._consumers.append(
                asyncio.ensure_future(consume_video(track, local_video)))

    """

    # Helper that starts rolling video to the client
    def roll_video():
        print("Adding local track to client ...")
        pc.addTrack(local_video)
        print("Consuming local video ...")
        pc._consumers.append(asyncio.ensure_future(consume_video(local_video)))

    await pc.setRemoteDescription(offer)
    answer = await pc.createAnswer()
    await pc.setLocalDescription(answer)

    def handler(signum, frame):
        roll_video()

    # Call roll_video 5 seconds from now, using a SIGALRM signal.
    signal.signal(signal.SIGALRM, handler)
    signal.setitimer(signal.ITIMER_REAL, 5)

    # Alternative: call roll_video 5 seconds from now, using a Timer.
    # Timer(5.0, roll_video).start()

    return web.Response(content_type='application/json',
                        text=json.dumps({
                            'sdp': pc.localDescription.sdp,
                            'type': pc.localDescription.type
                        }))
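
Example #2 schedules the delayed roll_video call with a SIGALRM signal (or, in the commented-out variant, a Timer). Because the handler already runs inside aiohttp's asyncio event loop, the same 5-second delay could also be expressed with the event loop itself; the following is only a sketch of that alternative, not part of the original project:

# Alternative sketch (not part of the original project): schedule roll_video
# on the running event loop instead of using SIGALRM or a Timer.
loop = asyncio.get_event_loop()
loop.call_later(5, roll_video)
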
Code example #3
async def offer(request):
    params = await request.json()
    offer = RTCSessionDescription(sdp=params["sdp"], type=params["type"])

    pc = RTCPeerConnection()
    pc._consumers = []
    pc._datachannel = None
    pcs.append(pc)

    # prepare local media
    local_audio = AudioFileTrack(path=os.path.join(ROOT, "demo-instruct.wav"))
    # local_video = VideoTransformTrack(transform=params["video_transform"])

    @pc.on("datachannel")
    def on_datachannel(channel):
        pc._datachannel = channel

        @channel.on("message")
        def on_message(message):
            channel.send("pong")

    @pc.on("track")
    def on_track(track):
        print("RECEIVE TRACK", track)
        if track.kind == "audio":
            # pc.addTrack(local_audio)
            pc._consumers.append(
                asyncio.ensure_future(consume_audio(track, pc)))
        elif track.kind == "video":
            # pc.addTrack(local_video)
            # pc._consumers.append(
            #     asyncio.ensure_future(consume_video(track, local_video))
            # )
            pass

    await pc.setRemoteDescription(offer)
    answer = await pc.createAnswer()
    await pc.setLocalDescription(answer)

    return web.Response(
        content_type="application/json",
        text=json.dumps({
            "sdp": pc.localDescription.sdp,
            "type": pc.localDescription.type
        }),
    )
Code example #4
async def offer(request):
    params = await request.json()
    offer = RTCSessionDescription(sdp=params['sdp'], type=params['type'])

    pc = RTCPeerConnection()
    pc._consumers = []
    pcs.append(pc)

    width = 1920
    height = 1080
    local_video = VideoTransformTrack(container=container,
                                      video_stream=video_stream,
                                      width=width,
                                      height=height)
    pc.addTrack(local_video)

    @pc.on('datachannel')
    def on_datachannel(channel):
        @channel.on('message')
        def on_message(message):
            if message == "connect":
                vs.queue = []  # clear any queued data
            elif message == "ping":
                channel.send('pong')
            elif message == "takeoff":
                drone.takeoff()
            elif message == "down":
                drone.down(50)
            elif message == "land":
                drone.land()

    await pc.setRemoteDescription(offer)
    answer = await pc.createAnswer()
    await pc.setLocalDescription(answer)

    return web.Response(content_type='application/json',
                        text=json.dumps({
                            'sdp': pc.localDescription.sdp,
                            'type': pc.localDescription.type
                        }))
Code example #5
async def offer(request):
    params = await request.json()
    offer = RTCSessionDescription(
        sdp=params['sdp'],
        type=params['type'])

    pc = RTCPeerConnection()
    pc._consumers = []
    pcs.append(pc)

    # prepare local media
    local_audio = AudioFileTrack(path=os.path.join(ROOT, 'demo-instruct.wav'))
    local_video = VideoTransformTrack(transform=params['video_transform'])

    @pc.on('datachannel')
    def on_datachannel(channel):
        @channel.on('message')
        def on_message(message):
            channel.send('pong')

    @pc.on('track')
    def on_track(track):
        if track.kind == 'audio':
            pc.addTrack(local_audio)
            pc._consumers.append(asyncio.ensure_future(consume_audio(track)))
        elif track.kind == 'video':
            pc.addTrack(local_video)
            pc._consumers.append(asyncio.ensure_future(consume_video(track, local_video)))

    await pc.setRemoteDescription(offer)
    answer = await pc.createAnswer()
    await pc.setLocalDescription(answer)

    return web.Response(
        content_type='application/json',
        text=json.dumps({
            'sdp': pc.localDescription.sdp,
            'type': pc.localDescription.type
        }))
Code example #6
File: server.py  Project: TsvetkovAV/WEBrtc
async def offer(request):
    global user_uid  # for session synchronization
    offer = await request.json()
    #print('OFFER',offer)
    offer = RTCSessionDescription(sdp=offer['sdp'], type=offer['type'])

    pc = RTCPeerConnection()
    pc._consumers = []
    user_uid += 1
    #pc.uuid = str(uuid.uuid1())
    pc.uuid = str(user_uid)
    pcs.append(pc)
    #print('RTCPeerConnection',dir(RTCPeerConnection))
    # prepare local media
    local_audio = AudioFileTrack(path=os.path.join(ROOT, 'demo-instruct.wav'))
    local_video = VideoDummyTrack(process=False, pc=pc)
    pc.lvideo = local_video

    #local_video.process = False
    #local_video.pc = pc
    @pc.on('datachannel')
    def on_datachannel(channel):
        #print("on_datachannel",channel)

        local_video.datachannel = channel
        channel._channel_status_ = 0
        channel.counter = 0

        @channel.on('message')
        def on_message(message):
            global user_uid  # for session synchronization
            # start getting a photo
            # here we can use the message from the client browser
            if channel._channel_status_ == 0:
                print('msg', message)
                if message == 'process':
                    local_video.process = not local_video.process  # switch video process mode
                elif message == 'algorithm':

                    local_video.lensometer.set_algorithm()
                elif message[0:7] == 'chequer':
                    # distortion
                    print('chequer')
                    local_video.dset_open(local_video.CALIBR, message[8:])
                elif message[0:9] == 'dset-stop':
                    #print('STOP DSET')
                    local_video.dset.ask_close_dset()
                    #print('STOP DSET DONE',local_video.dset.is_open_dset())
                elif message[0:10] == 'dsetcancel':
                    local_video.dset_eof(force=True)
                elif message[0:4] == 'dset':
                    #print('SAVE DSET ',message[5:])
                    local_video.dset_open(local_video.CALC, message[8:])
                    #print('SAVE DSET ',message[5:],local_video.dset.is_open_dset())
                elif message[0:7] == 'pixsize':  # from calibr.js
                    # monitor pixel size
                    if pc.uuid is None:
                        user_uid += 1
                        #pc.uuid = str(uuid.uuid1())
                        pc.uuid = str(user_uid)
                    monpix = float(message[8:])
                    pcs_uid[pc.uuid] = (pc, monpix)  # set or update
                    if local_video.lensometer is not None:
                        # set for the desktop session
                        local_video.lensometer.set_mon_pix(monpix)
                    channel.send('userId ' + pc.uuid)
                elif message[0:6] == 'userId':
                    # look up the stored monitor pixel size for this user ID
                    uid = message[7:]
                    try:
                        _, pixsize = pcs_uid[uid]
                        channel.send('pixsize=' + str(pixsize))
                        print('user ID', uid, 'pixsize', pixsize)
                        if local_video.lensometer is not None:
                            local_video.lensometer.set_mon_pix(pixsize)

                    except KeyError:
                        channel.send('session expired ' + uid)
                        print('user session expired', uid)
                elif message[0:5] == 'photo':
                    channel._channel_status_ = 1
                    channel._photo_ = Photo(message[6:])

                elif message[0:4] == local_video.CALC:
                    channel._photo_ = Photo(message[5:], is_calc=True)
                    channel._channel_status_ = 1
                elif message[0:4] == local_video.CALIBR:
                    channel._photo_ = Photo(message[5:], is_dist=True)
                    channel._channel_status_ = 1

                elif message == 'Ping':
                    channel.send('pong frame ' + str(local_video.counter))
            else:  # photo mode
                if message[0:8] == 'photoeof':
                    #print('stop send photo size=',channel._photo_size_)
                    channel._channel_status_ = 0
                    if channel._photo_.photo_eof():
                        # the photo contains EXIF data
                        if local_video.lensometer is not None:
                            # set focus and pixel size
                            (_, focus, focus35, exifImageWidth,
                             exifImageHeight) = channel._photo_.get_params()
                            if focus is not None:
                                local_video.lensometer.set_cam_focus(focus)
                            if focus35 is not None and focus is not None:
                                psz = local_video.lensometer.set_cam_psize(
                                    focus, focus35, exifImageWidth,
                                    exifImageHeight)
                                channel.send('Cam pixel size=' + str(psz))
                            if channel._photo_.is_calc():
                                frame = channel._photo_.get_frame()
                                local_video.dset_add(local_video.CALC, frame)

                            elif channel._photo_.is_dist():
                                frame = channel._photo_.get_frame()
                                local_video.dset_add(local_video.CALIBR, frame)

                    # drop photo
                    channel._photo_ = None

                else:
                    # photo data
                    channel._photo_.add_data(message)

    @pc.on('track')
    def on_track(track):
        if track.kind == 'audio':
            pc.addTrack(local_audio)
            pc._consumers.append(asyncio.ensure_future(consume_audio(track)))
        elif track.kind == 'video':
            #print('ADD VIDEO TRACK')
            local_video.track = track
            pc.addTrack(local_video)
            pc._consumers.append(
                asyncio.ensure_future(consume_video(track, local_video)))

    await pc.setRemoteDescription(offer)
    answer = await pc.createAnswer()
    await pc.setLocalDescription(answer)

    return web.Response(content_type='application/json',
                        text=json.dumps({
                            'sdp': pc.localDescription.sdp,
                            'type': pc.localDescription.type
                        }))
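
All of the offer handlers above follow the aiortc server examples and assume they are registered on an aiohttp application elsewhere in their server.py. Below is a minimal sketch of that wiring; the route path and port are assumptions, not taken from any of the projects:

# Hypothetical wiring for an offer handler like the ones above; the actual
# route and port used by each project are not shown in these excerpts.
from aiohttp import web

app = web.Application()
app.router.add_post('/offer', offer)
web.run_app(app, port=8080)
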