Example #1
    def create_elements(self):
        self._create_initial_multiqueue()

        pipeline_string = ''
        if config.enable_video():
            # format=RGB removes the alpha channel which can crash autovideosink
            video_caps = 'video/x-raw,format=RGB,width=%d,height=%d,pixel-aspect-ratio=1/1' % \
                (self.props['width'], self.props['height'])

            pipeline_string += ('intervideosrc name=intervideosrc ! videoconvert ! videoscale ! ' +
                                video_caps + ' ! queue ! autovideosink')
        if config.enable_audio():
            pipeline_string += ' interaudiosrc name=interaudiosrc ! queue ! autoaudiosink'

        if not self.create_pipeline_from_string(pipeline_string):
            return False

        if config.enable_video():
            self.intervideosrc = self.pipeline.get_by_name('intervideosrc')
            self.intervideosrc_src_pad = self.intervideosrc.get_static_pad('src')
            self.create_intervideosink_and_connections()

        if config.enable_audio():
            self.interaudiosrc = self.pipeline.get_by_name('interaudiosrc')
            self.interaudiosrc_src_pad = self.interaudiosrc.get_static_pad('src')
            self.create_interaudiosink_and_connections()
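For reference, with hypothetical props of width=640 and height=360 and both audio and video enabled, the string handed to create_pipeline_from_string() above works out to:

    pipeline_string = (
        'intervideosrc name=intervideosrc ! videoconvert ! videoscale ! '
        'video/x-raw,format=RGB,width=640,height=360,pixel-aspect-ratio=1/1 '
        '! queue ! autovideosink'
        ' interaudiosrc name=interaudiosrc ! queue ! autoaudiosink')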
Example #2
    def create_elements(self):
        pipeline_string = 'mp4mux name=mux ! filesink name=sink'

        if config.enable_video():
            pipeline_string += ' ' + self._video_pipeline_start() + \
                'x264enc name=video_encoder ! queue ! mux.'

        if config.enable_audio():
            audio_pipeline_string = (
                'interaudiosrc name=interaudiosrc ! '
                'audioconvert ! audioresample ! avenc_aac name=audio_encoder')

            # A larger audio queue gives the slower video encoder time to catch up
            audio_pipeline_string += f' ! queue max-size-bytes={10*(3 ** 20)} ! mux.'

            pipeline_string = pipeline_string + ' ' + audio_pipeline_string

        self.create_pipeline_from_string(pipeline_string)
        self.logger.debug('Writing to the file ' + self.props['location'])
        sink = self.pipeline.get_by_name('sink')
        sink.set_property('location', self.props['location'])

        if config.enable_video():
            self.video_encoder = self.pipeline.get_by_name('video_encoder')

        if config.enable_audio():
            self.audio_encoder = self.pipeline.get_by_name('audio_encoder')
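The encoder handles saved at the end are not used in this snippet; a plausible reason to keep them is runtime tuning elsewhere in the class, e.g. (hypothetical values):

    self.video_encoder.set_property('bitrate', 2048)    # x264enc bitrate is in kbit/s
    self.audio_encoder.set_property('bitrate', 128000)  # avenc_aac bitrate is in bit/s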
Example #3
File: tcp.py Project: ottes/brave
    def create_elements(self):
        '''
        Create the elements needed whether this is audio, video, or both
        '''
        mux_type = 'oggmux' if self.container == 'ogg' else 'mpegtsmux'
        video_encoder_type = 'theoraenc' if self.container == 'ogg' else 'x264enc'
        audio_encoder_type = 'vorbisenc' if self.container == 'ogg' else 'avenc_ac3'

        pipeline_string = 'queue name=queue ! tcpserversink name=sink'

        # We only want a mux if there's video:
        has_mux = config.enable_video()
        if has_mux:
            pipeline_string = f'{mux_type} name=mux ! {pipeline_string}'

        if config.enable_video():
            pipeline_string += ' ' + self._video_pipeline_start() + \
                video_encoder_type + ' name=encoder ! queue ! mux.'

        if config.enable_audio():
            audio_bitrate = self.audio_bitrate

            # Fixing the caps with default_audio_caps() stops them changing mid-stream and interrupting the encoder.
            audio_pipeline_string = ('interaudiosrc name=interaudiosrc ! ' + config.default_audio_caps() +
                                     ' ! audioconvert ! audioresample ! %s name=audio_encoder bitrate=%d') % \
                (audio_encoder_type, audio_bitrate)
            if has_mux:
                audio_pipeline_string += ' ! queue ! mux.'
            else:
                audio_pipeline_string += ' ! queue.'

            pipeline_string = pipeline_string + ' ' + audio_pipeline_string

        self.create_pipeline_from_string(pipeline_string)

        if config.enable_video():
            if self.container == 'mpeg':
                # Testing has shown 60 (i.e. once every 2s at 30 fps) works best
                self.pipeline.get_by_name('encoder').set_property(
                    'key-int-max', 60)

            # tune=zerolatency reduces the delay of TCP output
            # self.pipeline.get_by_name('encoder').set_property('tune', 'zerolatency')

        if not hasattr(self, 'host'):
            self.host = socket.gethostbyname(socket.gethostname())
        if not hasattr(self, 'port'):
            self.port = self._get_next_available_port()

        sink = self.pipeline.get_by_name('sink')
        sink.set_property('port', int(self.port))
        sink.set_property('host', self.host)
        sink.set_property('recover-policy', 'keyframe')
        sink.set_property('sync', False)

        self.logger.info('TCP output created at tcp://%s:%s' %
                         (self.host, self.port))
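_get_next_available_port() is defined elsewhere; a minimal sketch of what it might do (the real implementation may differ) is to bind to port 0 so the OS picks a free port:

    import socket

    def _get_next_available_port(self):
        # Binding to port 0 asks the OS for any currently free TCP port.
        with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
            s.bind(('', 0))
            return s.getsockname()[1]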
Example #4
    def create_elements(self):
        '''
        Create the elements needed whether this is audio, video, or both
        '''
        mux_type = 'oggmux' if self.props['container'] == 'ogg' else 'mpegtsmux'
        video_encoder_type = 'theoraenc' if self.props['container'] == 'ogg' else 'x264enc'
        audio_encoder_type = 'vorbisenc' if self.props['container'] == 'ogg' else 'avenc_ac3'

        pipeline_string = 'queue leaky=2 name=queue ! tcpserversink name=sink'

        # We only want a mux if there's video:
        has_mux = config.enable_video()
        if has_mux:
            pipeline_string = f'{mux_type} name=mux ! {pipeline_string}'

        if config.enable_video():
            pipeline_string += ' ' + self._video_pipeline_start() + video_encoder_type + ' name=encoder ! queue ! mux.'

        if config.enable_audio():
            audio_bitrate = self.props['audio_bitrate']

            audio_pipeline_string = ('interaudiosrc name=interaudiosrc ! audioconvert ! '
                                     'audioresample ! %s name=audio_encoder bitrate=%d') % \
                (audio_encoder_type, audio_bitrate)
            if has_mux:
                audio_pipeline_string += f' ! queue max-size-bytes={10*(2 ** 20)} ! mux.'
            else:
                audio_pipeline_string += ' ! queue.'

            pipeline_string = pipeline_string + ' ' + audio_pipeline_string

        self.create_pipeline_from_string(pipeline_string)

        if config.enable_video():
            if self.props['container'] == 'mpeg':
                self.pipeline.get_by_name('encoder').set_property('key-int-max', 120)  # 120 = 4s at 30fps; TODO: don't hard-code

            # tune=zerolatency reduces the delay of TCP output
            # encoder.set_property('tune', 'zerolatency')

        if 'host' not in self.props:
            self.props['host'] = socket.gethostbyname(socket.gethostname())
        if 'port' not in self.props:
            self.props['port'] = self._get_next_available_port()

        sink = self.pipeline.get_by_name('sink')
        sink.set_property('port', int(self.props['port']))
        sink.set_property('host', self.props['host'])
        sink.set_property('recover-policy', 'keyframe')
        sink.set_property('sync', False)

        self.logger.info('TCP output created at tcp://%s:%s' % (self.props['host'], self.props['port']))
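A quick way to confirm the sink is serving is to connect and read a few bytes (host and port values here are hypothetical):

    import socket

    with socket.create_connection(('127.0.0.1', 7000), timeout=5) as client:
        data = client.recv(4096)  # first bytes of the MPEG-TS/Ogg stream
        assert data, 'no bytes received from tcpserversink'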
Example #5
    def create_elements(self):
        '''
        Create the initial elements needed for this mixer.
        '''
        pipeline_string = ''
        if config.enable_video():
            # To work reliably we have a default source (videotestsrc)
            # It has the lowest permitted zorder (0) so that other things will appear on top.
            # After the compositor, the format is changed from RGBA to RGBx (i.e. the alpha channel is removed)
            # This is done (a) for overlay effects to work, and (b) for all outputs to work.
            pipeline_string += (
                'videotestsrc is-live=true name=videotestsrc ! videoconvert ! videoscale ! '
                'capsfilter name=capsfilter ! compositor name=video_mixer ! '
                'video/x-raw,format=RGBA ! videoconvert ! queue name=video_mixer_output_queue ! '
                'capsfilter name=end_capsfilter caps="video/x-raw,format=RGBx" ! videoconvert ! '
                'tee name=final_video_tee allow-not-linked=true')
        if config.enable_audio():
            pipeline_string += \
                f' audiotestsrc is-live=true volume=0 ! {config.default_audio_caps()} ! ' + \
                'queue name=audio_queue ! audiomixer name=audio_mixer ! ' + \
                'queue name=audio_mixer_output_queue ! tee name=final_audio_tee allow-not-linked=true'

        if not self.create_pipeline_from_string(pipeline_string):
            return False

        if config.enable_video():
            self.end_capsfilter = self.pipeline.get_by_name('end_capsfilter')
            self.videotestsrc = self.pipeline.get_by_name('videotestsrc')
            self.mixer_element['video'] = self.pipeline.get_by_name(
                'video_mixer')
            self.video_mixer_output_queue = self.pipeline.get_by_name(
                'video_mixer_output_queue')
            self.final_video_tee = self.pipeline.get_by_name('final_video_tee')
            self.capsfilter = self.pipeline.get_by_name('capsfilter')
            self._set_dimensions()
            self.handle_updated_props()
            self.session().overlays.ensure_overlays_are_correctly_connected(
                self)

        if config.enable_audio():
            self.mixer_element['audio'] = self.pipeline.get_by_name(
                'audio_mixer')
            self.audio_mixer_output_queue = self.pipeline.get_by_name(
                'audio_mixer_output_queue')
            self.final_audio_tee = self.pipeline.get_by_name('final_audio_tee')

        return True
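config.default_audio_caps() is defined elsewhere; judging by how it is spliced into the pipeline it returns a caps string, plausibly along these lines (hypothetical values):

    def default_audio_caps():
        return 'audio/x-raw,channels=2,layout=interleaved,rate=48000,format=S16LE'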
Example #6
File: kvs.py Project: swinsey/brave
    def create_elements(self):
        if not config.enable_video():
            return

        access_key = os.environ.get('AWS_ACCESS_KEY_ID')
        secret_key = os.environ.get('AWS_SECRET_ACCESS_KEY')
        if not access_key:
            raise brave.exceptions.InvalidConfiguration('Missing AWS_ACCESS_KEY_ID environment variable')
        if not secret_key:
            raise brave.exceptions.InvalidConfiguration('Missing AWS_SECRET_ACCESS_KEY environment variable')

        self._create_initial_multiqueue()

        video_caps = 'video/x-raw,format=I420,width=%d,height=%d,pixel-aspect-ratio=1/1,framerate=30/1' % \
            (self.props['width'], self.props['height'])

        pipeline_string = ('intervideosrc name=intervideosrc ! videoconvert ! videoscale ! ' +
                           video_caps +
                           ' ! x264enc bframes=0 key-int-max=45 bitrate=500 ! '
                           'video/x-h264,stream-format=avc,alignment=au ! '
                           'kvssink name=kvssink')

        if not self.create_pipeline_from_string(pipeline_string):
            self.logger.error('Cannot create pipeline from string: %s' % pipeline_string)
            return False

        kvssink = self.pipeline.get_by_name('kvssink')
        kvssink.set_property('access-key', access_key)
        kvssink.set_property('secret-key', secret_key)
        kvssink.set_property('stream-name', self.props['stream_name'])

        self.intervideosrc = self.pipeline.get_by_name('intervideosrc')
        self.intervideosrc_src_pad = self.intervideosrc.get_static_pad('src')
        self.create_intervideosink_and_connections()
Example #7
    def create_elements(self):
        # Playbin or playbin3 does all the hard work.
        # Playbin3 works better for continuous playback.
        # But it does not handle RTMP inputs as well.
        # See http://gstreamer-devel.966125.n4.nabble.com/Behavior-differences-between-
        #   decodebin3-and-decodebin-and-vtdec-hw-not-working-on-OSX-td4680895.html
        # The URI is first resolved through streamlink, where possible.
        self.suri = ''
        try:
            streams = streamlink.streams(self.uri)
            self.stream = self.uri
            tstream = streams['best']
            self.suri = tstream.url
        except Exception:
            pass

        is_rtmp = self.suri.startswith('rtmp')
        playbin_element = 'playbin' if is_rtmp else 'playbin3'
        self.create_pipeline_from_string(playbin_element)
        self.playsink = self.pipeline.get_by_name('playsink')
        self.playbin = self.playsink.parent
        self.playbin.set_property('uri', self.suri)
        self.playbin.connect('about-to-finish', self.__on_about_to_finish)

        if config.enable_video():
            self.create_video_elements()
        else:
            self._create_fake_video()

        if config.enable_audio():
            self.create_audio_elements()
        else:
            self._create_fake_audio()
Example #8
    def _create_pipeline(self):
        '''
        Create the pipeline. This will not have the webrtcbin element in it.
        That is added when a user tries to connect, via new_peer_request().
        Instead, a 'fakesink' destination allows the pipeline to work even with 0 clients.
        '''
        pipeline_string = ''
        if config.enable_video():
            # format=RGB is required to remove alpha channels which can upset the encoder
            video_caps = 'application/x-rtp,format=RGB,media=video,encoding-name=VP8,payload=97,width=%d,height=%d' % \
                (self.props['width'], self.props['height'])

            # vp8enc has 'target-bitrate' which can be reduced from its default (256000)
            # Setting keyframe-max-dist lower reduces impact of packet loss on dodgy networks
            pipeline_string += (
                'intervideosrc name=intervideosrc ! queue ! videoconvert ! videoscale ! '
                'vp8enc deadline=1 keyframe-max-dist=30 ! rtpvp8pay ! ' +
                video_caps +
                ' ! tee name=webrtc_video_tee webrtc_video_tee. ! fakesink')
        if config.enable_audio():
            # bandwidth=superwideband allows the encoder to focus a little more on the important audio
            # (Basic testing showed 'wideband' to be quite poor)
            pipeline_string += (
                ' interaudiosrc name=interaudiosrc ! audioconvert ! level message=true ! '
                'audioresample name=webrtc-audioresample ! opusenc bandwidth=superwideband ! '
                'rtpopuspay ! application/x-rtp,media=audio,encoding-name=OPUS,payload=96 ! '
                'tee name=webrtc_audio_tee webrtc_audio_tee. ! fakesink')

        return self.create_pipeline_from_string(pipeline_string)
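The comment above mentions vp8enc's default target-bitrate of 256000 bit/s. Lowering it at runtime would require naming the element in the pipeline string; a hypothetical sketch:

    # Assumes 'vp8enc ... name=video_encoder' was used in the pipeline string.
    encoder = self.pipeline.get_by_name('video_encoder')
    encoder.set_property('target-bitrate', 128000)  # halve the 256000 default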
Example #9
    def create_elements(self):
        if not config.enable_video():
            return

        access_key = os.environ.get('AWS_ACCESS_KEY_ID')
        secret_key = os.environ.get('AWS_SECRET_ACCESS_KEY')
        if not access_key:
            raise brave.exceptions.InvalidConfiguration(
                'Missing AWS_ACCESS_KEY_ID environment variable')
        if not secret_key:
            raise brave.exceptions.InvalidConfiguration(
                'Missing AWS_SECRET_ACCESS_KEY environment variable')

        pipeline_string = (
            self._video_pipeline_start() +
            'x264enc bframes=0 key-int-max=45 bitrate=500 ! '
            'video/x-h264,stream-format=avc,alignment=au ! kvssink name=kvssink'
        )

        self.create_pipeline_from_string(pipeline_string)

        kvssink = self.pipeline.get_by_name('kvssink')
        kvssink.set_property('access-key', access_key)
        kvssink.set_property('secret-key', secret_key)
        kvssink.set_property('stream-name', self.stream_name)
Example #10
    def _create_webrtc_element_for_new_connection(self, ws):
        '''
        We make a new webrtc element, and queue to feed into it, for every new peer (client).
        That way, multiple clients can connect at once.
        '''

        self.peers[ws]['webrtcbin'] = Gst.ElementFactory.make('webrtcbin')
        self.pipeline.add(self.peers[ws]['webrtcbin'])
        self.peers[ws]['webrtcbin'].add_property_notify_watch(None, True)
        if len(config.ice_servers()) > 0:
            ice_server_url = config.ice_servers()[0]['urls']
            self.peers[ws]['webrtcbin'].set_property('stun-server',
                                                     ice_server_url)

        if config.enable_video():
            self.peers[ws]['video_queue'] = Gst.ElementFactory.make('queue')
            self.pipeline.add(self.peers[ws]['video_queue'])
            self.webrtc_video_tee.link(self.peers[ws]['video_queue'])
            self.peers[ws]['video_queue'].link(self.peers[ws]['webrtcbin'])

        if config.enable_audio():
            self.peers[ws]['audio_queue'] = Gst.ElementFactory.make('queue')
            self.pipeline.add(self.peers[ws]['audio_queue'])
            self.webrtc_audio_tee.link(self.peers[ws]['audio_queue'])
            self.peers[ws]['audio_queue'].link(self.peers[ws]['webrtcbin'])
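The lookup config.ice_servers()[0]['urls'] implies each entry follows the WebRTC RTCIceServer shape; a hypothetical configuration:

    ice_servers = [{'urls': 'stun:stun.l.google.com:19302'}]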
Example #11
    def create_elements(self):
        '''
        Create the elements needed whether this is audio, video, or both
        '''

        self._create_initial_multiqueue()
        pipeline_string = 'flvmux name=mux streamable=true ! rtmpsink name=sink'

        if config.enable_video():
            # framerate=30/1 because Facebook Live and YouTube Live want this framerate.
            # profile=baseline may be superfluous but some have recommended it for Facebook
            video_caps = 'video/x-h264,framerate=30/1,profile=baseline,width=%d,height=%d,format=YUV' % \
                (self.props['width'], self.props['height'])

            # key-int-max=60 puts a keyframe every 2 seconds (60 as 2*framerate)
            pipeline_string += (
                ' intervideosrc name=intervideosrc ! videorate ! videoconvert ! videoscale ! '
                'x264enc name=video_encoder key-int-max=60 ! ' +
                video_caps + ' ! h264parse ! queue ! mux.')

        if config.enable_audio():
            pipeline_string = pipeline_string + \
                ' interaudiosrc name=interaudiosrc ! audioconvert ! audioresample ! avenc_aac name=audio_encoder ! ' + \
                'aacparse ! audio/mpeg, mpegversion=4 ! queue ! mux.'

        self.logger.debug('Creating RTMP output with this pipeline: ' +
                          pipeline_string)
        if not self.create_pipeline_from_string(pipeline_string):
            return False
        self.pipeline.get_by_name('sink').set_property(
            'location', self.props['uri'] + ' live=1')

        if config.enable_video():
            self.intervideosrc = self.pipeline.get_by_name('intervideosrc')
            self.intervideosrc_src_pad = self.intervideosrc.get_static_pad(
                'src')
            self.create_intervideosink_and_connections()

        if config.enable_audio():
            self.interaudiosrc = self.pipeline.get_by_name('interaudiosrc')
            self.interaudiosrc_src_pad = self.interaudiosrc.get_static_pad(
                'src')
            self.create_interaudiosink_and_connections()

        self.logger.info('RTMP output now configured to send to ' +
                         self.props['uri'])
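Hypothetical props for this output; ' live=1' is appended because rtmpsink passes the location string to librtmp, which parses space-separated options:

    props = {
        'uri': 'rtmp://a.rtmp.youtube.com/live2/xxxx-xxxx-xxxx-xxxx',  # placeholder stream key
        'width': 1280,
        'height': 720,
    }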
Example #12
    def create_elements(self):
        self._create_pipeline()

        if config.enable_video():
            self.webrtc_video_tee = self.pipeline.get_by_name('webrtc_video_tee')

        if config.enable_audio():
            self.webrtc_audio_tee = self.pipeline.get_by_name('webrtc_audio_tee')
Example #13
File: local.py Project: ottes/brave
    def create_elements(self):
        pipeline_string = ''
        if config.enable_video():
            pipeline_string += self._video_pipeline_start() + 'queue ! glimagesink'
        if config.enable_audio():
            pipeline_string += ' interaudiosrc name=interaudiosrc ! queue ! autoaudiosink'

        self.create_pipeline_from_string(pipeline_string)
Example #14
    def create_elements(self):
        if not config.enable_video():
            return
        self.__delete_file_if_exists()
        pipeline_string = self._video_pipeline_start() + 'jpegenc ! multifilesink name=sink'
        self.create_pipeline_from_string(pipeline_string)
        sink = self.pipeline.get_by_name('sink')
        sink.set_property('location', self.location)
Example #15
    def _create_webrtc_element_for_new_connection(self, ws):
        '''
        We make a new webrtc element, and queue to feed into it, for every new peer (client).
        That way, multiple clients can connect at once.
        '''

        self.peers[ws]['webrtcbin'] = Gst.ElementFactory.make('webrtcbin')
        self.pipeline.add(self.peers[ws]['webrtcbin'])
        self.peers[ws]['webrtcbin'].set_property('bundle-policy', 'max-bundle')
        self.peers[ws]['webrtcbin'].add_property_notify_watch(None, True)
        self.peers[ws]['webrtcbin'].set_state(Gst.State.READY)

        if config.stun_server():
            self.peers[ws]['webrtcbin'].set_property('stun-server', 'stun://' + config.stun_server())
        if config.turn_server():
            self.peers[ws]['webrtcbin'].set_property('turn-server', 'turn://' + config.turn_server())

        if config.enable_video():
            self.peers[ws]['video_queue'] = Gst.ElementFactory.make('queue')
            self.peers[ws]['video_queue'].set_property('leaky', 'upstream')
            self.pipeline.add(self.peers[ws]['video_queue'])
            self.webrtc_video_tee.link(self.peers[ws]['video_queue'])
            self.peers[ws]['video_queue'].link(self.peers[ws]['webrtcbin'])
            self.peers[ws]['video_queue'].set_state(Gst.State.READY)

        if config.enable_audio():
            self.peers[ws]['audio_queue'] = Gst.ElementFactory.make('queue')
            self.peers[ws]['audio_queue'].set_property('leaky', 'upstream')
            self.pipeline.add(self.peers[ws]['audio_queue'])
            self.webrtc_audio_tee.link(self.peers[ws]['audio_queue'])
            self.peers[ws]['audio_queue'].link(self.peers[ws]['webrtcbin'])
            self.peers[ws]['audio_queue'].set_state(Gst.State.READY)

        # The elements above were first set to READY; they are now moved to
        # PLAYING. This appears to prevent a race condition that can
        # intermittently cause _on_negotiation_needed to not be called.
        self.peers[ws]['webrtcbin'].set_state(Gst.State.PLAYING)

        if config.enable_video():
            self.peers[ws]['video_queue'].set_state(Gst.State.PLAYING)

        if config.enable_audio():
            self.peers[ws]['audio_queue'].set_state(Gst.State.PLAYING)
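A hypothetical counterpart for peer disconnection, mirroring the setup above (releasing the tee request pads is omitted for brevity):

    def _remove_webrtc_element_for_connection(self, ws):
        for key in ('webrtcbin', 'video_queue', 'audio_queue'):
            element = self.peers[ws].pop(key, None)
            if element is not None:
                element.set_state(Gst.State.NULL)  # stop before removing
                self.pipeline.remove(element)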
Example #16
    def create_elements(self):
        self._create_initial_multiqueue()
        pipeline_string = 'mp4mux name=mux ! filesink name=sink'

        if config.enable_video():
            video_pipeline_string = (
                'intervideosrc name=intervideosrc ! videoconvert ! '
                'videoscale ! videorate ! '
                f'{self.create_caps_string()} ! '
                'x264enc name=video_encoder ! queue ! mux.')

            pipeline_string = pipeline_string + ' ' + video_pipeline_string

        if config.enable_audio():
            audio_pipeline_string = (
                'interaudiosrc name=interaudiosrc ! '
                'audioconvert ! audioresample ! avenc_aac name=audio_encoder')

            # A larger audio queue gives the slower video encoder time to catch up
            audio_pipeline_string += f' ! queue max-size-bytes={10*(3 ** 20)} ! mux.'

            pipeline_string = pipeline_string + ' ' + audio_pipeline_string

        if not self.create_pipeline_from_string(pipeline_string):
            return

        self.logger.debug('Writing to the file ' + self.props['location'])
        sink = self.pipeline.get_by_name('sink')
        sink.set_property('location', self.props['location'])

        if config.enable_video():
            self.video_encoder = self.pipeline.get_by_name('video_encoder')
            self.intervideosrc = self.pipeline.get_by_name('intervideosrc')
            self.intervideosrc_src_pad = self.intervideosrc.get_static_pad(
                'src')
            self.create_intervideosink_and_connections()

        if config.enable_audio():
            self.audio_encoder = self.pipeline.get_by_name('audio_encoder')
            self.interaudiosrc = self.pipeline.get_by_name('interaudiosrc')
            self.interaudiosrc_src_pad = self.interaudiosrc.get_static_pad(
                'src')
            self.create_interaudiosink_and_connections()
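create_caps_string() is defined elsewhere; a plausible sketch, modelled on the raw-video caps used by the other examples here:

    def create_caps_string(self):
        return 'video/x-raw,format=I420,width=%d,height=%d,framerate=30/1' % \
            (self.props['width'], self.props['height'])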
Example #17
File: image.py Project: whhxf/brave
    def create_elements(self):
        if not config.enable_video():
            return

        # To crop (not resize): videobox autocrop=true border-alpha=0
        pipeline_string = ('uridecodebin name=uridecodebin uri="' + self.props['uri'] +
                           '" ! imagefreeze ! videoconvert ! video/x-raw,pixel-aspect-ratio=1/1,framerate=30/1' +
                           self.default_video_pipeline_string_end())
        self.create_pipeline_from_string(pipeline_string)
        self.final_video_tee = self.pipeline.get_by_name('final_video_tee')
        self.uridecodebin = self.pipeline.get_by_name('uridecodebin')
Example #18
    def create_elements(self):
        if not config.enable_video():
            return

        if not self.create_pipeline_from_string('cef url="' + self.props['uri'] + '" ! '
                                                'videoconvert ! video/x-raw,format=ARGB ! '
                                                'queue' + self.default_video_pipeline_string_end()):
            return False

        self.intervideosink = self.pipeline.get_by_name('intervideosink')
        self.final_video_tee = self.pipeline.get_by_name('final_video_tee')
        self.handle_updated_props()
Example #19
    def create_elements(self):
        if not config.enable_video():
            return

        self.create_pipeline_from_string(
            'cef url="' + self.uri + '" ! '
            'videoconvert ! video/x-raw,format=ARGB ! '
            'queue ! ' + self.default_video_pipeline_string_end())

        self.intervideosink = self.pipeline.get_by_name('intervideosink')
        self.final_video_tee = self.pipeline.get_by_name('final_video_tee')
        self.video_output_queue = self.pipeline.get_by_name(
            'video_output_queue')
Example #20
    def create_elements(self):
        # Playbin or playbin3 does all the hard work.
        # Playbin3 works better for continuous playback.
        # But it does not handle RTMP inputs as well.
        # See http://gstreamer-devel.966125.n4.nabble.com/Behavior-differences-between-
        #   decodebin3-and-decodebin-and-vtdec-hw-not-working-on-OSX-td4680895.html
        # The URI is first resolved through youtube-dl, where possible.
        # https://github.com/ytdl-org/youtube-dl/blob/master/README.md#embedding-youtube-dl
        self.suri = ''
        try:
            ydl_opts = {
                'simulate': True,
                'noplaylist': True,
                'forceurl': True,
                'logger': MyLogger(),
            }
            with youtube_dl.YoutubeDL(ydl_opts) as ydl:
                ydl.download([self.uri])
                meta = ydl.extract_info(self.uri, download=False)
                self.channel = meta['uploader']
                # TODO: extract other metadata we want, such as the description

            global purl
            self.stream = purl
            self.suri = purl
        except Exception:
            pass

        is_rtmp = self.suri.startswith('rtmp')
        playbin_element = 'playbin' if is_rtmp else 'playbin3'
        self.create_pipeline_from_string(playbin_element)
        self.playsink = self.pipeline.get_by_name('playsink')
        self.playbin = self.playsink.parent
        self.playbin.set_property('uri', self.suri)
        self.playbin.connect('about-to-finish', self.__on_about_to_finish)

        if config.enable_video():
            self.create_video_elements()
        else:
            self._create_fake_video()

        if config.enable_audio():
            self.create_audio_elements()
        else:
            self._create_fake_audio()
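MyLogger and the global purl are referenced above but not shown. youtube-dl routes its screen output through a configured logger, so one plausible (hypothetical) capture of the 'forceurl' line is:

    purl = ''

    class MyLogger:
        def debug(self, msg):
            global purl
            if msg.startswith('http'):
                purl = msg  # the resolved media URL printed by forceurl

        def warning(self, msg):
            pass

        def error(self, msg):
            pass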
Example #21
    def create_elements(self):
        if not config.enable_video():
            return
        self.__delete_file_if_exists()
        self._create_initial_multiqueue()
        pipeline_string = 'intervideosrc name=src ! videoconvert ! videoscale ! videorate ! ' + \
            self.create_caps_string() + ' ! jpegenc ! multifilesink name=sink'
        if not self.create_pipeline_from_string(pipeline_string):
            return False

        self.intervideosrc = self.pipeline.get_by_name('src')
        self.intervideosrc_src_pad = self.intervideosrc.get_static_pad('src')
        sink = self.pipeline.get_by_name('sink')
        sink.set_property('location', self.props['location'])

        self.create_intervideosink_and_connections()
Example #22
    def create_elements(self):
        # Playbin does all the hard work
        self.create_pipeline_from_string("playbin uri=\"" + self.props['uri'] +
                                         "\"")

        # playbin appears as 'playsink' (because it's a bin with elements inside)
        self.playsink = self.pipeline.get_by_name('playsink')

        if config.enable_video():
            self.create_video_elements()
        else:
            self._create_fake_video()

        if config.enable_audio():
            self.create_audio_elements()
        else:
            self._create_fake_audio()
Example #23
    def create_elements(self):
        if not config.enable_video():
            return

        if not self.create_pipeline_from_string(
                'cef url="' + self.props['uri'] + '" ! '
                'videoconvert ! video/x-raw,format=ARGB ! '
                'queue ! intervideosink sync=true name=intervideosink'):
            return False

        self.intervideosink = self.pipeline.get_by_name('intervideosink')
        if self.intervideosink is None:
            raise Exception(
                'Unable to make HTML input - cannot find intervideosink')

        self.create_intervideosrc_and_connections()
        self.handle_updated_props()
Example #24
    def create_elements(self):
        if not config.enable_video():
            return

        # To crop (not resize): videobox autocrop=true border-alpha=0
        pipeline_string = (
            'uridecodebin name=uridecodebin uri="' + self.props['uri'] +
            '" ! imagefreeze ! videoconvert ! video/x-raw,pixel-aspect-ratio=1/1,framerate=30/1 ! '
            'intervideosink sync=true name=intervideosink')
        if not self.create_pipeline_from_string(pipeline_string):
            return False
        self.intervideosink = self.pipeline.get_by_name('intervideosink')
        self.uridecodebin = self.pipeline.get_by_name('uridecodebin')
        if self.intervideosink is None:
            raise Exception(
                'Unable to make image input - cannot find intervideosink')

        self.create_intervideosrc_and_connections()
        self.handle_updated_props()
Example #25
    def create_elements(self):
        '''
        Create the elements needed whether this is audio, video, or both
        '''
        pipeline_string = 'flvmux name=mux streamable=true ! rtmpsink name=sink'

        if config.enable_video():
            # key-int-max=60 puts a keyframe every 2 seconds (60 as 2*framerate)
            pipeline_string += ' ' + self._video_pipeline_start() + \
                'x264enc name=video_encoder key-int-max=60 ! h264parse ! queue ! mux.'

        if config.enable_audio():
            pipeline_string += ' ' + self._audio_pipeline_start() + \
                'avenc_aac name=audio_encoder ! aacparse ! audio/mpeg, mpegversion=4 ! queue ! mux.'

        self.create_pipeline_from_string(pipeline_string)
        self.pipeline.get_by_name('sink').set_property('location',
                                                       self.uri + ' live=1')

        self.logger.info('RTMP output now configured to send to ' + self.uri)
Example #26
    def create_elements(self):
        self._create_initial_multiqueue()

        if not self._create_pipeline():
            return False

        if config.enable_video():
            self.intervideosrc = self.pipeline.get_by_name('intervideosrc')
            self.intervideosrc_src_pad = self.intervideosrc.get_static_pad('src')
            self.create_intervideosink_and_connections()

        if config.enable_audio():
            self.interaudiosrc = self.pipeline.get_by_name('interaudiosrc')
            self.interaudiosrc_src_pad = self.interaudiosrc.get_static_pad('src')
            self.create_interaudiosink_and_connections()

        self.webrtc_video_tee = self.pipeline.get_by_name('webrtc_video_tee')
        self.webrtc_audio_tee = self.pipeline.get_by_name('webrtc_audio_tee')

        return True
Example #27
    def _setup_initial_inputs_outputs_mixers_and_overlays(self):
        '''
        Create the inputs/outputs/mixers/overlays declared in the config file.
        '''
        for mixer_config in config.mixers():
            self.mixers.add(**mixer_config)

        for input_config in config.inputs():
            input = self.inputs.add(**input_config)
            input.setup()

        for output_config in config.outputs():
            self.outputs.add(**output_config)

        for id, mixer in self.mixers.items():
            mixer.setup_initial_sources()

        if config.enable_video():
            for overlay_config in config.overlays():
                self.overlays.add(**overlay_config)
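Each accessor feeds keyword arguments straight into .add(), so the config file plausibly declares lists of dicts along these lines (hypothetical values):

    mixers = [{'width': 640, 'height': 360}]
    inputs = [{'type': 'test_video'}]
    outputs = [{'type': 'local'}]
    overlays = [{'type': 'text', 'text': 'Hello'}]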
Example #28
    def _setup_initial_inputs_outputs_mixers_and_overlays(self):
        '''
        Create the inputs/outputs/mixers/overlays declared in the config file.
        '''
        for mixer_config in config.default_mixers():
            self.mixers.add(**mixer_config)

        for output_config in config.default_outputs():
            self.outputs.add(**output_config)

        for name, output in self.outputs.items():
            output.link_from_source()

        if config.enable_video():
            for overlay_config in config.default_overlays():
                self.overlays.add(**overlay_config)

        for input_config in config.default_inputs():
            input = self.inputs.add(**input_config)
            for source in input.sources():
                source.add_to_mix()
Example #29
    def _setup_initial_inputs_outputs_mixers_and_overlays(self):
        '''
        Create the inputs/outputs/mixers/overlays declared in the config file.
        '''
        for mixer_config in config.default_mixers():
            self.mixers.add(**mixer_config)

        for output_config in config.default_outputs():
            self.outputs.add(**output_config)

        if config.enable_video():
            for overlay_config in config.default_overlays():
                self.overlays.add(**overlay_config)

        for name, mixer in self.mixers.items():
            mixer.set_state(Gst.State.PLAYING)

        for input_config in config.default_inputs():
            input = self.inputs.add(**input_config)
            for id, mixer in self.mixers.items():
                source = mixer.sources.get_or_create(input)
                source.add_to_mix()
Example #30
    def has_video(self):
        return config.enable_video()