def _append_nodes_for_inputs_list(self,
                                  inputs: List[Input],
                                  output_location: str,
                                  period_dir: Optional[str] = None,
                                  index: int = 0) -> None:
        """A common method that creates Transcoder and Packager nodes for a list of Inputs.

        Args:
          inputs (List[Input]): A list of Input streams.
          output_location (str): A path where the packager will write outputs.
          period_dir (Optional[str]): A subdirectory name where a single period
            will be output to.  If passed, this indicates that the inputs
            argument is one period in a list of periods.
          index (int): The index of the current Transcoder/Packager nodes.
        """

        outputs: List[OutputStream] = []
        # Named "input_" rather than "input" to avoid shadowing the builtin.
        for input_ in inputs:
            # External command inputs need to be processed by an additional node
            # before being transcoded.  In this case, the input doesn't have a
            # filename that FFmpeg can read, so we generate an intermediate pipe
            # for that node to write to.  TranscoderNode will then instruct
            # FFmpeg to read from that pipe for this input.
            if input_.input_type == InputType.EXTERNAL_COMMAND:
                command_output = Pipe.create_ipc_pipe(self._temp_dir)
                self._nodes.append(
                    ExternalCommandNode(input_.name,
                                        command_output.write_end()))
                # Reset the name of the input to be the output pipe path - which
                # the transcoder node will read from - instead of a shell
                # command.
                input_.reset_name(command_output.read_end())

            if input_.media_type == MediaType.AUDIO:
                for audio_codec in self._pipeline_config.audio_codecs:
                    for output_channel_layout in self._pipeline_config.get_channel_layouts(
                    ):
                        # We won't upmix a lower channel count input to a higher
                        # one.  Skip channel counts greater than the input
                        # channel count.
                        if input_.get_channel_layout() < output_channel_layout:
                            continue

                        outputs.append(
                            AudioOutputStream(input_, self._temp_dir,
                                              audio_codec,
                                              output_channel_layout))

            elif input_.media_type == MediaType.VIDEO:
                for video_codec in self._pipeline_config.video_codecs:
                    for output_resolution in self._pipeline_config.get_resolutions(
                    ):
                        # Only going to output lower or equal resolution videos.
                        # Upscaling is costly and does not do anything.
                        if input_.get_resolution() < output_resolution:
                            continue

                        outputs.append(
                            VideoOutputStream(input_, self._temp_dir,
                                              video_codec, output_resolution))

            elif input_.media_type == MediaType.TEXT:
                # If the input is a VTT or TTML file, pass it directly to the
                # packager (skip_transcoding=True) without any intermediate
                # processing or any named pipe.  Otherwise, the input is
                # something like an mkv file with text tracks in it; those will
                # be extracted by the transcoder and passed in a pipe to the
                # packager.
                # TODO: Test TTML inputs
                skip_transcoding = input_.name.endswith(('.vtt', '.ttml'))
                outputs.append(
                    TextOutputStream(input_, self._temp_dir, skip_transcoding))

        self._nodes.append(
            TranscoderNode(inputs, self._pipeline_config, outputs, index,
                           self.hermetic_ffmpeg))

        # If the inputs list was a period in multiperiod_inputs_list, create a
        # nested directory and put that period in it.
        if period_dir:
            output_location = os.path.join(output_location, period_dir)
            os.mkdir(output_location)

        self._nodes.append(
            PackagerNode(self._pipeline_config, output_location, outputs,
                         index, self.hermetic_packager))
# Esempio n. 2 (Example no. 2) — extraction artifact separating two snippets; not code.
    def _encode_video(self, stream: VideoOutputStream,
                      input: Input) -> List[str]:
        """Return the FFmpeg argument list for encoding one video stream."""
        video_filters: List[str] = []
        encoder_args: List[str] = []

        if input.is_interlaced:
            video_filters.append('pp=fd')
            encoder_args.extend(['-r', str(input.frame_rate)])

        # Any user-specified filters for this input come first.
        video_filters.extend(input.filters)

        hwaccel_api = self._pipeline_config.hwaccel_api
        max_height = stream.resolution.max_height

        # A width of -2 in the scale filters tells FFmpeg to pick a value that
        # keeps the original aspect ratio.
        if stream.is_hardware_accelerated() and hwaccel_api == 'vaapi':
            # These filters are specific to Linux's vaapi.
            video_filters.append('format=nv12')
            video_filters.append('hwupload')
            video_filters.append(f'scale_vaapi=-2:{max_height}')
        else:
            video_filters.append(f'scale=-2:{max_height}')

        # Pin Sample Aspect Ratio to exactly 1:1 to avoid weird rounding
        # errors.  Without this, SAR winds up as strange near-1 values in DASH,
        # such as 5120:5123.  In HLS the behavior is worse: some width values
        # in the playlist end up off by one, which causes playback failures in
        # ExoPlayer.
        # https://github.com/google/shaka-streamer/issues/36
        video_filters.append('setsar=1:1')

        if stream.codec == VideoCodec.H264:
            # These presets are specifically recognized by the software encoder.
            if self._pipeline_config.streaming_mode == StreamingMode.LIVE:
                encoder_args.extend([
                    # Encodes with highest-speed presets for real-time live
                    # streaming.
                    '-preset',
                    'ultrafast',
                ])
            else:
                encoder_args.extend([
                    # Take your time for VOD streams.
                    '-preset',
                    'slow',
                    # Apply the loop filter for higher quality output.
                    '-flags',
                    '+loop',
                ])

        base_codec = stream.codec.get_base_codec()
        if base_codec == VideoCodec.H264:  # Software or hardware
            # Use the "high" profile for HD and up, and "main" for everything
            # else.
            # https://en.wikipedia.org/wiki/Advanced_Video_Coding#Profiles
            profile = 'high' if max_height >= 720 else 'main'

            encoder_args.extend([
                # The only format supported by QT/Apple.
                '-pix_fmt',
                'yuv420p',
                # Require a closed GOP.  Some decoders don't support open GOPs.
                '-flags',
                '+cgop',
                # Set the H264 profile.  Without this, the default would be
                # "main".  Note that this gets overridden to "baseline" in live
                # streams by the "-preset ultrafast" option, presumably because
                # the baseline encoder is faster.
                '-profile:v',
                profile,
            ])

        elif base_codec == VideoCodec.VP9:
            # TODO: Does -preset apply here?
            encoder_args.extend([
                # According to the wiki (https://trac.ffmpeg.org/wiki/Encode/VP9),
                # this allows threaded encoding in VP9, which makes better use
                # of CPU resources and speeds up encoding.  This is still not
                # the default setting as of libvpx v1.7.
                '-row-mt',
                '1',
            ])
        elif stream.codec == VideoCodec.AV1:
            encoder_args.extend([
                # According to graphs at https://bit.ly/2BmIVt6, this AV1
                # setting results in almost no reduction in quality (0.8%), but
                # a significant boost in speed (20x).
                '-cpu-used',
                '8',
                # According to the wiki (https://trac.ffmpeg.org/wiki/Encode/AV1),
                # this allows threaded encoding in AV1, which makes better use
                # of CPU resources and speeds up encoding.  This will be ignored
                # by libaom before version 1.0.0-759-g90a15f4f2, and so there
                # may be no benefit unless libaom and ffmpeg are built from
                # source (as of Oct 2019).
                '-row-mt',
                '1',
                # According to the wiki (https://trac.ffmpeg.org/wiki/Encode/AV1),
                # this allows for threaded _decoding_ in AV1, which will provide
                # a smoother playback experience for the end user.
                '-tiles',
                '2x2',
                # AV1 is considered "experimental".
                '-strict',
                'experimental',
            ])

        # GOP length in frames: one segment's worth of video.
        gop_frames = int(self._pipeline_config.segment_size * input.frame_rate)

        encoder_args.extend([
            # No audio encoding for video.
            '-an',
            # Set codec and bitrate.
            '-c:v',
            stream.get_ffmpeg_codec_string(hwaccel_api),
            '-b:v',
            stream.get_bitrate(),
            # Output MP4 in the pipe, for all codecs.
            '-f',
            'mp4',
            # This flag forces a video fragment at each keyframe.
            '-movflags',
            '+frag_keyframe',
            # This explicit fragment duration affects both audio and video, and
            # ensures that there are no single large MP4 boxes that Shaka
            # Packager can't consume from a pipe.
            # FFmpeg fragment duration is in microseconds.
            '-frag_duration',
            str(self._pipeline_config.segment_size * 1e6),
            # Set minimum and maximum GOP length.
            '-keyint_min',
            str(gop_frames),
            '-g',
            str(gop_frames),
            # Set video filters.
            '-vf',
            ','.join(video_filters),
        ])
        return encoder_args
    def start(self,
              output_dir: str,
              input_config_dict: Dict[str, Any],
              pipeline_config_dict: Dict[str, Any],
              bitrate_config_dict: Optional[Dict[Any, Any]] = None,
              bucket_url: Optional[str] = None,
              check_deps: bool = True) -> 'ControllerNode':
        """Create and start all other nodes.

        :raises: `RuntimeError` if the controller has already started.
        :raises: :class:`streamer.configuration.ConfigError` if the configuration is
                 invalid.
        """

        # Avoid the mutable-default-argument pitfall: a shared `{}` default
        # would be one dict object reused across every call.  `None` is the
        # sentinel; an omitted argument still means "empty config".
        if bitrate_config_dict is None:
            bitrate_config_dict = {}

        if self._nodes:
            raise RuntimeError('Controller already started!')

        if check_deps:
            # Check that ffmpeg version is 4.1 or above.
            _check_version('FFmpeg', ['ffmpeg', '-version'], (4, 1))

            # Check that ffprobe version (used for autodetect features) is 4.1 or
            # above.
            _check_version('ffprobe', ['ffprobe', '-version'], (4, 1))

            # Check that Shaka Packager version is 2.4.2 or above.
            _check_version('Shaka Packager', ['packager', '-version'],
                           (2, 4, 2))

            if bucket_url:
                # Check that the Google Cloud SDK is at least v212, which introduced
                # gsutil 4.33 with an important rsync bug fix.
                # https://cloud.google.com/sdk/docs/release-notes
                # https://github.com/GoogleCloudPlatform/gsutil/blob/master/CHANGES.md
                # This is only required if the user asked for upload to cloud storage.
                _check_version('Google Cloud SDK', ['gcloud', '--version'],
                               (212, 0, 0))

        if bucket_url:
            # If using cloud storage, make sure the user is logged in and can access
            # the destination, independent of the version check above.
            CloudNode.check_access(bucket_url)

        # Define resolutions and bitrates before parsing other configs.
        bitrate_config = BitrateConfig(bitrate_config_dict)

        # Now that the definitions have been parsed, register the maps of valid
        # resolutions and channel layouts so that InputConfig and PipelineConfig
        # can be validated accordingly.
        VideoResolution.set_map(bitrate_config.video_resolutions)
        AudioChannelLayout.set_map(bitrate_config.audio_channel_layouts)

        input_config = InputConfig(input_config_dict)
        pipeline_config = PipelineConfig(pipeline_config_dict)
        self._pipeline_config = pipeline_config

        outputs: List[OutputStream] = []
        # Named "input_" rather than "input" to avoid shadowing the builtin.
        for input_ in input_config.inputs:
            # External command inputs need to be processed by an additional node
            # before being transcoded.  In this case, the input doesn't have a
            # filename that FFmpeg can read, so we generate an intermediate pipe for
            # that node to write to.  TranscoderNode will then instruct FFmpeg to
            # read from that pipe for this input.
            if input_.input_type == InputType.EXTERNAL_COMMAND:
                command_output = self._create_pipe()
                self._nodes.append(
                    ExternalCommandNode(input_.name, command_output))
                input_.set_pipe(command_output)

            if input_.media_type == MediaType.AUDIO:
                for audio_codec in pipeline_config.audio_codecs:
                    outputs.append(
                        AudioOutputStream(self._create_pipe(), input_,
                                          audio_codec,
                                          pipeline_config.channels))

            elif input_.media_type == MediaType.VIDEO:
                for video_codec in pipeline_config.video_codecs:
                    for output_resolution in pipeline_config.get_resolutions():
                        # Only going to output lower or equal resolution videos.
                        # Upscaling is costly and does not do anything.
                        if input_.get_resolution() < output_resolution:
                            continue

                        outputs.append(
                            VideoOutputStream(self._create_pipe(), input_,
                                              video_codec, output_resolution))

            elif input_.media_type == MediaType.TEXT:
                if input_.name.endswith(('.vtt', '.ttml')):
                    # If the input is a VTT or TTML file, pass it directly to the packager
                    # without any intermediate processing or any named pipe.
                    # TODO: Test TTML inputs
                    text_pipe = None  # Bypass transcoder
                else:
                    # Otherwise, the input is something like an mkv file with text tracks
                    # in it.  These will be extracted by the transcoder and passed in a
                    # pipe to the packager.
                    text_pipe = self._create_pipe('.vtt')

                outputs.append(TextOutputStream(text_pipe, input_))

        self._nodes.append(
            TranscoderNode(input_config, pipeline_config, outputs))

        self._nodes.append(PackagerNode(pipeline_config, output_dir, outputs))

        if bucket_url:
            cloud_temp_dir = os.path.join(self._temp_dir, 'cloud')
            os.mkdir(cloud_temp_dir)

            self._nodes.append(
                CloudNode(output_dir, bucket_url, cloud_temp_dir,
                          self.is_vod()))

        # Start every node; return self so callers can chain (e.g.
        # controller.start(...).wait()).
        for node in self._nodes:
            node.start()
        return self