Example no. 1
    def _encode_audio(self, stream: AudioOutputStream,
                      input: Input) -> List[str]:
        filters: List[str] = []
        args: List[str] = [
            # No video encoding for audio.
            '-vn',
            # TODO: This implied downmixing is not ideal.
            # Set the number of channels to the one specified in the config.
            '-ac',
            str(stream.channels),
        ]

        if stream.channels == 6:
            filters += [
                # Workaround for https://github.com/google/shaka-packager/issues/598,
                # as seen on https://trac.ffmpeg.org/ticket/6974
                'channelmap=channel_layout=5.1',
            ]

        filters.extend(input.filters)

        hwaccel_api = self._pipeline_config.hwaccel_api
        args += [
            # Set codec and bitrate.
            '-c:a',
            stream.get_ffmpeg_codec_string(hwaccel_api),
            '-b:a',
            stream.get_bitrate(),
            # Output MP4 in the pipe, for all codecs.
            '-f',
            'mp4',
            # This explicit fragment duration affects both audio and video, and
            # ensures that there are no single large MP4 boxes that Shaka Packager
            # can't consume from a pipe.
            # FFmpeg fragment duration is in microseconds.
            '-frag_duration',
            str(self._pipeline_config.segment_size * 1e6),
            # Opus in MP4 is considered "experimental".
            '-strict',
            'experimental',
        ]

        if filters:
            args += [
                # Set audio filters.
                '-af',
                ','.join(filters),
            ]

        return args
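
For illustration, a minimal sketch of the argument list this first variant assembles. The values are hypothetical (channels=6, a codec string of 'aac', a '256k' bitrate, and segment_size=4 in the pipeline config), not taken from a real configuration:

# Hypothetical result of _encode_audio() for the values described above.
expected_args = [
    '-vn',
    '-ac', '6',
    '-c:a', 'aac',            # assumed return value of get_ffmpeg_codec_string()
    '-b:a', '256k',           # assumed return value of get_bitrate()
    '-f', 'mp4',
    '-frag_duration', '4000000.0',   # str(4 * 1e6), i.e. 4 seconds in microseconds
    '-strict', 'experimental',
    '-af', 'channelmap=channel_layout=5.1',  # added because channels == 6
]
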
    def _encode_audio(self, stream: AudioOutputStream,
                      input: Input) -> List[str]:
        filters: List[str] = []
        args: List[str] = [
            # No video encoding for audio.
            '-vn',
            # TODO: This implied downmixing is not ideal.
            # Set the number of channels to the one specified in the config.
            '-ac',
            str(stream.channels),
        ]

        if stream.channels == 6:
            filters += [
                # Workaround for https://github.com/google/shaka-packager/issues/598,
                # as seen on https://trac.ffmpeg.org/ticket/6974
                'channelmap=channel_layout=5.1',
            ]

        filters.extend(input.filters)

        hwaccel_api = self._pipeline_config.hwaccel_api
        args += [
            # Set codec and bitrate.
            '-c:a',
            stream.get_ffmpeg_codec_string(hwaccel_api),
            '-b:a',
            stream.get_bitrate(),
            # Output MP4 in the pipe, for all codecs.
            '-f',
            'mp4',
            # These flags make it fragmented MP4, which is necessary for a pipe.
            '-movflags',
            '+faststart+frag_keyframe',
            # Opus in MP4 is considered "experimental".
            '-strict',
            'experimental',
        ]

        if filters:
            args += [
                # Set audio filters.
                '-af',
                ','.join(filters),
            ]

        return args
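
Both variants fold the audio filters into a single -af argument. A minimal sketch of that joining step, using a hypothetical user-supplied filter (volume=0.5) to stand in for input.filters:

# Hypothetical filter list for a 6-channel input with one user-supplied filter.
filters = ['channelmap=channel_layout=5.1']   # added because channels == 6
filters.extend(['volume=0.5'])                # stand-in for input.filters
args = ['-af', ','.join(filters)]
# args == ['-af', 'channelmap=channel_layout=5.1,volume=0.5']
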
    def _append_nodes_for_inputs_list(self,
                                      inputs: List[Input],
                                      output_location: str,
                                      period_dir: Optional[str] = None,
                                      index: int = 0) -> None:
        """A common method that creates Transcoder and Packager nodes for a list of Inputs passed to it.

    Args:
      inputs (List[Input]): A list of Input streams.
      output_location (str): A path where the packager will write its outputs.
      period_dir (Optional[str]): A subdirectory name that a single period will be
        output to.  If given, this indicates that the inputs argument is one period
        in a list of periods.
      index (int): The index of the current Transcoder/Packager nodes.
    """

        outputs: List[OutputStream] = []
        for input in inputs:
            # External command inputs need to be processed by an additional node
            # before being transcoded.  In this case, the input doesn't have a
            # filename that FFmpeg can read, so we generate an intermediate pipe for
            # that node to write to.  TranscoderNode will then instruct FFmpeg to
            # read from that pipe for this input.
            if input.input_type == InputType.EXTERNAL_COMMAND:
                command_output = Pipe.create_ipc_pipe(self._temp_dir)
                self._nodes.append(
                    ExternalCommandNode(input.name,
                                        command_output.write_end()))
                # Reset the input's name to the output pipe path, which the
                # transcoder node will read from, instead of a shell command.
                input.reset_name(command_output.read_end())

            if input.media_type == MediaType.AUDIO:
                for audio_codec in self._pipeline_config.audio_codecs:
                    for output_channel_layout in self._pipeline_config.get_channel_layouts(
                    ):
                        # We won't upmix a lower channel count input to a higher one.
                        # Skip channel counts greater than the input channel count.
                        if input.get_channel_layout() < output_channel_layout:
                            continue

                        outputs.append(
                            AudioOutputStream(input, self._temp_dir,
                                              audio_codec,
                                              output_channel_layout))

            elif input.media_type == MediaType.VIDEO:
                for video_codec in self._pipeline_config.video_codecs:
                    for output_resolution in self._pipeline_config.get_resolutions(
                    ):
                        # Only going to output lower or equal resolution videos.
                        # Upscaling is costly and does not improve quality.
                        if input.get_resolution() < output_resolution:
                            continue

                        outputs.append(
                            VideoOutputStream(input, self._temp_dir,
                                              video_codec, output_resolution))

            elif input.media_type == MediaType.TEXT:
                if input.name.endswith('.vtt') or input.name.endswith('.ttml'):
                    # If the input is a VTT or TTML file, pass it directly to the packager
                    # without any intermediate processing or any named pipe.
                    # TODO: Test TTML inputs
                    skip_transcoding = True  # Bypass transcoder
                else:
                    # Otherwise, the input is something like an mkv file with text tracks
                    # in it.  These will be extracted by the transcoder and passed in a
                    # pipe to the packager.
                    skip_transcoding = False

                outputs.append(
                    TextOutputStream(input, self._temp_dir, skip_transcoding))

        self._nodes.append(
            TranscoderNode(inputs, self._pipeline_config, outputs, index,
                           self.hermetic_ffmpeg))

        # If the inputs list was a period in multiperiod_inputs_list, create a nested directory
        # and put that period in it.
        if period_dir:
            output_location = os.path.join(output_location, period_dir)
            os.mkdir(output_location)

        self._nodes.append(
            PackagerNode(self._pipeline_config, output_location, outputs,
                         index, self.hermetic_packager))
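
A hypothetical caller sketch for the multi-period case; the loop, the 'periods' list, and the period_dir naming are assumptions for illustration, not the controller's actual code:

# Assumed: 'periods' is a List[List[Input]] parsed from a multi-period input config.
for index, period_inputs in enumerate(periods):
    # Hypothetical subdirectory name for each period's output.
    period_dir = 'period_' + str(index + 1)
    self._append_nodes_for_inputs_list(period_inputs,
                                       output_location,
                                       period_dir=period_dir,
                                       index=index)
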
    def start(self,
              output_dir: str,
              input_config_dict: Dict[str, Any],
              pipeline_config_dict: Dict[str, Any],
              bitrate_config_dict: Dict[Any, Any] = {},
              bucket_url: Union[str, None] = None,
              check_deps: bool = True) -> 'ControllerNode':
        """Create and start all other nodes.

    :raises: `RuntimeError` if the controller has already started.
    :raises: :class:`streamer.configuration.ConfigError` if the configuration is
             invalid.
    """

        if self._nodes:
            raise RuntimeError('Controller already started!')

        if check_deps:
            # Check that ffmpeg version is 4.1 or above.
            _check_version('FFmpeg', ['ffmpeg', '-version'], (4, 1))

            # Check that ffprobe version (used for autodetect features) is 4.1 or
            # above.
            _check_version('ffprobe', ['ffprobe', '-version'], (4, 1))

            # Check that Shaka Packager version is 2.4.2 or above.
            _check_version('Shaka Packager', ['packager', '-version'],
                           (2, 4, 2))

            if bucket_url:
                # Check that the Google Cloud SDK is at least v212, which introduced
                # gsutil 4.33 with an important rsync bug fix.
                # https://cloud.google.com/sdk/docs/release-notes
                # https://github.com/GoogleCloudPlatform/gsutil/blob/master/CHANGES.md
                # This is only required if the user asked for upload to cloud storage.
                _check_version('Google Cloud SDK', ['gcloud', '--version'],
                               (212, 0, 0))

        if bucket_url:
            # If using cloud storage, make sure the user is logged in and can access
            # the destination, independent of the version check above.
            CloudNode.check_access(bucket_url)

        # Define resolutions and bitrates before parsing other configs.
        bitrate_config = BitrateConfig(bitrate_config_dict)

        # Now that the definitions have been parsed, register the maps of valid
        # resolutions and channel layouts so that InputConfig and PipelineConfig
        # can be validated accordingly.
        VideoResolution.set_map(bitrate_config.video_resolutions)
        AudioChannelLayout.set_map(bitrate_config.audio_channel_layouts)

        input_config = InputConfig(input_config_dict)
        pipeline_config = PipelineConfig(pipeline_config_dict)
        self._pipeline_config = pipeline_config

        outputs: List[OutputStream] = []
        for input in input_config.inputs:
            # External command inputs need to be processed by an additional node
            # before being transcoded.  In this case, the input doesn't have a
            # filename that FFmpeg can read, so we generate an intermediate pipe for
            # that node to write to.  TranscoderNode will then instruct FFmpeg to
            # read from that pipe for this input.
            if input.input_type == InputType.EXTERNAL_COMMAND:
                command_output = self._create_pipe()
                self._nodes.append(
                    ExternalCommandNode(input.name, command_output))
                input.set_pipe(command_output)

            if input.media_type == MediaType.AUDIO:
                for audio_codec in pipeline_config.audio_codecs:
                    outputs.append(
                        AudioOutputStream(self._create_pipe(), input,
                                          audio_codec,
                                          pipeline_config.channels))

            elif input.media_type == MediaType.VIDEO:
                for video_codec in pipeline_config.video_codecs:
                    for output_resolution in pipeline_config.get_resolutions():
                        # Only going to output lower or equal resolution videos.
                        # Upscaling is costly and does not improve quality.
                        if input.get_resolution() < output_resolution:
                            continue

                        outputs.append(
                            VideoOutputStream(self._create_pipe(), input,
                                              video_codec, output_resolution))

            elif input.media_type == MediaType.TEXT:
                if input.name.endswith('.vtt') or input.name.endswith('.ttml'):
                    # If the input is a VTT or TTML file, pass it directly to the packager
                    # without any intermediate processing or any named pipe.
                    # TODO: Test TTML inputs
                    text_pipe = None  # Bypass transcoder
                else:
                    # Otherwise, the input is something like an mkv file with text tracks
                    # in it.  These will be extracted by the transcoder and passed in a
                    # pipe to the packager.
                    text_pipe = self._create_pipe('.vtt')

                outputs.append(TextOutputStream(text_pipe, input))

        self._nodes.append(
            TranscoderNode(input_config, pipeline_config, outputs))

        self._nodes.append(PackagerNode(pipeline_config, output_dir, outputs))

        if bucket_url:
            cloud_temp_dir = os.path.join(self._temp_dir, 'cloud')
            os.mkdir(cloud_temp_dir)

            self._nodes.append(
                CloudNode(output_dir, bucket_url, cloud_temp_dir,
                          self.is_vod()))

        for node in self._nodes:
            node.start()
        return self
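
A minimal usage sketch, assuming ControllerNode can be constructed without arguments; the config dictionaries below are illustrative placeholders and do not reflect the full InputConfig/PipelineConfig schema:

# Hypothetical configs; real configs carry many more fields.
controller = ControllerNode()
controller.start(
    output_dir='output_files/',
    input_config_dict={'inputs': [{'name': 'input.mp4', 'media_type': 'video'}]},
    pipeline_config_dict={'streaming_mode': 'vod', 'resolutions': ['720p']},
    check_deps=False)  # skip the FFmpeg/Packager version checks in this sketch
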