def strip(self, file):
    """
    Remove all metadata that Exiv2 can read from the given file.

    :param file: file-like object containing the image data
    :return: stream with the metadata-free image data
    :raises UnsupportedFormatError: if Exiv2 cannot read the file format
    """
    cleaned = io.BytesIO()
    with tempfile.NamedTemporaryFile() as tmp_file:
        # Exiv2 operates on named files, so spool the stream to disk first.
        tmp_file.write(file.read())
        tmp_file.flush()
        image_metadata = pyexiv2.ImageMetadata(tmp_file.name)
        try:
            image_metadata.read()
        except OSError:
            raise UnsupportedFormatError('Unknown file format.')
        if image_metadata:
            try:
                image_metadata.clear()
                image_metadata.write()
            except OSError:
                raise UnsupportedFormatError('Unknown file format.')
        # Copy the (now metadata-free) file content back into memory.
        tmp_file.seek(0)
        shutil.copyfileobj(tmp_file, cleaned)
    cleaned.seek(0)
    return cleaned
def read(self, file: IO) -> Mapping[str, Mapping]:
    """
    Read the FFmetadata tags from the given file and translate them into
    MADAM metadata keys.

    :param file: file-like object with the media data
    :return: mapping with a single ``'ffmetadata'`` entry
    :raises UnsupportedFormatError: if the file format or the metadata
        source is not supported
    """
    try:
        probe_data = _probe(file)
    except subprocess.CalledProcessError:
        raise UnsupportedFormatError('Unsupported file format.')
    decoder_and_stream_type = _get_decoder_and_stream_type(probe_data)
    mime_type = self.__decoder_and_stream_type_to_mime_type.get(
        decoder_and_stream_type)
    if not mime_type:
        raise UnsupportedFormatError('Unsupported metadata source.')
    # Collect tags from the container and from every stream; stream tags
    # override container tags of the same name.
    raw_tags = {}
    raw_tags.update(probe_data['format'].get('tags', {}))
    for stream in probe_data['streams']:
        raw_tags.update(stream.get('tags', {}))
    # Translate FFMetadata keys into MADAM metadata keys, silently
    # dropping keys without a known translation.
    key_map = self.metadata_keys_by_mime_type[mime_type].inv
    translated = {
        key_map[ff_key]: tag_value
        for ff_key, tag_value in raw_tags.items()
        if key_map.get(ff_key) is not None
    }
    return {'ffmetadata': translated}
def strip(self, file: IO) -> IO:
    """
    Remove all metadata from the given media file using FFmpeg.

    :param file: file-like object with the media data
    :return: stream with the metadata-free media data
    :raises UnsupportedFormatError: if the file format or the metadata
        source is not supported
    :raises OperatorError: if FFmpeg fails to strip the metadata
    """
    try:
        probe_data = _probe(file)
    except subprocess.CalledProcessError:
        raise UnsupportedFormatError('Unsupported file format.')
    decoder_and_stream_type = _get_decoder_and_stream_type(probe_data)
    mime_type = self.__decoder_and_stream_type_to_mime_type.get(
        decoder_and_stream_type)
    if not mime_type:
        raise UnsupportedFormatError('Unsupported metadata source.')
    result = io.BytesIO()
    with _FFmpegContext(file, result) as ctx:
        encoder_name = self.__mime_type_to_encoder[mime_type]
        # '-map_metadata -1' drops all metadata; '-codec copy' avoids
        # re-encoding the streams themselves.
        strip_command = [
            'ffmpeg', '-loglevel', 'error',
            '-i', ctx.input_path,
            '-map_metadata', '-1',
            '-codec', 'copy',
            '-y', '-f', encoder_name, ctx.output_path,
        ]
        try:
            subprocess.run(strip_command, stderr=subprocess.PIPE, check=True)
        except subprocess.CalledProcessError as ffmpeg_error:
            error_message = ffmpeg_error.stderr.decode('utf-8')
            raise OperatorError(
                f'Could not strip metadata: {error_message}')
    return result
def combine(self, essence, metadata_by_format):
    """
    Write the given metadata into a copy of the essence and return it.

    :param essence: file-like object with image data readable by Exiv2
    :param metadata_by_format: mapping from metadata format name to a
        mapping of MADAM metadata keys to values
    :return: stream with the combined image data
    :raises UnsupportedFormatError: if the essence format is unknown, a
        metadata format is unsupported, or the metadata cannot be written
    """
    result = io.BytesIO()
    # Exiv2 operates on named files, so spool the essence to disk first.
    with tempfile.NamedTemporaryFile() as tmp:
        tmp.write(essence.read())
        tmp.flush()
        exiv2_metadata = pyexiv2.ImageMetadata(tmp.name)
        try:
            exiv2_metadata.read()
        except OSError:
            raise UnsupportedFormatError('Unknown essence format.')
        for metadata_format, metadata in metadata_by_format.items():
            if metadata_format not in self.formats:
                raise UnsupportedFormatError('Metadata format %r is not supported.' % metadata_format)
            for madam_key, madam_value in metadata.items():
                exiv2_key = Exiv2MetadataProcessor.metadata_to_exiv2.get(madam_key)
                # Keys without an Exiv2 counterpart are silently skipped.
                if exiv2_key is None:
                    continue
                _, convert_to_exiv2 = Exiv2MetadataProcessor.converters[madam_key]
                exiv2_metadata[exiv2_key] = convert_to_exiv2(madam_value)
        try:
            # Persist the metadata into the temp file, then rewind it so
            # the combined bytes can be copied back into memory.
            exiv2_metadata.write()
            tmp.flush()
            tmp.seek(0)
        except OSError:
            raise UnsupportedFormatError('Could not write metadata: %r' % metadata_by_format)
        shutil.copyfileobj(tmp, result)
        result.seek(0)
        return result
def _parse_svg(file: IO) -> Tuple[ET.ElementTree, ET.Element]:
    """
    Parse the given file as an SVG document.

    :param file: file-like object with the SVG data
    :return: tuple of the parsed element tree and its root element
    :raises UnsupportedFormatError: if the XML is malformed or the root
        element is not an SVG element
    """
    _register_xml_namespaces()
    try:
        tree = ET.parse(file)
    except ET.ParseError as e:
        raise UnsupportedFormatError(
            f'Error while parsing XML in line {e.position[0]:d}, column {e.position[1]:d}'
        )
    root = tree.getroot()
    # Accept both a namespaced and a namespace-less <svg> root element.
    accepted_root_tags = ('{%s}svg' % XML_NS['svg'], 'svg')
    if root.tag not in accepted_root_tags:
        raise UnsupportedFormatError('XML file is not an SVG file.')
    return tree, root
def extract_frame(self, asset: Asset, mime_type: Union[MimeType, str], seconds: float = 0) -> Asset:
    """
    Creates a new image asset of the specified MIME type from the essence
    of the specified video asset.

    :param asset: Video asset which will serve as the source for the frame
    :type asset: Asset
    :param mime_type: MIME type of the destination image
    :type mime_type: MimeType or str
    :param seconds: Offset of the frame in seconds
    :type seconds: float
    :return: New image asset with converted essence
    :rtype: Asset
    """
    source_mime_type = MimeType(asset.mime_type)
    if source_mime_type.type != 'video':
        raise UnsupportedFormatError(
            f'Unsupported source asset type: {source_mime_type}')
    mime_type = MimeType(mime_type)
    encoder_name = self.__mime_type_to_encoder.get(mime_type)
    codec_name = self.__mime_type_to_codec.get(mime_type)
    if not encoder_name or not codec_name:
        raise UnsupportedFormatError(
            f'Unsupported target asset type: {mime_type}')
    result = io.BytesIO()
    with _FFmpegContext(asset.essence, result) as ctx:
        # Seek to the requested offset and grab exactly one frame.
        frame_command = [
            'ffmpeg', '-v', 'error',
            '-i', ctx.input_path,
            '-ss', str(float(seconds)),
            '-codec:v', codec_name,
            '-vframes', '1',
            '-f', encoder_name,
            '-y', ctx.output_path,
        ]
        try:
            subprocess.run(frame_command, stderr=subprocess.PIPE, check=True)
        except subprocess.CalledProcessError as ffmpeg_error:
            error_message = ffmpeg_error.stderr.decode('utf-8')
            raise OperatorError(
                f'Could not extract frame from asset: {error_message}')
    metadata = _combine_metadata(asset, 'width', 'height',
                                 mime_type=mime_type)
    if 'video' in asset.metadata:
        metadata['depth'] = asset.metadata['video']['depth']
    return Asset(essence=result, **metadata)
def combine(self, file: IO, metadata_by_type: Mapping[str, Mapping]) -> IO:
    """
    Write the given FFmetadata into a copy of the media file and return it.

    :param file: file-like object with the media data
    :param metadata_by_type: mapping that must contain a non-empty
        ``'ffmetadata'`` entry with MADAM metadata keys and values
    :return: stream with the combined media data
    :raises UnsupportedFormatError: if the file format or metadata type
        is not supported
    :raises ValueError: if no metadata is provided or a metadata key has
        no FFmetadata translation
    :raises OperatorError: if FFmpeg fails to add the metadata
    """
    try:
        probe_data = _probe(file)
    except subprocess.CalledProcessError:
        raise UnsupportedFormatError('Unsupported file format.')
    decoder_and_stream_type = _get_decoder_and_stream_type(probe_data)
    mime_type = self.__decoder_and_stream_type_to_mime_type.get(
        decoder_and_stream_type)
    if not mime_type:
        raise UnsupportedFormatError('Unsupported metadata source.')
    # Validate provided metadata
    if not metadata_by_type:
        raise ValueError('No metadata provided')
    if 'ffmetadata' not in metadata_by_type:
        raise UnsupportedFormatError(
            f'Invalid metadata to be combined with essence: {metadata_by_type.keys()!r}'
        )
    if not metadata_by_type['ffmetadata']:
        raise ValueError('No metadata provided')
    # Add metadata to file
    result = io.BytesIO()
    with _FFmpegContext(file, result) as ctx:
        encoder_name = self.__mime_type_to_encoder[mime_type]
        command = [
            'ffmpeg', '-loglevel', 'error',
            '-f', encoder_name, '-i', ctx.input_path
        ]
        ffmetadata = metadata_by_type['ffmetadata']
        metadata_keys = self.metadata_keys_by_mime_type[mime_type]
        # Translate each MADAM key to its FFmetadata name and pass it as a
        # '-metadata key=value' pair; unknown keys are an error here
        # (unlike read(), which silently skips unknown tags).
        for metadata_key, value in ffmetadata.items():
            ffmetadata_key = metadata_keys.get(metadata_key)
            if ffmetadata_key is None:
                raise ValueError(
                    f'Unsupported metadata key: {metadata_key!r}')
            command.append('-metadata')
            command.append(f'{ffmetadata_key}={value}')
        # '-codec copy' keeps the streams untouched; only metadata changes.
        command.extend(
            ['-codec', 'copy', '-y', '-f', encoder_name, ctx.output_path])
        try:
            subprocess.run(command, stderr=subprocess.PIPE, check=True)
        except subprocess.CalledProcessError as ffmpeg_error:
            error_message = ffmpeg_error.stderr.decode('utf-8')
            raise OperatorError(f'Could not add metadata: {error_message}')
    return result
def read(self, file: IO) -> Mapping[str, Mapping]:
    """
    Read Exif metadata from the given file and translate it into MADAM
    metadata keys, grouped by metadata format.

    :param file: file-like object with the image data
    :return: mapping from metadata format name to translated metadata
    :raises UnsupportedFormatError: if piexif cannot parse the file
    """
    # piexif loads from named files, so spool the stream to disk first.
    with tempfile.NamedTemporaryFile(mode='wb') as tmp:
        tmp.write(file.read())
        tmp.flush()
        try:
            metadata = piexif.load(tmp.name)
        except (piexif.InvalidImageDataError, ValueError):
            raise UnsupportedFormatError('Unsupported file format.')
    metadata_by_format = {}
    for metadata_format in self.formats:
        format_metadata = {}
        for ifd_key, ifd_values in metadata.items():
            # Skip non-IFD entries (e.g. raw thumbnail bytes).
            if not isinstance(ifd_values, dict):
                continue
            for exif_key, exif_value in ifd_values.items():
                madam_key = ExifMetadataProcessor.metadata_to_exif.inv.get((ifd_key, exif_key))
                if madam_key is None:
                    continue
                convert_to_madam, _ = ExifMetadataProcessor.converters[madam_key]
                format_metadata[madam_key] = convert_to_madam(exif_value)
        if format_metadata:
            metadata_by_format[metadata_format] = format_metadata
    return metadata_by_format
def __parse(file):
    """
    Parse the given file as XML.

    :param file: file-like object with the SVG data
    :return: tuple of the parsed tree, its root element, and the SVG
        metadata element (or ``None`` if the document has none)
    :raises UnsupportedFormatError: if the XML is malformed
    """
    try:
        parsed_tree = ET.parse(file)
    except ET.ParseError as parse_error:
        raise UnsupportedFormatError('Error while parsing XML: %s' % parse_error)
    svg_root = parsed_tree.getroot()
    metadata_element = svg_root.find('./svg:metadata', XML_NS)
    return parsed_tree, svg_root, metadata_element
def read(self, file: IO) -> Asset:
    """
    Probe the given media file with ffprobe and return an Asset that
    describes it.

    :param file: file-like object with the media data
    :return: Asset whose metadata (MIME type, duration, per-stream codec,
        bitrate, color mode, and frame dimensions) is derived from the
        ffprobe output
    :raises UnsupportedFormatError: if the file format is not supported
    """
    try:
        probe_data = _probe(file)
    except subprocess.CalledProcessError:
        raise UnsupportedFormatError('Unsupported file format.')
    decoder_and_stream_type = _get_decoder_and_stream_type(probe_data)
    mime_type = self.__decoder_and_stream_type_to_mime_type.get(
        decoder_and_stream_type)
    if not mime_type:
        raise UnsupportedFormatError('Unsupported metadata source.')
    metadata: Dict[str, Any] = dict(mime_type=str(mime_type), )
    if 'duration' in probe_data['format']:
        metadata['duration'] = float(probe_data['format']['duration'])
    for stream in probe_data['streams']:
        stream_type = stream.get('codec_type')
        if stream_type in {'video', 'audio', 'subtitle'}:
            # Only use first stream
            # NOTE(review): 'break' aborts the whole loop on the first
            # repeated stream type; 'continue' would merely skip the
            # extra stream — confirm this is intended.
            if stream_type in metadata:
                break
            metadata[stream_type] = {}
        # Frame dimensions are tracked at the top level, taking the
        # maximum across all streams that report them.
        if 'width' in stream:
            metadata['width'] = max(stream['width'],
                                    metadata.get('width', 0))
        if 'height' in stream:
            metadata['height'] = max(stream['height'],
                                     metadata.get('height', 0))
        # Streams of unrecognized types carry no per-stream metadata.
        if stream_type not in metadata:
            continue
        if 'codec_name' in stream:
            metadata[stream_type]['codec'] = stream['codec_name']
        if 'bit_rate' in stream:
            # ffprobe reports bit/s; MADAM metadata uses kBit/s.
            metadata[stream_type]['bitrate'] = float(
                stream['bit_rate']) / 1000.0
        if 'pix_fmt' in stream:
            # NOTE(review): raises KeyError for pixel formats missing from
            # the lookup table — presumably only supported formats reach
            # this point; confirm.
            color_space, depth, data_type = FFmpegProcessor.__ffmpeg_pix_fmt_to_color_mode[
                stream['pix_fmt']]
            metadata[stream_type]['color_space'] = color_space
            metadata[stream_type]['depth'] = depth
            metadata[stream_type]['data_type'] = data_type
    return Asset(essence=file, **metadata)
def rotate(self, asset, angle, expand=False):
    """
    Creates an asset whose essence is rotated by the specified angle in
    degrees.

    :param asset: Asset whose contents will be rotated
    :type asset: Asset
    :param angle: Angle in degrees, counter clockwise
    :type angle: float
    :param expand: If true, changes the dimensions of the new asset so it
        can hold the entire rotated essence, otherwise the dimensions of
        the original asset will be used.
    :type expand: bool
    :return: New asset with rotated essence
    :rtype: Asset
    :raises UnsupportedFormatError: if the asset is not a supported video
    :raises OperatorError: if FFmpeg fails
    """
    mime_type = MimeType(asset.mime_type)
    encoder_name = self.__mime_type_to_encoder.get(mime_type)
    if not encoder_name or mime_type.type != 'video':
        raise UnsupportedFormatError('Unsupported source asset type: %s' % mime_type)
    # Full turns are a no-op; return the asset unchanged.
    if angle % 360.0 == 0.0:
        return asset
    angle_rad = radians(angle)
    width = asset.width
    height = asset.height
    if expand:
        # Compute the bounding box of the rotated frame. The angle is
        # normalized to [0, pi/2) by swapping width and height for the
        # "sideways" quadrants.
        if angle % 180 < 90:
            width_ = asset.width
            height_ = asset.height
            angle_rad_ = angle_rad % pi
        else:
            width_ = asset.height
            height_ = asset.width
            angle_rad_ = angle_rad % pi - pi/2
        cos_a = cos(angle_rad_)
        sin_a = sin(angle_rad_)
        # Round before ceil to avoid floating-point noise inflating the size.
        width = ceil(round(width_ * cos_a + height_ * sin_a, 7))
        height = ceil(round(width_ * sin_a + height_ * cos_a, 7))
    result = io.BytesIO()
    with _FFmpegContext(asset.essence, result) as ctx:
        # The rotate filter requires re-encoding the video stream, so only
        # the audio streams are stream-copied. The previous command used
        # '-codec copy' (stream copy conflicts with filtering), passed the
        # filter through the invalid option '-f:v' instead of '-filter:v',
        # and had a stray ')' at the end of the filter expression.
        command = ['ffmpeg', '-v', 'error',
                   '-i', ctx.input_path,
                   '-filter:v', 'rotate=a=%(a)f:ow=%(w)d:oh=%(h)d' % dict(a=angle_rad, w=width, h=height),
                   '-codec:a', 'copy',
                   '-f', encoder_name, '-y', ctx.output_path]
        try:
            subprocess_run(command, stderr=subprocess.PIPE, check=True)
        except CalledProcessError as ffmpeg_error:
            error_message = ffmpeg_error.stderr.decode('utf-8')
            raise OperatorError('Could not convert video asset: %s' % error_message)
    return Asset(essence=result, mime_type=mime_type, width=width, height=height)
def resize(self, asset: Asset, width: int, height: int) -> Asset:
    """
    Creates a new image or video asset of the specified width and height
    from the essence of the specified image or video asset.

    Width and height must be positive numbers.

    :param asset: Video asset that will serve as the source for the frame
    :type asset: Asset
    :param width: Width of the resized asset
    :type width: int
    :param height: Height of the resized asset
    :type height: int
    :return: New asset with specified width and height
    :rtype: Asset
    :raises ValueError: if width or height is smaller than 1
    :raises UnsupportedFormatError: if the asset type is not supported
    :raises OperatorError: if the asset is not an image/video or FFmpeg fails
    """
    if width < 1 or height < 1:
        raise ValueError(f'Invalid dimensions: {width:d}x{height:d}')
    mime_type = MimeType(asset.mime_type)
    encoder_name = self.__mime_type_to_encoder.get(mime_type)
    if not encoder_name:
        raise UnsupportedFormatError(
            f'Unsupported asset type: {mime_type}')
    if mime_type.type not in ('image', 'video'):
        raise OperatorError(f'Cannot resize asset of type {mime_type}')
    result = io.BytesIO()
    with _FFmpegContext(asset.essence, result) as ctx:
        # NOTE: _FFmpegContext already materializes asset.essence at
        # ctx.input_path (every other operator in this class relies on
        # that). The previous implementation copied the stream into
        # ctx.input_path a second time here; since the stream had already
        # been consumed, that copy truncated the input file.
        command = [
            'ffmpeg', '-loglevel', 'error',
            '-f', encoder_name, '-i', ctx.input_path,
            '-filter:v', f'scale={width:d}:{height:d}',
            '-qscale', '0',
            '-threads', str(self.__threads),
            '-f', encoder_name, '-y', ctx.output_path
        ]
        try:
            subprocess.run(command, stderr=subprocess.PIPE, check=True)
        except subprocess.CalledProcessError as ffmpeg_error:
            error_message = ffmpeg_error.stderr.decode('utf-8')
            raise OperatorError(f'Could not resize asset: {error_message}')
    metadata = _combine_metadata(asset, 'mime_type', 'duration', 'video',
                                 'audio', 'subtitle',
                                 width=width, height=height)
    return Asset(essence=result, **metadata)
def trim(self, asset: Asset, from_seconds: float = 0, to_seconds: float = 0) -> Asset:
    """
    Creates a trimmed audio or video asset that only contains the data
    between from_seconds and to_seconds.

    :param asset: Audio or video asset, which will serve as the source
    :type asset: Asset
    :param from_seconds: Start time of the clip in seconds
    :type from_seconds: float
    :param to_seconds: End time of the clip in seconds
    :type to_seconds: float
    :return: New asset with trimmed essence
    :rtype: Asset
    """
    mime_type = MimeType(asset.mime_type)
    encoder_name = self.__mime_type_to_encoder.get(mime_type)
    if not encoder_name or mime_type.type not in ('audio', 'video'):
        raise UnsupportedFormatError(
            f'Unsupported source asset type: {mime_type}')
    # A non-positive end time is interpreted relative to the asset's end.
    if to_seconds <= 0:
        to_seconds = asset.duration + to_seconds
    duration = float(to_seconds) - float(from_seconds)
    if duration <= 0:
        raise ValueError('Start time must be before end time')
    result = io.BytesIO()
    with _FFmpegContext(asset.essence, result) as ctx:
        # Streams are copied unchanged; only the time range is cut.
        trim_command = [
            'ffmpeg', '-v', 'error',
            '-ss', str(float(from_seconds)),
            '-t', str(duration),
            '-i', ctx.input_path,
            '-codec', 'copy',
            '-f', encoder_name,
            '-y', ctx.output_path,
        ]
        try:
            subprocess.run(trim_command, stderr=subprocess.PIPE, check=True)
        except subprocess.CalledProcessError as ffmpeg_error:
            error_message = ffmpeg_error.stderr.decode('utf-8')
            raise OperatorError(f'Could not trim asset: {error_message}')
    metadata = _combine_metadata(asset, 'mime_type', 'width', 'height',
                                 'video', 'audio', 'subtitle',
                                 duration=duration)
    return Asset(essence=result, **metadata)
def crop(self, asset, x, y, width, height):
    """
    Creates a cropped video asset whose essence is cropped to the
    specified rectangular area.

    :param asset: Video asset whose contents will be cropped
    :type asset: Asset
    :param x: Horizontal offset of the cropping area from left
    :type x: int
    :param y: Vertical offset of the cropping area from top
    :type y: int
    :param width: Width of the cropping area
    :type width: int
    :param height: Height of the cropping area
    :type height: int
    :return: New asset with cropped essence
    :rtype: Asset
    :raises UnsupportedFormatError: if the asset is not a supported video
    :raises OperatorError: if the cropping area is invalid or FFmpeg fails
    """
    mime_type = MimeType(asset.mime_type)
    encoder_name = self.__mime_type_to_encoder.get(mime_type)
    if not encoder_name or mime_type.type != 'video':
        raise UnsupportedFormatError('Unsupported source asset type: %s' % mime_type)
    # Cropping to the full frame is a no-op.
    if x == 0 and y == 0 and width == asset.width and height == asset.height:
        return asset
    # Clamp the requested rectangle to the frame boundaries.
    max_x = max(0, min(asset.width, width + x))
    max_y = max(0, min(asset.height, height + y))
    min_x = max(0, min(asset.width, x))
    min_y = max(0, min(asset.height, y))
    if min_x == asset.width or min_y == asset.height or max_x <= min_x or max_y <= min_y:
        raise OperatorError('Invalid cropping area: <x=%r, y=%r, width=%r, height=%r>' % (x, y, width, height))
    width = max_x - min_x
    height = max_y - min_y
    result = io.BytesIO()
    with _FFmpegContext(asset.essence, result) as ctx:
        # The crop filter requires re-encoding the video stream, so only
        # the audio streams are stream-copied. The previous command used
        # '-codec copy' (stream copy conflicts with filtering), passed the
        # filter through the invalid option '-f:v' instead of '-filter:v',
        # and fed the unclamped x/y offsets into the filter even though
        # min_x/min_y had been computed above.
        command = ['ffmpeg', '-v', 'error',
                   '-i', ctx.input_path,
                   '-filter:v', 'crop=w=%d:h=%d:x=%d:y=%d' % (width, height, min_x, min_y),
                   '-codec:a', 'copy',
                   '-f', encoder_name, '-y', ctx.output_path]
        try:
            subprocess_run(command, stderr=subprocess.PIPE, check=True)
        except CalledProcessError as ffmpeg_error:
            error_message = ffmpeg_error.stderr.decode('utf-8')
            raise OperatorError('Could not convert video asset: %s' % error_message)
    return Asset(essence=result, mime_type=mime_type, width=width, height=height)
def read(self, file):
    """
    Read metadata from the given file via Exiv2 and translate it into
    MADAM metadata keys, grouped by metadata format.

    :param file: file-like object with the image data
    :return: mapping from metadata format name to translated metadata
    :raises UnsupportedFormatError: if the file format is unknown or
        unsupported
    """
    # Exiv2 operates on named files, so spool the stream to disk first.
    with tempfile.NamedTemporaryFile() as tmp:
        tmp.write(file.read())
        tmp.flush()
        metadata = pyexiv2.ImageMetadata(tmp.name)
        try:
            metadata.read()
        except OSError:
            raise UnsupportedFormatError('Unknown file format.')
        if MimeType(metadata.mime_type) not in Exiv2MetadataProcessor.supported_mime_types:
            raise UnsupportedFormatError('Unsupported format: %s' % metadata.mime_type)
        metadata_by_format = {}
        for metadata_format in self.formats:
            format_metadata = {}
            # Keys for each format are exposed by pyexiv2 as attributes,
            # e.g. 'exif_keys' or 'iptc_keys'.
            for exiv2_key in getattr(metadata, metadata_format + '_keys'):
                madam_key = Exiv2MetadataProcessor.metadata_to_exiv2.inv.get(exiv2_key)
                if madam_key is None:
                    continue
                convert_to_madam, _ = Exiv2MetadataProcessor.converters[madam_key]
                format_metadata[madam_key] = convert_to_madam(metadata[exiv2_key].value)
            if format_metadata:
                metadata_by_format[metadata_format] = format_metadata
        return metadata_by_format
def read(self, file):
    """
    Read an SVG file and return an Asset describing its dimensions.

    :param file: file-like object with the SVG data
    :return: Asset with MIME type and, if present, width and height
    :raises UnsupportedFormatError: if the XML is malformed
    """
    try:
        tree = ET.parse(file)
    except ET.ParseError as e:
        raise UnsupportedFormatError(
            'Error while parsing XML in line %d, column %d' % e.position)
    root = tree.getroot()
    metadata = dict(mime_type='image/svg+xml')
    # Width and height attributes are optional in SVG documents.
    width_attr = root.get('width')
    if width_attr is not None:
        metadata['width'] = svg_length_to_px(width_attr)
    height_attr = root.get('height')
    if height_attr is not None:
        metadata['height'] = svg_length_to_px(height_attr)
    # Rewind so the essence can be read again by the Asset consumer.
    file.seek(0)
    return Asset(essence=file, **metadata)
def combine(self, file: IO, metadata: Mapping[str, Mapping]) -> IO:
    """
    Insert the given RDF metadata into the SVG document in the file.

    :param file: file-like object with the SVG data
    :param metadata: mapping that must contain an ``'rdf'`` entry whose
        ``'xml'`` value is the RDF/XML string to embed
    :return: stream with the combined SVG data
    :raises ValueError: if no metadata or no XML string is provided
    :raises UnsupportedFormatError: if no RDF metadata is present
    """
    if not metadata:
        raise ValueError('No metadata provided.')
    if 'rdf' not in metadata:
        raise UnsupportedFormatError('No RDF metadata found.')
    rdf = metadata['rdf']
    if 'xml' not in rdf:
        raise ValueError('XML string missing from RDF metadata.')
    tree, root = _parse_svg(file)
    # Reuse the document's <metadata> element, or create one below the root.
    metadata_elem = root.find('./svg:metadata', XML_NS)
    if metadata_elem is None:
        metadata_elem = ET.SubElement(root, '{%(svg)s}metadata' % XML_NS)
    metadata_elem.append(ET.fromstring(rdf['xml']))
    return _write_svg(tree)
def strip(self, file: IO) -> IO:
    """
    Remove Exif metadata from the given file and return the cleaned data.

    :param file: file-like object with the image data
    :return: stream with the metadata-free image data
    :raises UnsupportedFormatError: if piexif cannot parse the file
    """
    result = io.BytesIO()
    # piexif operates on named files, so spool the stream to disk first.
    with tempfile.NamedTemporaryFile(mode='w+b') as tmp:
        tmp.write(file.read())
        tmp.flush()
        try:
            metadata = piexif.load(tmp.name)
            # Only rewrite the file when there is actually metadata to
            # remove. (The previous implementation also did a pointless
            # 'open(tmp.name, "rb").read()' here, which leaked a file
            # handle and discarded the data.)
            if any(metadata.values()):
                piexif.remove(tmp.name)
        except (piexif.InvalidImageDataError, ValueError, UnboundLocalError):
            # UnboundLocalError presumably works around a piexif bug on
            # malformed input — TODO confirm against piexif issue tracker.
            raise UnsupportedFormatError('Unsupported file format.')
        tmp.seek(0)
        shutil.copyfileobj(tmp, result)
    result.seek(0)
    return result
def combine(self, file, metadata):
    """
    Insert the given RDF metadata into the SVG document in the file.

    :param file: file-like object with the SVG data
    :param metadata: mapping that must contain an ``'rdf'`` entry whose
        ``'xml'`` value is the RDF/XML string to embed
    :return: stream with the combined SVG data
    :raises ValueError: if no metadata or no XML string is provided
    :raises UnsupportedFormatError: if no RDF metadata is present
    """
    if not metadata:
        raise ValueError('No metadata provided.')
    if 'rdf' not in metadata:
        raise UnsupportedFormatError('No RDF metadata found.')
    rdf = metadata['rdf']
    if 'xml' not in rdf:
        raise ValueError('XML string missing from RDF metadata.')
    tree, root, metadata_elem = SVGMetadataProcessor.__parse(file)
    # Reuse the document's <metadata> element, or create one below the root.
    if metadata_elem is None:
        metadata_elem = ET.SubElement(root, '{%(svg)s}metadata' % XML_NS)
    metadata_elem.append(ET.fromstring(rdf['xml']))
    # Register namespaces so serialization keeps the expected prefixes.
    SVGMetadataProcessor.__register_xml_namespaces()
    serialized = io.BytesIO()
    tree.write(serialized, xml_declaration=True, encoding='utf-8')
    serialized.seek(0)
    return serialized
def convert(self, asset, mime_type, video=None, audio=None, subtitle=None):
    """
    Creates a new asset of the specified MIME type from the essence of the
    specified asset.

    Additional options can be specified for video, audio, and subtitle streams.
    Options are passed as dictionary instances and can contain various keys for
    each stream type.

    **Options for video streams:**

    - **codec** – Processor-specific name of the video codec as string
    - **bitrate** – Target bitrate in kBit/s as float number

    **Options for audio streams:**

    - **codec** – Processor-specific name of the audio codec as string
    - **bitrate** – Target bitrate in kBit/s as float number

    **Options for subtitle streams:**

    - **codec** – Processor-specific name of the subtitle format as string

    :param asset: Asset whose contents will be converted
    :type asset: Asset
    :param mime_type: MIME type of the video container
    :type mime_type: MimeType or str
    :param video: Dictionary with options for video streams.
    :type video: dict or None
    :param audio: Dictionary with options for audio streams.
    :type audio: dict or None
    :param subtitle: Dictionary with the options for subtitle streams.
    :type subtitle: dict or None
    :return: New asset with converted essence
    :rtype: Asset
    """
    mime_type = MimeType(mime_type)
    encoder_name = self.__mime_type_to_encoder.get(mime_type)
    if not encoder_name:
        raise UnsupportedFormatError('Unsupported asset type: %s' % mime_type)
    result = io.BytesIO()
    with _FFmpegContext(asset.essence, result) as ctx:
        command = ['ffmpeg', '-loglevel', 'error', '-i', ctx.input_path]
        if video:
            if 'codec' in video:
                if video['codec']:
                    command.extend(['-c:v', video['codec']])
                    # Append processor-wide default options for this codec.
                    codec_options = FFmpegProcessor.__codec_options.get('video', {})
                    command.extend(codec_options.get(video['codec'], []))
                else:
                    # A falsy codec value disables video streams entirely.
                    command.extend(['-vn'])
            if video.get('bitrate'):
                # Set minimum at 50% of bitrate and maximum at 145% of bitrate
                # (see https://developers.google.com/media/vp9/settings/vod/)
                command.extend(['-minrate', '%dk' % round(0.5*video['bitrate']),
                                '-b:v', '%dk' % video['bitrate'],
                                '-maxrate', '%dk' % round(1.45*video['bitrate'])])
        if audio:
            if 'codec' in audio:
                if audio['codec']:
                    command.extend(['-c:a', audio['codec']])
                    codec_options = FFmpegProcessor.__codec_options.get('audio', {})
                    command.extend(codec_options.get(audio['codec'], []))
                else:
                    # A falsy codec value disables audio streams entirely.
                    command.extend(['-an'])
            if audio.get('bitrate'):
                command.extend(['-b:a', '%dk' % audio['bitrate']])
        if subtitle:
            if 'codec' in subtitle:
                if subtitle['codec']:
                    command.extend(['-c:s', subtitle['codec']])
                    codec_options = FFmpegProcessor.__codec_options.get('subtitles', {})
                    command.extend(codec_options.get(subtitle['codec'], []))
                else:
                    # A falsy codec value disables subtitle streams entirely.
                    command.extend(['-sn'])
        # Container-specific options come last, before the output file.
        container_options = FFmpegProcessor.__container_options.get(mime_type, [])
        command.extend(container_options)
        command.extend(['-threads', str(self.__threads), '-f', encoder_name, '-y', ctx.output_path])
        try:
            subprocess_run(command, stderr=subprocess.PIPE, check=True)
        except CalledProcessError as ffmpeg_error:
            error_message = ffmpeg_error.stderr.decode('utf-8')
            raise OperatorError('Could not convert video asset: %s' % error_message)
    # Carry over the dimensions/duration that apply to the target type.
    metadata = {
        'mime_type': str(mime_type)
    }
    if mime_type.type in ('image', 'video'):
        metadata['width'] = asset.width
        metadata['height'] = asset.height
    if mime_type.type in ('audio', 'video'):
        metadata['duration'] = asset.duration
    return Asset(essence=result, **metadata)
def convert(self,
            asset: Asset,
            mime_type: Union[MimeType, str],
            video: Optional[Mapping[str, Any]] = None,
            audio: Optional[Mapping[str, Any]] = None,
            subtitle: Optional[Mapping[str, Any]] = None) -> Asset:
    """
    Creates a new asset of the specified MIME type from the essence of the
    specified asset.

    Additional options can be specified for video, audio, and subtitle streams.
    Options are passed as dictionary instances and can contain various keys for
    each stream type.

    **Options for video streams:**

    - **codec** – Processor-specific name of the video codec as string
    - **bitrate** – Target bitrate in kBit/s as float number

    **Options for audio streams:**

    - **codec** – Processor-specific name of the audio codec as string
    - **bitrate** – Target bitrate in kBit/s as float number

    **Options for subtitle streams:**

    - **codec** – Processor-specific name of the subtitle format as string

    :param asset: Asset whose contents will be converted
    :type asset: Asset
    :param mime_type: MIME type of the video container
    :type mime_type: MimeType or str
    :param video: Dictionary with options for video streams.
    :type video: dict or None
    :param audio: Dictionary with options for audio streams.
    :type audio: dict or None
    :param subtitle: Dictionary with the options for subtitle streams.
    :type subtitle: dict or None
    :return: New asset with converted essence
    :rtype: Asset
    """
    mime_type = MimeType(mime_type)
    encoder_name = self.__mime_type_to_encoder.get(mime_type)
    if not encoder_name:
        raise UnsupportedFormatError(
            f'Unsupported asset type: {mime_type}')
    result = io.BytesIO()
    with _FFmpegContext(asset.essence, result) as ctx:
        command = ['ffmpeg', '-loglevel', 'error', '-i', ctx.input_path]
        format_config = dict(self.config.get(mime_type.type, {}))
        if mime_type.type == 'video':
            keyframe_interval = int(
                format_config.get('keyframe_interval', 100))
            command.extend(['-g', str(keyframe_interval)])
        if video:
            if 'codec' in video:
                if video['codec']:
                    command.extend(['-c:v', video['codec']])
                    # Default codec options, possibly overridden by the
                    # per-codec configuration ('crf').
                    codec_options = dict(
                        FFmpegProcessor.__codec_options.get(
                            'video', {}).get(video['codec'], []))
                    codec_config = self.config.get(
                        f'codec/{video["codec"]}', {})
                    if 'crf' in codec_config:
                        codec_options['crf'] = int(codec_config['crf'])
                    command.extend(_param_map_to_seq(codec_options))
                else:
                    # A falsy codec value disables video streams entirely.
                    command.extend(['-vn'])
            if video.get('bitrate'):
                # Set minimum at 50% of bitrate and maximum at 145% of bitrate
                # (see https://developers.google.com/media/vp9/settings/vod/)
                command.extend([
                    '-minrate', f'{round(0.5 * video["bitrate"]):d}k',
                    '-b:v', f'{video["bitrate"]:d}k',
                    '-maxrate', f'{round(1.45 * video["bitrate"]):d}k',
                ])
            if video.get('color_space') or video.get('depth') or video.get(
                    'data_type'):
                # Fall back to the source asset's color mode for any
                # component that is not explicitly requested.
                color_mode = (
                    video.get('color_space',
                              asset.video.get('color_space', 'YUV')),
                    video.get('depth', asset.video.get('depth', 8)),
                    video.get('data_type',
                              asset.video.get('data_type', 'uint')),
                )
                ffmpeg_pix_fmt = FFmpegProcessor.__color_mode_to_ffmpeg_pix_fmt.get(
                    color_mode)
                if ffmpeg_pix_fmt:
                    command.extend(['-pix_fmt', ffmpeg_pix_fmt])
        if audio:
            if 'codec' in audio:
                if audio['codec']:
                    command.extend(['-c:a', audio['codec']])
                    codec_options = FFmpegProcessor.__codec_options.get(
                        'audio', {}).get(audio['codec'], [])
                    command.extend(codec_options)
                else:
                    # A falsy codec value disables audio streams entirely.
                    command.extend(['-an'])
            if audio.get('bitrate'):
                command.extend(['-b:a', f'{audio["bitrate"]:d}k'])
        if subtitle:
            if 'codec' in subtitle:
                if subtitle['codec']:
                    command.extend(['-c:s', subtitle['codec']])
                    codec_options = FFmpegProcessor.__codec_options.get(
                        'subtitles', {})
                    command.extend(codec_options.get(
                        subtitle['codec'], []))
                else:
                    # A falsy codec value disables subtitle streams entirely.
                    command.extend(['-sn'])
        # Copy the class-level option list before mutating it: the previous
        # code extended the shared list in place, so repeated QuickTime
        # conversions accumulated duplicate '-movflags +faststart' options.
        container_options = list(
            FFmpegProcessor.__container_options.get(mime_type, []))
        container_config = self.config.get(str(mime_type), {})
        if mime_type == 'video/quicktime':
            use_faststart = container_config.get('faststart', True)
            if use_faststart:
                container_options.extend(['-movflags', '+faststart'])
        command.extend(container_options)
        command.extend([
            '-threads', str(self.__threads),
            '-f', encoder_name, '-y', ctx.output_path
        ])
        try:
            subprocess.run(command, stderr=subprocess.PIPE, check=True)
        except subprocess.CalledProcessError as ffmpeg_error:
            error_message = ffmpeg_error.stderr.decode('utf-8')
            raise OperatorError(
                f'Could not convert asset: {error_message}')
    # Re-probe the result so the returned asset carries accurate metadata.
    return self.read(result)
def rotate(self, asset: Asset, angle: float, expand: bool = False) -> Asset:
    """
    Creates an asset whose essence is rotated by the specified angle in
    degrees.

    :param asset: Asset whose contents will be rotated
    :type asset: Asset
    :param angle: Angle in degrees, counter clockwise
    :type angle: float
    :param expand: If true, changes the dimensions of the new asset so it
        can hold the entire rotated essence, otherwise the dimensions of
        the original asset will be used.
    :type expand: bool
    :return: New asset with rotated essence
    :rtype: Asset
    :raises UnsupportedFormatError: if the asset is not a supported video
    :raises OperatorError: if FFmpeg fails
    """
    mime_type = MimeType(asset.mime_type)
    encoder_name = self.__mime_type_to_encoder.get(mime_type)
    if not encoder_name or mime_type.type != 'video':
        raise UnsupportedFormatError(
            f'Unsupported source asset type: {mime_type}')
    # Full turns are a no-op; return the asset unchanged.
    if angle % 360.0 == 0.0:
        return asset
    angle_rad = radians(angle)
    width = asset.width
    height = asset.height
    if expand:
        # Compute the bounding box of the rotated frame. The angle is
        # normalized by swapping width and height for the "sideways"
        # quadrants.
        if angle % 180 < 90:
            width_ = asset.width
            height_ = asset.height
            angle_rad_ = angle_rad % pi
        else:
            width_ = asset.height
            height_ = asset.width
            angle_rad_ = angle_rad % pi - pi / 2
        cos_a = cos(angle_rad_)
        sin_a = sin(angle_rad_)
        # Round before ceil to avoid floating-point noise inflating the size.
        width = ceil(round(width_ * cos_a + height_ * sin_a, 7))
        height = ceil(round(width_ * sin_a + height_ * cos_a, 7))
    result = io.BytesIO()
    with _FFmpegContext(asset.essence, result) as ctx:
        # The rotate filter requires re-encoding the video stream, so only
        # the audio streams are stream-copied. The previous command used
        # '-codec copy' (stream copy conflicts with filtering), passed the
        # filter through the invalid option '-f:v' instead of '-filter:v',
        # and had a stray ')' at the end of the filter expression.
        command = [
            'ffmpeg', '-v', 'error',
            '-i', ctx.input_path,
            '-filter:v', f'rotate=a={angle_rad:f}:ow={width:d}:oh={height:d}',
            '-codec:a', 'copy',
            '-f', encoder_name, '-y', ctx.output_path
        ]
        try:
            subprocess.run(command, stderr=subprocess.PIPE, check=True)
        except subprocess.CalledProcessError as ffmpeg_error:
            error_message = ffmpeg_error.stderr.decode('utf-8')
            raise OperatorError(f'Could not rotate asset: {error_message}')
    metadata = _combine_metadata(asset, 'mime_type', 'duration', 'video',
                                 'audio', 'subtitle',
                                 width=width, height=height)
    return Asset(essence=result, **metadata)
def crop(self, asset: Asset, x: int, y: int, width: int, height: int) -> Asset:
    """
    Creates a cropped video asset whose essence is cropped to the
    specified rectangular area.

    :param asset: Video asset whose contents will be cropped
    :type asset: Asset
    :param x: Horizontal offset of the cropping area from left
    :type x: int
    :param y: Vertical offset of the cropping area from top
    :type y: int
    :param width: Width of the cropping area
    :type width: int
    :param height: Height of the cropping area
    :type height: int
    :return: New asset with cropped essence
    :rtype: Asset
    :raises UnsupportedFormatError: if the asset is not a supported video
    :raises OperatorError: if the cropping area is invalid or FFmpeg fails
    """
    mime_type = MimeType(asset.mime_type)
    encoder_name = self.__mime_type_to_encoder.get(mime_type)
    if not encoder_name or mime_type.type != 'video':
        raise UnsupportedFormatError(
            f'Unsupported source asset type: {mime_type}')
    # Cropping to the full frame is a no-op.
    if x == 0 and y == 0 and width == asset.width and height == asset.height:
        return asset
    # Clamp the requested rectangle to the frame boundaries.
    max_x = max(0, min(asset.width, width + x))
    max_y = max(0, min(asset.height, height + y))
    min_x = max(0, min(asset.width, x))
    min_y = max(0, min(asset.height, y))
    if min_x == asset.width or min_y == asset.height or max_x <= min_x or max_y <= min_y:
        raise OperatorError(
            f'Invalid cropping area: <x={x!r}, y={y!r}, width={width!r}, height={height!r}>'
        )
    width = max_x - min_x
    height = max_y - min_y
    result = io.BytesIO()
    with _FFmpegContext(asset.essence, result) as ctx:
        # The crop filter requires re-encoding the video stream, so only
        # the audio streams are stream-copied. The previous command used
        # '-codec copy' (stream copy conflicts with filtering), passed the
        # filter through the invalid option '-f:v' instead of '-filter:v',
        # and fed the unclamped x/y offsets into the filter even though
        # min_x/min_y had been computed above.
        command = [
            'ffmpeg', '-v', 'error',
            '-i', ctx.input_path,
            '-filter:v', f'crop=w={width:d}:h={height:d}:x={min_x:d}:y={min_y:d}',
            '-codec:a', 'copy',
            '-f', encoder_name, '-y', ctx.output_path
        ]
        try:
            subprocess.run(command, stderr=subprocess.PIPE, check=True)
        except subprocess.CalledProcessError as ffmpeg_error:
            error_message = ffmpeg_error.stderr.decode('utf-8')
            raise OperatorError(f'Could not crop asset: {error_message}')
    metadata = _combine_metadata(asset, 'mime_type', 'duration', 'video',
                                 'audio', 'subtitle',
                                 width=width, height=height)
    return Asset(essence=result, **metadata)