def generate_medium(self, size=None):
    if not size:
        size = (mgg.global_config['media:medium']['max_width'],
                mgg.global_config['media:medium']['max_height'])

    if self._skip_processing('medium', size=size):
        return

    # Note: pdftocairo adds '.png', so don't include an ext
    filename = os.path.join(self.workbench.dir,
                            self.name_builder.fill('{basename}.medium'))

    executable = where('pdftocairo')
    args = [executable, '-scale-to', str(min(size)),
            '-singlefile', '-png', self.pdf_filename, filename]

    _log.debug('calling {0}'.format(repr(' '.join(args))))
    Popen(executable=executable, args=args).wait()

    # since pdftocairo added '.png', we need to include it with the
    # filename
    store_public(self.entry, 'medium', filename + '.png',
                 self.name_builder.fill('{basename}.medium.png'))

    self.entry.set_file_metadata('medium', size=size)
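# The _skip_processing() calls used throughout this file are defined
# elsewhere. Below is a minimal sketch of what such a helper could look
# like; the method name is taken from the calls above, but the
# get_file_metadata() accessor and the exact comparison are assumptions,
# not the project's confirmed implementation. The idea: skip regenerating
# a derivative file when it already exists and was built with the same
# parameters that were recorded via set_file_metadata().
def _skip_processing(self, keyname, **kwargs):
    # Nothing stored yet for this key: we definitely have to process.
    if keyname not in self.entry.media_files:
        return False

    # Hypothetical accessor returning the parameters recorded when the
    # file was last generated.
    stored = self.entry.get_file_metadata(keyname) or {}

    # Reprocess if any requested parameter differs from what was stored
    # (real code may need to normalize tuples vs. lists if the metadata
    # is JSON-serialized).
    for key, value in kwargs.items():
        if stored.get(key) != value:
            return False

    # Same parameters as last time: skip the expensive work.
    return True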
def generate_thumb(self, size=None):
    if not size:
        max_width = mgg.global_config['media:thumb']['max_width']
        max_height = mgg.global_config['media:thumb']['max_height']
        size = (max_width, max_height)

    if self._skip_processing('thumb', size=size):
        return

    thumb_tmp = os.path.join(
        self.workbench.dir,
        self.name_builder.fill('{basename}-thumbnail.jpg'))

    # We need the spectrogram to create a thumbnail
    spectrogram = self.entry.media_files.get('spectrogram')
    if not spectrogram:
        _log.info('No spectrogram found, we will create one.')
        self.create_spectrogram()
        spectrogram = self.entry.media_files['spectrogram']

    spectrogram_filepath = mgg.public_store.get_local_path(spectrogram)

    self.thumbnailer.thumbnail_spectrogram(spectrogram_filepath,
                                           thumb_tmp,
                                           tuple(size))

    store_public(self.entry, 'thumb', thumb_tmp,
                 self.name_builder.fill('{basename}.thumbnail.jpg'))

    self.entry.set_file_metadata('thumb', **{'size': size})
def generate_thumb(self, thumb_size=None):
    # Temporary file for the video thumbnail (cleaned up with workbench)
    tmp_thumb = os.path.join(
        self.workbench.dir,
        self.name_builder.fill('{basename}.thumbnail.jpg'))

    if not thumb_size:
        thumb_size = (mgg.global_config['media:thumb']['max_width'],)

    if self._skip_processing('thumb', thumb_size=thumb_size):
        return

    # We will only use the width so that the correct scale is kept
    transcoders.VideoThumbnailerMarkII(
        self.process_filename,
        tmp_thumb,
        thumb_size[0])

    # Checking if the thumbnail was correctly created. If it was not,
    # then just give up.
    if not os.path.exists(tmp_thumb):
        return

    # Push the thumbnail to public storage
    _log.debug('Saving thumbnail...')
    store_public(self.entry, 'thumb', tmp_thumb,
                 self.name_builder.fill('{basename}.thumbnail.jpg'))

    self.entry.set_file_metadata('thumb', thumb_size=thumb_size)
def resize_image(entry, resized, keyname, target_name, new_size,
                 exif_tags, workdir, quality, filter):
    """
    Store a resized version of an image and return its pathname.

    Arguments:
    entry -- the media entry whose image is being resized
    resized -- an image from Image.open() of the original image being resized
    keyname -- under what key to save in the db
    target_name -- public file path for the new resized image
    new_size -- 2-tuple size for the resized image
    exif_tags -- EXIF data for the original image
    workdir -- directory path for storing converted image files
    quality -- level of compression used when resizing images
    filter -- one of BICUBIC, BILINEAR, NEAREST, ANTIALIAS
    """
    resized = exif_fix_image_orientation(resized, exif_tags)  # Fix orientation

    try:
        resize_filter = PIL_FILTERS[filter.upper()]
    except KeyError:
        raise Exception('Filter "{0}" not found, choose one of {1}'.format(
            unicode(filter), u', '.join(PIL_FILTERS.keys())))

    resized.thumbnail(new_size, resize_filter)

    # Copy the new file to the conversion subdir, then remotely.
    tmp_resized_filename = os.path.join(workdir, target_name)
    with file(tmp_resized_filename, 'wb') as resized_file:
        resized.save(resized_file, quality=quality)
    store_public(entry, keyname, tmp_resized_filename, target_name)
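# store_public() is called by every function in this file but defined
# elsewhere. The sketch below shows the assumed contract: copy a finished
# local workbench file into the public storage backend and record the
# resulting filepath on the media entry. The filepath layout and the
# copy_local_to_storage() call are assumptions for illustration only.
def store_public(entry, keyname, local_file, target_name):
    # Hypothetical public filepath layout: group files by media entry id.
    target_filepath = ['media_entries', str(entry.id), target_name]

    # Copy the derivative from the local workbench into public storage.
    mgg.public_store.copy_local_to_storage(local_file, target_filepath)

    # Record where the derivative lives so the rest of the app can link to it.
    entry.media_files[keyname] = target_filepath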
def generate_medium(self, size=None):
    if not size:
        size = (mgg.global_config['media:medium']['max_width'],
                mgg.global_config['media:medium']['max_height'])

    if self._skip_processing('medium', size=size):
        return

    # Note: pdftocairo adds '.png', so don't include an ext
    filename = os.path.join(self.workbench.dir,
                            self.name_builder.fill('{basename}.medium'))

    executable = where('pdftocairo')
    args = [executable, '-scale-to', str(min(size)),
            '-singlefile', '-png', self.pdf_filename, filename]

    _log.debug('calling {0}'.format(repr(' '.join(args))))
    Popen(executable=executable, args=args).wait()

    # since pdftocairo added '.png', we need to include it with the
    # filename
    store_public(self.entry, 'medium', filename + '.png',
                 self.name_builder.fill('{basename}.medium.png'))

    self.entry.set_file_metadata('medium', size=size)
def generate_thumb(self, size=None):
    if not size:
        max_width = mgg.global_config['media:thumb']['max_width']
        max_height = mgg.global_config['media:thumb']['max_height']
        size = (max_width, max_height)

    if self._skip_processing('thumb', size=size):
        return

    thumb_tmp = os.path.join(
        self.workbench.dir,
        self.name_builder.fill('{basename}-thumbnail.jpg'))

    # We need the spectrogram to create a thumbnail
    spectrogram = self.entry.media_files.get('spectrogram')
    if not spectrogram:
        _log.info('No spectrogram found, we will create one.')
        self.create_spectrogram()
        spectrogram = self.entry.media_files['spectrogram']

    spectrogram_filepath = mgg.public_store.get_local_path(spectrogram)

    self.thumbnailer.thumbnail_spectrogram(
        spectrogram_filepath, thumb_tmp, tuple(size))

    store_public(self.entry, 'thumb', thumb_tmp,
                 self.name_builder.fill('{basename}.thumbnail.jpg'))

    self.entry.set_file_metadata('thumb', **{'size': size})
def generate_thumb(self, size=None):
    if not size:
        max_width = mgg.global_config["media:thumb"]["max_width"]
        max_height = mgg.global_config["media:thumb"]["max_height"]
        size = (max_width, max_height)

    if self._skip_processing("thumb", size=size):
        return

    thumb_tmp = os.path.join(
        self.workbench.dir,
        self.name_builder.fill("{basename}-thumbnail.jpg"))

    # We need the spectrogram to create a thumbnail
    spectrogram = self.entry.media_files.get("spectrogram")
    if not spectrogram:
        _log.info("No spectrogram found, we will create one.")
        self.create_spectrogram()
        spectrogram = self.entry.media_files["spectrogram"]

    spectrogram_filepath = mgg.public_store.get_local_path(spectrogram)

    self.thumbnailer.thumbnail_spectrogram(spectrogram_filepath,
                                           thumb_tmp,
                                           tuple(size))

    store_public(self.entry, "thumb", thumb_tmp,
                 self.name_builder.fill("{basename}.thumbnail.jpg"))

    self.entry.set_file_metadata("thumb", **{"size": size})
def generate_thumb(self, font=None, thumb_size=None):
    with file(self.process_filename, 'rb') as orig_file:
        # If no font kwarg, check config
        if not font:
            font = self.ascii_config.get('thumbnail_font', None)
        if not thumb_size:
            thumb_size = (mgg.global_config['media:thumb']['max_width'],
                          mgg.global_config['media:thumb']['max_height'])

        tmp_thumb = os.path.join(
            self.conversions_subdir,
            self.name_builder.fill('{basename}.thumbnail.png'))

        ascii_converter_args = {}

        # If there is a font from either the config or kwarg, update
        # ascii_converter_args
        if font:
            ascii_converter_args.update({'font': font})

        converter = asciitoimage.AsciiToImage(**ascii_converter_args)

        thumb = converter._create_image(orig_file.read())

        with file(tmp_thumb, 'wb') as thumb_file:
            thumb.thumbnail(thumb_size, Image.ANTIALIAS)
            thumb.save(thumb_file)

        _log.debug('Copying local file to public storage')
        store_public(self.entry, 'thumb', tmp_thumb,
                     self.name_builder.fill('{basename}.thumbnail.jpg'))
def transcode(self, quality=None):
    if not quality:
        quality = self.audio_config['quality']

    if self._skip_processing('webm_audio', quality=quality):
        return

    progress_callback = ProgressCallback(self.entry)
    webm_audio_tmp = os.path.join(self.workbench.dir,
                                  self.name_builder.fill('{basename}{ext}'))

    self.transcoder.transcode(
        self.process_filename,
        webm_audio_tmp,
        quality=quality,
        progress_callback=progress_callback)

    self._keep_best()

    _log.debug('Saving medium...')
    store_public(self.entry, 'webm_audio', webm_audio_tmp,
                 self.name_builder.fill('{basename}.medium.webm'))

    self.entry.set_file_metadata('webm_audio', **{'quality': quality})
def transcode(self, quality=None):
    if not quality:
        quality = self.audio_config['quality']

    if self._skip_processing('webm_audio', quality=quality):
        return

    progress_callback = ProgressCallback(self.entry)
    webm_audio_tmp = os.path.join(
        self.workbench.dir,
        self.name_builder.fill('{basename}{ext}'))

    self.transcoder.transcode(self.process_filename, webm_audio_tmp,
                              quality=quality,
                              progress_callback=progress_callback)

    self.transcoder.discover(webm_audio_tmp)

    self._keep_best()

    _log.debug('Saving medium...')
    store_public(self.entry, 'webm_audio', webm_audio_tmp,
                 self.name_builder.fill('{basename}.medium.webm'))

    self.entry.set_file_metadata('webm_audio', **{'quality': quality})
def generate_thumb(self, thumb_size=None):
    # Temporary file for the video thumbnail (cleaned up with workbench)
    tmp_thumb = os.path.join(
        self.workbench.dir,
        self.name_builder.fill('{basename}.thumbnail.jpg'))

    if not thumb_size:
        thumb_size = (mgg.global_config['media:thumb']['max_width'],)

    if self._skip_processing('thumb', thumb_size=thumb_size):
        return

    # We will only use the width so that the correct scale is kept
    transcoders.capture_thumb(
        self.process_filename, tmp_thumb, thumb_size[0])

    # Checking if the thumbnail was correctly created. If it was not,
    # then just give up.
    if not os.path.exists(tmp_thumb):
        return

    # Push the thumbnail to public storage
    _log.debug('Saving thumbnail...')
    store_public(self.entry, 'thumb', tmp_thumb,
                 self.name_builder.fill('{basename}.thumbnail.jpg'))

    self.entry.set_file_metadata('thumb', thumb_size=thumb_size)
def create_spectrogram(self, max_width=None, fft_size=None):
    if not max_width:
        max_width = mgg.global_config['media:medium']['max_width']
    if not fft_size:
        fft_size = self.audio_config['spectrogram_fft_size']

    if self._skip_processing('spectrogram', max_width=max_width,
                             fft_size=fft_size):
        return

    wav_tmp = os.path.join(self.workbench.dir,
                           self.name_builder.fill('{basename}.ogg'))

    _log.info('Creating OGG source for spectrogram')
    self.transcoder.transcode(self.process_filename, wav_tmp,
                              mux_name='oggmux')

    spectrogram_tmp = os.path.join(
        self.workbench.dir,
        self.name_builder.fill('{basename}-spectrogram.jpg'))

    self.thumbnailer.spectrogram(
        wav_tmp, spectrogram_tmp,
        width=max_width,
        fft_size=fft_size)

    _log.debug('Saving spectrogram...')
    store_public(self.entry, 'spectrogram', spectrogram_tmp,
                 self.name_builder.fill('{basename}.spectrogram.jpg'))

    file_metadata = {'max_width': max_width,
                     'fft_size': fft_size}
    self.entry.set_file_metadata('spectrogram', **file_metadata)
def create_spectrogram(self, max_width=None, fft_size=None):
    if not max_width:
        max_width = mgg.global_config['media:medium']['max_width']
    if not fft_size:
        fft_size = self.audio_config['spectrogram_fft_size']

    wav_tmp = os.path.join(self.workbench.dir,
                           self.name_builder.fill('{basename}.ogg'))

    _log.info('Creating OGG source for spectrogram')
    self.transcoder.transcode(
        self.process_filename, wav_tmp,
        mux_string='vorbisenc quality={0} ! oggmux'.format(
            self.audio_config['quality']))

    spectrogram_tmp = os.path.join(
        self.workbench.dir,
        self.name_builder.fill('{basename}-spectrogram.jpg'))

    self.thumbnailer.spectrogram(
        wav_tmp, spectrogram_tmp,
        width=max_width,
        fft_size=fft_size)

    _log.debug('Saving spectrogram...')
    store_public(self.entry, 'spectrogram', spectrogram_tmp,
                 self.name_builder.fill('{basename}.spectrogram.jpg'))
def transcode(self, medium_size=None, vp8_quality=None, vp8_threads=None,
              vorbis_quality=None):
    progress_callback = ProgressCallback(self.entry)
    tmp_dst = os.path.join(self.workbench.dir,
                           self.name_builder.fill('{basename}.medium.webm'))

    if not medium_size:
        medium_size = (
            mgg.global_config['media:medium']['max_width'],
            mgg.global_config['media:medium']['max_height'])
    if not vp8_quality:
        vp8_quality = self.video_config['vp8_quality']
    if not vp8_threads:
        vp8_threads = self.video_config['vp8_threads']
    if not vorbis_quality:
        vorbis_quality = self.video_config['vorbis_quality']

    # Extract metadata and keep a record of it
    metadata = self.transcoder.discover(self.process_filename)
    store_metadata(self.entry, metadata)

    # Figure out whether or not we need to transcode this video or
    # if we can skip it
    if skip_transcode(metadata, medium_size):
        _log.debug('Skipping transcoding')

        dst_dimensions = metadata['videowidth'], metadata['videoheight']

        # If there is an original and transcoded, delete the transcoded
        # since it must be of lower quality than the original
        if self.entry.media_files.get('original') and \
                self.entry.media_files.get('webm_video'):
            self.entry.media_files['webm_video'].delete()
    else:
        self.transcoder.transcode(self.process_filename, tmp_dst,
                                  vp8_quality=vp8_quality,
                                  vp8_threads=vp8_threads,
                                  vorbis_quality=vorbis_quality,
                                  progress_callback=progress_callback,
                                  dimensions=tuple(medium_size))

        dst_dimensions = (self.transcoder.dst_data.videowidth,
                          self.transcoder.dst_data.videoheight)

        self._keep_best()

        # Push transcoded video to public storage
        _log.debug('Saving medium...')
        store_public(self.entry, 'webm_video', tmp_dst,
                     self.name_builder.fill('{basename}.medium.webm'))
        _log.debug('Saved medium')

        self.did_transcode = True

    # Save the width and height of the transcoded video
    self.entry.media_data_init(
        width=dst_dimensions[0],
        height=dst_dimensions[1])
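# skip_transcode() above is defined elsewhere; the sketch below only
# illustrates the assumed decision it makes: serve the uploaded file as-is
# when it is already small enough and in a web-friendly format. The config
# section name and the metadata keys ('videowidth', 'videoheight',
# 'mimetype') are assumptions for illustration, not the project's
# confirmed implementation.
def skip_transcode(metadata, size):
    config = mgg.global_config['plugins']['mediagoblin.media_types.video']

    # Admins may force every upload through the transcoder.
    if not config.get('skip_transcode', False):
        return False

    # Never skip if the source exceeds the configured medium size.
    if metadata.get('videowidth', 0) > size[0] or \
            metadata.get('videoheight', 0) > size[1]:
        return False

    # Only skip when the upload is already in the container we serve
    # (assumed check).
    return metadata.get('mimetype') == 'video/webm'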
def _generate_pdf(self):
    """
    Store the pdf. If the file is not a pdf, make it a pdf
    """
    tmp_pdf = self.process_filename

    unoconv = where("unoconv")
    Popen(executable=unoconv,
          args=[unoconv, "-v", "-f", "pdf", self.process_filename]).wait()

    if not os.path.exists(tmp_pdf):
        _log.debug("unoconv failed to convert file to pdf")
        raise BadMediaFail()

    store_public(self.entry, "pdf", tmp_pdf,
                 self.name_builder.fill("{basename}.pdf"))

    return self.workbench.localized_file(mgg.public_store,
                                         self.entry.media_files["pdf"])
def generate_thumb(self, thumb_size=None):
    # Temporary file for the video thumbnail (cleaned up with workbench)
    tmp_thumb = os.path.join(
        self.workbench.dir,
        self.name_builder.fill('{basename}.thumbnail.jpg'))

    if not thumb_size:
        thumb_size = (mgg.global_config['media:thumb']['max_width'],)

    # We will only use the width so that the correct scale is kept
    transcoders.VideoThumbnailerMarkII(self.process_filename,
                                       tmp_thumb,
                                       thumb_size[0])

    # Push the thumbnail to public storage
    _log.debug('Saving thumbnail...')
    store_public(self.entry, 'thumb', tmp_thumb,
                 self.name_builder.fill('{basename}.thumbnail.jpg'))
def generate_thumb(self, size=None):
    if not size:
        size = (mgg.global_config['media:thumb']['max_width'],
                mgg.global_config['media:thumb']['max_height'])

    if self.svg_config['svg_thumbnails']:
        # delete existing thumbnail, if it doesn't match the original
        if 'thumb' in self.entry.media_files and \
                self.entry.media_files['thumb'] != \
                self.entry.media_files['original']:
            mgg.public_store.delete_file(self.entry.media_files['thumb'])
        self.entry.media_files['thumb'] = \
            self.entry.media_files.get('original')
    else:
        thumb_filename = os.path.join(
            self.workbench.dir,
            self.name_builder.fill('{basename}.thumbnail.png'))
        render_preview(self.process_filename, thumb_filename, size)
        store_public(self.entry, 'thumb', thumb_filename)
def generate_thumb(self, font=None, thumb_size=None):
    with open(self.process_filename, 'rb') as orig_file:
        # If no font kwarg, check config
        if not font:
            font = self.ascii_config.get('thumbnail_font', None)
        if not thumb_size:
            thumb_size = (mgg.global_config['media:thumb']['max_width'],
                          mgg.global_config['media:thumb']['max_height'])

        if self._skip_resizing(font, thumb_size):
            return

        tmp_thumb = os.path.join(
            self.conversions_subdir,
            self.name_builder.fill('{basename}.thumbnail.png'))

        ascii_converter_args = {}

        # If there is a font from either the config or kwarg, update
        # ascii_converter_args
        if font:
            ascii_converter_args.update({'font': font})

        converter = asciitoimage.AsciiToImage(**ascii_converter_args)

        thumb = converter._create_image(orig_file.read())

        with open(tmp_thumb, 'wb') as thumb_file:
            thumb.thumbnail(thumb_size, Image.ANTIALIAS)
            thumb.save(thumb_file)

        thumb_info = {'font': font,
                      'width': thumb_size[0],
                      'height': thumb_size[1]}

        _log.debug('Copying local file to public storage')
        store_public(self.entry, 'thumb', tmp_thumb,
                     self.name_builder.fill('{basename}.thumbnail.jpg'))

        self.entry.set_file_metadata('thumb', **thumb_info)
def create_spectrogram(self, max_width=None, fft_size=None):
    if not max_width:
        max_width = mgg.global_config["media:medium"]["max_width"]
    if not fft_size:
        fft_size = self.audio_config["spectrogram_fft_size"]

    if self._skip_processing("spectrogram", max_width=max_width,
                             fft_size=fft_size):
        return

    wav_tmp = os.path.join(self.workbench.dir,
                           self.name_builder.fill("{basename}.ogg"))

    _log.info("Creating OGG source for spectrogram")
    self.transcoder.transcode(self.process_filename, wav_tmp,
                              mux_name="oggmux")

    spectrogram_tmp = os.path.join(
        self.workbench.dir,
        self.name_builder.fill("{basename}-spectrogram.jpg"))

    self.thumbnailer.spectrogram(wav_tmp, spectrogram_tmp,
                                 width=max_width,
                                 fft_size=fft_size)

    _log.debug("Saving spectrogram...")
    store_public(self.entry, "spectrogram", spectrogram_tmp,
                 self.name_builder.fill("{basename}.spectrogram.jpg"))

    file_metadata = {"max_width": max_width,
                     "fft_size": fft_size}
    self.entry.set_file_metadata("spectrogram", **file_metadata)
def generate_thumb(self, thumb_size=None):
    # Temporary file for the video thumbnail (cleaned up with workbench)
    tmp_thumb = os.path.join(
        self.workbench.dir,
        self.name_builder.fill('{basename}.thumbnail.jpg'))

    if not thumb_size:
        thumb_size = (mgg.global_config['media:thumb']['max_width'],)

    # We will only use the width so that the correct scale is kept
    transcoders.VideoThumbnailerMarkII(
        self.process_filename, tmp_thumb, thumb_size[0])

    # Push the thumbnail to public storage
    _log.debug('Saving thumbnail...')
    store_public(self.entry, 'thumb', tmp_thumb,
                 self.name_builder.fill('{basename}.thumbnail.jpg'))
def _generate_pdf(self):
    """
    Store the pdf. If the file is not a pdf, make it a pdf
    """
    tmp_pdf = self.process_filename

    unoconv = where('unoconv')
    Popen(executable=unoconv,
          args=[unoconv, '-v', '-f', 'pdf', self.process_filename]).wait()

    if not os.path.exists(tmp_pdf):
        _log.debug('unoconv failed to convert file to pdf')
        raise BadMediaFail()

    store_public(self.entry, 'pdf', tmp_pdf,
                 self.name_builder.fill('{basename}.pdf'))

    return self.workbench.localized_file(
        mgg.public_store, self.entry.media_files['pdf'])
def generate_medium(self, size=None):
    if not size:
        size = (mgg.global_config["media:medium"]["max_width"],
                mgg.global_config["media:medium"]["max_height"])

    if self._skip_processing("medium", size=size):
        return

    # Note: pdftocairo adds '.png', so don't include an ext
    filename = os.path.join(self.workbench.dir,
                            self.name_builder.fill("{basename}.medium"))

    executable = where("pdftocairo")
    args = [executable, "-scale-to", str(min(size)),
            "-singlefile", "-png", self.pdf_filename, filename]

    _log.debug("calling {0}".format(repr(" ".join(args))))
    Popen(executable=executable, args=args).wait()

    # since pdftocairo added '.png', we need to include it with the
    # filename
    store_public(self.entry, "medium", filename + ".png",
                 self.name_builder.fill("{basename}.medium.png"))

    self.entry.set_file_metadata("medium", size=size)
def _generate_pdf(self):
    """
    Store the pdf. If the file is not a pdf, make it a pdf
    """
    tmp_pdf = os.path.splitext(self.process_filename)[0] + '.pdf'

    unoconv = where('unoconv')
    args = [unoconv, '-v', '-f', 'pdf', self.process_filename]
    _log.debug('calling %s' % repr(args))
    Popen(executable=unoconv, args=args).wait()

    if not os.path.exists(tmp_pdf):
        _log.debug('unoconv failed to convert file to pdf')
        raise BadMediaFail()

    store_public(self.entry, 'pdf', tmp_pdf,
                 self.name_builder.fill('{basename}.pdf'))

    return self.workbench.localized_file(
        mgg.public_store, self.entry.media_files['pdf'])
def generate_thumb(self, thumb_size=None):
    if not thumb_size:
        thumb_size = (mgg.global_config['media:thumb']['max_width'],
                      mgg.global_config['media:thumb']['max_height'])

    # Note: pdftocairo adds '.png', so don't include an ext
    thumb_filename = os.path.join(
        self.workbench.dir,
        self.name_builder.fill('{basename}.thumbnail'))

    executable = where('pdftocairo')
    args = [executable, '-scale-to', str(min(thumb_size)),
            '-singlefile', '-png', self.pdf_filename, thumb_filename]

    _log.debug('calling {0}'.format(repr(' '.join(args))))
    Popen(executable=executable, args=args).wait()

    # since pdftocairo added '.png', we need to include it with the
    # filename
    store_public(self.entry, 'thumb', thumb_filename + '.png',
                 self.name_builder.fill('{basename}.thumbnail.png'))
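# where() is used in this file to locate external executables such as
# pdftocairo and unoconv. It is defined elsewhere; the minimal PATH-lookup
# sketch below only illustrates the assumed behaviour (the real helper may
# raise instead of returning None when nothing is found).
import os

def where(name):
    # Return the first executable called `name` found on the current $PATH.
    for directory in os.environ.get('PATH', '').split(os.pathsep):
        candidate = os.path.join(directory, name)
        if os.path.isfile(candidate) and os.access(candidate, os.X_OK):
            return candidate
    return None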
def _snap(self, keyname, name, camera, size, project="ORTHO"):
    filename = self.name_builder.fill(name)
    workbench_path = self.workbench.joinpath(filename)

    shot = {
        "model_path": self.process_filename,
        "model_ext": self.ext,
        "camera_coord": camera,
        "camera_focus": self.model.average,
        "camera_clip": self.greatest * 10,
        "greatest": self.greatest,
        "projection": project,
        "width": size[0],
        "height": size[1],
        "out_file": workbench_path,
    }
    blender_render(shot)

    # make sure the image rendered to the workbench path
    assert os.path.exists(workbench_path)

    # copy it up!
    store_public(self.entry, keyname, workbench_path, filename)
def _snap(self, keyname, name, camera, size, project="ORTHO"):
    filename = self.name_builder.fill(name)
    workbench_path = self.workbench.joinpath(filename)

    shot = {
        "model_path": self.process_filename,
        "model_ext": self.ext,
        "camera_coord": camera,
        "camera_focus": self.model.average,
        "camera_clip": self.greatest * 10,
        "greatest": self.greatest,
        "projection": project,
        "width": size[0],
        "height": size[1],
        "out_file": workbench_path,
    }
    blender_render(shot)

    # make sure the image rendered to the workbench path
    assert os.path.exists(workbench_path)

    # copy it up!
    store_public(self.entry, keyname, workbench_path, filename)
def transcode(self, medium_size=None, vp8_quality=None, vp8_threads=None,
              vorbis_quality=None):
    progress_callback = ProgressCallback(self.entry)
    tmp_dst = os.path.join(self.workbench.dir,
                           self.name_builder.fill('{basename}.medium.webm'))

    if not medium_size:
        medium_size = (
            mgg.global_config['media:medium']['max_width'],
            mgg.global_config['media:medium']['max_height'])
    if not vp8_quality:
        vp8_quality = self.video_config['vp8_quality']
    if not vp8_threads:
        vp8_threads = self.video_config['vp8_threads']
    if not vorbis_quality:
        vorbis_quality = self.video_config['vorbis_quality']

    file_metadata = {'medium_size': medium_size,
                     'vp8_threads': vp8_threads,
                     'vp8_quality': vp8_quality,
                     'vorbis_quality': vorbis_quality}

    if self._skip_processing('webm_video', **file_metadata):
        return

    # Extract metadata and keep a record of it
    metadata = self.transcoder.discover(self.process_filename)
    store_metadata(self.entry, metadata)

    # Figure out whether or not we need to transcode this video or
    # if we can skip it
    if skip_transcode(metadata, medium_size):
        _log.debug('Skipping transcoding')

        dst_dimensions = metadata['videowidth'], metadata['videoheight']

        # If there is an original and transcoded, delete the transcoded
        # since it must be of lower quality than the original
        if self.entry.media_files.get('original') and \
                self.entry.media_files.get('webm_video'):
            self.entry.media_files['webm_video'].delete()
    else:
        self.transcoder.transcode(self.process_filename, tmp_dst,
                                  vp8_quality=vp8_quality,
                                  vp8_threads=vp8_threads,
                                  vorbis_quality=vorbis_quality,
                                  progress_callback=progress_callback,
                                  dimensions=tuple(medium_size))

        dst_dimensions = (self.transcoder.dst_data.videowidth,
                          self.transcoder.dst_data.videoheight)

        self._keep_best()

        # Push transcoded video to public storage
        _log.debug('Saving medium...')
        store_public(self.entry, 'webm_video', tmp_dst,
                     self.name_builder.fill('{basename}.medium.webm'))
        _log.debug('Saved medium')

        self.entry.set_file_metadata('webm_video', **file_metadata)
        self.did_transcode = True

    # Save the width and height of the transcoded video
    self.entry.media_data_init(
        width=dst_dimensions[0],
        height=dst_dimensions[1])
def transcode(self, medium_size=None, vp8_quality=None, vp8_threads=None,
              vorbis_quality=None):
    progress_callback = ProgressCallback(self.entry)
    tmp_dst = os.path.join(self.workbench.dir, self.part_filename)

    if not medium_size:
        medium_size = (
            mgg.global_config['media:medium']['max_width'],
            mgg.global_config['media:medium']['max_height'])
    if not vp8_quality:
        vp8_quality = self.video_config['vp8_quality']
    if not vp8_threads:
        vp8_threads = self.video_config['vp8_threads']
    if not vorbis_quality:
        vorbis_quality = self.video_config['vorbis_quality']

    file_metadata = {'medium_size': medium_size,
                     'vp8_threads': vp8_threads,
                     'vp8_quality': vp8_quality,
                     'vorbis_quality': vorbis_quality}

    if self._skip_processing(self.curr_file, **file_metadata):
        return

    metadata = transcoders.discover(self.process_filename)
    orig_dst_dimensions = (metadata.get_video_streams()[0].get_width(),
                           metadata.get_video_streams()[0].get_height())

    # Figure out whether or not we need to transcode this video or
    # if we can skip it
    if skip_transcode(metadata, medium_size):
        _log.debug('Skipping transcoding')

        # If there is an original and transcoded, delete the transcoded
        # since it must be of lower quality than the original
        if self.entry.media_files.get('original') and \
                self.entry.media_files.get(self.curr_file):
            self.entry.media_files[self.curr_file].delete()
    else:
        _log.debug('Entered transcoder')
        video_config = (mgg.global_config['plugins']
                        ['mediagoblin.media_types.video'])
        num_res = len(video_config['available_resolutions'])
        default_res = video_config['default_resolution']
        self.transcoder.transcode(self.process_filename, tmp_dst,
                                  default_res, num_res,
                                  vp8_quality=vp8_quality,
                                  vp8_threads=vp8_threads,
                                  vorbis_quality=vorbis_quality,
                                  progress_callback=progress_callback,
                                  dimensions=tuple(medium_size))

        if self.transcoder.dst_data:
            # Push transcoded video to public storage
            _log.debug('Saving medium...')
            store_public(self.entry, self.curr_file, tmp_dst,
                         self.part_filename)
            _log.debug('Saved medium')

            self.entry.set_file_metadata(self.curr_file, **file_metadata)
            self.did_transcode = True
def transcode(self, medium_size=None, vp8_quality=None, vp8_threads=None,
              vorbis_quality=None):
    progress_callback = ProgressCallback(self.entry)
    tmp_dst = os.path.join(self.workbench.dir,
                           self.name_builder.fill('{basename}.medium.webm'))

    if not medium_size:
        medium_size = (
            mgg.global_config['media:medium']['max_width'],
            mgg.global_config['media:medium']['max_height'])
    if not vp8_quality:
        vp8_quality = self.video_config['vp8_quality']
    if not vp8_threads:
        vp8_threads = self.video_config['vp8_threads']
    if not vorbis_quality:
        vorbis_quality = self.video_config['vorbis_quality']

    file_metadata = {'medium_size': medium_size,
                     'vp8_threads': vp8_threads,
                     'vp8_quality': vp8_quality,
                     'vorbis_quality': vorbis_quality}

    if self._skip_processing('webm_video', **file_metadata):
        return

    # Extract metadata and keep a record of it
    metadata = transcoders.discover(self.process_filename)

    # metadata's stream info here is a DiscovererContainerInfo instance,
    # it gets split into DiscovererAudioInfo and DiscovererVideoInfo;
    # metadata itself has container-related data in tags, like video-codec
    store_metadata(self.entry, metadata)

    orig_dst_dimensions = (metadata.get_video_streams()[0].get_width(),
                           metadata.get_video_streams()[0].get_height())

    # Figure out whether or not we need to transcode this video or
    # if we can skip it
    if skip_transcode(metadata, medium_size):
        _log.debug('Skipping transcoding')

        dst_dimensions = orig_dst_dimensions

        # If there is an original and transcoded, delete the transcoded
        # since it must be of lower quality than the original
        if self.entry.media_files.get('original') and \
                self.entry.media_files.get('webm_video'):
            self.entry.media_files['webm_video'].delete()
    else:
        self.transcoder.transcode(self.process_filename, tmp_dst,
                                  vp8_quality=vp8_quality,
                                  vp8_threads=vp8_threads,
                                  vorbis_quality=vorbis_quality,
                                  progress_callback=progress_callback,
                                  dimensions=tuple(medium_size))

        if self.transcoder.dst_data:
            video_info = self.transcoder.dst_data.get_video_streams()[0]
            dst_dimensions = (video_info.get_width(),
                              video_info.get_height())

            self._keep_best()

            # Push transcoded video to public storage
            _log.debug('Saving medium...')
            store_public(self.entry, 'webm_video', tmp_dst,
                         self.name_builder.fill('{basename}.medium.webm'))
            _log.debug('Saved medium')

            self.entry.set_file_metadata('webm_video', **file_metadata)
            self.did_transcode = True
        else:
            dst_dimensions = orig_dst_dimensions

    # Save the width and height of the transcoded video
    self.entry.media_data_init(
        width=dst_dimensions[0],
        height=dst_dimensions[1])