def snap(name, camera, width=640, height=640, project="ORTHO"):
    """Render one preview image of the model and publish it.

    NOTE(review): this is a closure-style helper — it relies on
    enclosing-scope names (``name_builder``, ``workbench``,
    ``queued_filename``, ``ext``, ``model``, ``greatest``, ``entry``)
    that are not defined in this block; confirm it is nested inside the
    STL-processing function when used.

    :param name: filename template filled via ``name_builder``
        (e.g. ``"{basename}.thumb.jpg"``)
    :param camera: camera coordinates handed to the renderer
    :param width: output image width in pixels
    :param height: output image height in pixels
    :param project: projection mode passed to the renderer
        (``"ORTHO"`` or ``"PERSP"``)
    :returns: the public storage filepath of the rendered image
    """
    filename = name_builder.fill(name)
    workbench_path = workbench.joinpath(filename)
    shot = {
        "model_path": queued_filename,
        "model_ext": ext,
        "camera_coord": camera,
        "camera_focus": model.average,
        # clip distance scales with the model's largest dimension so the
        # whole model stays inside the view frustum
        "camera_clip": greatest*10,
        "greatest": greatest,
        "projection": project,
        "width": width,
        "height": height,
        "out_file": workbench_path,
    }
    blender_render(shot)

    # make sure the image rendered to the workbench path
    # NOTE(review): this assert is stripped under ``python -O``; the
    # failure would then surface later as an IOError on open().
    assert os.path.exists(workbench_path)

    # copy it up!
    with open(workbench_path, 'rb') as rendered_file:
        public_path = create_pub_filepath(entry, filename)

        with mgg.public_store.get_file(public_path, "wb") as public_file:
            public_file.write(rendered_file.read())

    return public_path
def store_unicode_file(self):
    """Store an ASCII-portable copy of the source text file.

    The source file's charset is detected first, then the content is
    decoded from that charset and re-encoded to pure ASCII, replacing
    any non-ASCII character with an HTML numeric entity (&#...;).  The
    result is saved to public storage as ``ascii-portable.txt`` and
    recorded under the ``'unicode'`` media-file key.
    """
    # Fixed: use the portable ``open()`` builtin and ``six.text_type``
    # instead of the Python-2-only ``file()`` and ``unicode()`` builtins
    # (matches the other store_unicode_file implementation in this file).
    with open(self.process_filename, 'rb') as orig_file:
        self._detect_charset(orig_file)
        unicode_filepath = create_pub_filepath(self.entry,
                                               'ascii-portable.txt')

        with mgg.public_store.get_file(unicode_filepath, 'wb') \
                as unicode_file:
            # Decode the original file from its detected charset (or UTF8)
            # Encode the unicode instance to ASCII and replace any
            # non-ASCII with an HTML entity (&#
            unicode_file.write(
                six.text_type(orig_file.read().decode(self.charset)).encode(
                    'ascii', 'xmlcharrefreplace'))

    self.entry.media_files['unicode'] = unicode_filepath
def store_unicode_file(self):
    """Write an ASCII-portable rendition of the source file.

    Detects the source file's charset, decodes the content with it,
    then re-encodes to ASCII with every non-ASCII character replaced
    by an HTML numeric entity.  The result is published to storage as
    ``ascii-portable.txt`` and recorded under the ``'unicode'`` key.
    """
    with open(self.process_filename, 'rb') as source:
        self._detect_charset(source)
        unicode_filepath = create_pub_filepath(self.entry,
                                               'ascii-portable.txt')

        with mgg.public_store.get_file(unicode_filepath, 'wb') as target:
            # Decode from the detected charset (or the UTF-8 fallback),
            # then force ASCII, swapping non-ASCII for HTML entities.
            decoded_text = source.read().decode(self.charset)
            portable = six.text_type(decoded_text).encode(
                'ascii', 'xmlcharrefreplace')
            target.write(portable)

    self.entry.media_files['unicode'] = unicode_filepath
def process_stl(proc_state):
    """Code to process an stl or obj model. Will be run by celery.

    A Workbench() represents a local tempory dir. It is automatically
    cleaned up when this function exits.

    Renders five preview images (thumb, perspective, top, front, side)
    via blender, publishes them and the original model file, cleans up
    the queue, and records file paths plus model dimensions in the
    database.

    :param proc_state: processing state holding the media ``entry`` and
        the local ``workbench`` directory
    """
    entry = proc_state.entry
    workbench = proc_state.workbench

    queued_filepath = entry.queued_media_file
    queued_filename = workbench.localized_file(
        mgg.queue_store, queued_filepath, 'source')
    name_builder = FilenameBuilder(queued_filename)

    # Derive the model's extension from the last 4 characters of the
    # filename; anything that doesn't look like ".xxx" yields None.
    ext = queued_filename.lower().strip()[-4:]
    if ext.startswith("."):
        ext = ext[1:]
    else:
        ext = None

    # Attempt to parse the model file and divine some useful
    # information about it.
    with open(queued_filename, 'rb') as model_file:
        model = model_loader.auto_detect(model_file, ext)

    # generate preview images
    # ``greatest`` is the model's largest dimension; used to scale the
    # camera distance and clip plane for every shot.
    greatest = [model.width, model.height, model.depth]
    greatest.sort()
    greatest = greatest[-1]

    def snap(name, camera, width=640, height=640, project="ORTHO"):
        """Render one preview shot of the model and publish it.

        :param name: filename template filled via ``name_builder``
        :param camera: camera coordinates for the renderer
        :param project: projection mode ("ORTHO" or "PERSP")
        :returns: public storage filepath of the rendered image
        """
        filename = name_builder.fill(name)
        workbench_path = workbench.joinpath(filename)
        shot = {
            "model_path": queued_filename,
            "model_ext": ext,
            "camera_coord": camera,
            "camera_focus": model.average,
            "camera_clip": greatest*10,
            "greatest": greatest,
            "projection": project,
            "width": width,
            "height": height,
            "out_file": workbench_path,
        }
        blender_render(shot)

        # make sure the image rendered to the workbench path
        # NOTE(review): stripped under ``python -O``.
        assert os.path.exists(workbench_path)

        # copy it up!
        with open(workbench_path, 'rb') as rendered_file:
            public_path = create_pub_filepath(entry, filename)

            with mgg.public_store.get_file(public_path, "wb") as public_file:
                public_file.write(rendered_file.read())

        return public_path

    thumb_path = snap(
        "{basename}.thumb.jpg",
        [0, greatest*-1.5, greatest],
        mgg.global_config['media:thumb']['max_width'],
        mgg.global_config['media:thumb']['max_height'],
        project="PERSP")

    perspective_path = snap(
        "{basename}.perspective.jpg",
        [0, greatest*-1.5, greatest], project="PERSP")

    topview_path = snap(
        "{basename}.top.jpg",
        [model.average[0], model.average[1], greatest*2])

    frontview_path = snap(
        "{basename}.front.jpg",
        [model.average[0], greatest*-2, model.average[2]])

    sideview_path = snap(
        "{basename}.side.jpg",
        [greatest*-2, model.average[1], model.average[2]])

    ## Save the public file stuffs
    model_filepath = create_pub_filepath(
        entry, name_builder.fill('{basename}{ext}'))

    with mgg.public_store.get_file(model_filepath, 'wb') as model_file:
        with open(queued_filename, 'rb') as queued_file:
            model_file.write(queued_file.read())

    # Remove queued media file from storage and database.
    # queued_filepath is in the task_id directory which should
    # be removed too, but fail if the directory is not empty to be on
    # the super-safe side.
    mgg.queue_store.delete_file(queued_filepath)      # rm file
    mgg.queue_store.delete_dir(queued_filepath[:-1])  # rm dir
    entry.queued_media_file = []

    # Insert media file information into database
    media_files_dict = entry.setdefault('media_files', {})
    media_files_dict[u'original'] = model_filepath
    media_files_dict[u'thumb'] = thumb_path
    media_files_dict[u'perspective'] = perspective_path
    media_files_dict[u'top'] = topview_path
    media_files_dict[u'side'] = sideview_path
    media_files_dict[u'front'] = frontview_path

    # Put model dimensions into the database
    dimensions = {
        "center_x" : model.average[0],
        "center_y" : model.average[1],
        "center_z" : model.average[2],
        "width" : model.width,
        "height" : model.height,
        "depth" : model.depth,
        "file_type" : ext,
        }
    entry.media_data_init(**dimensions)
def process_ascii(proc_state):
    """Code to process a txt file. Will be run by celery.

    A Workbench() represents a local tempory dir. It is automatically
    cleaned up when this function exits.

    Detects the text file's charset, renders a PNG thumbnail of the
    ASCII art, publishes the original and an ASCII-portable copy, then
    cleans the queue and records the media file paths.

    :param proc_state: processing state holding the media ``entry`` and
        the local ``workbench`` directory
    """
    entry = proc_state.entry
    workbench = proc_state.workbench
    ascii_config = mgg.global_config['media_type:mediagoblin.media_types.ascii']
    # Conversions subdirectory to avoid collisions
    conversions_subdir = os.path.join(
        workbench.dir, 'conversions')
    os.mkdir(conversions_subdir)

    queued_filepath = entry.queued_media_file
    queued_filename = workbench.localized_file(
        mgg.queue_store, queued_filepath,
        'source')

    # Fixed: ``open()`` instead of the Python-2-only ``file()`` builtin.
    queued_file = open(queued_filename, 'rb')

    with queued_file:
        queued_file_charset = chardet.detect(queued_file.read())

        # Only select a non-utf-8 charset if chardet is *really* sure
        # Tested with "Feli\x0109an superjaron", which was detecte
        if queued_file_charset['confidence'] < 0.9:
            interpreted_charset = 'utf-8'
        else:
            interpreted_charset = queued_file_charset['encoding']

        _log.info('Charset detected: {0}\nWill interpret as: {1}'.format(
                queued_file_charset,
                interpreted_charset))

        queued_file.seek(0)  # Rewind the queued file

        thumb_filepath = create_pub_filepath(
            entry, 'thumbnail.png')

        tmp_thumb_filename = os.path.join(
            conversions_subdir, thumb_filepath[-1])

        ascii_converter_args = {}

        if ascii_config['thumbnail_font']:
            ascii_converter_args.update(
                {'font': ascii_config['thumbnail_font']})

        converter = asciitoimage.AsciiToImage(
            **ascii_converter_args)

        thumb = converter._create_image(
            queued_file.read())

        # Fixed: open in binary mode — PNG data written through a text-mode
        # handle would be corrupted on Python 3 (and on Windows).
        with open(tmp_thumb_filename, 'wb') as thumb_file:
            thumb.thumbnail(
                (mgg.global_config['media:thumb']['max_width'],
                 mgg.global_config['media:thumb']['max_height']),
                Image.ANTIALIAS)
            thumb.save(thumb_file)

        _log.debug('Copying local file to public storage')
        mgg.public_store.copy_local_to_storage(
            tmp_thumb_filename, thumb_filepath)

        queued_file.seek(0)

        original_filepath = create_pub_filepath(entry, queued_filepath[-1])

        with mgg.public_store.get_file(original_filepath, 'wb') \
                as original_file:
            original_file.write(queued_file.read())

        queued_file.seek(0)  # Rewind *again*

        unicode_filepath = create_pub_filepath(entry, 'ascii-portable.txt')

        with mgg.public_store.get_file(unicode_filepath, 'wb') \
                as unicode_file:
            # Decode the original file from its detected charset (or UTF8)
            # Encode the unicode instance to ASCII and replace any non-ASCII
            # with an HTML entity (&#
            # Fixed: ``six.text_type`` instead of the Py2-only ``unicode()``.
            unicode_file.write(
                six.text_type(queued_file.read().decode(
                    interpreted_charset)).encode(
                        'ascii', 'xmlcharrefreplace'))

    # Remove queued media file from storage and database.
    # queued_filepath is in the task_id directory which should
    # be removed too, but fail if the directory is not empty to be on
    # the super-safe side.
    mgg.queue_store.delete_file(queued_filepath)      # rm file
    mgg.queue_store.delete_dir(queued_filepath[:-1])  # rm dir
    entry.queued_media_file = []

    media_files_dict = entry.setdefault('media_files', {})
    media_files_dict['thumb'] = thumb_filepath
    media_files_dict['unicode'] = unicode_filepath
    media_files_dict['original'] = original_filepath

    entry.save()
def process_audio(entry):
    """Process an uploaded audio entry. Will be run by celery.

    Transcodes the queued file to a WebM (vorbis) audio file, optionally
    keeps the original, and — when configured — renders a spectrogram
    plus a thumbnail of it.  Cleans up the queue file and the workbench
    when done.

    :param entry: the media entry whose queued file is processed
    """
    audio_config = mgg.global_config['media_type:mediagoblin.media_types.audio']

    workbench = mgg.workbench_manager.create_workbench()

    queued_filepath = entry.queued_media_file
    queued_filename = workbench.localized_file(
        mgg.queue_store, queued_filepath, 'source')
    name_builder = FilenameBuilder(queued_filename)

    webm_audio_filepath = create_pub_filepath(
        entry,
        '{original}.webm'.format(
            original=os.path.splitext(queued_filepath[-1])[0]))

    if audio_config['keep_original']:
        with open(queued_filename, 'rb') as queued_file:
            original_filepath = create_pub_filepath(
                entry, name_builder.fill('{basename}{ext}'))

            with mgg.public_store.get_file(original_filepath, 'wb') as \
                    original_file:
                _log.debug('Saving original...')
                original_file.write(queued_file.read())

            entry.media_files['original'] = original_filepath

    transcoder = AudioTranscoder()

    with tempfile.NamedTemporaryFile() as webm_audio_tmp:
        progress_callback = ProgressCallback(entry)

        transcoder.transcode(
            queued_filename,
            webm_audio_tmp.name,
            quality=audio_config['quality'],
            progress_callback=progress_callback)

        transcoder.discover(webm_audio_tmp.name)

        _log.debug('Saving medium...')
        # Fixed: wrap the public-store handle in a context manager so it
        # is flushed and closed instead of leaked (matches the style used
        # everywhere else in this module).
        with mgg.public_store.get_file(webm_audio_filepath, 'wb') as \
                webm_audio_file:
            webm_audio_file.write(webm_audio_tmp.read())

        entry.media_files['webm_audio'] = webm_audio_filepath

        # entry.media_data_init(length=int(data.audiolength))

    if audio_config['create_spectrogram']:
        spectrogram_filepath = create_pub_filepath(
            entry,
            '{original}-spectrogram.jpg'.format(
                original=os.path.splitext(queued_filepath[-1])[0]))

        with tempfile.NamedTemporaryFile(suffix='.ogg') as wav_tmp:
            _log.info('Creating OGG source for spectrogram')
            transcoder.transcode(
                queued_filename,
                wav_tmp.name,
                mux_string='vorbisenc quality={0} ! oggmux'.format(
                    audio_config['quality']))

            thumbnailer = AudioThumbnailer()

            with tempfile.NamedTemporaryFile(suffix='.jpg') as spectrogram_tmp:
                thumbnailer.spectrogram(
                    wav_tmp.name,
                    spectrogram_tmp.name,
                    width=mgg.global_config['media:medium']['max_width'],
                    fft_size=audio_config['spectrogram_fft_size'])

                _log.debug('Saving spectrogram...')
                # Fixed: context-managed write (was a leaked handle).
                with mgg.public_store.get_file(spectrogram_filepath, 'wb') as \
                        spectrogram_file:
                    spectrogram_file.write(spectrogram_tmp.read())

                entry.media_files['spectrogram'] = spectrogram_filepath

                with tempfile.NamedTemporaryFile(suffix='.jpg') as thumb_tmp:
                    thumbnailer.thumbnail_spectrogram(
                        spectrogram_tmp.name,
                        thumb_tmp.name,
                        (mgg.global_config['media:thumb']['max_width'],
                         mgg.global_config['media:thumb']['max_height']))

                    thumb_filepath = create_pub_filepath(
                        entry,
                        '{original}-thumbnail.jpg'.format(
                            original=os.path.splitext(queued_filepath[-1])[0]))

                    # Fixed: context-managed write (was a leaked handle).
                    with mgg.public_store.get_file(thumb_filepath, 'wb') as \
                            thumb_file:
                        thumb_file.write(thumb_tmp.read())

                    entry.media_files['thumb'] = thumb_filepath
    else:
        entry.media_files['thumb'] = ['fake', 'thumb', 'path.jpg']

    mgg.queue_store.delete_file(queued_filepath)

    # clean up workbench
    workbench.destroy_self()
def process_video(entry):
    """
    Process a video entry, transcode the queued media files (originals) and
    create a thumbnail for the entry.

    :param entry: the media entry whose queued file is processed
    """
    video_config = mgg.global_config['media_type:mediagoblin.media_types.video']

    workbench = mgg.workbench_manager.create_workbench()

    queued_filepath = entry.queued_media_file
    queued_filename = workbench.localized_file(
        mgg.queue_store, queued_filepath, 'source')
    name_builder = FilenameBuilder(queued_filename)

    medium_filepath = create_pub_filepath(
        entry, name_builder.fill('{basename}-640p.webm'))

    thumbnail_filepath = create_pub_filepath(
        entry, name_builder.fill('{basename}.thumbnail.jpg'))

    # Create a temporary file for the video destination
    tmp_dst = tempfile.NamedTemporaryFile()

    with tmp_dst:
        # Transcode queued file to a VP8/vorbis file that fits in a 640x640 square
        progress_callback = ProgressCallback(entry)
        transcoder = transcoders.VideoTranscoder()
        transcoder.transcode(queued_filename, tmp_dst.name,
                vp8_quality=video_config['vp8_quality'],
                vp8_threads=video_config['vp8_threads'],
                vorbis_quality=video_config['vorbis_quality'],
                progress_callback=progress_callback)

        # Push transcoded video to public storage
        _log.debug('Saving medium...')
        # Fixed: context-managed write so the public-store handle is
        # flushed and closed instead of leaked.
        with mgg.public_store.get_file(medium_filepath, 'wb') as medium_file:
            medium_file.write(tmp_dst.read())
        _log.debug('Saved medium')

        entry.media_files['webm_640'] = medium_filepath

        # Save the width and height of the transcoded video
        entry.media_data_init(
            width=transcoder.dst_data.videowidth,
            height=transcoder.dst_data.videoheight)

    # Create a temporary file for the video thumbnail
    tmp_thumb = tempfile.NamedTemporaryFile(suffix='.jpg')

    with tmp_thumb:
        # Create a thumbnail.jpg that fits in a 180x180 square
        transcoders.VideoThumbnailerMarkII(
            queued_filename,
            tmp_thumb.name,
            180)

        # Push the thumbnail to public storage
        _log.debug('Saving thumbnail...')
        # Fixed: context-managed write (was a leaked handle).
        with mgg.public_store.get_file(thumbnail_filepath, 'wb') as \
                thumbnail_file:
            thumbnail_file.write(tmp_thumb.read())
        _log.debug('Saved thumbnail')

        entry.media_files['thumb'] = thumbnail_filepath

    if video_config['keep_original']:
        # Push original file to public storage
        # Fixed: ``open()`` instead of the Python-2-only ``file()`` builtin.
        queued_file = open(queued_filename, 'rb')

        with queued_file:
            original_filepath = create_pub_filepath(entry, queued_filepath[-1])

            with mgg.public_store.get_file(original_filepath, 'wb') as \
                    original_file:
                _log.debug('Saving original...')
                original_file.write(queued_file.read())
            _log.debug('Saved original')

            entry.media_files['original'] = original_filepath

    mgg.queue_store.delete_file(queued_filepath)
def process_video(proc_state):
    """
    Process a video entry, transcode the queued media files (originals) and
    create a thumbnail for the entry.

    A Workbench() represents a local tempory dir. It is automatically
    cleaned up when this function exits.

    Extracts and stores metadata first, then either transcodes to a
    640x640-bounded VP8/vorbis WebM or — when ``skip_transcode`` says the
    source is already suitable — publishes the original as the main
    media.  Always renders a 180x180-bounded thumbnail.

    :param proc_state: processing state holding the media ``entry`` and
        the local ``workbench`` directory
    """
    entry = proc_state.entry
    workbench = proc_state.workbench
    video_config = mgg.global_config['media_type:mediagoblin.media_types.video']

    queued_filepath = entry.queued_media_file
    queued_filename = proc_state.get_queued_filename()
    name_builder = FilenameBuilder(queued_filename)

    medium_filepath = create_pub_filepath(
        entry, name_builder.fill('{basename}-640p.webm'))

    thumbnail_filepath = create_pub_filepath(
        entry, name_builder.fill('{basename}.thumbnail.jpg'))

    # Create a temporary file for the video destination (cleaned up with workbench)
    # delete=False: the file must survive the ``with`` block so the
    # transcoded data can be copied to storage; the workbench dir removal
    # takes care of it afterwards.
    tmp_dst = NamedTemporaryFile(dir=workbench.dir, delete=False)
    with tmp_dst:
        # Transcode queued file to a VP8/vorbis file that fits in a 640x640 square
        progress_callback = ProgressCallback(entry)

        dimensions = (
            mgg.global_config['media:medium']['max_width'],
            mgg.global_config['media:medium']['max_height'])

        # Extract metadata and keep a record of it
        metadata = transcoders.VideoTranscoder().discover(queued_filename)
        store_metadata(entry, metadata)

        # Figure out whether or not we need to transcode this video or
        # if we can skip it
        if skip_transcode(metadata):
            _log.debug('Skipping transcoding')

            dst_dimensions = metadata['videowidth'], metadata['videoheight']

            # Push original file to public storage
            _log.debug('Saving original...')
            proc_state.copy_original(queued_filepath[-1])

            did_transcode = False
        else:
            transcoder = transcoders.VideoTranscoder()

            transcoder.transcode(queued_filename, tmp_dst.name,
                    vp8_quality=video_config['vp8_quality'],
                    vp8_threads=video_config['vp8_threads'],
                    vorbis_quality=video_config['vorbis_quality'],
                    progress_callback=progress_callback,
                    dimensions=dimensions)

            dst_dimensions = transcoder.dst_data.videowidth,\
                transcoder.dst_data.videoheight

            # Push transcoded video to public storage
            _log.debug('Saving medium...')
            mgg.public_store.copy_local_to_storage(tmp_dst.name, medium_filepath)
            _log.debug('Saved medium')

            entry.media_files['webm_640'] = medium_filepath

            did_transcode = True

    # Save the width and height of the transcoded video
    entry.media_data_init(
        width=dst_dimensions[0],
        height=dst_dimensions[1])

    # Temporary file for the video thumbnail (cleaned up with workbench)
    tmp_thumb = NamedTemporaryFile(dir=workbench.dir, suffix='.jpg', delete=False)

    with tmp_thumb:
        # Create a thumbnail.jpg that fits in a 180x180 square
        transcoders.VideoThumbnailerMarkII(
            queued_filename,
            tmp_thumb.name,
            180)

        # Push the thumbnail to public storage
        _log.debug('Saving thumbnail...')
        mgg.public_store.copy_local_to_storage(tmp_thumb.name, thumbnail_filepath)

        entry.media_files['thumb'] = thumbnail_filepath

    # save the original... but only if we did a transcoding
    # (if we skipped transcoding and just kept the original anyway as the main
    # media, then why would we save the original twice?)
    if video_config['keep_original'] and did_transcode:
        # Push original file to public storage
        _log.debug('Saving original...')
        proc_state.copy_original(queued_filepath[-1])

    # Remove queued media file from storage and database
    proc_state.delete_queue_file()
def process_video(proc_state):
    """
    Process a video entry, transcode the queued media files (originals) and
    create a thumbnail for the entry.

    A Workbench() represents a local tempory dir. It is automatically
    cleaned up when this function exits.

    Extracts and stores metadata first, then either transcodes to a
    640x640-bounded VP8/vorbis WebM or — when ``skip_transcode`` says the
    source is already suitable — publishes the original as the main
    media.  Always renders a 180x180-bounded thumbnail.

    :param proc_state: processing state holding the media ``entry`` and
        the local ``workbench`` directory
    """
    entry = proc_state.entry
    workbench = proc_state.workbench
    video_config = mgg.global_config['media_type:mediagoblin.media_types.video']

    queued_filepath = entry.queued_media_file
    queued_filename = proc_state.get_queued_filename()
    name_builder = FilenameBuilder(queued_filename)

    medium_basename = name_builder.fill('{basename}-640p.webm')
    medium_filepath = create_pub_filepath(entry, medium_basename)

    thumbnail_basename = name_builder.fill('{basename}.thumbnail.jpg')
    thumbnail_filepath = create_pub_filepath(entry, thumbnail_basename)

    # Create a temporary file for the video destination (cleaned up with workbench)
    # Plain path inside the workbench dir; removed when the workbench is.
    tmp_dst = os.path.join(workbench.dir, medium_basename)

    # Transcode queued file to a VP8/vorbis file that fits in a 640x640 square
    progress_callback = ProgressCallback(entry)

    dimensions = (
        mgg.global_config['media:medium']['max_width'],
        mgg.global_config['media:medium']['max_height'])

    # Extract metadata and keep a record of it
    metadata = transcoders.VideoTranscoder().discover(queued_filename)
    store_metadata(entry, metadata)

    # Figure out whether or not we need to transcode this video or
    # if we can skip it
    if skip_transcode(metadata):
        _log.debug('Skipping transcoding')

        dst_dimensions = metadata['videowidth'], metadata['videoheight']

        # Push original file to public storage
        _log.debug('Saving original...')
        proc_state.copy_original(queued_filepath[-1])

        did_transcode = False
    else:
        transcoder = transcoders.VideoTranscoder()

        transcoder.transcode(queued_filename, tmp_dst,
                vp8_quality=video_config['vp8_quality'],
                vp8_threads=video_config['vp8_threads'],
                vorbis_quality=video_config['vorbis_quality'],
                progress_callback=progress_callback,
                dimensions=dimensions)

        dst_dimensions = transcoder.dst_data.videowidth,\
            transcoder.dst_data.videoheight

        # Push transcoded video to public storage
        _log.debug('Saving medium...')
        mgg.public_store.copy_local_to_storage(tmp_dst, medium_filepath)
        _log.debug('Saved medium')

        entry.media_files['webm_640'] = medium_filepath

        did_transcode = True

    # Save the width and height of the transcoded video
    entry.media_data_init(
        width=dst_dimensions[0],
        height=dst_dimensions[1])

    # Temporary file for the video thumbnail (cleaned up with workbench)
    tmp_thumb = os.path.join(workbench.dir, thumbnail_basename)

    # Create a thumbnail.jpg that fits in a 180x180 square
    transcoders.VideoThumbnailerMarkII(
        queued_filename,
        tmp_thumb,
        180)

    # Push the thumbnail to public storage
    _log.debug('Saving thumbnail...')
    mgg.public_store.copy_local_to_storage(tmp_thumb, thumbnail_filepath)

    entry.media_files['thumb'] = thumbnail_filepath

    # save the original... but only if we did a transcoding
    # (if we skipped transcoding and just kept the original anyway as the main
    # media, then why would we save the original twice?)
    if video_config['keep_original'] and did_transcode:
        # Push original file to public storage
        _log.debug('Saving original...')
        proc_state.copy_original(queued_filepath[-1])

    # Remove queued media file from storage and database
    proc_state.delete_queue_file()
def process_audio(proc_state):
    """Code to process uploaded audio. Will be run by celery.

    A Workbench() represents a local tempory dir. It is automatically
    cleaned up when this function exits.

    Transcodes the queued file to a WebM (vorbis) audio file, optionally
    keeps the original, and — when configured — renders a spectrogram
    plus a thumbnail of it, then cleans up the queue.

    :param proc_state: processing state holding the media ``entry`` and
        the local ``workbench`` directory
    """
    entry = proc_state.entry
    workbench = proc_state.workbench
    audio_config = mgg.global_config['media_type:mediagoblin.media_types.audio']

    queued_filepath = entry.queued_media_file
    queued_filename = workbench.localized_file(
        mgg.queue_store, queued_filepath, 'source')
    name_builder = FilenameBuilder(queued_filename)

    webm_audio_filepath = create_pub_filepath(
        entry,
        '{original}.webm'.format(
            original=os.path.splitext(queued_filepath[-1])[0]))

    if audio_config['keep_original']:
        with open(queued_filename, 'rb') as queued_file:
            original_filepath = create_pub_filepath(
                entry, name_builder.fill('{basename}{ext}'))

            with mgg.public_store.get_file(original_filepath, 'wb') as \
                    original_file:
                _log.debug('Saving original...')
                original_file.write(queued_file.read())

            entry.media_files['original'] = original_filepath

    transcoder = AudioTranscoder()

    with NamedTemporaryFile(dir=workbench.dir) as webm_audio_tmp:
        progress_callback = ProgressCallback(entry)

        transcoder.transcode(
            queued_filename,
            webm_audio_tmp.name,
            quality=audio_config['quality'],
            progress_callback=progress_callback)

        transcoder.discover(webm_audio_tmp.name)

        _log.debug('Saving medium...')
        # Fixed: wrap the public-store handle in a context manager so it
        # is flushed and closed instead of leaked (matches the style used
        # everywhere else in this module).
        with mgg.public_store.get_file(webm_audio_filepath, 'wb') as \
                webm_audio_file:
            webm_audio_file.write(webm_audio_tmp.read())

        entry.media_files['webm_audio'] = webm_audio_filepath

        # entry.media_data_init(length=int(data.audiolength))

    if audio_config['create_spectrogram']:
        spectrogram_filepath = create_pub_filepath(
            entry,
            '{original}-spectrogram.jpg'.format(
                original=os.path.splitext(queued_filepath[-1])[0]))

        with NamedTemporaryFile(dir=workbench.dir, suffix='.ogg') as wav_tmp:
            _log.info('Creating OGG source for spectrogram')
            transcoder.transcode(
                queued_filename,
                wav_tmp.name,
                mux_string='vorbisenc quality={0} ! oggmux'.format(
                    audio_config['quality']))

            thumbnailer = AudioThumbnailer()

            with NamedTemporaryFile(dir=workbench.dir, suffix='.jpg') as \
                    spectrogram_tmp:
                thumbnailer.spectrogram(
                    wav_tmp.name,
                    spectrogram_tmp.name,
                    width=mgg.global_config['media:medium']['max_width'],
                    fft_size=audio_config['spectrogram_fft_size'])

                _log.debug('Saving spectrogram...')
                # Fixed: context-managed write (was a leaked handle).
                with mgg.public_store.get_file(spectrogram_filepath, 'wb') as \
                        spectrogram_file:
                    spectrogram_file.write(spectrogram_tmp.read())

                entry.media_files['spectrogram'] = spectrogram_filepath

                with NamedTemporaryFile(dir=workbench.dir, suffix='.jpg') as \
                        thumb_tmp:
                    thumbnailer.thumbnail_spectrogram(
                        spectrogram_tmp.name,
                        thumb_tmp.name,
                        (mgg.global_config['media:thumb']['max_width'],
                         mgg.global_config['media:thumb']['max_height']))

                    thumb_filepath = create_pub_filepath(
                        entry,
                        '{original}-thumbnail.jpg'.format(
                            original=os.path.splitext(queued_filepath[-1])[0]))

                    # Fixed: context-managed write (was a leaked handle).
                    with mgg.public_store.get_file(thumb_filepath, 'wb') as \
                            thumb_file:
                        thumb_file.write(thumb_tmp.read())

                    entry.media_files['thumb'] = thumb_filepath
    else:
        entry.media_files['thumb'] = ['fake', 'thumb', 'path.jpg']

    # Remove queued media file from storage and database.
    # queued_filepath is in the task_id directory which should
    # be removed too, but fail if the directory is not empty to be on
    # the super-safe side.
    mgg.queue_store.delete_file(queued_filepath)      # rm file
    mgg.queue_store.delete_dir(queued_filepath[:-1])  # rm dir
    entry.queued_media_file = []
def process_audio(proc_state):
    """Code to process uploaded audio. Will be run by celery.

    A Workbench() represents a local tempory dir. It is automatically
    cleaned up when this function exits.

    Transcodes the queued file to a WebM (vorbis) audio file, optionally
    keeps the original, and — when configured — renders a spectrogram
    plus a thumbnail of it, then cleans up the queue.

    :param proc_state: processing state holding the media ``entry`` and
        the local ``workbench`` directory
    """
    entry = proc_state.entry
    workbench = proc_state.workbench
    audio_config = mgg.global_config[
        'media_type:mediagoblin.media_types.audio']

    queued_filepath = entry.queued_media_file
    queued_filename = workbench.localized_file(mgg.queue_store,
                                               queued_filepath, 'source')
    name_builder = FilenameBuilder(queued_filename)

    webm_audio_filepath = create_pub_filepath(
        entry,
        '{original}.webm'.format(
            original=os.path.splitext(queued_filepath[-1])[0]))

    if audio_config['keep_original']:
        with open(queued_filename, 'rb') as queued_file:
            original_filepath = create_pub_filepath(
                entry, name_builder.fill('{basename}{ext}'))

            with mgg.public_store.get_file(original_filepath, 'wb') as \
                    original_file:
                _log.debug('Saving original...')
                original_file.write(queued_file.read())

            entry.media_files['original'] = original_filepath

    transcoder = AudioTranscoder()

    with NamedTemporaryFile(dir=workbench.dir) as webm_audio_tmp:
        progress_callback = ProgressCallback(entry)

        transcoder.transcode(queued_filename, webm_audio_tmp.name,
                             quality=audio_config['quality'],
                             progress_callback=progress_callback)

        transcoder.discover(webm_audio_tmp.name)

        _log.debug('Saving medium...')
        # Fixed: wrap the public-store handle in a context manager so it
        # is flushed and closed instead of leaked (matches the style used
        # everywhere else in this module).
        with mgg.public_store.get_file(webm_audio_filepath, 'wb') as \
                webm_audio_file:
            webm_audio_file.write(webm_audio_tmp.read())

        entry.media_files['webm_audio'] = webm_audio_filepath

        # entry.media_data_init(length=int(data.audiolength))

    if audio_config['create_spectrogram']:
        spectrogram_filepath = create_pub_filepath(
            entry,
            '{original}-spectrogram.jpg'.format(
                original=os.path.splitext(queued_filepath[-1])[0]))

        with NamedTemporaryFile(dir=workbench.dir, suffix='.ogg') as wav_tmp:
            _log.info('Creating OGG source for spectrogram')
            transcoder.transcode(
                queued_filename, wav_tmp.name,
                mux_string='vorbisenc quality={0} ! oggmux'.format(
                    audio_config['quality']))

            thumbnailer = AudioThumbnailer()

            with NamedTemporaryFile(dir=workbench.dir, suffix='.jpg') as \
                    spectrogram_tmp:
                thumbnailer.spectrogram(
                    wav_tmp.name, spectrogram_tmp.name,
                    width=mgg.global_config['media:medium']['max_width'],
                    fft_size=audio_config['spectrogram_fft_size'])

                _log.debug('Saving spectrogram...')
                # Fixed: context-managed write (was a leaked handle).
                with mgg.public_store.get_file(spectrogram_filepath, 'wb') as \
                        spectrogram_file:
                    spectrogram_file.write(spectrogram_tmp.read())

                entry.media_files['spectrogram'] = spectrogram_filepath

                with NamedTemporaryFile(dir=workbench.dir, suffix='.jpg') as \
                        thumb_tmp:
                    thumbnailer.thumbnail_spectrogram(
                        spectrogram_tmp.name, thumb_tmp.name,
                        (mgg.global_config['media:thumb']['max_width'],
                         mgg.global_config['media:thumb']['max_height']))

                    thumb_filepath = create_pub_filepath(
                        entry,
                        '{original}-thumbnail.jpg'.format(
                            original=os.path.splitext(queued_filepath[-1])[0]))

                    # Fixed: context-managed write (was a leaked handle).
                    with mgg.public_store.get_file(thumb_filepath, 'wb') as \
                            thumb_file:
                        thumb_file.write(thumb_tmp.read())

                    entry.media_files['thumb'] = thumb_filepath
    else:
        entry.media_files['thumb'] = ['fake', 'thumb', 'path.jpg']

    # Remove queued media file from storage and database.
    # queued_filepath is in the task_id directory which should
    # be removed too, but fail if the directory is not empty to be on
    # the super-safe side.
    mgg.queue_store.delete_file(queued_filepath)      # rm file
    mgg.queue_store.delete_dir(queued_filepath[:-1])  # rm dir
    entry.queued_media_file = []
def process_ascii(entry):
    """Code to process a txt file.

    Detects the text file's charset, renders a PNG thumbnail of the
    ASCII art, publishes the original and an ASCII-portable copy, then
    cleans the queue and records the media file paths.

    :param entry: the media entry whose queued file is processed
    """
    ascii_config = mgg.global_config['media_type:mediagoblin.media_types.ascii']

    workbench = mgg.workbench_manager.create_workbench()
    # Conversions subdirectory to avoid collisions
    conversions_subdir = os.path.join(
        workbench.dir, 'conversions')
    os.mkdir(conversions_subdir)

    queued_filepath = entry.queued_media_file
    queued_filename = workbench.localized_file(
        mgg.queue_store, queued_filepath,
        'source')

    # Fixed: ``open()`` instead of the Python-2-only ``file()`` builtin.
    queued_file = open(queued_filename, 'rb')

    with queued_file:
        queued_file_charset = chardet.detect(queued_file.read())

        # Only select a non-utf-8 charset if chardet is *really* sure
        # Tested with "Feli\x0109an superjaron", which was detecte
        if queued_file_charset['confidence'] < 0.9:
            interpreted_charset = 'utf-8'
        else:
            interpreted_charset = queued_file_charset['encoding']

        _log.info('Charset detected: {0}\nWill interpret as: {1}'.format(
                queued_file_charset,
                interpreted_charset))

        queued_file.seek(0)  # Rewind the queued file

        thumb_filepath = create_pub_filepath(
            entry, 'thumbnail.png')

        tmp_thumb_filename = os.path.join(
            conversions_subdir, thumb_filepath[-1])

        ascii_converter_args = {}

        if ascii_config['thumbnail_font']:
            ascii_converter_args.update(
                {'font': ascii_config['thumbnail_font']})

        converter = asciitoimage.AsciiToImage(
            **ascii_converter_args)

        thumb = converter._create_image(
            queued_file.read())

        # Fixed: open in binary mode — PNG data written through a text-mode
        # handle would be corrupted on Python 3 (and on Windows).
        with open(tmp_thumb_filename, 'wb') as thumb_file:
            thumb.thumbnail(
                (mgg.global_config['media:thumb']['max_width'],
                 mgg.global_config['media:thumb']['max_height']),
                Image.ANTIALIAS)
            thumb.save(thumb_file)

        _log.debug('Copying local file to public storage')
        mgg.public_store.copy_local_to_storage(
            tmp_thumb_filename, thumb_filepath)

        queued_file.seek(0)

        original_filepath = create_pub_filepath(entry, queued_filepath[-1])

        with mgg.public_store.get_file(original_filepath, 'wb') \
                as original_file:
            original_file.write(queued_file.read())

        queued_file.seek(0)  # Rewind *again*

        unicode_filepath = create_pub_filepath(entry, 'ascii-portable.txt')

        with mgg.public_store.get_file(unicode_filepath, 'wb') \
                as unicode_file:
            # Decode the original file from its detected charset (or UTF8)
            # Encode the unicode instance to ASCII and replace any non-ASCII
            # with an HTML entity (&#
            # Fixed: ``six.text_type`` instead of the Py2-only ``unicode()``.
            unicode_file.write(
                six.text_type(queued_file.read().decode(
                    interpreted_charset)).encode(
                        'ascii', 'xmlcharrefreplace'))

    mgg.queue_store.delete_file(queued_filepath)

    entry.queued_media_file = []

    media_files_dict = entry.setdefault('media_files', {})
    media_files_dict['thumb'] = thumb_filepath
    media_files_dict['unicode'] = unicode_filepath
    media_files_dict['original'] = original_filepath

    entry.save()
def process_image(entry):
    """
    Code to process an image.

    Extracts EXIF/GPS data, always creates a thumbnail, creates a
    ``.medium`` rendition when the original exceeds the configured
    medium size (or needs EXIF rotation), publishes the original, then
    cleans up the queue and records everything in the database.

    :param entry: the media entry whose queued file is processed
    """
    workbench = mgg.workbench_manager.create_workbench()
    # Conversions subdirectory to avoid collisions
    conversions_subdir = os.path.join(
        workbench.dir, 'conversions')
    os.mkdir(conversions_subdir)

    queued_filepath = entry.queued_media_file
    queued_filename = workbench.localized_file(
        mgg.queue_store, queued_filepath, 'source')
    name_builder = FilenameBuilder(queued_filename)

    # EXIF extraction
    exif_tags = extract_exif(queued_filename)
    gps_data = get_gps_data(exif_tags)

    # Always create a small thumbnail
    thumb_filepath = create_pub_filepath(
        entry, name_builder.fill('{basename}.thumbnail{ext}'))
    resize_image(entry, queued_filename, thumb_filepath,
                 exif_tags, conversions_subdir,
                 (mgg.global_config['media:thumb']['max_width'],
                  mgg.global_config['media:thumb']['max_height']))

    # If the size of the original file exceeds the specified size of a `medium`
    # file, a `.medium.jpg` files is created and later associated with the media
    # entry.
    medium = Image.open(queued_filename)
    if medium.size[0] > mgg.global_config['media:medium']['max_width'] \
            or medium.size[1] > mgg.global_config['media:medium']['max_height'] \
            or exif_image_needs_rotation(exif_tags):
        medium_filepath = create_pub_filepath(
            entry, name_builder.fill('{basename}.medium{ext}'))
        resize_image(
            entry, queued_filename, medium_filepath,
            exif_tags, conversions_subdir,
            (mgg.global_config['media:medium']['max_width'],
             mgg.global_config['media:medium']['max_height']))
    else:
        medium_filepath = None

    # we have to re-read because unlike PIL, not everything reads
    # things in string representation :)
    # Fixed: ``open()`` instead of the Python-2-only ``file()`` builtin.
    queued_file = open(queued_filename, 'rb')

    with queued_file:
        original_filepath = create_pub_filepath(
            entry, name_builder.fill('{basename}{ext}'))

        with mgg.public_store.get_file(original_filepath, 'wb') \
                as original_file:
            original_file.write(queued_file.read())

    # Remove queued media file from storage and database
    mgg.queue_store.delete_file(queued_filepath)
    entry.queued_media_file = []

    # Insert media file information into database
    media_files_dict = entry.setdefault('media_files', {})
    media_files_dict[u'thumb'] = thumb_filepath
    media_files_dict[u'original'] = original_filepath
    if medium_filepath:
        media_files_dict[u'medium'] = medium_filepath

    # Insert exif data into database
    exif_all = clean_exif(exif_tags)

    if len(exif_all):
        entry.media_data_init(exif_all=exif_all)

    if len(gps_data):
        for key in list(gps_data.keys()):
            gps_data['gps_' + key] = gps_data.pop(key)

        entry.media_data_init(**gps_data)

    # clean up workbench
    workbench.destroy_self()