def run(args, media_id=None):
    if not media_id:
        media_id = args.media_id
    try:
        media_entry, manager = get_entry_and_processing_manager(media_id)

        # TODO: (maybe?) This could probably be handled entirely by the
        # processor class...
        try:
            processor_class = manager.get_processor(
                args.reprocess_command, media_entry)
        except ProcessorDoesNotExist:
            print 'No such processor "%s" for media with id "%s"' % (
                args.reprocess_command, media_entry.id)
            return
        except ProcessorNotEligible:
            print 'Processor "%s" exists but media "%s" is not eligible' % (
                args.reprocess_command, media_entry.id)
            return

        reprocess_parser = processor_class.generate_parser()
        reprocess_args = reprocess_parser.parse_args(args.reprocess_args)
        reprocess_request = processor_class.args_to_request(reprocess_args)
        run_process_media(
            media_entry,
            reprocess_action=args.reprocess_command,
            reprocess_info=reprocess_request)

    except ProcessingManagerDoesNotExist:
        entry = MediaEntry.query.filter_by(id=media_id).first()
        print 'No such processing manager for {0}'.format(entry.media_type)

def run(args, media_id=None):
    if not media_id:
        media_id = args.media_id
    try:
        media_entry, manager = get_entry_and_processing_manager(media_id)

        # TODO: (maybe?) This could probably be handled entirely by the
        # processor class...
        try:
            processor_class = manager.get_processor(
                args.reprocess_command, media_entry)
        except ProcessorDoesNotExist:
            print('No such processor "{}" for media with id "{}"'.format(
                args.reprocess_command, media_entry.id))
            return
        except ProcessorNotEligible:
            print('Processor "{}" exists but media "{}" is not eligible'.format(
                args.reprocess_command, media_entry.id))
            return

        reprocess_parser = processor_class.generate_parser()
        reprocess_args = reprocess_parser.parse_args(args.reprocess_args)
        reprocess_request = processor_class.args_to_request(reprocess_args)
        run_process_media(
            media_entry,
            reprocess_action=args.reprocess_command,
            reprocess_info=reprocess_request)

    except ProcessingManagerDoesNotExist:
        entry = MediaEntry.query.filter_by(id=media_id).first()
        print('No such processing manager for {}'.format(entry.media_type))

def test_celery_chord(self, mock_chord, mock_process_media):
    entry = get_sample_entry(self.our_user(), self.media_type)

    # prepare things for testing
    video_config = mg_globals.global_config['plugins'][entry.media_type]
    def_res = video_config['default_resolution']
    priority_num = len(video_config['available_resolutions']) + 1

    reprocess_info = {
        'vorbis_quality': None,
        'vp8_threads': None,
        'thumb_size': None,
        'vp8_quality': None,
    }
    tasks_list = [main_task.signature(args=(entry.id, def_res,
                                            ACCEPTED_RESOLUTIONS[def_res]),
                                      kwargs=reprocess_info, queue='default',
                                      priority=priority_num, immutable=True)]

    for comp_res in video_config['available_resolutions']:
        if comp_res != def_res:
            priority_num += -1
            tasks_list.append(
                complementary_task.signature(
                    args=(entry.id, comp_res, ACCEPTED_RESOLUTIONS[comp_res]),
                    kwargs=reprocess_info, queue='default',
                    priority=priority_num, immutable=True)
            )

    transcoding_tasks = group(tasks_list)
    run_process_media(entry)
    mock_chord.assert_called_once_with(transcoding_tasks)

    entry.delete()

def import_file(self, media):
    try:
        media_type, media_manager = (
            # get_media_type_and_manager(media.filename))
            type_match_handler(media, media.filename))
    except FileTypeNotSupported:
        print u"File type not supported: {0}".format(media.filename)
        return

    entry = self.db.MediaEntry()
    entry.media_type = unicode(media_type)
    entry.title = unicode(
        os.path.basename(os.path.splitext(media.filename)[0]))

    entry.uploader = 1

    # Process the user's folksonomy "tags"
    entry.tags = convert_to_tag_list_of_dicts("")

    # Generate a slug from the title
    entry.generate_slug()

    task_id = unicode(uuid.uuid4())
    entry.queued_media_file = media.filename.split("/")
    entry.queued_task_id = task_id

    try:
        entry.save()
        entry_id = entry.id
        run_process_media(entry)
        Session.commit()
        return entry_id
    except Exception:
        Session.rollback()
        raise

def pwg_images_addSimple(request):
    form = AddSimpleForm(request.form)
    if not form.validate():
        _log.error("addSimple: form failed")
        raise BadRequest()
    dump = []
    for f in form:
        dump.append("%s=%r" % (f.name, f.data))
    _log.info("addSimple: %r %s %r", request.form,
              " ".join(dump), request.files)

    if not check_file_field(request, "image"):
        raise BadRequest()

    filename = request.files["image"].filename

    # Sniff the submitted media to determine which
    # media plugin should handle processing
    media_type, media_manager = sniff_media(request.files["image"])

    # create entry and save in database
    entry = new_upload_entry(request.user)
    entry.media_type = unicode(media_type)
    entry.title = unicode(form.name.data) or unicode(splitext(filename)[0])

    entry.description = unicode(form.comment.data)

    """
    # Process the user's folksonomy "tags"
    entry.tags = convert_to_tag_list_of_dicts(
        form.tags.data)
    """

    # Generate a slug from the title
    entry.generate_slug()

    queue_file = prepare_queue_task(request.app, entry, filename)
    with queue_file:
        shutil.copyfileobj(request.files["image"].stream, queue_file,
                           length=4 * 1048576)

    # Save now so we have this data before kicking off processing
    entry.save()

    # Pass off to processing
    #
    # (... don't change entry after this point to avoid race
    # conditions with changes to the document via processing code)
    feed_url = request.urlgen("mediagoblin.user_pages.atom_feed",
                              qualified=True,
                              user=request.user.username)
    run_process_media(entry, feed_url)

    collection_id = form.category.data
    if collection_id > 0:
        collection = Collection.query.get(collection_id)
        if collection is not None and collection.creator == request.user.id:
            add_media_to_collection(collection, entry, "")

    return {"image_id": entry.id,
            "url": entry.url_for_self(request.urlgen, qualified=True)}

def post_entry(request):
    _log.debug('Posting entry')

    if request.method == 'OPTIONS':
        return json_response({'status': 200})

    if request.method != 'POST':
        _log.debug('Must POST against post_entry')
        raise BadRequest()

    if not 'file' in request.files \
            or not isinstance(request.files['file'], FileStorage) \
            or not request.files['file'].stream:
        _log.debug('File field not found')
        raise BadRequest()

    media_file = request.files['file']

    media_type, media_manager = sniff_media(media_file)

    entry = request.db.MediaEntry()
    entry.media_type = unicode(media_type)
    entry.title = unicode(request.form.get('title')
                          or splitext(media_file.filename)[0])

    entry.description = unicode(request.form.get('description'))
    entry.license = unicode(request.form.get('license', ''))

    entry.uploader = request.user.id

    entry.generate_slug()

    # queue appropriately
    queue_file = prepare_queue_task(request.app, entry, media_file.filename)

    with queue_file:
        queue_file.write(request.files['file'].stream.read())

    # Save now so we have this data before kicking off processing
    entry.save()

    if request.form.get('callback_url'):
        metadata = request.db.ProcessingMetaData()
        metadata.media_entry = entry
        metadata.callback_url = unicode(request.form['callback_url'])
        metadata.save()

    # Pass off to processing
    #
    # (... don't change entry after this point to avoid race
    # conditions with changes to the document via processing code)
    run_process_media(entry)

    return json_response(get_entry_serializable(entry, request.urlgen))

def initial(args):
    """
    Reprocess all failed media
    """
    query = MediaEntry.query.filter_by(state='failed')

    for entry in query:
        try:
            media_entry, manager = get_entry_and_processing_manager(entry.id)
            run_process_media(media_entry, reprocess_action='initial')
        except ProcessingManagerDoesNotExist:
            print 'No such processing manager for {0}'.format(entry.media_type)

def post_entry(request):
    _log.debug('Posting entry')

    if request.method == 'OPTIONS':
        return json_response({'status': 200})

    if request.method != 'POST':
        _log.debug('Must POST against post_entry')
        raise BadRequest()

    if not check_file_field(request, 'file'):
        _log.debug('File field not found')
        raise BadRequest()

    media_file = request.files['file']

    media_type, media_manager = sniff_media(media_file)

    entry = new_upload_entry(request.user)
    entry.media_type = unicode(media_type)
    entry.title = unicode(
        request.form.get('title') or splitext(media_file.filename)[0])

    entry.description = unicode(request.form.get('description'))
    entry.license = unicode(request.form.get('license', ''))

    entry.generate_slug()

    # queue appropriately
    queue_file = prepare_queue_task(request.app, entry, media_file.filename)

    with queue_file:
        queue_file.write(request.files['file'].stream.read())

    # Save now so we have this data before kicking off processing
    entry.save()

    if request.form.get('callback_url'):
        metadata = request.db.ProcessingMetaData()
        metadata.media_entry = entry
        metadata.callback_url = unicode(request.form['callback_url'])
        metadata.save()

    # Pass off to processing
    #
    # (... don't change entry after this point to avoid race
    # conditions with changes to the document via processing code)
    feed_url = request.urlgen('mediagoblin.user_pages.atom_feed',
                              qualified=True,
                              user=request.user.username)
    run_process_media(entry, feed_url)

    return json_response(get_entry_serializable(entry, request.urlgen))

def post_entry(request):
    _log.debug("Posting entry")

    if request.method == "OPTIONS":
        return json_response({"status": 200})

    if request.method != "POST":
        _log.debug("Must POST against post_entry")
        raise BadRequest()

    if not check_file_field(request, "file"):
        _log.debug("File field not found")
        raise BadRequest()

    media_file = request.files["file"]

    media_type, media_manager = sniff_media(media_file)

    entry = request.db.MediaEntry()
    entry.media_type = unicode(media_type)
    entry.title = unicode(request.form.get("title")
                          or splitext(media_file.filename)[0])

    entry.description = unicode(request.form.get("description"))
    entry.license = unicode(request.form.get("license", ""))

    entry.uploader = request.user.id

    entry.generate_slug()

    # queue appropriately
    queue_file = prepare_queue_task(request.app, entry, media_file.filename)

    with queue_file:
        queue_file.write(request.files["file"].stream.read())

    # Save now so we have this data before kicking off processing
    entry.save()

    if request.form.get("callback_url"):
        metadata = request.db.ProcessingMetaData()
        metadata.media_entry = entry
        metadata.callback_url = unicode(request.form["callback_url"])
        metadata.save()

    # Pass off to processing
    #
    # (... don't change entry after this point to avoid race
    # conditions with changes to the document via processing code)
    feed_url = request.urlgen("mediagoblin.user_pages.atom_feed",
                              qualified=True,
                              user=request.user.username)
    run_process_media(entry, feed_url)

    return json_response(get_entry_serializable(entry, request.urlgen))

def initial(args):
    """
    Reprocess all failed media
    """
    query = MediaEntry.query.filter_by(state='failed')

    for entry in query:
        try:
            media_entry, manager = get_entry_and_processing_manager(entry.id)
            run_process_media(
                media_entry,
                reprocess_action='initial')
        except ProcessingManagerDoesNotExist:
            print 'No such processing manager for {0}'.format(entry.media_type)

def thumbs(args):
    """
    Regenerate thumbs for all processed media
    """
    query = MediaEntry.query.filter_by(state='processed')

    for entry in query:
        try:
            media_entry, manager = get_entry_and_processing_manager(entry.id)

            # TODO: (maybe?) This could probably be handled entirely by the
            # processor class...
            try:
                processor_class = manager.get_processor(
                    'resize', media_entry)
            except ProcessorDoesNotExist:
                print('No such processor "{}" for media with id "{}"'.format(
                    'resize', media_entry.id))
                return
            except ProcessorNotEligible:
                print('Processor "{}" exists but media "{}" is not eligible'.format(
                    'resize', media_entry.id))
                return

            reprocess_parser = processor_class.generate_parser()

            # prepare filetype and size to be passed into reprocess_parser
            if args.size:
                extra_args = 'thumb --{} {} {}'.format(
                    processor_class.thumb_size,
                    args.size[0],
                    args.size[1])
            else:
                extra_args = 'thumb'

            reprocess_args = reprocess_parser.parse_args(extra_args.split())
            reprocess_request = processor_class.args_to_request(reprocess_args)

            run_process_media(
                media_entry,
                reprocess_action='resize',
                reprocess_info=reprocess_request)

        except ProcessingManagerDoesNotExist:
            print('No such processing manager for {}'.format(entry.media_type))

def thumbs(args):
    """
    Regenerate thumbs for all processed media
    """
    query = MediaEntry.query.filter_by(state='processed')

    for entry in query:
        try:
            media_entry, manager = get_entry_and_processing_manager(entry.id)

            # TODO: (maybe?) This could probably be handled entirely by the
            # processor class...
            try:
                processor_class = manager.get_processor(
                    'resize', media_entry)
            except ProcessorDoesNotExist:
                print 'No such processor "%s" for media with id "%s"' % (
                    'resize', media_entry.id)
                return
            except ProcessorNotEligible:
                print 'Processor "%s" exists but media "%s" is not eligible' % (
                    'resize', media_entry.id)
                return

            reprocess_parser = processor_class.generate_parser()

            # prepare filetype and size to be passed into reprocess_parser
            if args.size:
                extra_args = 'thumb --{0} {1} {2}'.format(
                    processor_class.thumb_size,
                    args.size[0],
                    args.size[1])
            else:
                extra_args = 'thumb'

            reprocess_args = reprocess_parser.parse_args(extra_args.split())
            reprocess_request = processor_class.args_to_request(reprocess_args)

            run_process_media(
                media_entry,
                reprocess_action='resize',
                reprocess_info=reprocess_request)

        except ProcessingManagerDoesNotExist:
            print 'No such processing manager for {0}'.format(entry.media_type)

def submit_start(request):
    """
    First view for submitting a file.
    """
    submit_form = submit_forms.SubmitStartForm(request.form)

    if request.method == 'POST' and submit_form.validate():
        if not ('file' in request.files
                and isinstance(request.files['file'], FileStorage)
                and request.files['file'].stream):
            submit_form.file.errors.append(
                _(u'You must provide a file.'))
        else:
            try:
                filename = request.files['file'].filename

                # Sniff the submitted media to determine which
                # media plugin should handle processing
                media_type, media_manager = sniff_media(
                    request.files['file'])

                # create entry and save in database
                entry = request.db.MediaEntry()
                entry.media_type = unicode(media_type)
                entry.title = (
                    unicode(request.form['title'])
                    or unicode(splitext(filename)[0]))

                entry.description = unicode(request.form.get('description'))

                entry.license = unicode(request.form.get('license', "")) or None

                entry.uploader = request.user.id

                # Process the user's folksonomy "tags"
                entry.tags = convert_to_tag_list_of_dicts(
                    request.form.get('tags'))

                # Generate a slug from the title
                entry.generate_slug()

                queue_file = prepare_queue_task(request.app, entry, filename)

                with queue_file:
                    queue_file.write(request.files['file'].stream.read())

                # Save now so we have this data before kicking off processing
                entry.save()

                # Pass off to processing
                #
                # (... don't change entry after this point to avoid race
                # conditions with changes to the document via processing code)
                run_process_media(entry)

                handle_push_urls(request)

                add_message(request, SUCCESS, _('Woohoo! Submitted!'))

                return redirect(request, "mediagoblin.user_pages.user_home",
                                user=request.user.username)
            except Exception as e:
                '''
                This section is intended to catch exceptions raised in
                mediagoblin.media_types
                '''
                if isinstance(e, InvalidFileType) or \
                        isinstance(e, FileTypeNotSupported):
                    submit_form.file.errors.append(
                        e)
                else:
                    raise

    return render_to_response(
        request,
        'mediagoblin/submit/start.html',
        {'submit_form': submit_form,
         'app_config': mg_globals.app_config})

def submit_start(request):
    """
    First view for submitting a file.
    """
    submit_form = submit_forms.SubmitStartForm(
        request.form, license=request.user.license_preference)

    if request.method == 'POST' and submit_form.validate():
        if not check_file_field(request, 'file'):
            submit_form.file.errors.append(
                _(u'You must provide a file.'))
        else:
            try:
                filename = request.files['file'].filename

                # If the filename contains non ascii generate a unique name
                if not all(ord(c) < 128 for c in filename):
                    filename = unicode(uuid.uuid4()) + splitext(filename)[-1]

                # Sniff the submitted media to determine which
                # media plugin should handle processing
                media_type, media_manager = sniff_media(
                    request.files['file'])

                # create entry and save in database
                entry = new_upload_entry(request.user)
                entry.media_type = unicode(media_type)
                entry.title = (
                    unicode(submit_form.title.data)
                    or unicode(splitext(request.files['file'].filename)[0]))

                entry.description = unicode(submit_form.description.data)

                entry.license = unicode(submit_form.license.data) or None

                # Process the user's folksonomy "tags"
                entry.tags = convert_to_tag_list_of_dicts(
                    submit_form.tags.data)

                # Generate a slug from the title
                entry.generate_slug()

                queue_file = prepare_queue_task(request.app, entry, filename)

                with queue_file:
                    queue_file.write(request.files['file'].stream.read())

                # Save now so we have this data before kicking off processing
                entry.save()

                # Pass off to async processing
                #
                # (... don't change entry after this point to avoid race
                # conditions with changes to the document via processing code)
                feed_url = request.urlgen(
                    'mediagoblin.user_pages.atom_feed',
                    qualified=True, user=request.user.username)
                run_process_media(entry, feed_url)

                add_message(request, SUCCESS, _('Woohoo! Submitted!'))

                add_comment_subscription(request.user, entry)

                return redirect(request, "mediagoblin.user_pages.user_home",
                                user=request.user.username)
            except Exception as e:
                '''
                This section is intended to catch exceptions raised in
                mediagoblin.media_types
                '''
                if isinstance(e, InvalidFileType) or \
                        isinstance(e, FileTypeNotSupported):
                    submit_form.file.errors.append(
                        e)
                else:
                    raise

    return render_to_response(
        request,
        'mediagoblin/submit/start.html',
        {'submit_form': submit_form,
         'app_config': mg_globals.app_config})

def multi_submit_start(request):
    """
    First view for submitting a file.
    """
    submit_form = submit_forms.get_submit_start_form(
        request.form, license=request.user.license_preference)
    users_collections = Collection.query.filter_by(
        actor=request.user.id,
        type=Collection.USER_DEFINED_TYPE).order_by(Collection.title)
    if users_collections.count() > 0:
        submit_form.collection.query = users_collections
    else:
        del submit_form.collection
    # Below is what was used for mediagoblin 0.5.0-dev. Above is the new way.
    # submit_form = submit_forms.SubmitStartForm(request.form, license=request.user.license_preference)

    filecount = 0
    if request.method == 'POST' and submit_form.validate():
        if not check_file_field(request, 'file'):
            submit_form.file.errors.append(
                _(u'You must provide at least one file.'))
        else:
            for submitted_file in request.files.getlist('file'):
                try:
                    if not submitted_file.filename:
                        # MOST likely an invalid file
                        continue  # Skip the rest of the loop for this file
                    else:
                        filename = submitted_file.filename
                        _log.info("html5-multi-upload: Got filename: %s" % filename)

                        # If the filename contains non ascii generate a unique name
                        if not all(ord(c) < 128 for c in filename):
                            filename = str(
                                uuid.uuid4()) + splitext(filename)[-1]

                        # Sniff the submitted media to determine which
                        # media plugin should handle processing
                        media_type, media_manager = sniff_media(
                            submitted_file, filename)

                        # create entry and save in database
                        entry = new_upload_entry(request.user)
                        entry.media_type = str(media_type)
                        entry.title = (str(submit_form.title.data) or str(
                            splitext(submitted_file.filename)[0]))

                        entry.description = str(submit_form.description.data)

                        entry.license = str(submit_form.license.data) or None

                        # Process the user's folksonomy "tags"
                        entry.tags = convert_to_tag_list_of_dicts(
                            submit_form.tags.data)

                        # Generate a slug from the title
                        entry.generate_slug()

                        queue_file = prepare_queue_task(
                            request.app, entry, filename)
                        with queue_file:
                            queue_file.write(submitted_file.stream.read())

                        # Save now so we have this data before kicking off processing
                        entry.save()

                        # Pass off to async processing
                        #
                        # (... don't change entry after this point to avoid race
                        # conditions with changes to the document via processing code)
                        feed_url = request.urlgen(
                            'mediagoblin.user_pages.atom_feed',
                            qualified=True, user=request.user.username)
                        run_process_media(entry, feed_url)
                        if submit_form.collection and submit_form.collection.data:
                            add_media_to_collection(
                                submit_form.collection.data, entry)
                            create_activity("add", entry, request.user,
                                            target=submit_form.collection.data)
                        add_comment_subscription(request.user, entry)
                        filecount = filecount + 1

                except Exception as e:
                    '''
                    This section is intended to catch exceptions raised in
                    mediagoblin.media_types
                    '''
                    if isinstance(e, TypeNotFound) or isinstance(
                            e, FileTypeNotSupported):
                        submit_form.file.errors.append(e)
                    else:
                        raise

            add_message(request, SUCCESS,
                        _('Woohoo! Submitted %d Files!' % filecount))
            return redirect(request, "mediagoblin.user_pages.user_home",
                            user=request.user.username)
    return render_to_response(request, 'start.html',
                              {'multi_submit_form': submit_form})

def multi_submit_start(request):
    """
    First view for submitting a file.
    """
    submit_form = submit_forms.get_submit_start_form(
        request.form, license=request.user.license_preference)
    users_collections = Collection.query.filter_by(
        actor=request.user.id,
        type=Collection.USER_DEFINED_TYPE
    ).order_by(Collection.title)
    if users_collections.count() > 0:
        submit_form.collection.query = users_collections
    else:
        del submit_form.collection
    # Below is what was used for mediagoblin 0.5.0-dev. Above is the new way.
    # submit_form = submit_forms.SubmitStartForm(request.form, license=request.user.license_preference)

    filecount = 0
    if request.method == 'POST' and submit_form.validate():
        if not check_file_field(request, 'file'):
            submit_form.file.errors.append(
                _(u'You must provide at least one file.'))
        else:
            for submitted_file in request.files.getlist('file'):
                try:
                    if not submitted_file.filename:
                        # MOST likely an invalid file
                        continue  # Skip the rest of the loop for this file
                    else:
                        filename = submitted_file.filename
                        _log.info("html5-multi-upload: Got filename: %s" % filename)

                        # If the filename contains non ascii generate a unique name
                        if not all(ord(c) < 128 for c in filename):
                            filename = unicode(uuid.uuid4()) + splitext(filename)[-1]

                        # Sniff the submitted media to determine which
                        # media plugin should handle processing
                        media_type, media_manager = sniff_media(
                            submitted_file, filename)

                        # create entry and save in database
                        entry = new_upload_entry(request.user)
                        entry.media_type = unicode(media_type)
                        entry.title = (
                            unicode(submit_form.title.data)
                            or unicode(splitext(submitted_file.filename)[0]))

                        entry.description = unicode(submit_form.description.data)

                        entry.license = unicode(submit_form.license.data) or None

                        # Process the user's folksonomy "tags"
                        entry.tags = convert_to_tag_list_of_dicts(
                            submit_form.tags.data)

                        # Generate a slug from the title
                        entry.generate_slug()

                        queue_file = prepare_queue_task(request.app, entry, filename)
                        with queue_file:
                            queue_file.write(submitted_file.stream.read())

                        # Save now so we have this data before kicking off processing
                        entry.save()

                        # Pass off to async processing
                        #
                        # (... don't change entry after this point to avoid race
                        # conditions with changes to the document via processing code)
                        feed_url = request.urlgen(
                            'mediagoblin.user_pages.atom_feed',
                            qualified=True, user=request.user.username)
                        run_process_media(entry, feed_url)
                        if submit_form.collection and submit_form.collection.data:
                            add_media_to_collection(
                                submit_form.collection.data, entry)
                            create_activity(
                                "add", entry, request.user,
                                target=submit_form.collection.data)
                        add_comment_subscription(request.user, entry)
                        filecount = filecount + 1

                except Exception as e:
                    '''
                    This section is intended to catch exceptions raised in
                    mediagoblin.media_types
                    '''
                    if isinstance(e, TypeNotFound) or isinstance(e, FileTypeNotSupported):
                        submit_form.file.errors.append(e)
                    else:
                        raise

            add_message(request, SUCCESS,
                        _('Woohoo! Submitted %d Files!' % filecount))
            return redirect(request, "mediagoblin.user_pages.user_home",
                            user=request.user.username)
    return render_to_response(
        request,
        'start.html',
        {'multi_submit_form': submit_form})

def submit_start(request):
    """
    First view for submitting a file.
    """
    user = request.user
    if user.upload_limit >= 0:
        upload_limit = user.upload_limit
    else:
        upload_limit = mg_globals.app_config.get('upload_limit', None)

    if upload_limit and user.uploaded >= upload_limit:
        messages.add_message(
            request,
            messages.WARNING,
            _('Sorry, you have reached your upload limit.'))
        return redirect(request, "mediagoblin.user_pages.user_home",
                        user=request.user.username)

    max_file_size = mg_globals.app_config.get('max_file_size', None)

    submit_form = submit_forms.get_submit_start_form(
        request.form,
        license=request.user.license_preference,
        max_file_size=max_file_size,
        upload_limit=upload_limit,
        uploaded=user.uploaded)

    if request.method == 'POST' and submit_form.validate():
        if not check_file_field(request, 'file'):
            submit_form.file.errors.append(
                _(u'You must provide a file.'))
        else:
            try:
                filename = request.files['file'].filename

                # If the filename contains non ascii generate a unique name
                if not all(ord(c) < 128 for c in filename):
                    filename = unicode(uuid.uuid4()) + splitext(filename)[-1]

                # Sniff the submitted media to determine which
                # media plugin should handle processing
                media_type, media_manager = sniff_media(
                    request.files['file'])

                # create entry and save in database
                entry = new_upload_entry(request.user)
                entry.media_type = unicode(media_type)
                entry.title = (
                    unicode(submit_form.title.data)
                    or unicode(splitext(request.files['file'].filename)[0]))

                entry.description = unicode(submit_form.description.data)

                entry.license = unicode(submit_form.license.data) or None

                # Process the user's folksonomy "tags"
                entry.tags = convert_to_tag_list_of_dicts(
                    submit_form.tags.data)

                # Generate a slug from the title
                entry.generate_slug()

                queue_file = prepare_queue_task(request.app, entry, filename)

                with queue_file:
                    queue_file.write(request.files['file'].stream.read())

                # Get file size and round to 2 decimal places
                file_size = request.app.queue_store.get_file_size(
                    entry.queued_media_file) / (1024.0 * 1024)
                file_size = float('{0:.2f}'.format(file_size))

                error = False

                # Check if file size is over the limit
                if max_file_size and file_size >= max_file_size:
                    submit_form.file.errors.append(
                        _(u'Sorry, the file size is too big.'))
                    error = True

                # Check if user is over upload limit
                if upload_limit and (user.uploaded + file_size) >= upload_limit:
                    submit_form.file.errors.append(
                        _('Sorry, uploading this file will put you over your'
                          ' upload limit.'))
                    error = True

                if not error:
                    user.uploaded = user.uploaded + file_size
                    user.save()

                    entry.file_size = file_size

                    # Save now so we have this data before kicking off processing
                    entry.save()

                    # Pass off to processing
                    #
                    # (... don't change entry after this point to avoid race
                    # conditions with changes to the document via processing code)
                    feed_url = request.urlgen(
                        'mediagoblin.user_pages.atom_feed',
                        qualified=True, user=request.user.username)
                    run_process_media(entry, feed_url)

                    add_message(request, SUCCESS, _('Woohoo! Submitted!'))

                    add_comment_subscription(request.user, entry)

                    return redirect(request, "mediagoblin.user_pages.user_home",
                                    user=user.username)

            except Exception as e:
                '''
                This section is intended to catch exceptions raised in
                mediagoblin.media_types
                '''
                if isinstance(e, InvalidFileType) or \
                        isinstance(e, FileTypeNotSupported):
                    submit_form.file.errors.append(
                        e)
                else:
                    raise

    return render_to_response(
        request,
        'mediagoblin/submit/start.html',
        {'submit_form': submit_form,
         'app_config': mg_globals.app_config})