# Example #1
def run(args, media_id=None):
    """Run one reprocessing command against a single media entry.

    Resolves the entry's processing manager, looks up the processor named
    by ``args.reprocess_command``, parses the processor-specific arguments
    and hands everything to ``run_process_media``.
    """
    media_id = media_id or args.media_id
    try:
        media_entry, manager = get_entry_and_processing_manager(media_id)

        # TODO: (maybe?) This could probably be handled entirely by the
        # processor class...
        try:
            processor_class = manager.get_processor(
                args.reprocess_command, media_entry)
        except ProcessorDoesNotExist:
            print('No such processor "{}" for media with id "{}"'.format(
                args.reprocess_command, media_entry.id))
            return
        except ProcessorNotEligible:
            print('Processor "{}" exists but media "{}" is not eligible'.format(
                args.reprocess_command, media_entry.id))
            return

        # The processor defines its own CLI; translate the parsed namespace
        # into the reprocess-request dict that run_process_media expects.
        parser = processor_class.generate_parser()
        parsed = parser.parse_args(args.reprocess_args)
        request = processor_class.args_to_request(parsed)
        run_process_media(
            media_entry,
            reprocess_action=args.reprocess_command,
            reprocess_info=request)

    except ProcessingManagerDoesNotExist:
        entry = MediaEntry.query.filter_by(id=media_id).first()
        print('No such processing manager for {}'.format(entry.media_type))
def run(args, media_id=None):
    """Run one reprocessing command against a single media entry.

    :param args: parsed CLI namespace; ``args.reprocess_command`` names the
        processor, ``args.reprocess_args`` holds its extra arguments.
    :param media_id: optional explicit entry id; falls back to
        ``args.media_id`` when not given.
    """
    # Fix: the original used Python 2 `print` statements and %-formatting,
    # which is a syntax error on Python 3 and inconsistent with the
    # sibling definition in this file.
    if not media_id:
        media_id = args.media_id
    try:
        media_entry, manager = get_entry_and_processing_manager(media_id)

        # TODO: (maybe?) This could probably be handled entirely by the
        # processor class...
        try:
            processor_class = manager.get_processor(
                args.reprocess_command, media_entry)
        except ProcessorDoesNotExist:
            print('No such processor "{}" for media with id "{}"'.format(
                args.reprocess_command, media_entry.id))
            return
        except ProcessorNotEligible:
            print('Processor "{}" exists but media "{}" is not eligible'.format(
                args.reprocess_command, media_entry.id))
            return

        reprocess_parser = processor_class.generate_parser()
        reprocess_args = reprocess_parser.parse_args(args.reprocess_args)
        reprocess_request = processor_class.args_to_request(reprocess_args)
        run_process_media(
            media_entry,
            reprocess_action=args.reprocess_command,
            reprocess_info=reprocess_request)

    except ProcessingManagerDoesNotExist:
        entry = MediaEntry.query.filter_by(id=media_id).first()
        print('No such processing manager for {}'.format(entry.media_type))
def processing_cleanup(entry_id):
    """Finish up video processing for *entry_id*.

    Copies the original, keeps only the best transcode and removes the
    queued file, all within the processor's context manager.
    """
    _log.debug('Entered processing_cleanup')
    entry, manager = get_entry_and_processing_manager(entry_id)
    processor = CommonVideoProcessor(manager, entry)
    with processor:
        # no need to specify a resolution here
        processor.common_setup()
        processor.copy_original()
        processor.keep_best()
        processor.delete_queue_file()
    _log.debug('Deleted queue_file')
# Example #4
def initial(args):
    """
    Reprocess all failed media.

    Iterates over every MediaEntry in the 'failed' state and re-runs the
    'initial' processing action on it.
    """
    # Fix: the original used a Python 2 `print` statement, which is a
    # syntax error on Python 3 and inconsistent with the rest of the file.
    query = MediaEntry.query.filter_by(state='failed')

    for entry in query:
        try:
            media_entry, manager = get_entry_and_processing_manager(entry.id)
            run_process_media(media_entry, reprocess_action='initial')
        except ProcessingManagerDoesNotExist:
            # Skip entries whose media type has no registered manager.
            print('No such processing manager for {}'.format(entry.media_type))
def complementary_task(entry_id, resolution, medium_size, **process_info):
    """
    Side celery task to transcode the video to other resolutions
    """
    entry, manager = get_entry_and_processing_manager(entry_id)
    # Pull the transcoding quality settings out of the loose kwargs once,
    # then hand them to transcode() together with the target size.
    quality_opts = {key: process_info[key]
                    for key in ('vp8_quality', 'vp8_threads',
                                'vorbis_quality')}
    with CommonVideoProcessor(manager, entry) as processor:
        processor.common_setup(resolution)
        processor.transcode(medium_size=tuple(medium_size), **quality_opts)
    _log.info('MediaEntry ID {} is transcoded to {}'.format(
        entry.id, medium_size))
def initial(args):
    """
    Reprocess all failed media.

    Iterates over every MediaEntry in the 'failed' state and re-runs the
    'initial' processing action on it.
    """
    # Fix: the original used a Python 2 `print` statement, which is a
    # syntax error on Python 3 and inconsistent with the rest of the file.
    query = MediaEntry.query.filter_by(state='failed')

    for entry in query:
        try:
            media_entry, manager = get_entry_and_processing_manager(entry.id)
            run_process_media(
                media_entry,
                reprocess_action='initial')
        except ProcessingManagerDoesNotExist:
            # Skip entries whose media type has no registered manager.
            print('No such processing manager for {}'.format(entry.media_type))
# Example #7
def thumbs(args):
    """
    Regenerate thumbs for all processed media.

    :param args: parsed CLI namespace; ``args.size`` optionally holds a
        (width, height) pair forwarded to the 'resize' processor.
    """
    query = MediaEntry.query.filter_by(state='processed')

    for entry in query:
        try:
            media_entry, manager = get_entry_and_processing_manager(entry.id)

            # TODO: (maybe?) This could probably be handled entirely by the
            # processor class...
            try:
                processor_class = manager.get_processor(
                    'resize', media_entry)
            except ProcessorDoesNotExist:
                print('No such processor "{}" for media with id "{}"'.format(
                    'resize', media_entry.id))
                # Fix: was `return`, which aborted regeneration for every
                # remaining entry; one bad entry should only be skipped.
                continue
            except ProcessorNotEligible:
                print('Processor "{}" exists but media "{}" is not eligible'.format(
                    'resize', media_entry.id))
                continue

            reprocess_parser = processor_class.generate_parser()

            # prepare filetype and size to be passed into reprocess_parser
            if args.size:
                extra_args = 'thumb --{} {} {}'.format(
                    processor_class.thumb_size,
                    args.size[0],
                    args.size[1])
            else:
                extra_args = 'thumb'

            reprocess_args = reprocess_parser.parse_args(extra_args.split())
            reprocess_request = processor_class.args_to_request(reprocess_args)
            run_process_media(
                media_entry,
                reprocess_action='resize',
                reprocess_info=reprocess_request)

        except ProcessingManagerDoesNotExist:
            print('No such processing manager for {}'.format(entry.media_type))
def thumbs(args):
    """
    Regenerate thumbs for all processed media.

    :param args: parsed CLI namespace; ``args.size`` optionally holds a
        (width, height) pair forwarded to the 'resize' processor.
    """
    # Fix: the original used Python 2 `print` statements (syntax error on
    # Python 3) and `return` inside the per-entry loop, which aborted
    # regeneration for all remaining entries instead of skipping one.
    query = MediaEntry.query.filter_by(state='processed')

    for entry in query:
        try:
            media_entry, manager = get_entry_and_processing_manager(entry.id)

            # TODO: (maybe?) This could probably be handled entirely by the
            # processor class...
            try:
                processor_class = manager.get_processor(
                    'resize', media_entry)
            except ProcessorDoesNotExist:
                print('No such processor "{}" for media with id "{}"'.format(
                    'resize', media_entry.id))
                continue
            except ProcessorNotEligible:
                print('Processor "{}" exists but media "{}" is not eligible'.format(
                    'resize', media_entry.id))
                continue

            reprocess_parser = processor_class.generate_parser()

            # prepare filetype and size to be passed into reprocess_parser
            if args.size:
                extra_args = 'thumb --{} {} {}'.format(
                    processor_class.thumb_size,
                    args.size[0],
                    args.size[1])
            else:
                extra_args = 'thumb'

            reprocess_args = reprocess_parser.parse_args(extra_args.split())
            reprocess_request = processor_class.args_to_request(reprocess_args)
            run_process_media(
                media_entry,
                reprocess_action='resize',
                reprocess_info=reprocess_request)

        except ProcessingManagerDoesNotExist:
            print('No such processing manager for {}'.format(entry.media_type))
def main_task(entry_id, resolution, medium_size, **process_info):
    """
    Main celery task to transcode the video to the default resolution
    and store original video metadata.
    """
    _log.debug('MediaEntry processing')
    entry, manager = get_entry_and_processing_manager(entry_id)
    # Collect the transcode keyword arguments up front for readability.
    transcode_opts = dict(
        medium_size=tuple(medium_size),
        vp8_quality=process_info['vp8_quality'],
        vp8_threads=process_info['vp8_threads'],
        vorbis_quality=process_info['vorbis_quality'])
    with CommonVideoProcessor(manager, entry) as processor:
        processor.common_setup(resolution)
        processor.transcode(**transcode_opts)
        processor.generate_thumb(thumb_size=process_info['thumb_size'])
        processor.store_orig_metadata()
    # Make state of entry as processed
    entry.state = 'processed'
    entry.save()
    _log.info('MediaEntry ID {} is processed (transcoded to default'
              ' resolution): {}'.format(entry.id, medium_size))
    _log.debug('MediaEntry processed')
# Example #10
def available(args):
    """List the processors available for a media entry or a media type.

    :param args: parsed CLI namespace. ``args.id_or_type`` is either a
        numeric media id or a media-type string; ``args.state`` optionally
        filters processors by state; ``args.action_help`` prints each
        processor's full argument help.
    """
    # Get the media type, either by looking up media id, or by specific type
    try:
        media_id = int(args.id_or_type)
        media_entry, manager = get_entry_and_processing_manager(media_id)
        media_type = media_entry.media_type
    except ValueError:
        media_type = args.id_or_type
        media_entry = None
        manager = get_processing_manager_for_type(media_type)
    except ProcessingManagerDoesNotExist:
        entry = MediaEntry.query.filter_by(id=args.id_or_type).first()
        print('No such processing manager for {}'.format(entry.media_type))
        # Fix: must bail out here -- without a manager the code below
        # would raise NameError on the unbound 'manager' variable.
        return

    if args.state:
        processors = manager.list_all_processors_by_state(args.state)
    elif media_entry is None:
        processors = manager.list_all_processors()
    else:
        processors = manager.list_eligible_processors(media_entry)

    print("Available processors:")
    print("=====================")
    print("")

    if args.action_help:
        for processor in processors:
            print(processor.name)
            print("-" * len(processor.name))

            parser = processor.generate_parser()
            parser.print_help()
            print("")

    else:
        for processor in processors:
            if processor.description:
                print(" - {}: {}".format(processor.name, processor.description))
            else:
                # Consistency: use str.format like the branch above.
                print(" - {}".format(processor.name))
def available(args):
    """List the processors available for a media entry or a media type.

    :param args: parsed CLI namespace. ``args.id_or_type`` is either a
        numeric media id or a media-type string; ``args.state`` optionally
        filters processors by state; ``args.action_help`` prints each
        processor's full argument help.
    """
    # Fix: the original used Python 2 `print` statements (syntax error on
    # Python 3) and fell through after the ProcessingManagerDoesNotExist
    # handler, which would raise NameError on the unbound 'manager'.
    # Get the media type, either by looking up media id, or by specific type
    try:
        media_id = int(args.id_or_type)
        media_entry, manager = get_entry_and_processing_manager(media_id)
        media_type = media_entry.media_type
    except ValueError:
        media_type = args.id_or_type
        media_entry = None
        manager = get_processing_manager_for_type(media_type)
    except ProcessingManagerDoesNotExist:
        entry = MediaEntry.query.filter_by(id=args.id_or_type).first()
        print('No such processing manager for {}'.format(entry.media_type))
        return

    if args.state:
        processors = manager.list_all_processors_by_state(args.state)
    elif media_entry is None:
        processors = manager.list_all_processors()
    else:
        processors = manager.list_eligible_processors(media_entry)

    print("Available processors:")
    print("=====================")
    print("")

    if args.action_help:
        for processor in processors:
            print(processor.name)
            print("-" * len(processor.name))

            parser = processor.generate_parser()
            parser.print_help()
            print("")

    else:
        for processor in processors:
            if processor.description:
                print(" - {}: {}".format(processor.name, processor.description))
            else:
                print(" - {}".format(processor.name))
# Example #12
def run_process_media(entry,
                      feed_url=None,
                      reprocess_action="initial",
                      reprocess_info=None):
    """Process the media asynchronously

    :param entry: MediaEntry() instance to be processed.
    :param feed_url: A string indicating the feed_url that the PuSH servers
        should be notified of. This will be sth like: `request.urlgen(
            'mediagoblin.user_pages.atom_feed',qualified=True,
            user=request.user.username)`
    :param reprocess_action: What particular action should be run.
    :param reprocess_info: A dict containing all of the necessary reprocessing
        info for the given media_type"""

    # Re-fetch the entry and resolve its processing manager; may raise
    # ProcessingManagerDoesNotExist for an unknown media type.
    entry, manager = get_entry_and_processing_manager(entry.id)

    try:
        # A manager may supply its own celery workflow; `wf` appears to be a
        # (header, body) pair fed to chord() below. When the manager returns
        # None, fall back to dispatching a single ProcessMedia task.
        wf = manager.workflow(entry, feed_url, reprocess_action,
                              reprocess_info)
        if wf is None:
            ProcessMedia().apply_async(
                [entry.id, feed_url, reprocess_action, reprocess_info], {},
                task_id=entry.queued_task_id)
        else:
            chord(wf[0])(wf[1])
    except BaseException as exc:
        # The purpose of this section is because when running in "lazy"
        # or always-eager-with-exceptions-propagated celery mode that
        # the failure handling won't happen on Celery end.  Since we
        # expect a lot of users to run things in this way we have to
        # capture stuff here.
        #
        # ... not completely the diaper pattern because the
        # exception is re-raised :)
        mark_entry_failed(entry.id, exc)
        # re-raise the exception
        raise
# Example #13
    def run(self, media_id, feed_url, reprocess_action, reprocess_info=None):
        """
        Pass the media entry off to the appropriate processing function
        (for now just process_image...)

        :param media_id: MediaEntry().id
        :param feed_url: The feed URL that the PuSH server needs to be
            updated for.
        :param reprocess_action: What particular action should be run. For
            example, 'initial'.
        :param reprocess_info: A dict containing all of the necessary
            reprocessing info for the media_type.
        """
        reprocess_info = reprocess_info or {}
        entry, manager = get_entry_and_processing_manager(media_id)

        # Try to process, and handle expected errors.
        try:
            processor_class = manager.get_processor(reprocess_action, entry)

            with processor_class(manager, entry) as processor:
                # Initial state change has to be here because
                # the entry.state gets recorded on processor_class init
                entry.state = u'processing'
                entry.save()

                _log.debug('Processing {0}'.format(entry))

                try:
                    processor.process(**reprocess_info)
                except Exception as exc:
                    # If the entry had already been processed before this
                    # run, log the failure but fall through so its state
                    # is restored to 'processed' below; otherwise re-raise.
                    if processor.entry_orig_state == 'processed':
                        _log.error(
                            'Entry {0} failed to process due to the following'
                            ' error: {1}'.format(entry.id, exc))
                        _log.info(
                            'Setting entry.state back to "processed"')
                        pass
                    else:
                        raise

            # We set the state to processed and save the entry here so there's
            # no need to save at the end of the processing stage, probably ;)
            entry.state = u'processed'
            entry.save()

            # Notify the PuSH servers as async task
            if mgg.app_config["push_urls"] and feed_url:
                handle_push_urls.subtask().delay(feed_url)

            json_processing_callback(entry)
        except BaseProcessingFail as exc:
            # Expected, domain-level failure: mark failed and notify.
            mark_entry_failed(entry.id, exc)
            json_processing_callback(entry)
            return

        except ImportError as exc:
            _log.error(
                'Entry {0} failed to process due to an import error: {1}'\
                    .format(
                    entry.title,
                    exc))

            mark_entry_failed(entry.id, exc)
            json_processing_callback(entry)

        except Exception as exc:
            # Unexpected failure: record it, notify the callback, then
            # re-raise so the task framework also sees the error.
            _log.error('An unhandled exception was raised while'
                    + ' processing {0}'.format(
                        entry))

            mark_entry_failed(entry.id, exc)
            json_processing_callback(entry)
            raise
# Example #14
    def run(self, media_id, feed_url, reprocess_action, reprocess_info=None):
        """
        Pass the media entry off to the appropriate processing function
        (for now just process_image...)

        :param media_id: MediaEntry().id
        :param feed_url: The feed URL that the PuSH server needs to be
            updated for.
        :param reprocess_action: What particular action should be run. For
            example, 'initial'.
        :param reprocess_info: A dict containing all of the necessary
            reprocessing info for the media_type.
        """
        reprocess_info = reprocess_info or {}
        entry, manager = get_entry_and_processing_manager(media_id)

        # Try to process, and handle expected errors.
        try:
            processor_class = manager.get_processor(reprocess_action, entry)

            with processor_class(manager, entry) as processor:
                # Initial state change has to be here because
                # the entry.state gets recorded on processor_class init
                entry.state = u'processing'
                entry.save()

                _log.debug('Processing {0}'.format(entry))

                try:
                    processor.process(**reprocess_info)
                except Exception as exc:
                    # If the entry had already been processed before this
                    # run, log the failure but fall through so its state
                    # is restored to 'processed' below; otherwise re-raise.
                    if processor.entry_orig_state == 'processed':
                        _log.error(
                            'Entry {0} failed to process due to the following'
                            ' error: {1}'.format(entry.id, exc))
                        _log.info(
                            'Setting entry.state back to "processed"')
                        pass
                    else:
                        raise

            # We set the state to processed and save the entry here so there's
            # no need to save at the end of the processing stage, probably ;)
            entry.state = u'processed'
            entry.save()

            # Notify the PuSH servers as async task
            if mgg.app_config["push_urls"] and feed_url:
                handle_push_urls.subtask().delay(feed_url)

            json_processing_callback(entry)
        except BaseProcessingFail as exc:
            # Expected, domain-level failure: mark failed and notify.
            mark_entry_failed(entry.id, exc)
            json_processing_callback(entry)
            return

        except ImportError as exc:
            _log.error(
                'Entry {0} failed to process due to an import error: {1}'\
                    .format(
                    entry.title,
                    exc))

            mark_entry_failed(entry.id, exc)
            json_processing_callback(entry)

        except Exception as exc:
            # Unexpected failure: record it, notify the callback, then
            # re-raise so the task framework also sees the error.
            _log.error('An unhandled exception was raised while'
                    + ' processing {0}'.format(
                        entry))

            mark_entry_failed(entry.id, exc)
            json_processing_callback(entry)
            raise