def on_failure(self, exc, task_id, args, kwargs, einfo):
    """
    Celery failure hook: record the processing failure in the database.

    The raised exception is assumed to subclass BaseProcessingFail, so it
    carries extra information about what went wrong; that is stored so the
    failure can be conveyed to users, then the processing callback is fired.
    """
    failed_entry_id = args[0]
    mark_entry_failed(failed_entry_id, exc)

    failed_entry = mgg.database.MediaEntry.query.filter_by(
        id=failed_entry_id).first()
    json_processing_callback(failed_entry)
def run(self, media_id, feed_url):
    """
    Pass the media entry off to the appropriate processing function
    (for now just process_image...)

    :param feed_url: The feed URL that the PuSH server needs to be
        updated for.
    """
    entry = MediaEntry.query.get(media_id)

    try:
        # Flag the entry as in-flight before any work happens.
        entry.state = u'processing'
        entry.save()

        _log.debug('Processing {0}'.format(entry))

        processing_state = ProcessingState(entry)
        with mgg.workbench_manager.create() as workbench:
            processing_state.set_workbench(workbench)
            # Hand off to the media-type specific processor.
            entry.media_manager.processor(processing_state)

        # Mark success here so the processing stage itself does not need
        # to save again at its end.
        entry.state = u'processed'

        # Generate the slug only now, rather than earlier when processing
        # could still have failed.
        entry.generate_slug()
        entry.save()

        # Notify the PuSH servers as an async task.
        if mgg.app_config["push_urls"] and feed_url:
            handle_push_urls.subtask().delay(feed_url)

        json_processing_callback(entry)
    except BaseProcessingFail as failure:
        mark_entry_failed(entry.id, failure)
        json_processing_callback(entry)
        return
    except ImportError as failure:
        _log.error(
            'Entry {0} failed to process due to an import error: {1}'.format(
                entry.title, failure))
        mark_entry_failed(entry.id, failure)
        json_processing_callback(entry)
    except Exception as failure:
        _log.error(
            'An unhandled exception was raised while processing {0}'.format(
                entry))
        mark_entry_failed(entry.id, failure)
        json_processing_callback(entry)
        raise
def run(self, media_id, feed_url):
    """
    Pass the media entry off to the appropriate processing function
    (for now just process_image...)

    :param feed_url: The feed URL that the PuSH server needs to be
        updated for.
    """
    entry = MediaEntry.query.get(media_id)

    try:
        # Mark the entry as in-flight before doing anything else.
        entry.state = u'processing'
        entry.save()

        _log.debug('Processing {0}'.format(entry))

        processing_state = ProcessingState(entry)
        with mgg.workbench_manager.create() as workbench:
            processing_state.set_workbench(workbench)
            # Hand off to the media-type specific processor.
            entry.media_manager.processor(processing_state)

        # Record success here so the processing stage itself does not
        # need to save again at its end.
        entry.state = u'processed'
        entry.save()

        # Notify the PuSH servers as an async task.
        if mgg.app_config["push_urls"] and feed_url:
            handle_push_urls.subtask().delay(feed_url)

        json_processing_callback(entry)
    except BaseProcessingFail as failure:
        mark_entry_failed(entry.id, failure)
        json_processing_callback(entry)
        return
    except ImportError as failure:
        _log.error(
            'Entry {0} failed to process due to an import error: {1}'.format(
                entry.title, failure))
        mark_entry_failed(entry.id, failure)
        json_processing_callback(entry)
    except Exception as failure:
        _log.error(
            'An unhandled exception was raised while processing {0}'.format(
                entry))
        mark_entry_failed(entry.id, failure)
        json_processing_callback(entry)
        raise
def run(self, media_id):
    """
    Pass the media entry off to the appropriate processing function
    (for now just process_image...)
    """
    entry = mgg.database.MediaEntry.one(
        {'_id': ObjectId(media_id)})

    try:
        # Look up the processor for this entry's media type.
        manager = get_media_manager(entry.media_type)

        entry.state = u'processing'
        entry.save()

        _log.debug('Processing {0}'.format(entry))
        manager['processor'](entry)

        entry.state = u'processed'
        entry.save()

        json_processing_callback(entry)
    except BaseProcessingFail as failure:
        mark_entry_failed(entry._id, failure)
        json_processing_callback(entry)
        return
    except ImportError as failure:
        _log.error(
            'Entry {0} failed to process due to an import error: {1}'.format(
                entry.title, failure))
        mark_entry_failed(entry._id, failure)
        json_processing_callback(entry)
    except Exception as failure:
        _log.error(
            'An unhandled exception was raised while processing {0}'.format(
                entry))
        mark_entry_failed(entry._id, failure)
        json_processing_callback(entry)
        raise
def run(self, media_id):
    """
    Pass the media entry off to the appropriate processing function
    (for now just process_image...)
    """
    entry = MediaEntry.query.get(media_id)

    try:
        # Mark the entry as in-flight first.
        entry.state = u'processing'
        entry.save()

        _log.debug('Processing {0}'.format(entry))

        # Hand off to the media-type specific processor.
        entry.media_manager['processor'](entry)

        # Record success here so the processing stage itself does not
        # need to save again at its end.
        entry.state = u'processed'
        entry.save()

        json_processing_callback(entry)
    except BaseProcessingFail as failure:
        mark_entry_failed(entry.id, failure)
        json_processing_callback(entry)
        return
    except ImportError as failure:
        _log.error(
            'Entry {0} failed to process due to an import error: {1}'.format(
                entry.title, failure))
        mark_entry_failed(entry.id, failure)
        json_processing_callback(entry)
    except Exception as failure:
        _log.error(
            'An unhandled exception was raised while processing {0}'.format(
                entry))
        mark_entry_failed(entry.id, failure)
        json_processing_callback(entry)
        raise
def run(self, media_id, feed_url, reprocess_action, reprocess_info=None):
    """
    Pass the media entry off to the appropriate processing function
    (for now just process_image...)

    :param media_id: MediaEntry().id
    :param feed_url: The feed URL that the PuSH server needs to be
        updated for.
    :param reprocess_action: What particular action should be run. For
        example, 'initial'.
    :param reprocess_info: A dict containing all of the necessary reprocessing
        info for the media_type.
    """
    reprocess_info = reprocess_info or {}
    entry, manager = get_entry_and_processing_manager(media_id)

    # Try to process, and handle expected errors.
    try:
        processor_class = manager.get_processor(reprocess_action, entry)

        with processor_class(manager, entry) as processor:
            # Initial state change has to be here because
            # the entry.state gets recorded on processor_class init
            entry.state = u'processing'
            entry.save()

            _log.debug('Processing {0}'.format(entry))

            try:
                processor.process(**reprocess_info)
            except Exception as exc:
                if processor.entry_orig_state == 'processed':
                    # A failed reprocess of an already-processed entry is
                    # non-fatal: log it and restore the 'processed' state
                    # below instead of failing the whole entry.
                    _log.error(
                        'Entry {0} failed to process due to the following'
                        ' error: {1}'.format(entry.id, exc))
                    _log.info('Setting entry.state back to "processed"')
                else:
                    raise

        # We set the state to processed and save the entry here so there's
        # no need to save at the end of the processing stage, probably ;)
        entry.state = u'processed'
        entry.save()

        # Notify the PuSH servers as async task
        if mgg.app_config["push_urls"] and feed_url:
            handle_push_urls.subtask().delay(feed_url)

        json_processing_callback(entry)
    except BaseProcessingFail as exc:
        mark_entry_failed(entry.id, exc)
        json_processing_callback(entry)
        return
    except ImportError as exc:
        _log.error(
            'Entry {0} failed to process due to an import error: {1}'.format(
                entry.title, exc))
        mark_entry_failed(entry.id, exc)
        json_processing_callback(entry)
    except Exception as exc:
        _log.error(
            'An unhandled exception was raised while processing {0}'.format(
                entry))
        mark_entry_failed(entry.id, exc)
        json_processing_callback(entry)
        raise
def run(self, media_id, feed_url, reprocess_action, reprocess_info=None):
    """
    Pass the media entry off to the appropriate processing function
    (for now just process_image...)

    :param media_id: MediaEntry().id
    :param feed_url: The feed URL that the PuSH server needs to be
        updated for.
    :param reprocess_action: What particular action should be run. For
        example, 'initial'.
    :param reprocess_info: A dict containing all of the necessary reprocessing
        info for the media_type.
    """
    reprocess_info = reprocess_info or {}
    entry, manager = get_entry_and_processing_manager(media_id)

    # Try to process, and handle expected errors.
    try:
        processor_class = manager.get_processor(reprocess_action, entry)

        with processor_class(manager, entry) as processor:
            # Initial state change has to be here because
            # the entry.state gets recorded on processor_class init
            entry.state = u'processing'
            entry.save()

            _log.debug('Processing {0}'.format(entry))

            try:
                processor.process(**reprocess_info)
            except Exception as exc:
                if processor.entry_orig_state == 'processed':
                    # A failed reprocess of an already-processed entry is
                    # non-fatal: log it and restore the 'processed' state
                    # below instead of failing the whole entry.
                    _log.error(
                        'Entry {0} failed to process due to the following'
                        ' error: {1}'.format(entry.id, exc))
                    _log.info('Setting entry.state back to "processed"')
                else:
                    raise

        # We set the state to processed and save the entry here so there's
        # no need to save at the end of the processing stage, probably ;)
        entry.state = u'processed'
        entry.save()

        # Notify the PuSH servers as async task
        if mgg.app_config["push_urls"] and feed_url:
            handle_push_urls.subtask().delay(feed_url)

        json_processing_callback(entry)
    except BaseProcessingFail as exc:
        mark_entry_failed(entry.id, exc)
        json_processing_callback(entry)
        return
    except ImportError as exc:
        _log.error(
            'Entry {0} failed to process due to an import error: {1}'.format(
                entry.title, exc))
        mark_entry_failed(entry.id, exc)
        json_processing_callback(entry)
    except Exception as exc:
        _log.error(
            'An unhandled exception was raised while processing {0}'.format(
                entry))
        mark_entry_failed(entry.id, exc)
        json_processing_callback(entry)
        raise