def model(self):
    ErrorLogModel = apps.get_model(
        app_label='logging', model_name='ErrorLog'
    )

    lock_id = 'logging-get-or-create-errorlogmodel-{}'.format(
        self.app_config.name
    )
    try:
        logger.debug('trying to acquire lock: %s', lock_id)
        lock = LockingBackend.get_instance().acquire_lock(lock_id)
        logger.debug('acquired lock: %s', lock_id)
    except LockError:
        logger.debug('unable to obtain lock: %s', lock_id)
        raise
    else:
        try:
            model, created = ErrorLogModel.objects.get_or_create(
                name=self.app_config.name
            )
        except ErrorLogModel.MultipleObjectsReturned:
            # Self heal previously repeated entries.
            ErrorLogModel.objects.filter(
                name=self.app_config.name
            ).delete()
            model, created = ErrorLogModel.objects.get_or_create(
                name=self.app_config.name
            )
        finally:
            lock.release()

        # Return the model in the self-healed case as well, not only when the
        # first get_or_create succeeds.
        return model
def generate_image(self, user=None, **kwargs):
    transformation_list = self.get_combined_transformation_list(
        user=user, **kwargs
    )
    combined_cache_filename = BaseTransformation.combine(transformation_list)

    # Check if the transformed image is available.
    logger.debug(
        'transformations cache filename: %s', combined_cache_filename
    )

    try:
        lock = LockingBackend.get_instance().acquire_lock(
            name='document_page_generate_image_{}_{}'.format(
                self.pk, combined_cache_filename
            )
        )
    except Exception:
        raise
    else:
        # Second try block to release the lock even on fatal errors inside
        # the block.
        try:
            if self.cache_partition.get_file(filename=combined_cache_filename):
                logger.debug(
                    'transformations cache file "%s" found',
                    combined_cache_filename
                )
            else:
                logger.debug(
                    'transformations cache file "%s" not found',
                    combined_cache_filename
                )

                image = self.get_image(transformations=transformation_list)
                with self.cache_partition.create_file(filename=combined_cache_filename) as file_object:
                    file_object.write(image.getvalue())

            return combined_cache_filename
        finally:
            lock.release()
def task_check_expired_check_outs():
    DocumentCheckout = apps.get_model(
        app_label='checkouts', model_name='DocumentCheckout'
    )

    logger.debug(msg='executing...')

    lock_id = 'task_expired_check_outs'
    try:
        logger.debug('trying to acquire lock: %s', lock_id)
        lock = LockingBackend.get_instance().acquire_lock(
            name=lock_id, timeout=CHECKOUT_EXPIRATION_LOCK_EXPIRE
        )
        logger.debug('acquired lock: %s', lock_id)
    except LockError:
        logger.debug(msg='unable to obtain lock')
    else:
        try:
            DocumentCheckout.objects.check_in_expired_check_outs()
        finally:
            # Release the lock even if checking in the expired check outs
            # raises an error.
            lock.release()
def index_instance(self, instance, exclude_set=None):
    try:
        lock = LockingBackend.get_instance().acquire_lock(
            name='dynamic_search_whoosh_index_instance'
        )
    except LockError:
        raise
    else:
        try:
            # Use a shadow method to allow using a single lock for
            # all recursions.
            self._index_instance(
                instance=instance, exclude_set=exclude_set
            )
        finally:
            lock.release()
def deindex_instance(self, instance):
    try:
        lock = LockingBackend.get_instance().acquire_lock(
            name='dynamic_search_whoosh_deindex_instance'
        )
    except LockError:
        raise
    else:
        try:
            search_model = SearchModel.get_for_model(instance=instance)
            index = self.get_index(search_model=search_model)

            writer = index.writer()
            writer.delete_by_term('id', str(instance.pk))
            writer.commit()
        finally:
            lock.release()
def remove_document(self, document):
    """
    The argument `acquire_lock` controls whether or not this method
    acquires a lock. The case for this is to acquire when called directly,
    or not to acquire when called as part of a larger index process that
    already holds a lock.
    """
    # Prevent another process from working on this node. We use the node's
    # parent template node for the lock.
    try:
        lock = LockingBackend.get_instance().acquire_lock(
            self.index_template_node.get_lock_string()
        )
    except LockError:
        raise
    else:
        try:
            self.documents.remove(document)
        finally:
            lock.release()
def create_file(self, filename):
    lock_id = 'cache_partition-create_file-{}-{}'.format(self.pk, filename)
    try:
        logger.debug('trying to acquire lock: %s', lock_id)
        lock = LockingBackend.get_instance().acquire_lock(lock_id)
        logger.debug('acquired lock: %s', lock_id)
        try:
            self.cache.prune()

            # Since open "wb+" doesn't create files, force the creation of
            # an empty file.
            self.cache.storage.delete(
                name=self.get_full_filename(filename=filename)
            )
            self.cache.storage.save(
                name=self.get_full_filename(filename=filename),
                content=ContentFile(content='')
            )

            try:
                with transaction.atomic():
                    partition_file = self.files.create(filename=filename)
                    yield partition_file.open(mode='wb')
            except Exception as exception:
                logger.error(
                    'Unexpected exception while trying to save new '
                    'cache file; %s', exception
                )
                self.cache.storage.delete(
                    name=self.get_full_filename(filename=filename)
                )
                raise
            finally:
                partition_file.close()
                partition_file.update_size()
        finally:
            lock.release()
    except LockError:
        logger.debug('unable to obtain lock: %s', lock_id)
        raise
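# Editorial note, not part of the source module: `create_file` is a generator
# that yields an open file object, so it is presumably decorated with
# `contextlib.contextmanager` in the original code and consumed with a `with`
# statement, exactly as `generate_image` above does:
#
#     with self.cache_partition.create_file(filename=combined_cache_filename) as file_object:
#         file_object.write(image.getvalue())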
def task_process_document_version(document_version_id):
    DocumentVersion = apps.get_model(
        app_label='documents', model_name='DocumentVersion'
    )
    document_version = DocumentVersion.objects.get(pk=document_version_id)

    lock_id = 'task_process_document_version-%d' % document_version_id
    try:
        logger.debug('trying to acquire lock: %s', lock_id)
        # Acquire lock to avoid processing the same document version more
        # than once concurrently.
        lock = LockingBackend.get_instance().acquire_lock(
            name=lock_id, timeout=LOCK_EXPIRE
        )
        logger.debug('acquired lock: %s', lock_id)
    except LockError:
        logger.debug('unable to obtain lock: %s', lock_id)
    else:
        try:
            FileMetadataDriver.process_document_version(
                document_version=document_version
            )
        finally:
            lock.release()
def delete_empty(self):
    """
    Method to delete all empty node instances in a recursive manner.
    """
    # Prevent another process from deleting this node.
    try:
        lock = LockingBackend.get_instance().acquire_lock(
            self.index_template_node.get_lock_string()
        )
    except LockError:
        raise
    else:
        try:
            if self.get_documents().count() == 0 and self.get_children().count() == 0:
                if not self.is_root_node():
                    # I'm not a root node, I can be deleted.
                    self.delete()

                    if not self.parent.is_root_node():
                        # My parent is not a root node, it can be deleted.
                        self.parent.delete_empty()
        finally:
            lock.release()
def task_check_interval_source(source_id, test=False):
    Source = apps.get_model(app_label='sources', model_name='Source')

    lock_id = 'task_check_interval_source-%d' % source_id
    try:
        logger.debug('trying to acquire lock: %s', lock_id)
        lock = LockingBackend.get_instance().acquire_lock(
            name=lock_id, timeout=DEFAULT_SOURCE_LOCK_EXPIRE
        )
    except LockError:
        logger.debug('unable to obtain lock: %s', lock_id)
    else:
        logger.debug('acquired lock: %s', lock_id)
        try:
            source = Source.objects.get_subclass(pk=source_id)
            if source.enabled or test:
                source.check_source(test=test)
        except Exception as exception:
            logger.error(
                'Error processing source id: %s; %s', source_id, exception
            )
            if settings.DEBUG:
                raise
        finally:
            lock.release()
def index_document(self, document, acquire_lock=True,
                   index_instance_node_parent=None):
    # Start transaction after the lock in case the locking backend uses
    # the database.
    try:
        if acquire_lock:
            lock = LockingBackend.get_instance().acquire_lock(
                self.get_lock_string()
            )
    except LockError:
        raise
    else:
        try:
            logger.debug('IndexTemplateNode; Indexing document: %s', document)

            if not index_instance_node_parent:
                # I'm the root
                with transaction.atomic():
                    index_instance_root_node = self.get_instance_root_node()

                    for child in self.get_children():
                        child.index_document(
                            document=document, acquire_lock=False,
                            index_instance_node_parent=index_instance_root_node
                        )
            elif self.enabled:
                with transaction.atomic():
                    logger.debug('IndexTemplateNode; non parent: evaluating')
                    logger.debug('My parent template is: %s', self.parent)
                    logger.debug(
                        'My parent instance node is: %s',
                        index_instance_node_parent
                    )
                    logger.debug(
                        'IndexTemplateNode; Evaluating template: %s',
                        self.expression
                    )

                    try:
                        template = Template(template_string=self.expression)
                        result = template.render(
                            context={'document': document}
                        )
                    except Exception as exception:
                        logger.debug('Evaluating error: %s', exception)
                        error_message = _(
                            'Error indexing document: %(document)s; expression: '
                            '%(expression)s; %(exception)s'
                        ) % {
                            'document': document,
                            'expression': self.expression,
                            'exception': exception
                        }
                        logger.debug(error_message)
                    else:
                        logger.debug('Evaluation result: %s', result)

                        if result:
                            index_instance_node, created = self.index_instance_nodes.get_or_create(
                                parent=index_instance_node_parent, value=result
                            )

                            if self.link_documents:
                                index_instance_node.documents.add(document)

                            for child in self.get_children():
                                child.index_document(
                                    document=document, acquire_lock=False,
                                    index_instance_node_parent=index_instance_node
                                )
        finally:
            if acquire_lock:
                lock.release()
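# Editorial sketch, not part of the source: every snippet above repeats the
# same acquire_lock() / try / finally / lock.release() sequence. A hypothetical
# helper such as the one below could centralize that pattern. The name `locked`
# and its parameters are assumptions for illustration only; `LockingBackend`,
# `LockError` and `logger` are the same objects the surrounding modules
# already import.
from contextlib import contextmanager


@contextmanager
def locked(name, timeout=None):
    # Acquire the lock up front; a LockError propagates to the caller, just as
    # it does in the methods above.
    logger.debug('trying to acquire lock: %s', name)
    if timeout is None:
        lock = LockingBackend.get_instance().acquire_lock(name=name)
    else:
        lock = LockingBackend.get_instance().acquire_lock(
            name=name, timeout=timeout
        )
    logger.debug('acquired lock: %s', name)
    try:
        yield lock
    finally:
        # Guarantee the release even if the protected block raises.
        lock.release()


# Hypothetical usage, mirroring task_check_expired_check_outs() above:
#
#     try:
#         with locked(name='task_expired_check_outs',
#                     timeout=CHECKOUT_EXPIRATION_LOCK_EXPIRE):
#             DocumentCheckout.objects.check_in_expired_check_outs()
#     except LockError:
#         logger.debug(msg='unable to obtain lock')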