def _try_cleanup_uploads(self):
    """
    Garbage-collect the blobupload table, guarded by a global lock.

    When CLEAN_BLOB_UPLOAD_FOLDER is enabled in config, also sweeps
    partial uploads left behind in the storage uploads folder.
    If another worker already holds the lock, this run is skipped.
    """
    try:
        with GlobalLock("BLOB_CLEANUP", lock_ttl=LOCK_TTL):
            self._cleanup_uploads()

            # Optional extra pass over the uploads folder in object storage.
            clean_upload_folder = app.config.get("CLEAN_BLOB_UPLOAD_FOLDER", False)
            if clean_upload_folder:
                self._try_clean_partial_uploads()
    except LockNotAcquiredException:
        # Another instance is doing the cleanup; nothing to do here.
        logger.debug("Could not acquire global lock for blob upload cleanup worker")
def _index_recent_manifests_in_scanner(self):
    """
    Index recently created manifests in the security scanner.

    Batch size comes from SECURITY_SCANNER_V4_RECENT_MANIFEST_BATCH_SIZE
    (default 1000). Unless the batch lock is explicitly skipped via
    SECURITY_SCANNER_V4_SKIP_RECENT_MANIFEST_BATCH_LOCK, the indexing run
    is serialized across workers with an auto-renewing global lock; a run
    that cannot obtain the lock logs a warning and does nothing.
    """
    batch_size = app.config.get("SECURITY_SCANNER_V4_RECENT_MANIFEST_BATCH_SIZE", 1000)
    skip_lock = app.config.get("SECURITY_SCANNER_V4_SKIP_RECENT_MANIFEST_BATCH_LOCK", False)

    # Lock explicitly disabled: index directly without coordination.
    if skip_lock:
        self._model.perform_indexing_recent_manifests(batch_size)
        return

    try:
        with GlobalLock(
            "SECURITYWORKER_INDEX_RECENT_MANIFEST", lock_ttl=300, auto_renewal=True
        ):
            self._model.perform_indexing_recent_manifests(batch_size)
    except LockNotAcquiredException:
        # Another worker is already indexing; skip this cycle.
        logger.warning(
            "Could not acquire global lock for recent manifest indexing. Skipping"
        )
def _garbage_collection_repos(self, skip_lock_for_testing=False):
    """
    Performs garbage collection on repositories.

    Picks a random GC policy, finds one repository with garbage under that
    policy, and garbage-collects it under a per-repository global lock.
    Returns early (silently, at debug log level) when there is no policy,
    no candidate repository, or the lock cannot be acquired.

    Args:
        skip_lock_for_testing: if True, bypass the global lock entirely
            (tests only) by substituting an empty context manager.
    """
    # UseThenDisconnect presumably scopes the DB connection to this block —
    # TODO confirm against its definition.
    with UseThenDisconnect(app.config):
        policy = get_random_gc_policy()
        if policy is None:
            logger.debug("No GC policies found")
            return

        repo_ref = registry_model.find_repository_with_garbage(policy)
        if repo_ref is None:
            logger.debug("No repository with garbage found")
            return

        assert features.GARBAGE_COLLECTION

        try:
            # Conditional context manager: a real per-repo lock in production,
            # a no-op context when skip_lock_for_testing is set.
            with GlobalLock(
                "REPO_GARBAGE_COLLECTION_%s" % repo_ref.id,
                lock_ttl=REPOSITORY_GC_TIMEOUT + LOCK_TIMEOUT_PADDING,
            ) if not skip_lock_for_testing else empty_context():
                # Re-fetch the row under the lock; the repository may have
                # been deleted since find_repository_with_garbage ran.
                try:
                    repository = Repository.get(id=repo_ref.id)
                except Repository.DoesNotExist:
                    return

                logger.debug(
                    "Starting GC of repository #%s (%s)", repository.id, repository.name
                )
                garbage_collect_repo(repository)
                logger.debug(
                    "Finished GC of repository #%s (%s)", repository.id, repository.name
                )

                # Metrics counter for completed GC iterations.
                gc_iterations.inc()
        except LockNotAcquiredException:
            # Another worker is collecting this repository; try again later.
            logger.debug("Could not acquire repo lock for garbage collection")
utilizing this method will enforce a 1:1 quay worker to gunicorn worker ratio.
    """
    gc_worker = NamespaceGCWorker(
        namespace_gc_queue,
        poll_period_seconds=POLL_PERIOD_SECONDS,
        reservation_seconds=NAMESPACE_GC_TIMEOUT,
    )
    # Wrap the queue worker so gunicorn can manage its lifecycle; the final
    # argument gates the worker on the NAMESPACE_GARBAGE_COLLECTION feature.
    worker = GunicornWorker(__name__, app, gc_worker, features.NAMESPACE_GARBAGE_COLLECTION)
    return worker


if __name__ == "__main__":
    logging.config.fileConfig(logfile_path(debug=False), disable_existing_loggers=False)

    if not features.NAMESPACE_GARBAGE_COLLECTION:
        logger.debug("Namespace garbage collection is disabled; skipping")
        # Sleep forever instead of exiting — presumably so the process
        # supervisor does not restart the worker in a crash loop when the
        # feature is disabled. TODO confirm against deployment setup.
        while True:
            time.sleep(100000)

    GlobalLock.configure(app.config)
    logger.debug("Starting namespace GC worker")
    worker = NamespaceGCWorker(
        namespace_gc_queue,
        poll_period_seconds=POLL_PERIOD_SECONDS,
        reservation_seconds=NAMESPACE_GC_TIMEOUT,
    )
    worker.start()