def get_all_tags(skopeo, mirror):
    verbose_logs = os.getenv("DEBUGLOG", "false").lower() == "true"

    username = (
        mirror.external_registry_username.decrypt()
        if mirror.external_registry_username
        else None
    )
    password = (
        mirror.external_registry_password.decrypt()
        if mirror.external_registry_password
        else None
    )

    with database.CloseForLongOperation(app.config):
        result = skopeo.tags(
            "docker://%s" % (mirror.external_reference),
            mirror.root_rule.rule_value,
            username=username,
            password=password,
            verbose_logs=verbose_logs,
            verify_tls=mirror.external_registry_config.get("verify_tls", True),
            proxy=mirror.external_registry_config.get("proxy", {}),
        )

    if not result.success:
        raise RepoMirrorSkopeoException(
            "skopeo inspect failed: %s" % _skopeo_inspect_failure(result),
            result.stdout,
            result.stderr,
        )

    return result.tags
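# Every snippet in this collection wraps its long-running call in
# `database.CloseForLongOperation`. As a rough illustration of what such a
# context manager might look like -- a minimal sketch, NOT Quay's actual
# implementation (the real one keys off the app config and peewee's
# connection handling) -- it could be written as:
import contextlib

@contextlib.contextmanager
def close_for_long_operation(db):
    """Close the (pooled) peewee connection before a long-running operation.

    Peewee reconnects lazily on the next query, so nothing needs to happen on
    exit; the point is simply not to hold a connection open while blocked.
    """
    if not db.is_closed():
        db.close()
    yield
    # Intentionally no reconnect here: the next database access reopens it.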
def _work_checker(self):
    while self._current_status == AnsibleServerStatus.RUNNING:
        with database.CloseForLongOperation(app.config):
            yield From(trollius.sleep(WORK_CHECK_TIMEOUT))

        processing_time = 30  # seconds
        job_item = None
        try:
            job_item = self._queue.get(
                processing_time=processing_time, ordering_required=True
            )
        except Exception:
            # When the database is uninitialized, peewee raises a "programming error".
            logger.debug("Likely database not initialized")
            continue

        if job_item is None:
            logger.debug(
                "No additional work found. Going to sleep for %s seconds",
                WORK_CHECK_TIMEOUT,
            )
            continue

        logger.debug("Processing: %s", job_item)
        resource = json.loads(job_item.body)
        resource["work_queue"] = False
        result, status = getattr(
            sys.modules["routes." + resource["task"]], "process_resources"
        )([resource])

        if status == 200:
            logger.debug("Processing complete: %s", result)
            self._queue.complete(job_item)
        else:
            logger.debug("Processing incomplete: %s", result)
            self._queue.incomplete(job_item, retry_after=WORK_CHECK_TIMEOUT)
        continue
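# The loop above is written in the pre-asyncio trollius style
# (`yield From(trollius.sleep(...))`). On Python 3, the same non-blocking wait
# is a native coroutine, as the asyncio work checker later in this collection
# shows. A minimal equivalence sketch:
import asyncio

async def wait_for_work(timeout_seconds):
    # Equivalent of `yield From(trollius.sleep(timeout_seconds))` under trollius.
    await asyncio.sleep(timeout_seconds)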
def download_blob(namespace_name, repo_name, digest):
    # Find the blob.
    blob = registry_model.get_cached_repo_blob(model_cache, namespace_name, repo_name, digest)
    if blob is None:
        raise BlobUnknown()

    # Build the response headers.
    headers = {"Docker-Content-Digest": digest}

    # If our storage supports range requests, let the client know.
    if storage.get_supports_resumable_downloads(blob.placements):
        headers["Accept-Ranges"] = "bytes"

    image_pulled_bytes.labels("v2").inc(blob.compressed_size)

    # Short-circuit by redirecting if the storage supports it.
    path = blob.storage_path
    logger.debug("Looking up the direct download URL for path: %s", path)
    direct_download_url = storage.get_direct_download_url(blob.placements, path, get_request_ip())
    if direct_download_url:
        logger.debug("Returning direct download URL")
        resp = redirect(direct_download_url)
        resp.headers.extend(headers)
        return resp

    # Close the database connection before we stream the download.
    logger.debug("Closing database connection before streaming layer data")

    headers.update(
        {
            "Content-Length": blob.compressed_size,
            "Content-Type": BLOB_CONTENT_TYPE,
        }
    )

    with database.CloseForLongOperation(app.config):
        # Stream the response to the client.
        return Response(
            storage.stream_read(blob.placements, path),
            headers=headers,
        )
def download_blob(namespace_name, repo_name, digest):
    # Find the blob.
    blob = registry_model.get_cached_repo_blob(model_cache, namespace_name, repo_name, digest)
    if blob is None:
        raise BlobUnknown()

    # Build the response headers.
    headers = {'Docker-Content-Digest': digest}

    # If our storage supports range requests, let the client know.
    if storage.get_supports_resumable_downloads(blob.placements):
        headers['Accept-Ranges'] = 'bytes'

    metric_queue.pull_byte_count.Inc(blob.compressed_size, labelvalues=['v2'])

    # Short-circuit by redirecting if the storage supports it.
    path = blob.storage_path
    logger.debug('Looking up the direct download URL for path: %s', path)
    direct_download_url = storage.get_direct_download_url(
        blob.placements, path, get_request_ip())
    if direct_download_url:
        logger.debug('Returning direct download URL')
        resp = redirect(direct_download_url)
        resp.headers.extend(headers)
        return resp

    # Close the database connection before we stream the download.
    logger.debug('Closing database connection before streaming layer data')

    # Note: dict.update() returns None, so it must be called before building
    # the Response rather than passed inline as the `headers` argument.
    headers.update({
        'Content-Length': blob.compressed_size,
        'Content-Type': BLOB_CONTENT_TYPE,
    })

    with database.CloseForLongOperation(app.config):
        # Stream the response to the client.
        return Response(
            storage.stream_read(blob.placements, path),
            headers=headers,
        )
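# Both download_blob variants must call dict.update() before constructing the
# Response: update() mutates the dict in place and returns None, so passing
# `headers.update({...})` inline as the `headers` argument would silently send
# no headers at all. A minimal demonstration of the pitfall:
headers = {'Docker-Content-Digest': 'sha256:example'}
result = headers.update({'Content-Length': 1024})
assert result is None                      # update() returns None, not the dict
assert headers['Content-Length'] == 1024   # the dict itself was mutated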
def _work_checker(self):
    logger.debug("Initializing work checker")
    while True:
        logger.debug("Writing queue metrics")
        self._queue.update_metrics()

        with database.CloseForLongOperation(app.config):
            time.sleep(WORK_CHECK_TIMEOUT)

        logger.debug("Checking for more work from the build queue")
        processing_time = EPHEMERAL_SETUP_TIMEOUT + SETUP_LEEWAY_SECONDS
        job_item = self._queue.get(processing_time=processing_time, ordering_required=True)
        if job_item is None:
            logger.debug(
                "No additional work found. Going to sleep for %s seconds", WORK_CHECK_TIMEOUT
            )
            continue

        try:
            build_job = BuildJob(job_item)
        except BuildJobLoadException as bjle:
            logger.error(
                "BuildJobLoadException. Job data: %s. No retry restore. - %s",
                job_item.body,
                bjle,
            )
            self._queue.incomplete(job_item, restore_retry=False)
            continue

        build_id = build_job.build_uuid
        job_id = self._job_key(build_id)
        try:
            logger.debug("Creating build job for build %s", build_id)
            self.create_job(build_id, {"job_queue_item": build_job.job_item})
        except BuildJobAlreadyExistsError:
            logger.warning(
                "Attempted to create job %s that already exists. Cleaning up existing job and returning it to the queue.",
                job_id,
            )
            self.job_unschedulable(job_id)
            self._queue.incomplete(job_item, restore_retry=True)
            continue
        except BuildJobError as je:
            logger.error("Create job exception. Build %s - %s", build_id, je)
            self._queue.incomplete(job_item, restore_retry=True)
            continue

        try:
            logger.debug("Scheduling build job %s", job_id)
            schedule_success, retry_timeout = self.schedule(build_id)
        except Exception as se:
            logger.exception("Exception when scheduling job %s: %s", build_job.build_uuid, se)
            self._queue.incomplete(job_item, restore_retry=True, retry_after=WORK_CHECK_TIMEOUT)
            continue

        if schedule_success:
            logger.debug("Build job %s scheduled.", job_id)
        else:
            logger.warning(
                "Unsuccessful schedule. Build ID: %s. Retry restored.",
                build_job.repo_build.uuid,
            )
            self.job_unschedulable(job_id)
            self._queue.incomplete(job_item, restore_retry=True, retry_after=retry_timeout)
async def _work_checker(self):
    logger.debug("Initializing work checker")
    while self._current_status == BuildServerStatus.RUNNING:
        with database.CloseForLongOperation(app.config):
            await asyncio.sleep(WORK_CHECK_TIMEOUT)

        logger.debug(
            "Checking for more work for %d active workers",
            self._lifecycle_manager.num_workers(),
        )

        processing_time = self._lifecycle_manager.overall_setup_time() + SETUP_LEEWAY_SECONDS
        job_item = self._queue.get(processing_time=processing_time, ordering_required=True)
        if job_item is None:
            logger.debug(
                "No additional work found. Going to sleep for %s seconds", WORK_CHECK_TIMEOUT
            )
            continue

        try:
            build_job = BuildJob(job_item)
        except BuildJobLoadException as irbe:
            logger.warning(
                "[BUILD INCOMPLETE: job load exception] Job data: %s. No retry restore.",
                job_item.body,
            )
            logger.exception(irbe)
            self._queue.incomplete(job_item, restore_retry=False)
            continue

        logger.debug(
            "Checking for an available worker for build job %s", build_job.repo_build.uuid
        )
        try:
            schedule_success, retry_timeout = await self._lifecycle_manager.schedule(build_job)
        except Exception:
            logger.warning(
                "[BUILD INCOMPLETE: scheduling] Build ID: %s. Retry restored.",
                build_job.repo_build.uuid,
            )
            logger.exception("Exception when scheduling job: %s", build_job.repo_build.uuid)
            self._current_status = BuildServerStatus.EXCEPTION
            self._queue.incomplete(job_item, restore_retry=True, retry_after=WORK_CHECK_TIMEOUT)
            return

        if schedule_success:
            logger.debug("Marking build %s as scheduled", build_job.repo_build.uuid)
            status_handler = StatusHandler(self._build_logs, build_job.repo_build.uuid)
            await status_handler.set_phase(database.BUILD_PHASE.BUILD_SCHEDULED)

            self._job_count = self._job_count + 1
            logger.debug(
                "Build job %s scheduled. Running: %s",
                build_job.repo_build.uuid,
                self._job_count,
            )
        else:
            logger.warning(
                "[BUILD INCOMPLETE: no schedule] Build ID: %s. Retry restored.",
                build_job.repo_build.uuid,
            )
            logger.debug(
                "All workers are busy for job %s. Requeuing after %s seconds.",
                build_job.repo_build.uuid,
                retry_timeout,
            )
            self._queue.incomplete(job_item, restore_retry=True, retry_after=retry_timeout)
def perform_mirror(skopeo, mirror):
    """
    Run mirror on all matching tags of remote repository.
    """
    verbose_logs = os.getenv("DEBUGLOG", "false").lower() == "true"

    mirror = claim_mirror(mirror)
    if mirror is None:
        raise PreemptedException

    emit_log(
        mirror,
        "repo_mirror_sync_started",
        "start",
        "'%s' with tag pattern '%s'"
        % (mirror.external_reference, ",".join(mirror.root_rule.rule_value)),
    )

    # Fetch the tags to mirror, being careful to handle exceptions. The broad 'Exception'
    # catch is a safety net only, so the user can easily report the failure in a bug report.
    tags = []
    try:
        tags = tags_to_mirror(skopeo, mirror)
    except RepoMirrorSkopeoException as e:
        emit_log(
            mirror,
            "repo_mirror_sync_failed",
            "end",
            "'%s' with tag pattern '%s': %s"
            % (mirror.external_reference, ",".join(mirror.root_rule.rule_value), str(e)),
            tags=", ".join(tags),
            stdout=e.stdout,
            stderr=e.stderr,
        )
        release_mirror(mirror, RepoMirrorStatus.FAIL)
        return
    except Exception:
        emit_log(
            mirror,
            "repo_mirror_sync_failed",
            "end",
            "'%s' with tag pattern '%s': INTERNAL ERROR"
            % (mirror.external_reference, ",".join(mirror.root_rule.rule_value)),
            tags=", ".join(tags),
            stdout="Not applicable",
            stderr=traceback.format_exc(),
        )
        release_mirror(mirror, RepoMirrorStatus.FAIL)
        return

    if tags == []:
        emit_log(
            mirror,
            "repo_mirror_sync_success",
            "end",
            "'%s' with tag pattern '%s'"
            % (mirror.external_reference, ",".join(mirror.root_rule.rule_value)),
            tags="No tags matched",
        )
        release_mirror(mirror, RepoMirrorStatus.SUCCESS)
        return

    # Sync tags
    now_ms = database.get_epoch_timestamp_ms()
    overall_status = RepoMirrorStatus.SUCCESS
    try:
        delete_obsolete_tags(mirror, tags)

        try:
            username = (
                mirror.external_registry_username.decrypt()
                if mirror.external_registry_username
                else None
            )
            password = (
                mirror.external_registry_password.decrypt()
                if mirror.external_registry_password
                else None
            )
        except DecryptionFailureException:
            logger.exception(
                "Failed to decrypt username or password for mirroring %s", mirror.repository
            )
            raise

        dest_server = (
            app.config.get("REPO_MIRROR_SERVER_HOSTNAME", None) or app.config["SERVER_HOSTNAME"]
        )

        for tag in tags:
            src_image = "docker://%s:%s" % (mirror.external_reference, tag)
            dest_image = "docker://%s/%s/%s:%s" % (
                dest_server,
                mirror.repository.namespace_user.username,
                mirror.repository.name,
                tag,
            )
            with database.CloseForLongOperation(app.config):
                result = skopeo.copy(
                    src_image,
                    dest_image,
                    src_tls_verify=mirror.external_registry_config.get("verify_tls", True),
                    dest_tls_verify=app.config.get(
                        "REPO_MIRROR_TLS_VERIFY", True
                    ),  # TODO: is this a config choice or something else?
                    src_username=username,
                    src_password=password,
                    dest_username=mirror.internal_robot.username,
                    dest_password=retrieve_robot_token(mirror.internal_robot),
                    proxy=mirror.external_registry_config.get("proxy", {}),
                    verbose_logs=verbose_logs,
                )

            if not result.success:
                overall_status = RepoMirrorStatus.FAIL
                emit_log(
                    mirror,
                    "repo_mirror_sync_tag_failed",
                    "finish",
                    "Source '%s' failed to sync" % src_image,
                    tag=tag,
                    stdout=result.stdout,
                    stderr=result.stderr,
                )
                logger.info("Source '%s' failed to sync.", src_image)
            else:
                emit_log(
                    mirror,
                    "repo_mirror_sync_tag_success",
                    "finish",
                    "Source '%s' successful sync" % src_image,
                    tag=tag,
                    stdout=result.stdout,
                    stderr=result.stderr,
                )
                logger.info("Source '%s' successful sync.", src_image)

            mirror = claim_mirror(mirror)
            if mirror is None:
                emit_log(
                    mirror,
                    "repo_mirror_sync_failed",
                    "lost",
                    "'%s' with tag pattern '%s'"
                    % (mirror.external_reference, ",".join(mirror.root_rule.rule_value)),
                )
    except Exception:
        overall_status = RepoMirrorStatus.FAIL
        emit_log(
            mirror,
            "repo_mirror_sync_failed",
            "end",
            "'%s' with tag pattern '%s': INTERNAL ERROR"
            % (mirror.external_reference, ",".join(mirror.root_rule.rule_value)),
            tags=", ".join(tags),
            stdout="Not applicable",
            stderr=traceback.format_exc(),
        )
        release_mirror(mirror, overall_status)
        return
    finally:
        if overall_status == RepoMirrorStatus.FAIL:
            emit_log(
                mirror,
                "repo_mirror_sync_failed",
                "lost",
                "'%s' with tag pattern '%s'"
                % (mirror.external_reference, ",".join(mirror.root_rule.rule_value)),
            )
            rollback(mirror, now_ms)
        else:
            emit_log(
                mirror,
                "repo_mirror_sync_success",
                "end",
                "'%s' with tag pattern '%s'"
                % (mirror.external_reference, ",".join(mirror.root_rule.rule_value)),
                tags=", ".join(tags),
            )
        release_mirror(mirror, overall_status)

    return overall_status
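# Common to every snippet in this collection: any call that can block for a
# long time (a skopeo copy, a sleep, streaming a blob) is wrapped in
# `database.CloseForLongOperation(app.config)` so a pooled database connection
# is not held open across it. A minimal, hypothetical usage sketch
# (`long_network_call` is illustrative, not a Quay function):
def run_without_db_connection(long_network_call):
    with database.CloseForLongOperation(app.config):
        # No DB connection is held while this runs; peewee reconnects lazily
        # on the first query after the block exits.
        return long_network_call()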