def update_images(namespace_name, repo_name):
    """Finalize a v1 docker push for the given repository.

    Requires modify permission. Finishes the session's manifest builder,
    logs the push, spawns ``repo_push`` notifications for the committed
    tags and records a prometheus counter for each outcome.

    Returns 204 on success; aborts with 403/404/400 otherwise.
    """
    permission = ModifyRepositoryPermission(namespace_name, repo_name)
    if permission.can():
        logger.debug("Looking up repository")
        repository_ref = registry_model.lookup_repository(namespace_name, repo_name,
                                                          kind_filter="image")
        if repository_ref is None:
            # Make sure the repo actually exists.
            image_pushes.labels("v1", 404, "").inc()
            abort(404, message="Unknown repository", issue="unknown-repo")

        # The builder id was stored in the session when the push started.
        builder = lookup_manifest_builder(repository_ref, session.get("manifest_builder"),
                                          storage, docker_v2_signing_key)
        if builder is None:
            image_pushes.labels("v1", 400, "").inc()
            abort(400)

        # Generate a job for each notification that has been added to this repo
        logger.debug("Adding notifications for repository")
        event_data = {
            "updated_tags": [tag.name for tag in builder.committed_tags],
        }
        builder.done()
        track_and_log("push_repo", repository_ref)
        spawn_notification(repository_ref, "repo_push", event_data)
        image_pushes.labels("v1", 204, "").inc()
        return make_response("Updated", 204)

    image_pushes.labels("v1", 403, "").inc()
    abort(403)
def update_images(namespace_name, repo_name):
    """Finalize a v1 docker push (metric_queue variant of update_images).

    Requires modify permission. Finishes the session's manifest builder,
    logs the push, spawns ``repo_push`` notifications for the committed
    tags and increments the repository-push metric.

    Returns 204 on success; aborts with 403/404/400 otherwise.
    """
    permission = ModifyRepositoryPermission(namespace_name, repo_name)
    if permission.can():
        logger.debug('Looking up repository')
        repository_ref = registry_model.lookup_repository(namespace_name, repo_name,
                                                          kind_filter='image')
        if repository_ref is None:
            # Make sure the repo actually exists.
            abort(404, message='Unknown repository', issue='unknown-repo')

        # The builder id was stored in the session when the push started.
        builder = lookup_manifest_builder(repository_ref, session.get('manifest_builder'),
                                          storage, docker_v2_signing_key)
        if builder is None:
            abort(400)

        # Generate a job for each notification that has been added to this repo
        logger.debug('Adding notifications for repository')
        event_data = {
            'updated_tags': [tag.name for tag in builder.committed_tags],
        }
        builder.done()
        track_and_log('push_repo', repository_ref)
        spawn_notification(repository_ref, 'repo_push', event_data)
        metric_queue.repository_push.Inc(
            labelvalues=[namespace_name, repo_name, 'v1', True])
        return make_response('Updated', 204)

    abort(403)
def put_image_json(namespace, repository, image_id):
    """Store the v1 JSON metadata for an image being pushed.

    Requires modify permission. Validates the uploaded JSON (must be a dict
    whose ``id`` matches the URL's image id) and starts a layer in the
    session's manifest builder.

    Returns "true"/200 on success; aborts with 403/400 otherwise.
    """
    logger.debug('Checking repo permissions')
    permission = ModifyRepositoryPermission(namespace, repository)
    if not permission.can():
        abort(403)

    repository_ref = registry_model.lookup_repository(namespace, repository,
                                                      kind_filter='image')
    if repository_ref is None:
        abort(403)

    builder = lookup_manifest_builder(repository_ref, session.get('manifest_builder'),
                                      store, docker_v2_signing_key)
    if builder is None:
        abort(400)

    logger.debug('Parsing image JSON')
    # BUG FIX: `data` must be pre-initialized; previously a ValueError from
    # json.loads/decode left it undefined, turning a malformed body into a
    # NameError (HTTP 500) instead of the intended 400 below.
    data = None
    try:
        uploaded_metadata = request.data
        data = json.loads(uploaded_metadata.decode('utf8'))
    except ValueError:
        pass

    if not data or not isinstance(data, dict):
        abort(400, 'Invalid JSON for image: %(image_id)s\nJSON: %(json)s',
              issue='invalid-request', image_id=image_id, json=request.data)

    if 'id' not in data:
        abort(400, 'Missing key `id` in JSON for image: %(image_id)s',
              issue='invalid-request', image_id=image_id)

    if image_id != data['id']:
        abort(400, 'JSON data contains invalid id for image: %(image_id)s',
              issue='invalid-request', image_id=image_id)

    logger.debug('Looking up repo image')
    location_pref = store.preferred_locations[0]
    username = get_authenticated_user() and get_authenticated_user().username
    layer = builder.start_layer(image_id, uploaded_metadata, location_pref,
                                username, app.config['PUSH_TEMP_TAG_EXPIRATION_SEC'])
    if layer is None:
        abort(400, 'Image %(image_id)s has invalid metadata',
              issue='invalid-request', image_id=image_id)

    return make_response('true', 200)
def put_image_checksum(namespace, repository, image_id):
    """Record or validate the checksum for a pushed v1 image layer.

    Requires modify permission. Accepts either the legacy tarsum header
    (``X-Docker-Checksum``, Docker < 0.10) or the newer payload header
    (``X-Docker-Checksum-Payload``). Legacy checksums are only saved for
    later validation; new-style checksums are validated immediately.

    Returns "true"/200 on success; aborts with 403/400/404 otherwise.
    """
    logger.debug("Checking repo permissions")
    permission = ModifyRepositoryPermission(namespace, repository)
    if not permission.can():
        abort(403)

    repository_ref = registry_model.lookup_repository(namespace, repository,
                                                      kind_filter="image")
    if repository_ref is None:
        abort(403)

    # Docker Version < 0.10 (tarsum+sha):
    old_checksum = request.headers.get("X-Docker-Checksum")

    # Docker Version >= 0.10 (sha):
    new_checksum = request.headers.get("X-Docker-Checksum-Payload")

    # Prefer the newer header when both are present.
    checksum = new_checksum or old_checksum
    if not checksum:
        abort(
            400,
            "Missing checksum for image %(image_id)s",
            issue="missing-checksum",
            image_id=image_id,
        )

    logger.debug("Checking for image in manifest builder")
    builder = lookup_manifest_builder(repository_ref, session.get("manifest_builder"),
                                      store, docker_v2_signing_key)
    if builder is None:
        abort(400)

    layer = builder.lookup_layer(image_id)
    if layer is None:
        abort(404)

    if old_checksum:
        # Legacy tarsum path: stash the checksum; validation happens when the
        # layer data itself is uploaded.
        builder.save_precomputed_checksum(layer, checksum)
        return make_response("true", 200)

    if not builder.validate_layer_checksum(layer, checksum):
        logger.debug(
            "put_image_checksum: Wrong checksum. Given: %s and expected: %s",
            checksum,
            builder.get_layer_checksums(layer),
        )
        abort(
            400,
            "Checksum mismatch for image: %(image_id)s",
            issue="checksum-mismatch",
            image_id=image_id,
        )

    return make_response("true", 200)
def delete_tag(namespace_name, repo_name, tag):
    """Delete a tag from a repository the caller may modify.

    Aborts 403 when the caller lacks modify permission or the repository
    does not exist, 404 when the tag itself is missing. Logs the deletion
    and returns "Deleted"/200 on success.
    """
    permission = ModifyRepositoryPermission(namespace_name, repo_name)
    repository_ref = registry_model.lookup_repository(namespace_name, repo_name,
                                                      kind_filter="image")

    # Guard clause: bail out early unless the caller can modify an existing repo.
    if not permission.can() or repository_ref is None:
        abort(403)

    if not registry_model.delete_tag(repository_ref, tag):
        abort(404)

    track_and_log("delete_tag", repository_ref, tag=tag)
    return make_response("Deleted", 200)
def get_repository_build(self, uuid):
    """Look up a build by uuid and return it as a RepositoryBuild DTO.

    Re-raises the model layer's lookup failure as this interface's
    InvalidRepositoryBuildException. Permissions are computed for the
    *current* caller against the build's repository.
    """
    try:
        build = model.build.get_repository_build(uuid)
    except model.InvalidRepositoryBuildException as e:
        # Translate the data-model exception into the interface's exception type.
        raise InvalidRepositoryBuildException(str(e))

    repo_namespace = build.repository_namespace_user_username
    repo_name = build.repository_name
    can_read = ReadRepositoryPermission(repo_namespace, repo_name).can()
    can_write = ModifyRepositoryPermission(repo_namespace, repo_name).can()
    can_admin = AdministerRepositoryPermission(repo_namespace, repo_name).can()
    job_config = get_job_config(build.job_config)
    phase, status, error = _get_build_status(build)
    # NOTE(review): uses self.resource_key rather than build.resource_key —
    # presumably intentional on this interface object, but confirm.
    url = userfiles.get_file_url(self.resource_key, get_request_ip(), requires_cors=True)
    return RepositoryBuild(build.uuid, build.logs_archived, repo_namespace,
                           repo_name, can_write, can_read,
                           _create_user(build.pull_robot), build.resource_key,
                           BuildTrigger(build.trigger.uuid,
                                        build.trigger.service.name,
                                        _create_user(build.trigger.pull_robot),
                                        can_read, can_admin, True),
                           # NOTE(review): display_name is passed twice; the DTO
                           # presumably has two name fields — verify against the
                           # RepositoryBuild definition.
                           build.display_name, build.display_name,
                           build.started, job_config, phase, status, error, url)
def logarchive(file_id):
    """Stream the archived (gzip-compressed) build logs for a build.

    Permission model: repository writers always have access; readers (or the
    public, for public repos) only when the READER_BUILD_LOGS feature is on.
    All failure modes (unknown build, no permission, storage read error)
    deliberately surface as 403 to avoid leaking build-uuid validity.
    """
    JSON_MIMETYPE = "application/json"
    try:
        found_build = model.build.get_repository_build(file_id)
    except model.InvalidRepositoryBuildException as ex:
        logger.exception(ex, extra={"build_uuid": file_id})
        abort(403)

    repo = found_build.repository
    has_permission = ModifyRepositoryPermission(repo.namespace_user.username,
                                                repo.name).can()
    if features.READER_BUILD_LOGS and not has_permission:
        # Readers (or anyone, for public repos) may view logs when the
        # feature flag permits it.
        if ReadRepositoryPermission(
                repo.namespace_user.username,
                repo.name).can() or model.repository.repository_is_public(
                    repo.namespace_user.username, repo.name):
            has_permission = True

    if not has_permission:
        abort(403)

    try:
        path = log_archive.get_file_id_path(file_id)
        # Logs are stored gzip-compressed; decompress transparently while streaming.
        data_stream = log_archive._storage.stream_read_file(
            log_archive._locations, path)
        return send_file(GzipInputStream(data_stream), mimetype=JSON_MIMETYPE)
    except IOError:
        logger.exception("Could not read archived logs")
        abort(403)
def wrapper(namespace_name, repo_name, *args, **kwargs): response = f(namespace_name, repo_name, *args, **kwargs) # Setting session namespace and repository session["namespace"] = namespace_name session["repository"] = repo_name # We run our index and registry on the same hosts for now registry_server = urlparse.urlparse(request.url).netloc response.headers["X-Docker-Endpoints"] = registry_server has_token_request = request.headers.get("X-Docker-Token", "") force_grant = add_grant_for_status == response.status_code if has_token_request or force_grant: grants = [] if scope == GrantType.READ_REPOSITORY: if force_grant or ReadRepositoryPermission(namespace_name, repo_name).can(): grants.append(repository_read_grant(namespace_name, repo_name)) elif scope == GrantType.WRITE_REPOSITORY: if force_grant or ModifyRepositoryPermission(namespace_name, repo_name).can(): grants.append(repository_write_grant(namespace_name, repo_name)) # Generate a signed token for the user (if any) and the grants (if any) if grants or get_authenticated_user(): user_context = get_authenticated_user() and get_authenticated_user().username signature = generate_signed_token(grants, user_context) response.headers["WWW-Authenticate"] = signature response.headers["X-Docker-Token"] = signature return response
def buildlogs(build_uuid):
    """Return the logs for a build, either as a redirect to the archived
    file or as inline JSON for still-live logs.

    Permission model matches logarchive: writers always; readers/public only
    when the READER_BUILD_LOGS feature is enabled. Unknown builds and missing
    permission both yield 403.
    """
    found_build = model.build.get_repository_build(build_uuid)
    if not found_build:
        abort(403)

    repo = found_build.repository
    has_permission = ModifyRepositoryPermission(repo.namespace_user.username,
                                                repo.name).can()
    if features.READER_BUILD_LOGS and not has_permission:
        if ReadRepositoryPermission(
                repo.namespace_user.username,
                repo.name).can() or model.repository.repository_is_public(
                    repo.namespace_user.username, repo.name):
            has_permission = True

    if not has_permission:
        abort(403)

    # If the logs have been archived, just return a URL of the completed archive
    if found_build.logs_archived:
        return redirect(
            log_archive.get_file_url(found_build.uuid, get_request_ip()))

    _, logs = build_logs.get_log_entries(found_build.uuid, 0)
    response = jsonify({"logs": [log for log in logs]})

    # Serve as a download with a predictable filename.
    response.headers[
        "Content-Disposition"] = "attachment;filename=" + found_build.uuid + ".json"
    return response
def push(namespace, package_name):
    """Handle a CNR (app registry) package push.

    Creates the backing repository on first push (requires namespace create
    permission), verifies push permission, validates the JSON payload
    (``release``, ``media_type``, ``blob``) and delegates to
    cnr_registry.push. Returns the JSON view of the created release.

    Raises InvalidUsage for bad names/payloads and Forbidden for missing
    permissions.
    """
    reponame = repo_name(namespace, package_name)

    # Reject repository names the registry could never serve.
    if not REPOSITORY_NAME_REGEX.match(package_name):
        logger.debug("Found invalid repository name CNR push: %s", reponame)
        raise InvalidUsage("invalid repository name: %s" % reponame)

    values = request.get_json(force=True, silent=True) or {}
    private = values.get("visibility", "private")

    owner = get_authenticated_user()

    if not Package.exists(reponame):
        # First push creates the repository, which needs create permission.
        if not CreateRepositoryPermission(namespace).can():
            raise Forbidden(
                "Unauthorized access for: %s" % reponame,
                {
                    "package": reponame,
                    "scopes": ["create"]
                },
            )

        Package.create_repository(reponame, private, owner)
        logs_model.log_action("create_repo", namespace,
                              repository_name=package_name)

    if not ModifyRepositoryPermission(namespace, package_name).can():
        raise Forbidden("Unauthorized access for: %s" % reponame, {
            "package": reponame,
            "scopes": ["push"]
        })

    # Validate the required payload fields (idiomatic `not in`).
    if "release" not in values:
        raise InvalidUsage("Missing release")

    if "media_type" not in values:
        raise InvalidUsage("Missing media_type")

    if "blob" not in values:
        raise InvalidUsage("Missing blob")

    release_version = str(values["release"])
    media_type = values["media_type"]
    force = request.args.get("force", "false") == "true"

    blob = Blob(reponame, values["blob"])
    app_release = cnr_registry.push(
        reponame,
        release_version,
        media_type,
        blob,
        force,
        package_class=Package,
        user=owner,
        visibility=private,
    )
    logs_model.log_action("push_repo", namespace,
                          repository_name=package_name,
                          metadata={"release": release_version})
    return jsonify(app_release)
def _get_tuf_root(repository_ref, namespace, reponame):
    """Pick the TUF metadata root to serve for a repository.

    Disabled unless the signing feature is on and the repository exists with
    trust enabled. Writers get the signer-rooted metadata; all other callers
    get the Quay root.
    """
    trust_active = (features.SIGNING
                    and repository_ref is not None
                    and repository_ref.trust_enabled)
    if not trust_active:
        return DISABLED_TUF_ROOT

    # Users with write access to a repository will see signer-rooted TUF metadata
    can_modify = ModifyRepositoryPermission(namespace, reponame).can()
    return SIGNER_TUF_ROOT if can_modify else QUAY_TUF_ROOT
def build_status_view(build_obj):
    """Serialize a build model object into the API's status dict.

    Includes the caller's permissions (is_writer), trigger info filtered by
    read access, and — for writers or when READER_BUILD_LOGS is enabled —
    a URL for the build's source archive.
    """
    phase, status, error = _get_build_status(build_obj)
    repo_namespace = build_obj.repository.namespace_user.username
    repo_name = build_obj.repository.name
    can_read = ReadRepositoryPermission(repo_namespace, repo_name).can()
    can_write = ModifyRepositoryPermission(repo_namespace, repo_name).can()
    can_admin = AdministerRepositoryPermission(repo_namespace, repo_name).can()
    job_config = get_job_config(build_obj)

    resp = {
        "id": build_obj.uuid,
        "phase": phase,
        "started": format_date(build_obj.started),
        "display_name": build_obj.display_name,
        "status": status or {},
        "subdirectory": job_config.get("build_subdir", ""),
        # Same source as "subdirectory" — presumably kept for backward
        # compatibility with older clients; confirm before deduplicating.
        "dockerfile_path": job_config.get("build_subdir", ""),
        "context": job_config.get("context", ""),
        "tags": job_config.get("docker_tags", []),
        "manual_user": job_config.get("manual_user", None),
        "is_writer": can_write,
        "trigger": trigger_view(build_obj.trigger, can_read, can_admin,
                                for_build=True),
        # Trigger metadata may contain private details; only expose to readers.
        "trigger_metadata": job_config.get("trigger_metadata", None) if can_read else None,
        "resource_key": build_obj.resource_key,
        "pull_robot": user_view(build_obj.pull_robot) if build_obj.pull_robot else None,
        "repository": {
            "namespace": repo_namespace,
            "name": repo_name
        },
        "error": error,
    }

    if can_write or features.READER_BUILD_LOGS:
        if build_obj.resource_key is not None:
            resp["archive_url"] = user_files.get_file_url(
                build_obj.resource_key, get_request_ip(), requires_cors=True)
        elif job_config.get("archive_url", None):
            resp["archive_url"] = job_config["archive_url"]

    return resp
def build_status_view(build_obj):
    """Serialize a build model object into the API's status dict
    (single-quote variant of build_status_view).

    Includes the caller's permissions (is_writer), trigger info filtered by
    read access, and — for writers or when READER_BUILD_LOGS is enabled —
    a URL for the build's source archive.
    """
    phase, status, error = _get_build_status(build_obj)
    repo_namespace = build_obj.repository.namespace_user.username
    repo_name = build_obj.repository.name
    can_read = ReadRepositoryPermission(repo_namespace, repo_name).can()
    can_write = ModifyRepositoryPermission(repo_namespace, repo_name).can()
    can_admin = AdministerRepositoryPermission(repo_namespace, repo_name).can()
    job_config = get_job_config(build_obj)

    resp = {
        'id': build_obj.uuid,
        'phase': phase,
        'started': format_date(build_obj.started),
        'display_name': build_obj.display_name,
        'status': status or {},
        'subdirectory': job_config.get('build_subdir', ''),
        # Same source as 'subdirectory' — presumably kept for backward
        # compatibility with older clients; confirm before deduplicating.
        'dockerfile_path': job_config.get('build_subdir', ''),
        'context': job_config.get('context', ''),
        'tags': job_config.get('docker_tags', []),
        'manual_user': job_config.get('manual_user', None),
        'is_writer': can_write,
        'trigger': trigger_view(build_obj.trigger, can_read, can_admin,
                                for_build=True),
        # Trigger metadata may contain private details; only expose to readers.
        'trigger_metadata': job_config.get('trigger_metadata', None) if can_read else None,
        'resource_key': build_obj.resource_key,
        'pull_robot': user_view(build_obj.pull_robot) if build_obj.pull_robot else None,
        'repository': {
            'namespace': repo_namespace,
            'name': repo_name
        },
        'error': error,
    }

    if can_write or features.READER_BUILD_LOGS:
        if build_obj.resource_key is not None:
            resp['archive_url'] = user_files.get_file_url(
                build_obj.resource_key, get_request_ip(), requires_cors=True)
        elif job_config.get('archive_url', None):
            resp['archive_url'] = job_config['archive_url']

    return resp
def get(self, namespace, repository, build_uuid):
    """ Return the build logs for the build specified by the build uuid. """
    # Writers may always view logs; readers only when the feature flag allows.
    can_write = ModifyRepositoryPermission(namespace, repository).can()
    if not features.READER_BUILD_LOGS and not can_write:
        raise Unauthorized()

    build = model.build.get_repository_build(build_uuid)

    # The build must exist and belong to the requested repository.
    belongs_to_repo = (
        build
        and build.repository.name == repository
        and build.repository.namespace_user.username == namespace
    )
    if not belongs_to_repo:
        raise NotFound()

    return get_logs_or_log_url(build)
def put_tag(namespace_name, repo_name, tag):
    """Create or move a v1 tag to point at the given image id.

    During an in-progress push the tag is committed through the session's
    manifest builder; outside a push, an existing legacy image is retargeted.
    Returns "Created"/200 on success; aborts 403/400 otherwise.
    """
    permission = ModifyRepositoryPermission(namespace_name, repo_name)
    repository_ref = registry_model.lookup_repository(namespace_name, repo_name,
                                                      kind_filter="image")

    # Guard clause: caller must be able to modify an existing repository.
    if not permission.can() or repository_ref is None:
        abort(403)

    if not TAG_REGEX.match(tag):
        abort(400, TAG_ERROR)

    image_id = json.loads(request.data)

    # Check for the image ID first in a builder (for an in-progress push).
    builder = lookup_manifest_builder(repository_ref, session.get("manifest_builder"),
                                      storage, docker_v2_signing_key)
    if builder is not None:
        layer = builder.lookup_layer(image_id)
        if layer is not None:
            committed_tag = builder.commit_tag_and_manifest(tag, layer)
            if committed_tag is None:
                abort(400)
            return make_response("Created", 200)

    # Check if there is an existing image we should use (for PUT calls outside
    # of a normal push operation).
    legacy_image = registry_model.get_legacy_image(repository_ref, image_id)
    if legacy_image is None:
        abort(400)

    retargeted = registry_model.retarget_tag(repository_ref, tag, legacy_image,
                                             storage, docker_v2_signing_key)
    if retargeted is None:
        abort(400)

    return make_response("Created", 200)
def get(self, namespace, repository, parsed_args):
    """ Fetch the specified repository. """
    logger.debug("Get repo: %s/%s" % (namespace, repository))
    include_tags = parsed_args["includeTags"]
    max_tags = 500
    repo = model.get_repo(namespace, repository, get_authenticated_user(),
                          include_tags, max_tags)
    if repo is None:
        raise NotFound()

    # Writable only with modify permission AND when the repository is in its
    # NORMAL state (read-only/mirror repos are never writable).
    has_write_permission = ModifyRepositoryPermission(
        namespace, repository).can()
    has_write_permission = has_write_permission and repo.state == RepositoryState.NORMAL

    repo_data = repo.to_dict()
    repo_data["can_write"] = has_write_permission
    repo_data["can_admin"] = AdministerRepositoryPermission(
        namespace, repository).can()

    # Activity stats are only meaningful for image repositories.
    if parsed_args[
            "includeStats"] and repo.repository_base_elements.kind_name != "application":
        stats = []
        found_dates = {}

        for count in repo.counts:
            stats.append(count.to_dict())
            found_dates["%s/%s" % (count.date.month, count.date.day)] = True

        # Fill in any missing stats with zeros.
        for day in range(1, MAX_DAYS_IN_3_MONTHS):
            day_date = datetime.now() - timedelta(days=day)
            key = "%s/%s" % (day_date.month, day_date.day)
            if key not in found_dates:
                stats.append({
                    "date": day_date.date().isoformat(),
                    "count": 0,
                })

        repo_data["stats"] = stats
    return repo_data
def push(namespace, package_name):
    """Handle a CNR (app registry) package push (single-quote variant).

    Creates the backing repository on first push (requires namespace create
    permission), verifies push permission, validates the JSON payload and
    delegates to cnr_registry.push. Returns the JSON view of the release.

    Raises InvalidUsage for bad names/payloads and Forbidden for missing
    permissions.
    """
    reponame = repo_name(namespace, package_name)
    # Reject repository names the registry could never serve.
    if not REPOSITORY_NAME_REGEX.match(package_name):
        logger.debug('Found invalid repository name CNR push: %s', reponame)
        raise InvalidUsage('invalid repository name: %s' % reponame)

    values = request.get_json(force=True, silent=True) or {}
    private = values.get('visibility', 'private')

    owner = get_authenticated_user()

    if not Package.exists(reponame):
        # First push creates the repository, which needs create permission.
        if not CreateRepositoryPermission(namespace).can():
            raise Forbidden("Unauthorized access for: %s" % reponame,
                            {"package": reponame, "scopes": ['create']})

        Package.create_repository(reponame, private, owner)
        logs_model.log_action('create_repo', namespace,
                              repository_name=package_name)

    if not ModifyRepositoryPermission(namespace, package_name).can():
        raise Forbidden("Unauthorized access for: %s" % reponame,
                        {"package": reponame, "scopes": ['push']})

    # Required payload fields.
    if not 'release' in values:
        raise InvalidUsage('Missing release')

    if not 'media_type' in values:
        raise InvalidUsage('Missing media_type')

    if not 'blob' in values:
        raise InvalidUsage('Missing blob')

    release_version = str(values['release'])
    media_type = values['media_type']
    force = request.args.get('force', 'false') == 'true'

    blob = Blob(reponame, values['blob'])
    app_release = cnr_registry.push(reponame, release_version, media_type,
                                    blob, force, package_class=Package,
                                    user=owner, visibility=private)
    logs_model.log_action('push_repo', namespace,
                          repository_name=package_name,
                          metadata={'release': release_version})
    return jsonify(app_release)
def _authorize_or_downscope_request(scope_param, has_valid_auth_context):
    """Resolve a docker token-auth scope string into the set of actions the
    caller is actually granted on the named repository.

    Parses ``scope_param`` (``repository:<name>:<actions>``), checks
    push/pull/* permissions against the repository's state (NORMAL, MIRROR,
    READ_ONLY, MARKED_FOR_DELETION), optionally creates the repository on a
    first push, and returns a scopeResult with the downscoped action list.
    Returns None for an empty scope with a valid auth context.

    Raises Unauthorized, InvalidRequest, NameInvalid, NamespaceDisabled,
    Unknown or Unsupported for the various rejection cases.
    """
    # TODO: The complexity of this function is difficult to follow and maintain. Refactor/Cleanup.
    if len(scope_param) == 0:
        if not has_valid_auth_context:
            # In this case, we are doing an auth flow, and it's not an anonymous pull.
            logger.debug("No user and no token sent for empty scope list")
            raise Unauthorized()

        return None

    match = _get_scope_regex().match(scope_param)
    if match is None:
        logger.debug("Match: %s", match)
        logger.debug("len: %s", len(scope_param))
        logger.warning("Unable to decode repository and actions: %s", scope_param)
        raise InvalidRequest("Unable to decode repository and actions: %s" % scope_param)

    logger.debug("Match: %s", match.groups())

    registry_and_repo = match.group(1)
    namespace_and_repo = match.group(2)
    requested_actions = match.group(3).split(",")

    lib_namespace = app.config["LIBRARY_NAMESPACE"]
    namespace, reponame = parse_namespace_repository(namespace_and_repo, lib_namespace)

    # Ensure that we are never creating an invalid repository.
    if not REPOSITORY_NAME_REGEX.match(reponame):
        logger.debug("Found invalid repository name in auth flow: %s", reponame)
        if len(namespace_and_repo.split("/")) > 1:
            msg = "Nested repositories are not supported. Found: %s" % namespace_and_repo
            raise NameInvalid(message=msg)

        raise NameInvalid(message="Invalid repository name: %s" % namespace_and_repo)

    # Ensure the namespace is enabled.
    if registry_model.is_existing_disabled_namespace(namespace):
        msg = "Namespace %s has been disabled. Please contact a system administrator." % namespace
        raise NamespaceDisabled(message=msg)

    final_actions = []

    repository_ref = registry_model.lookup_repository(namespace, reponame)
    repo_is_public = repository_ref is not None and repository_ref.is_public
    invalid_repo_message = ""
    if repository_ref is not None and repository_ref.kind != "image":
        invalid_repo_message = (
            "This repository is for managing %s " +
            "and not container images.") % repository_ref.kind

    # Ensure the repository is not marked for deletion.
    if repository_ref is not None and repository_ref.state == RepositoryState.MARKED_FOR_DELETION:
        raise Unknown(message="Unknown repository")

    if "push" in requested_actions:
        # Check if there is a valid user or token, as otherwise the repository cannot be
        # accessed.
        if has_valid_auth_context:
            user = get_authenticated_user()

            # Lookup the repository. If it exists, make sure the entity has modify
            # permission. Otherwise, make sure the entity has create permission.
            if repository_ref:
                if ModifyRepositoryPermission(namespace, reponame).can():
                    if repository_ref is not None and repository_ref.kind != "image":
                        raise Unsupported(message=invalid_repo_message)

                    # Check for different repository states.
                    if repository_ref.state == RepositoryState.NORMAL:
                        # In NORMAL mode, if the user has permission, then they can push.
                        final_actions.append("push")
                    elif repository_ref.state == RepositoryState.MIRROR:
                        # In MIRROR mode, only the mirroring robot can push.
                        mirror = model.repo_mirror.get_mirror(repository_ref.id)
                        robot = mirror.internal_robot if mirror is not None else None
                        if robot is not None and user is not None and robot == user:
                            assert robot.robot
                            final_actions.append("push")
                        else:
                            logger.debug(
                                "Repository %s/%s push requested for non-mirror robot %s: %s",
                                namespace,
                                reponame,
                                robot,
                                user,
                            )
                    elif repository_ref.state == RepositoryState.READ_ONLY:
                        # No pushing allowed in read-only state.
                        pass
                    else:
                        logger.warning(
                            "Unknown state for repository %s: %s",
                            repository_ref,
                            repository_ref.state,
                        )
                else:
                    logger.debug("No permission to modify repository %s/%s",
                                 namespace, reponame)
            else:
                # TODO: Push-to-create functionality should be configurable
                if CreateRepositoryPermission(namespace).can() and user is not None:
                    logger.debug("Creating repository: %s/%s", namespace, reponame)
                    repository_ref = RepositoryReference.for_repo_obj(
                        model.repository.create_repository(namespace, reponame, user))
                    final_actions.append("push")
                else:
                    logger.debug("No permission to create repository %s/%s",
                                 namespace, reponame)

    if "pull" in requested_actions:
        # Grant pull if the user can read the repo or it is public.
        if ReadRepositoryPermission(namespace, reponame).can() or repo_is_public:
            if repository_ref is not None and repository_ref.kind != "image":
                raise Unsupported(message=invalid_repo_message)

            final_actions.append("pull")
        else:
            logger.debug("No permission to pull repository %s/%s", namespace, reponame)

    if "*" in requested_actions:
        # Grant * user is admin
        if AdministerRepositoryPermission(namespace, reponame).can():
            if repository_ref is not None and repository_ref.kind != "image":
                raise Unsupported(message=invalid_repo_message)

            # Admin scope is withheld for mirror/read-only repositories.
            if repository_ref and repository_ref.state in (
                    RepositoryState.MIRROR,
                    RepositoryState.READ_ONLY,
            ):
                logger.debug("No permission to administer repository %s/%s",
                             namespace, reponame)
            else:
                assert repository_ref.state == RepositoryState.NORMAL
                final_actions.append("*")
        else:
            logger.debug("No permission to administer repository %s/%s",
                         namespace, reponame)

    # Final sanity checks.
    if "push" in final_actions:
        assert repository_ref.state != RepositoryState.READ_ONLY

    if "*" in final_actions:
        assert repository_ref.state == RepositoryState.NORMAL

    return scopeResult(
        actions=final_actions,
        namespace=namespace,
        repository=reponame,
        registry_and_repo=registry_and_repo,
        tuf_root=_get_tuf_root(repository_ref, namespace, reponame),
    )
def post(self, namespace, repository):
    """ Request that a repository be built and pushed from the specified input. """
    logger.debug("User requested repository initialization.")
    request_json = request.get_json()

    dockerfile_id = request_json.get("file_id", None)
    archive_url = request_json.get("archive_url", None)

    if not dockerfile_id and not archive_url:
        raise InvalidRequest("file_id or archive_url required")

    if archive_url:
        archive_match = None
        try:
            archive_match = urlparse(archive_url)
        except ValueError:
            pass

        if not archive_match:
            raise InvalidRequest("Invalid Archive URL: Must be a valid URI")

        scheme = archive_match.scheme
        if scheme != "http" and scheme != "https":
            raise InvalidRequest("Invalid Archive URL: Must be http or https")

    context, subdir = self.get_dockerfile_context(request_json)
    tags = request_json.get("docker_tags", ["latest"])
    pull_robot_name = request_json.get("pull_robot", None)

    # Verify the security behind the pull robot.
    if pull_robot_name:
        result = parse_robot_username(pull_robot_name)
        if result:
            try:
                model.user.lookup_robot(pull_robot_name)
            except model.InvalidRobotException:
                raise NotFound()

            # Make sure the user has administer permissions for the robot's namespace.
            (robot_namespace, _) = result
            if not AdministerOrganizationPermission(robot_namespace).can():
                raise Unauthorized()
        else:
            raise Unauthorized()

    # Check if the dockerfile resource has already been used. If so, then it
    # can only be reused if the user has access to the repository in which the
    # dockerfile was previously built.
    if dockerfile_id:
        associated_repository = model.build.get_repository_for_resource(dockerfile_id)
        if associated_repository:
            # BUG FIX: the permission object itself is always truthy; the
            # original omitted .can(), so this reuse check never denied access.
            if not ModifyRepositoryPermission(
                    associated_repository.namespace_user.username,
                    associated_repository.name).can():
                raise Unauthorized()

    # Start the build.
    repo = model.repository.get_repository(namespace, repository)
    if repo is None:
        raise NotFound()

    try:
        # Name the build after the dockerfile checksum, or a short hash of the
        # archive URL when building from an archive.
        build_name = (user_files.get_file_checksum(dockerfile_id)
                      if dockerfile_id
                      else hashlib.sha224(archive_url.encode("ascii")).hexdigest()[0:7])
    except IOError:
        raise InvalidRequest("File %s could not be found or is invalid" % dockerfile_id)

    prepared = PreparedBuild()
    prepared.build_name = build_name
    prepared.dockerfile_id = dockerfile_id
    prepared.archive_url = archive_url
    prepared.tags = tags
    prepared.subdirectory = subdir
    prepared.context = context
    prepared.is_manual = True
    prepared.metadata = {}
    try:
        build_request = start_build(repo, prepared, pull_robot_name=pull_robot_name)
    except MaximumBuildsQueuedException:
        abort(429, message="Maximum queued build rate exceeded.")
    except BuildTriggerDisabledException:
        abort(400, message="Build trigger is disabled")

    resp = build_status_view(build_request)
    repo_string = "%s/%s" % (namespace, repository)
    headers = {
        "Location": api.url_for(RepositoryBuildStatus, repository=repo_string,
                                build_uuid=build_request.uuid),
    }
    return resp, 201, headers
def create_repository(namespace_name, repo_name):
    """v1 push entrypoint: ensure the target repository exists and start a
    manifest-builder session for the push.

    Validates the name, then either verifies modify permission on an existing
    image repository or creates a new one (requiring create permission and an
    authenticated user). Publishes a push_start user event, creates a
    manifest builder and stashes its id in the session.

    Returns "Created"/201 on success; aborts 400/401/403/404/405 otherwise.
    """
    # Verify that the repository name is valid.
    if not REPOSITORY_NAME_REGEX.match(repo_name):
        abort(
            400,
            message=
            "Invalid repository name. Repository names cannot contain slashes."
        )

    logger.debug("Looking up repository %s/%s", namespace_name, repo_name)
    repository_ref = registry_model.lookup_repository(namespace_name, repo_name)

    if repository_ref is None and get_authenticated_user() is None:
        logger.debug("Attempt to create repository %s/%s without user auth",
                     namespace_name, repo_name)
        abort(
            401,
            message=
            'Cannot create a repository as a guest. Please login via "docker login" first.',
            issue="no-login",
        )
    elif repository_ref:
        # Repository already exists: the caller needs modify permission and it
        # must be an image repository.
        modify_perm = ModifyRepositoryPermission(namespace_name, repo_name)
        if not modify_perm.can():
            abort(
                403,
                message=
                "You do not have permission to modify repository %(namespace)s/%(repository)s",
                issue="no-repo-write-permission",
                namespace=namespace_name,
                repository=repo_name,
            )
        elif repository_ref.kind != "image":
            msg = (
                "This repository is for managing %s resources and not container images."
                % repository_ref.kind)
            abort(405, message=msg, namespace=namespace_name)
    else:
        # New repository: requires namespace create permission.
        create_perm = CreateRepositoryPermission(namespace_name)
        if not create_perm.can():
            logger.warning(
                "Attempt to create a new repo %s/%s with insufficient perms",
                namespace_name,
                repo_name,
            )
            msg = 'You do not have permission to create repositories in namespace "%(namespace)s"'
            abort(403, message=msg, issue="no-create-permission",
                  namespace=namespace_name)

        # Attempt to create the new repository.
        logger.debug(
            "Creating repository %s/%s with owner: %s",
            namespace_name,
            repo_name,
            get_authenticated_user().username,
        )
        repository_ref = model.repository.create_repository(
            namespace_name, repo_name, get_authenticated_user())

    if get_authenticated_user():
        user_event_data = {
            "action": "push_start",
            "repository": repo_name,
            "namespace": namespace_name,
        }

        event = userevents.get_event(get_authenticated_user().username)
        event.publish_event_data("docker-cli", user_event_data)

    # Start a new builder for the repository and save its ID in the session.
    assert repository_ref
    builder = create_manifest_builder(repository_ref, storage, docker_v2_signing_key)
    logger.debug("Started repo push with manifest builder %s", builder)
    if builder is None:
        abort(404, message="Unknown repository", issue="unknown-repo")

    session["manifest_builder"] = builder.builder_id
    return make_response("Created", 201)
def put_image_layer(namespace, repository, image_id):
    """Receive and store the binary layer data for a v1 image push.

    Streams the request body into blob storage (handling chunked transfer
    encoding), computing the Docker V1 simple checksum — and, for older
    clients that precomputed a tarsum, the tarsum as well — while uploading.
    Validates against any precomputed checksum, assigns the blob to the
    layer in the manifest builder and queues storage replication.

    Returns "true"/200 on success; aborts 403/400/404/500/520 otherwise.
    """
    logger.debug("Checking repo permissions")
    permission = ModifyRepositoryPermission(namespace, repository)
    if not permission.can():
        abort(403)

    repository_ref = registry_model.lookup_repository(namespace, repository,
                                                      kind_filter="image")
    if repository_ref is None:
        abort(403)

    logger.debug("Checking for image in manifest builder")
    builder = lookup_manifest_builder(repository_ref, session.get("manifest_builder"),
                                      store, docker_v2_signing_key)
    if builder is None:
        abort(400)

    layer = builder.lookup_layer(image_id)
    if layer is None:
        abort(404)

    logger.debug("Storing layer data")

    input_stream = request.stream
    if request.headers.get("transfer-encoding") == "chunked":
        # Careful, might work only with WSGI servers supporting chunked
        # encoding (Gunicorn)
        input_stream = request.environ["wsgi.input"]

    expiration_sec = app.config["PUSH_TEMP_TAG_EXPIRATION_SEC"]
    settings = BlobUploadSettings(
        maximum_blob_size=app.config["MAXIMUM_LAYER_SIZE"],
        committed_blob_expiration=expiration_sec,
    )

    extra_handlers = []

    # Add a handler that copies the data into a temp file. This is used to calculate the tarsum,
    # which is only needed for older versions of Docker.
    requires_tarsum = bool(builder.get_layer_checksums(layer))
    if requires_tarsum:
        tmp, tmp_hndlr = store.temp_store_handler()
        extra_handlers.append(tmp_hndlr)

    # Add a handler which computes the simple Docker V1 checksum.
    h, sum_hndlr = checksums.simple_checksum_handler(layer.v1_metadata_string)
    extra_handlers.append(sum_hndlr)

    uploaded_blob = None
    try:
        with upload_blob(repository_ref, store, settings,
                         extra_blob_stream_handlers=extra_handlers) as manager:
            manager.upload_chunk(app.config, input_stream)
            uploaded_blob = manager.commit_to_blob(app.config)
    except BlobUploadException:
        logger.exception("Exception when writing image data")
        abort(520, "Image %(image_id)s could not be written. Please try again.",
              image_id=image_id)

    # Compute the final checksum
    csums = []
    csums.append("sha256:{0}".format(h.hexdigest()))

    try:
        if requires_tarsum:
            tmp.seek(0)
            csums.append(
                checksums.compute_tarsum(tmp, layer.v1_metadata_string))
            tmp.close()
    except (IOError, checksums.TarError) as exc:
        # Best effort: a tarsum failure only degrades legacy-client validation.
        logger.debug("put_image_layer: Error when computing tarsum %s", exc)

    # If there was already a precomputed checksum, validate against it now.
    if builder.get_layer_checksums(layer):
        checksum = builder.get_layer_checksums(layer)[0]
        if not builder.validate_layer_checksum(layer, checksum):
            logger.debug(
                "put_image_checksum: Wrong checksum. Given: %s and expected: %s",
                checksum,
                builder.get_layer_checksums(layer),
            )
            abort(
                400,
                "Checksum mismatch for image: %(image_id)s",
                issue="checksum-mismatch",
                image_id=image_id,
            )

    # Assign the blob to the layer in the manifest.
    if not builder.assign_layer_blob(layer, uploaded_blob, csums):
        abort(500, "Something went wrong")

    # Send a job to the work queue to replicate the image layer.
    # TODO: move this into a better place.
    queue_storage_replication(namespace, uploaded_blob)
    return make_response("true", 200)
def put_image_json(namespace, repository, image_id): logger.debug("Checking repo permissions") permission = ModifyRepositoryPermission(namespace, repository) if not permission.can(): abort(403) repository_ref = registry_model.lookup_repository(namespace, repository, kind_filter="image") if repository_ref is None: abort(403) builder = lookup_manifest_builder(repository_ref, session.get("manifest_builder"), store, docker_v2_signing_key) if builder is None: abort(400) logger.debug("Parsing image JSON") try: uploaded_metadata = request.data uploaded_metadata_string = uploaded_metadata.decode("utf-8") data = json.loads(uploaded_metadata_string) except ValueError: pass if not data or not isinstance(data, dict): abort( 400, "Invalid JSON for image: %(image_id)s\nJSON: %(json)s", issue="invalid-request", image_id=image_id, json=request.data, ) if "id" not in data: abort( 400, "Missing key `id` in JSON for image: %(image_id)s", issue="invalid-request", image_id=image_id, ) if image_id != data["id"]: abort( 400, "JSON data contains invalid id for image: %(image_id)s", issue="invalid-request", image_id=image_id, ) logger.debug("Looking up repo image") location_pref = store.preferred_locations[0] username = get_authenticated_user() and get_authenticated_user().username layer = builder.start_layer( image_id, uploaded_metadata_string, location_pref, username, app.config["PUSH_TEMP_TAG_EXPIRATION_SEC"], ) if layer is None: abort( 400, "Image %(image_id)s has invalid metadata", issue="invalid-request", image_id=image_id, ) return make_response("true", 200)
def build_trigger_webhook(trigger_uuid, **kwargs):
    """Handle an incoming webhook call for a build trigger.

    Looks up the trigger by UUID, enforces read-only/permission/repository
    state checks, hands the raw request to the trigger's handler, and — if
    the handler produced a prepared build — queues it.

    Aborts with 404 (unknown trigger), 503 (read-only system or repository),
    403 (no modify permission), 501 (application repository), 400 (malformed
    payload or disabled trigger), or 429 (build queue rate exceeded).
    """
    logger.debug("Webhook received with uuid %s", trigger_uuid)

    try:
        trigger = model.build.get_build_trigger(trigger_uuid)
    except model.InvalidBuildTriggerException:
        # It is ok to return 404 here, since letting an attacker know that a trigger UUID is valid
        # doesn't leak anything
        abort(404)

    # Ensure we are not currently in read-only mode.
    if app.config.get("REGISTRY_STATE", "normal") == "readonly":
        abort(503, "System is currently in read-only mode")

    # Guard clause: the caller must be able to modify the trigger's repository.
    namespace = trigger.repository.namespace_user.username
    repository = trigger.repository.name
    if not ModifyRepositoryPermission(namespace, repository).can():
        abort(403)

    handler = BuildTriggerHandler.get_handler(trigger)

    if trigger.repository.kind.name != "image":
        abort(501, "Build triggers cannot be invoked on application repositories")

    if trigger.repository.state != RepositoryState.NORMAL:
        abort(503, "Repository is currently in read only or mirror mode")

    logger.debug("Passing webhook request to handler %s", handler)
    try:
        prepared = handler.handle_trigger_request(request)
    except ValidationRequestException:
        logger.debug("Handler reported a validation exception: %s", handler)
        # This was just a validation request, we don't need to build anything
        return make_response("Okay")
    except SkipRequestException:
        logger.debug("Handler reported to skip the build: %s", handler)
        # The build was requested to be skipped
        return make_response("Okay")
    except InvalidPayloadException as ipe:
        logger.exception("Invalid payload")
        # The payload was malformed
        abort(400, message=str(ipe))

    pull_robot_name = model.build.get_pull_robot_name(trigger)
    repo = model.repository.get_repository(namespace, repository)

    try:
        start_build(repo, prepared, pull_robot_name=pull_robot_name)
    except MaximumBuildsQueuedException:
        abort(429, message="Maximum queued build rate exceeded.")
    except BuildTriggerDisabledException:
        logger.debug("Build trigger %s is disabled", trigger_uuid)
        abort(
            400,
            message="This build trigger is currently disabled. Please re-enable to continue.",
        )

    return make_response("Okay")