def post(self, robot_shortname):
    """
    Regenerates the token for a user's robot.
    """
    owning_user = get_authenticated_user()
    refreshed_robot = model.regenerate_user_robot_token(robot_shortname, owning_user)

    # Record the regeneration in the audit log before returning the new token.
    log_action("regenerate_robot_token", owning_user.username, {"robot": robot_shortname})
    return refreshed_robot.to_dict(include_token=True)
def create_repository(namespace_name, repo_name):
    """
    V1 push entry point: validate the target repository name and the caller's
    permissions, ensure the repository exists, then start a manifest builder
    for the push and stash its ID in the session.

    Aborts with 400/401/403/405 on validation or permission failures;
    returns a 201 "Created" response on success.
    """
    # Verify that the repository name is valid.
    if not REPOSITORY_NAME_REGEX.match(repo_name):
        abort(400, message="Invalid repository name. Repository names cannot contain slashes.")

    logger.debug("Looking up repository %s/%s", namespace_name, repo_name)
    repository_ref = registry_model.lookup_repository(namespace_name, repo_name)
    if repository_ref is None and get_authenticated_user() is None:
        # Anonymous callers may not create repositories.
        logger.debug(
            "Attempt to create repository %s/%s without user auth", namespace_name, repo_name
        )
        abort(
            401,
            message='Cannot create a repository as a guest. Please login via "docker login" first.',
            issue="no-login",
        )
    elif repository_ref:
        # Repository exists: pushing requires modify permission, and the
        # repository must actually hold container images.
        modify_perm = ModifyRepositoryPermission(namespace_name, repo_name)
        if not modify_perm.can():
            abort(
                403,
                message="You do not have permission to modify repository %(namespace)s/%(repository)s",
                issue="no-repo-write-permission",
                namespace=namespace_name,
                repository=repo_name,
            )
        elif repository_ref.kind != "image":
            msg = (
                "This repository is for managing %s resources and not container images."
                % repository_ref.kind
            )
            abort(405, message=msg, namespace=namespace_name)
    else:
        # Repository does not exist: the caller needs create permission on the namespace.
        create_perm = CreateRepositoryPermission(namespace_name)
        if not create_perm.can():
            logger.warning(
                "Attempt to create a new repo %s/%s with insufficient perms",
                namespace_name,
                repo_name,
            )
            msg = 'You do not have permission to create repositories in namespace "%(namespace)s"'
            abort(403, message=msg, issue="no-create-permission", namespace=namespace_name)

    # Attempt to create the new repository.
    # NOTE(review): this runs even when the repository was already found above —
    # confirm model.repository.create_repository behaves as get-or-create in that case.
    logger.debug(
        "Creating repository %s/%s with owner: %s",
        namespace_name,
        repo_name,
        get_authenticated_user().username,
    )
    repository_ref = model.repository.create_repository(
        namespace_name, repo_name, get_authenticated_user()
    )

    if get_authenticated_user():
        # Publish a push_start event so clients (e.g. the UI) can react to the push.
        user_event_data = {
            "action": "push_start",
            "repository": repo_name,
            "namespace": namespace_name,
        }

        event = userevents.get_event(get_authenticated_user().username)
        event.publish_event_data("docker-cli", user_event_data)

    # Start a new builder for the repository and save its ID in the session.
    assert repository_ref
    builder = create_manifest_builder(repository_ref, storage, docker_v2_signing_key)
    logger.debug("Started repo push with manifest builder %s", builder)
    if builder is None:
        abort(404, message="Unknown repository", issue="unknown-repo")

    session["manifest_builder"] = builder.builder_id
    return make_response("Created", 201)
def _authorize_or_downscope_request(scope_param, has_valid_auth_context):
    """
    Parse a registry token `scope` parameter and compute the subset of the
    requested actions ("push", "pull", "*") the current auth context is
    actually granted on the referenced repository.

    Returns None for an empty scope (login-only flow), otherwise a
    scopeResult with the granted actions. Raises Unauthorized,
    InvalidRequest, NameInvalid, NamespaceDisabled, Unknown or Unsupported
    for the various failure modes.
    """
    # TODO: The complexity of this function is difficult to follow and maintain. Refactor/Cleanup.
    if len(scope_param) == 0:
        if not has_valid_auth_context:
            # In this case, we are doing an auth flow, and it's not an anonymous pull.
            logger.debug("No user and no token sent for empty scope list")
            raise Unauthorized()

        # Empty scope with valid auth: nothing to grant or deny.
        return None

    match = _get_scope_regex().match(scope_param)
    if match is None:
        logger.debug("Match: %s", match)
        logger.debug("len: %s", len(scope_param))
        logger.warning("Unable to decode repository and actions: %s", scope_param)
        raise InvalidRequest("Unable to decode repository and actions: %s" % scope_param)

    logger.debug("Match: %s", match.groups())

    registry_and_repo = match.group(1)
    namespace_and_repo = match.group(2)
    requested_actions = match.group(3).split(",")

    lib_namespace = app.config["LIBRARY_NAMESPACE"]
    namespace, reponame = parse_namespace_repository(namespace_and_repo, lib_namespace)

    # Ensure that we are never creating an invalid repository.
    if not REPOSITORY_NAME_REGEX.match(reponame):
        logger.debug("Found invalid repository name in auth flow: %s", reponame)
        if len(namespace_and_repo.split("/")) > 1:
            msg = "Nested repositories are not supported. Found: %s" % namespace_and_repo
            raise NameInvalid(message=msg)

        raise NameInvalid(message="Invalid repository name: %s" % namespace_and_repo)

    # Ensure the namespace is enabled.
    if registry_model.is_existing_disabled_namespace(namespace):
        msg = (
            "Namespace %s has been disabled. Please contact a system administrator."
            % namespace
        )
        raise NamespaceDisabled(message=msg)

    final_actions = []

    repository_ref = registry_model.lookup_repository(namespace, reponame)
    repo_is_public = repository_ref is not None and repository_ref.is_public
    invalid_repo_message = ""
    if repository_ref is not None and repository_ref.kind != "image":
        invalid_repo_message = (
            "This repository is for managing %s " + "and not container images."
        ) % repository_ref.kind

    # Ensure the repository is not marked for deletion.
    if repository_ref is not None and repository_ref.state == RepositoryState.MARKED_FOR_DELETION:
        raise Unknown(message="Unknown repository")

    if "push" in requested_actions:
        # Check if there is a valid user or token, as otherwise the repository cannot be
        # accessed.
        if has_valid_auth_context:
            user = get_authenticated_user()

            # Lookup the repository. If it exists, make sure the entity has modify
            # permission. Otherwise, make sure the entity has create permission.
            if repository_ref:
                if ModifyRepositoryPermission(namespace, reponame).can():
                    if repository_ref is not None and repository_ref.kind != "image":
                        raise Unsupported(message=invalid_repo_message)

                    # Check for different repository states.
                    if repository_ref.state == RepositoryState.NORMAL:
                        # In NORMAL mode, if the user has permission, then they can push.
                        final_actions.append("push")
                    elif repository_ref.state == RepositoryState.MIRROR:
                        # In MIRROR mode, only the mirroring robot can push.
                        mirror = model.repo_mirror.get_mirror(repository_ref.id)
                        robot = mirror.internal_robot if mirror is not None else None
                        if robot is not None and user is not None and robot == user:
                            assert robot.robot
                            final_actions.append("push")
                        else:
                            logger.debug(
                                "Repository %s/%s push requested for non-mirror robot %s: %s",
                                namespace,
                                reponame,
                                robot,
                                user,
                            )
                    elif repository_ref.state == RepositoryState.READ_ONLY:
                        # No pushing allowed in read-only state.
                        pass
                    else:
                        logger.warning(
                            "Unknown state for repository %s: %s",
                            repository_ref,
                            repository_ref.state,
                        )
                else:
                    logger.debug("No permission to modify repository %s/%s", namespace, reponame)
            else:
                # TODO: Push-to-create functionality should be configurable
                if CreateRepositoryPermission(namespace).can() and user is not None:
                    logger.debug("Creating repository: %s/%s", namespace, reponame)
                    found = model.repository.get_or_create_repository(namespace, reponame, user)
                    if found is not None:
                        repository_ref = RepositoryReference.for_repo_obj(found)

                        if repository_ref.kind != "image":
                            raise Unsupported(message="Cannot push to an app repository")

                        final_actions.append("push")
                else:
                    logger.debug("No permission to create repository %s/%s", namespace, reponame)

    if "pull" in requested_actions:
        # Grant pull if the user can read the repo or it is public.
        if ReadRepositoryPermission(namespace, reponame).can() or repo_is_public:
            if repository_ref is not None and repository_ref.kind != "image":
                raise Unsupported(message=invalid_repo_message)

            final_actions.append("pull")
        else:
            logger.debug("No permission to pull repository %s/%s", namespace, reponame)

    if "*" in requested_actions:
        # Grant * user is admin
        if AdministerRepositoryPermission(namespace, reponame).can():
            if repository_ref is not None and repository_ref.kind != "image":
                raise Unsupported(message=invalid_repo_message)

            # Mirrored and read-only repositories cannot hand out full admin scope.
            if repository_ref and repository_ref.state in (
                RepositoryState.MIRROR,
                RepositoryState.READ_ONLY,
            ):
                logger.debug("No permission to administer repository %s/%s", namespace, reponame)
            else:
                assert repository_ref.state == RepositoryState.NORMAL
                final_actions.append("*")
        else:
            logger.debug("No permission to administer repository %s/%s", namespace, reponame)

    # Final sanity checks.
    if "push" in final_actions:
        assert repository_ref.state != RepositoryState.READ_ONLY

    if "*" in final_actions:
        assert repository_ref.state == RepositoryState.NORMAL

    return scopeResult(
        actions=final_actions,
        namespace=namespace,
        repository=reponame,
        registry_and_repo=registry_and_repo,
        tuf_root=_get_tuf_root(repository_ref, namespace, reponame),
    )
def put(self):
    """
    Update a users details such as password or email.

    Reads the update payload from the request JSON and applies each supported
    change (password, invoice email, tag expiration, email, metadata,
    username rename) in turn. Raises a request error on validation failures.
    """
    user = get_authenticated_user()
    user_data = request.get_json()
    previous_username = None
    headers = None

    try:
        if 'password' in user_data:
            logger.debug('Changing password for user: %s', user.username)
            log_action('account_change_password', user.username)

            # Change the user's password.
            model.user.change_password(user, user_data['password'])

            # Login again to reset their session cookie.
            success, headers = common_login(user.uuid)
            if not success:
                raise request_error(message='Could not perform login action')

            if features.MAILING:
                send_password_changed(user.username, user.email)

        if 'invoice_email' in user_data:
            logger.debug('Changing invoice_email for user: %s', user.username)
            model.user.change_send_invoice_email(user, user_data['invoice_email'])

        if features.CHANGE_TAG_EXPIRATION and 'tag_expiration_s' in user_data:
            logger.debug('Changing user tag expiration to: %ss', user_data['tag_expiration_s'])
            model.user.change_user_tag_expiration(user, user_data['tag_expiration_s'])

        if ('invoice_email_address' in user_data
                and user_data['invoice_email_address'] != user.invoice_email_address):
            model.user.change_invoice_email_address(user, user_data['invoice_email_address'])

        if 'email' in user_data and user_data['email'] != user.email:
            new_email = user_data['email']
            if model.user.find_user_by_email(new_email):
                # Email already used.
                raise request_error(message='E-mail address already used')

            if features.MAILING:
                # With mailing enabled the change only takes effect after the
                # user confirms via the emailed code.
                logger.debug('Sending email to change email address for user: %s', user.username)
                confirmation_code = model.user.create_confirm_email_code(user, new_email=new_email)
                send_change_email(user.username, user_data['email'], confirmation_code)
            else:
                ua_future = user_analytics.change_email(user.email, new_email)
                ua_future.add_done_callback(build_error_callback('Change email failed'))
                model.user.update_email(user, new_email, auto_verify=not features.MAILING)

        if features.USER_METADATA:
            metadata = {}
            for field in ('given_name', 'family_name', 'company', 'location'):
                if field in user_data:
                    metadata[field] = user_data.get(field)

            if len(metadata) > 0:
                model.user.update_user_metadata(user, metadata)
                ua_mdata_future = user_analytics.change_metadata(user.email, **metadata)
                ua_mdata_future.add_done_callback(build_error_callback('Change metadata failed'))

        # Check for username rename. A username can be renamed if the feature is enabled OR the user
        # currently has a confirm_username prompt.
        if 'username' in user_data:
            confirm_username = model.user.has_user_prompt(user, 'confirm_username')
            new_username = user_data.get('username')
            previous_username = user.username

            rename_allowed = (features.USER_RENAME
                              or (confirm_username and features.USERNAME_CONFIRMATION))
            username_changing = new_username and new_username != previous_username

            if rename_allowed and username_changing:
                if model.user.get_user_or_org(new_username) is not None:
                    # Username already used.
                    raise request_error(message='Username is already in use')

                user = model.user.change_username(user.id, new_username)
                username_future = user_analytics.change_username(user.email, new_username)
                username_future.add_done_callback(build_error_callback('Change username failed'))
            elif confirm_username:
                model.user.remove_user_prompt(user, 'confirm_username')
    # Fixed: was the Python 2-only form "except Exception, ex", which is a
    # SyntaxError on Python 3; the sibling implementation already uses "as ex".
    except model.user.InvalidPasswordException as ex:
        raise request_error(exception=ex)
def post(self, service_id):
    """
    Request that the current user be detached from the external login service.
    """
    current_user = get_authenticated_user()
    model.user.detach_external_login(current_user, service_id)
    return {'success': True}
def _repo_verb(namespace, repository, tag_name, verb, formatter, sign=False, checker=None,
               **kwargs):
    """
    Serve the result of applying `verb` to the given repository tag: verify
    access, reuse a cached derived image when one exists in storage, or build
    one on the fly — streaming it simultaneously to the client, to storage
    (unless the registry is read-only) and to an optional signer.

    `formatter` produces the derived image stream; `checker`, if given, is a
    predicate used by _verify_repo_verb to validate the tag/manifest pair.
    """
    # Verify that the image exists and that we have access to it.
    logger.debug(
        "Verifying repo verb %s for repository %s/%s with user %s with mimetype %s",
        verb,
        namespace,
        repository,
        get_authenticated_user(),
        request.accept_mimetypes.best,
    )
    tag, manifest, schema1_manifest = _verify_repo_verb(
        storage, namespace, repository, tag_name, verb, checker)

    # Load the repository for later.
    repo = model.repository.get_repository(namespace, repository)
    if repo is None:
        abort(404)

    # Check for torrent, which is no longer supported.
    if request.accept_mimetypes.best == "application/x-bittorrent":
        abort(406)

    # Log the action.
    track_and_log("repo_verb", wrap_repository(repo), tag=tag.name, verb=verb, **kwargs)

    is_readonly = app.config.get("REGISTRY_STATE", "normal") == "readonly"

    # Lookup/create the derived image for the verb and repo image.
    # In read-only mode only a lookup is possible; creation would write to the DB.
    if is_readonly:
        derived_image = registry_model.lookup_derived_image(
            manifest, verb, storage, varying_metadata={"tag": tag.name}, include_placements=True)
    else:
        derived_image = registry_model.lookup_or_create_derived_image(
            manifest,
            verb,
            storage.preferred_locations[0],
            storage,
            varying_metadata={"tag": tag.name},
            include_placements=True,
        )
        if derived_image is None:
            logger.error("Could not create or lookup a derived image for manifest %s", manifest)
            abort(400)

    if derived_image is not None and not derived_image.blob.uploading:
        # Cached path: the derived blob is fully uploaded; serve it directly.
        logger.debug("Derived %s image %s exists in storage", verb, derived_image)
        is_head_request = request.method == "HEAD"

        if derived_image.blob.compressed_size:
            image_pulled_bytes.labels("verbs").inc(derived_image.blob.compressed_size)

        download_url = storage.get_direct_download_url(
            derived_image.blob.placements, derived_image.blob.storage_path, head=is_head_request)
        if download_url:
            logger.debug("Redirecting to download URL for derived %s image %s",
                         verb, derived_image)
            return redirect(download_url)

        # Close the database handle here for this process before we send the long download.
        database.close_db_filter(None)

        logger.debug("Sending cached derived %s image %s", verb, derived_image)
        return send_file(
            storage.stream_read_file(derived_image.blob.placements,
                                     derived_image.blob.storage_path),
            mimetype=LAYER_MIMETYPE,
        )

    logger.debug("Building and returning derived %s image", verb)
    hasher = SimpleHasher()

    # Close the database connection before any process forking occurs. This is important because
    # the Postgres driver does not react kindly to forking, so we need to make sure it is closed
    # so that each process will get its own unique connection.
    database.close_db_filter(None)

    def _cleanup():
        # Close any existing DB connection once the process has exited.
        database.close_db_filter(None)

    def _store_metadata_and_cleanup():
        # Persist the final derived-image size once streaming has finished
        # (skipped entirely in read-only mode).
        if is_readonly:
            return

        with database.UseThenDisconnect(app.config):
            registry_model.set_derived_image_size(derived_image, hasher.hashed_bytes)

    # Create a queue process to generate the data. The queue files will read from the process
    # and send the results to the client and storage.
    # NOTE(review): hashlib.sha256 over a str implies Python 2 here; Python 3 would
    # require encoding the string first — confirm the runtime target.
    unique_id = (derived_image.unique_id
                 if derived_image is not None
                 else hashlib.sha256("%s:%s" % (verb, uuid.uuid4())).hexdigest())
    handlers = [hasher.update]
    reporter = VerbReporter(verb)
    args = (formatter, tag, schema1_manifest, unique_id, handlers, reporter)
    queue_process = QueueProcess(
        _open_stream,
        8 * 1024,
        10 * 1024 * 1024,  # 8K/10M chunk/max
        args,
        finished=_store_metadata_and_cleanup,
    )

    client_queue_file = QueueFile(queue_process.create_queue(), "client",
                                  timeout=QUEUE_FILE_TIMEOUT)

    if not is_readonly:
        storage_queue_file = QueueFile(queue_process.create_queue(), "storage",
                                       timeout=QUEUE_FILE_TIMEOUT)

    # If signing is required, add a QueueFile for signing the image as we stream it out.
    signing_queue_file = None
    if sign and signer.name:
        signing_queue_file = QueueFile(queue_process.create_queue(), "signing",
                                       timeout=QUEUE_FILE_TIMEOUT)

    # Start building.
    queue_process.run()

    # Start the storage saving.
    if not is_readonly:
        storage_args = (verb, derived_image, storage_queue_file, namespace, repository, tag_name)
        QueueProcess.run_process(_write_derived_image_to_storage, storage_args, finished=_cleanup)

    if sign and signer.name:
        signing_args = (verb, derived_image, signing_queue_file)
        QueueProcess.run_process(_sign_derived_image, signing_args, finished=_cleanup)

    # Close the database handle here for this process before we send the long download.
    database.close_db_filter(None)

    # Return the client's data.
    return send_file(client_queue_file, mimetype=LAYER_MIMETYPE)
def post(self):
    """
    Create and auto-approve a new preshared service key (superuser only).

    The request body provides the service name, a friendly key name and
    optionally an expiration timestamp, notes and extra metadata. Returns the
    key ID along with the generated public and private keys (the private key
    is never persisted). Raises Unauthorized for non-superusers and
    InvalidRequest on validation failures.
    """
    if SuperUserPermission().can():
        body = request.get_json()
        key_name = body.get("name", "")
        if not validate_service_key_name(key_name):
            raise InvalidRequest("Invalid service key friendly name: %s" % key_name)

        # Ensure we have a valid expiration date if specified.
        expiration_date = body.get("expiration", None)
        if expiration_date is not None:
            try:
                expiration_date = datetime.utcfromtimestamp(float(expiration_date))
            except ValueError as ve:
                raise InvalidRequest("Invalid expiration date: %s" % ve)

            # Fixed: expiration_date is UTC-naive (from utcfromtimestamp), so it
            # must be compared against utcnow(); the previous local-time now()
            # misjudged expirations by the server's UTC offset.
            if expiration_date <= datetime.utcnow():
                raise InvalidRequest("Expiration date cannot be in the past")

        # Create the metadata for the key.
        user = get_authenticated_user()
        metadata = body.get("metadata", {})
        metadata.update(
            {
                "created_by": "Quay Superuser Panel",
                "creator": user.username,
                "ip": get_request_ip(),
            }
        )

        # Generate a key with a private key that we *never save*.
        (private_key, key_id) = pre_oci_model.generate_service_key(
            body["service"], expiration_date, metadata=metadata, name=key_name
        )

        # Auto-approve the service key.
        pre_oci_model.approve_service_key(
            key_id, user, ServiceKeyApprovalType.SUPERUSER, notes=body.get("notes", "")
        )

        # Log the creation and auto-approval of the service key.
        key_log_metadata = {
            "kid": key_id,
            "preshared": True,
            "service": body["service"],
            "name": key_name,
            "expiration_date": expiration_date,
            "auto_approved": True,
        }

        log_action("service_key_create", None, key_log_metadata)
        log_action("service_key_approve", None, key_log_metadata)

        return jsonify(
            {
                "kid": key_id,
                "name": key_name,
                "service": body["service"],
                "public_key": private_key.publickey().exportKey("PEM"),
                "private_key": private_key.exportKey("PEM"),
            }
        )

    raise Unauthorized()
def get(self):
    """
    List the quota views configured for the authenticated user's namespace.
    """
    current_user = get_authenticated_user()
    quotas = model.namespacequota.get_namespace_quota_list(current_user.username)
    return [quota_view(q) for q in quotas]
def get(self, quota_id):
    """
    Return the view of a single quota belonging to the authenticated user.
    """
    current_user = get_authenticated_user()
    found = get_quota(current_user.username, quota_id)
    return quota_view(found)
def change_password():
    """
    Redirect to the settings tab of the authenticated user's page.
    """
    username = get_authenticated_user().username
    return redirect("/user/%s?tab=settings" % username)
def put(self, namespace, repository, tag):
    """
    Change which image a tag points to or create a new tag.

    The JSON body may carry an "expiration" timestamp (to change the tag's
    expiration) and/or an "image" ID or "manifest_digest" (to retarget the
    tag). Each present field is processed independently; both changes are
    recorded in the audit log.
    """
    if not TAG_REGEX.match(tag):
        abort(400, TAG_ERROR)

    repo_ref = registry_model.lookup_repository(namespace, repository)
    if repo_ref is None:
        raise NotFound()

    if "expiration" in request.get_json():
        tag_ref = registry_model.get_repo_tag(repo_ref, tag)
        if tag_ref is None:
            raise NotFound()

        expiration = request.get_json().get("expiration")
        expiration_date = None
        if expiration is not None:
            try:
                expiration_date = datetime.utcfromtimestamp(float(expiration))
            except ValueError:
                abort(400)

            # NOTE(review): expiration_date is UTC-naive while now() is
            # local-naive — this comparison is off by the server's UTC offset;
            # confirm whether utcnow() was intended.
            if expiration_date <= datetime.now():
                abort(400)

        existing_end_ts, ok = registry_model.change_repository_tag_expiration(
            tag_ref, expiration_date)
        if ok:
            # Only log when the expiration actually changed from/to something.
            if not (existing_end_ts is None and expiration_date is None):
                log_action(
                    "change_tag_expiration",
                    namespace,
                    {
                        "username": get_authenticated_user().username,
                        "repo": repository,
                        "tag": tag,
                        "namespace": namespace,
                        "expiration_date": expiration_date,
                        "old_expiration_date": existing_end_ts,
                    },
                    repo_name=repository,
                )
        else:
            raise InvalidRequest(
                "Could not update tag expiration; Tag has probably changed")

    if "image" in request.get_json() or "manifest_digest" in request.get_json():
        existing_tag = registry_model.get_repo_tag(repo_ref, tag, include_legacy_image=True)

        manifest_or_image = None
        image_id = None
        manifest_digest = None

        # Resolve the retarget target either by legacy image ID or by manifest digest.
        if "image" in request.get_json():
            image_id = request.get_json()["image"]
            manifest_or_image = registry_model.get_legacy_image(repo_ref, image_id)
        else:
            manifest_digest = request.get_json()["manifest_digest"]
            manifest_or_image = registry_model.lookup_manifest_by_digest(
                repo_ref, manifest_digest, require_available=True)

        if manifest_or_image is None:
            raise NotFound()

        # TODO: Remove this check once fully on V22
        existing_manifest_digest = None
        if existing_tag:
            existing_manifest = registry_model.get_manifest_for_tag(existing_tag)
            existing_manifest_digest = existing_manifest.digest if existing_manifest else None

        if not registry_model.retarget_tag(
                repo_ref, tag, manifest_or_image, storage, docker_v2_signing_key):
            raise InvalidRequest("Could not move tag")

        username = get_authenticated_user().username
        log_action(
            "move_tag" if existing_tag else "create_tag",
            namespace,
            {
                "username": username,
                "repo": repository,
                "tag": tag,
                "namespace": namespace,
                "image": image_id,
                "manifest_digest": manifest_digest,
                "original_image": (existing_tag.legacy_image.docker_image_id
                                   if existing_tag and existing_tag.legacy_image_if_present
                                   else None),
                "original_manifest_digest": existing_manifest_digest,
            },
            repo_name=repository,
        )

    return "Updated", 201
def get(self):
    """
    Get the user's credit card.
    """
    current_user = get_authenticated_user()
    return get_card(current_user)
def track_and_log(event_name, repo_obj, analytics_name=None, analytics_sample=1, **kwargs):
    """
    Record a repository event across the tracking systems: publish a user
    event (for non-robot users), send a sampled event to the analytics
    engine, and write the action to the logs database.

    Extra keyword arguments are merged into the logged metadata.
    `analytics_sample` is the probability (0..1) that the analytics event is
    emitted; `analytics_name` defaults to `event_name`.
    """
    repo_name = repo_obj.name
    namespace_name = repo_obj.namespace_name

    metadata = {
        "repo": repo_name,
        "namespace": namespace_name,
        "user-agent": request.user_agent.string,
    }
    metadata.update(kwargs)

    is_free_namespace = False
    if hasattr(repo_obj, "is_free_namespace"):
        is_free_namespace = repo_obj.is_free_namespace

    # Add auth context metadata.
    analytics_id = "anonymous"
    auth_context = get_authenticated_context()
    if auth_context is not None:
        analytics_id, context_metadata = auth_context.analytics_id_and_public_metadata()
        metadata.update(context_metadata)

    # Publish the user event (if applicable)
    logger.debug("Checking publishing %s to the user events system", event_name)
    if auth_context and auth_context.has_nonrobot_user:
        logger.debug("Publishing %s to the user events system", event_name)
        user_event_data = {
            "action": event_name,
            "repository": repo_name,
            "namespace": namespace_name,
        }

        event = userevents.get_event(auth_context.authed_user.username)
        event.publish_event_data("docker-cli", user_event_data)

    # Save the action to mixpanel.
    if random.random() < analytics_sample:
        if analytics_name is None:
            analytics_name = event_name

        logger.debug("Logging the %s to analytics engine", analytics_name)

        request_parsed = urlparse(request.url_root)
        extra_params = {
            "repository": "%s/%s" % (namespace_name, repo_name),
            "user-agent": request.user_agent.string,
            "hostname": request_parsed.hostname,
        }

        analytics.track(analytics_id, analytics_name, extra_params)

    # Add the resolved information to the metadata.
    logger.debug("Resolving IP address %s", get_request_ip())
    resolved_ip = ip_resolver.resolve_ip(get_request_ip())
    if resolved_ip is not None:
        metadata["resolved_ip"] = resolved_ip._asdict()

    logger.debug("Resolved IP address %s", get_request_ip())

    # Log the action to the database.
    logger.debug("Logging the %s to logs system", event_name)
    try:
        logs_model.log_action(
            event_name,
            namespace_name,
            performer=get_authenticated_user(),
            ip=get_request_ip(),
            metadata=metadata,
            repository=repo_obj,
            is_free_namespace=is_free_namespace,
        )

        logger.debug("Track and log of %s complete", event_name)
    except ReadOnlyModeException:
        # Best-effort: logging must not break the request when the DB is read-only.
        pass
def get(self, robot_shortname):
    """
    Returns the user's robot with the specified name.
    """
    owning_user = get_authenticated_user()
    found_robot = model.get_user_robot(robot_shortname, owning_user)
    return found_robot.to_dict(include_metadata=True, include_token=True)
def put(self):
    """
    Update a users details such as password or email.

    Reads the update payload from the request JSON and applies each supported
    change (password, invoice email, tag expiration, email, metadata,
    username rename) in turn. Returns the updated user view along with any
    refreshed login headers.
    """
    user = get_authenticated_user()
    user_data = request.get_json()
    previous_username = None
    headers = None

    try:
        if "password" in user_data:
            logger.debug("Changing password for user: %s", user.username)
            log_action("account_change_password", user.username)

            # Change the user's password.
            model.user.change_password(user, user_data["password"])

            # Login again to reset their session cookie.
            success, headers = common_login(user.uuid)
            if not success:
                raise request_error(message="Could not perform login action")

            if features.MAILING:
                send_password_changed(user.username, user.email)

        if "invoice_email" in user_data:
            logger.debug("Changing invoice_email for user: %s", user.username)
            model.user.change_send_invoice_email(user, user_data["invoice_email"])

        if features.CHANGE_TAG_EXPIRATION and "tag_expiration_s" in user_data:
            logger.debug("Changing user tag expiration to: %ss", user_data["tag_expiration_s"])
            model.user.change_user_tag_expiration(user, user_data["tag_expiration_s"])

        if ("invoice_email_address" in user_data
                and user_data["invoice_email_address"] != user.invoice_email_address):
            model.user.change_invoice_email_address(user, user_data["invoice_email_address"])

        if "email" in user_data and user_data["email"] != user.email:
            new_email = user_data["email"]
            if model.user.find_user_by_email(new_email):
                # Email already used.
                raise request_error(message="E-mail address already used")

            if features.MAILING:
                # With mailing enabled, the new address only takes effect once
                # the user confirms via the emailed code.
                logger.debug("Sending email to change email address for user: %s", user.username)
                confirmation_code = model.user.create_confirm_email_code(user, new_email=new_email)
                send_change_email(user.username, user_data["email"], confirmation_code)
            else:
                model.user.update_email(user, new_email, auto_verify=not features.MAILING)

        if features.USER_METADATA:
            metadata = {}

            for field in ("given_name", "family_name", "company", "location"):
                if field in user_data:
                    metadata[field] = user_data.get(field)

            if len(metadata) > 0:
                model.user.update_user_metadata(user, metadata)

        # Check for username rename. A username can be renamed if the feature is enabled OR the user
        # currently has a confirm_username prompt.
        if "username" in user_data:
            confirm_username = model.user.has_user_prompt(user, "confirm_username")
            new_username = user_data.get("username")
            previous_username = user.username

            rename_allowed = features.USER_RENAME or (
                confirm_username and features.USERNAME_CONFIRMATION)
            username_changing = new_username and new_username != previous_username

            if rename_allowed and username_changing:
                if model.user.get_user_or_org(new_username) is not None:
                    # Username already used.
                    raise request_error(message="Username is already in use")

                user = model.user.change_username(user.id, new_username)
            elif confirm_username:
                model.user.remove_user_prompt(user, "confirm_username")

    except model.user.InvalidPasswordException as ex:
        raise request_error(exception=ex)

    return user_view(user, previous_username=previous_username), 200, headers
def post(self):
    """
    Create a new repository.

    The JSON body supplies the repository name, visibility and description,
    and optionally the target namespace (defaulting to the caller's own) and
    the repository kind. Returns the created repository descriptor with a
    201 status. Raises InvalidRequest/Unauthorized on failure.
    """
    owner = get_authenticated_user()
    req = request.get_json()

    # Fixed: was `"namespace" not in "req"` — membership in the literal string
    # "req", so the guard never fired and anonymous requests without a
    # namespace crashed further down instead of raising InvalidRequest.
    if owner is None and "namespace" not in req:
        raise InvalidRequest("Must provide a namespace or must be logged in.")

    namespace_name = req["namespace"] if "namespace" in req else owner.username

    permission = CreateRepositoryPermission(namespace_name)
    if permission.can():
        repository_name = req["repository"]
        visibility = req["visibility"]

        if model.repo_exists(namespace_name, repository_name):
            raise request_error(message="Repository already exists")

        if visibility == "private":
            check_allowed_private_repos(namespace_name)

        # Verify that the repository name is valid.
        if features.EXTENDED_REPOSITORY_NAMES:
            valid_repository_name = REPOSITORY_NAME_EXTENDED_REGEX.match(repository_name)
        else:
            valid_repository_name = REPOSITORY_NAME_REGEX.match(repository_name)

        if not valid_repository_name:
            raise InvalidRequest("Invalid repository name")

        kind = req.get("repo_kind", "image") or "image"
        created = model.create_repo(
            namespace_name,
            repository_name,
            owner,
            req["description"],
            visibility=visibility,
            repo_kind=kind,
        )
        if created is None:
            raise InvalidRequest("Could not create repository")

        log_action(
            "create_repo",
            namespace_name,
            {
                "repo": repository_name,
                "namespace": namespace_name
            },
            repo_name=repository_name,
        )
        return {
            "namespace": namespace_name,
            "name": repository_name,
            "kind": kind,
        }, 201

    raise Unauthorized()
def _verify_repo_verb(_, namespace, repo_name, tag_name, verb, checker=None):
    """
    Verify that `verb` may be applied to the given repository tag.

    Checks read permission (or public visibility), that the repository holds
    container images, that its namespace is enabled, and that the tag, its
    manifest and a schema1-compatible manifest all exist. If `checker` is
    given, it must accept the (tag, schema1_manifest) pair.

    Returns (tag, manifest, schema1_manifest); aborts the request with an
    appropriate HTTP status otherwise.
    """
    permission = ReadRepositoryPermission(namespace, repo_name)
    repo = model.repository.get_repository(namespace, repo_name)
    repo_is_public = repo is not None and model.repository.is_repository_public(repo)
    if not permission.can() and not repo_is_public:
        logger.debug(
            "No permission to read repository %s/%s for user %s with verb %s",
            namespace,
            repo_name,
            get_authenticated_user(),
            verb,
        )
        abort(403)

    if repo is not None and repo.kind.name != "image":
        logger.debug(
            "Repository %s/%s for user %s is not an image repo",
            namespace,
            repo_name,
            get_authenticated_user(),
        )
        abort(405)

    # Make sure the repo's namespace isn't disabled.
    if not registry_model.is_namespace_enabled(namespace):
        abort(400)

    # Lookup the requested tag.
    repo_ref = registry_model.lookup_repository(namespace, repo_name)
    if repo_ref is None:
        abort(404)

    tag = registry_model.get_repo_tag(repo_ref, tag_name)
    if tag is None:
        # Fixed: log the requested tag_name; `tag` is always None on this path,
        # so the previous message always read "Tag None does not exist".
        logger.debug(
            "Tag %s does not exist in repository %s/%s for user %s",
            tag_name,
            namespace,
            repo_name,
            get_authenticated_user(),
        )
        abort(404)

    # Get its associated manifest.
    manifest = registry_model.get_manifest_for_tag(tag, backfill_if_necessary=True)
    if manifest is None:
        logger.debug("Could not get manifest on %s/%s:%s::%s",
                     namespace, repo_name, tag.name, verb)
        abort(404)

    # Retrieve the schema1-compatible version of the manifest.
    try:
        schema1_manifest = registry_model.get_schema1_parsed_manifest(
            manifest, namespace, repo_name, tag.name, storage)
    except ManifestException:
        logger.exception("Could not get manifest on %s/%s:%s::%s",
                         namespace, repo_name, tag.name, verb)
        abort(400)

    if schema1_manifest is None:
        abort(404)

    # If there is a data checker, call it first.
    if checker is not None:
        if not checker(tag, schema1_manifest):
            logger.debug("Check mismatch on %s/%s:%s, verb %s",
                         namespace, repo_name, tag.name, verb)
            abort(404)

    # Preload the tag's repository information, so it gets cached.
    assert tag.repository.namespace_name
    assert tag.repository.name

    return tag, manifest, schema1_manifest
def start_build(repository, prepared_build, pull_robot_name=None):
    """
    Queues a Dockerfile build for the given repository from a prepared build.

    Creates a build-worker access token, records the build inside a DB
    transaction, places the job on the dockerfile build queue, logs the
    action and spawns a `build_queued` notification.

    Args:
        repository: the repository model row the build runs against.
        prepared_build: object carrying tags, dockerfile_id, trigger,
            context, subdirectory, metadata, archive_url, etc.
        pull_robot_name: optional robot account used to pull the base image.

    Returns:
        The created repository build row (with `queue_id` populated).

    Raises:
        Exception: for application (non-image) or non-NORMAL-state repos.
        BuildTriggerDisabledException: if the build's trigger is disabled.
        MaximumBuildsQueuedException: if the namespace's queued-build cap
            has been reached.
    """
    # Ensure that builds are only run in image repositories.
    if repository.kind.name != 'image':
        raise Exception(
            'Attempt to start a build for application repository %s' %
            repository.id)

    # Ensure the repository isn't in mirror or read-only mode.
    if repository.state != RepositoryState.NORMAL:
        raise Exception(
            ('Attempt to start a build for a non-normal repository: %s %s' %
             (repository.id, repository.state)))

    # Ensure that disabled triggers are not run.
    if prepared_build.trigger is not None and not prepared_build.trigger.enabled:
        raise BuildTriggerDisabledException

    # Enforce the namespace's maximum-queued-builds cap, if one is set.
    if repository.namespace_user.maximum_queued_builds_count is not None:
        queue_item_canonical_name = [repository.namespace_user.username]
        alive_builds = dockerfile_build_queue.num_alive_jobs(
            queue_item_canonical_name)
        if alive_builds >= repository.namespace_user.maximum_queued_builds_count:
            logger.debug(
                'Prevented queueing of build under namespace %s due to reaching max: %s',
                repository.namespace_user.username,
                repository.namespace_user.maximum_queued_builds_count)
            raise MaximumBuildsQueuedException()

    host = app.config['SERVER_HOSTNAME']
    repo_path = '%s/%s/%s' % (host, repository.namespace_user.username,
                              repository.name)

    # Scoped write token the build worker uses to push the result back.
    new_token = model.token.create_access_token(
        repository,
        'write',
        kind='build-worker',
        friendly_name='Repository Build Token')
    logger.debug('Creating build %s with repo %s tags %s',
                 prepared_build.build_name, repo_path, prepared_build.tags)

    job_config = {
        'docker_tags': prepared_build.tags,
        'registry': host,
        'build_subdir': prepared_build.subdirectory,
        'context': prepared_build.context,
        'trigger_metadata': prepared_build.metadata or {},
        'is_manual': prepared_build.is_manual,
        'manual_user': get_authenticated_user().username if get_authenticated_user() else None,
        'archive_url': prepared_build.archive_url
    }

    # Create the build row and enqueue it atomically, so a failed queue put
    # rolls back the build record.
    with app.config['DB_TRANSACTION_FACTORY'](db):
        build_request = model.build.create_repository_build(
            repository,
            new_token,
            job_config,
            prepared_build.dockerfile_id,
            prepared_build.build_name,
            prepared_build.trigger,
            pull_robot_name=pull_robot_name)

        pull_creds = model.user.get_pull_credentials(
            pull_robot_name) if pull_robot_name else None

        json_data = json.dumps({
            'build_uuid': build_request.uuid,
            'pull_credentials': pull_creds
        })

        queue_id = dockerfile_build_queue.put(
            [repository.namespace_user.username, repository.name],
            json_data,
            retries_remaining=3)

        build_request.queue_id = queue_id
        build_request.save()

    # Add the queueing of the build to the metrics queue.
    metric_queue.repository_build_queued.Inc(
        labelvalues=[repository.namespace_user.username, repository.name])

    # Add the build to the repo's log and spawn the build_queued notification.
    event_log_metadata = {
        'build_id': build_request.uuid,
        'docker_tags': prepared_build.tags,
        'repo': repository.name,
        'namespace': repository.namespace_user.username,
        'is_manual': prepared_build.is_manual,
        'manual_user': get_authenticated_user().username if get_authenticated_user() else None
    }

    if prepared_build.trigger:
        event_log_metadata['trigger_id'] = prepared_build.trigger.uuid
        event_log_metadata[
            'trigger_kind'] = prepared_build.trigger.service.name
        event_log_metadata['trigger_metadata'] = prepared_build.metadata or {}

    logs_model.log_action('build_dockerfile',
                          repository.namespace_user.username,
                          ip=get_request_ip(),
                          metadata=event_log_metadata,
                          repository=repository)

    # TODO: remove when more endpoints have been converted to using interfaces
    repo = AttrDict({
        'namespace_name': repository.namespace_user.username,
        'name': repository.name,
    })

    spawn_notification(repo,
                       'build_queued',
                       event_log_metadata,
                       subpage='build/%s' % build_request.uuid,
                       pathargs=['build', build_request.uuid])

    return build_request
def push(namespace, package_name): reponame = repo_name(namespace, package_name) if features.EXTENDED_REPOSITORY_NAMES: if not REPOSITORY_NAME_EXTENDED_REGEX.match(package_name): logger.debug("Found invalid repository name CNR push: %s", reponame) raise InvalidUsage("invalid repository name: %s" % reponame) else: if not REPOSITORY_NAME_REGEX.match(package_name): logger.debug("Found invalid repository name CNR push: %s", reponame) raise InvalidUsage("invalid repository name: %s" % reponame) values = request.get_json(force=True, silent=True) or {} private = values.get("visibility", "private") owner = get_authenticated_user() if not Package.exists(reponame): if not CreateRepositoryPermission(namespace).can(): raise Forbidden( "Unauthorized access for: %s" % reponame, { "package": reponame, "scopes": ["create"] }, ) Package.create_repository(reponame, private, owner) logs_model.log_action("create_repo", namespace, repository_name=package_name) if not ModifyRepositoryPermission(namespace, package_name).can(): raise Forbidden("Unauthorized access for: %s" % reponame, { "package": reponame, "scopes": ["push"] }) if not "release" in values: raise InvalidUsage("Missing release") if not "media_type" in values: raise InvalidUsage("Missing media_type") if not "blob" in values: raise InvalidUsage("Missing blob") release_version = str(values["release"]) media_type = values["media_type"] force = request.args.get("force", "false") == "true" blob = Blob(reponame, values["blob"]) app_release = cnr_registry.push( reponame, release_version, media_type, blob, force, package_class=Package, user=owner, visibility=private, ) logs_model.log_action("push_repo", namespace, repository_name=package_name, metadata={"release": release_version}) return jsonify(app_release)
def _repo_verb(namespace, repository, tag_name, verb, formatter, sign=False,
               checker=None, **kwargs):
    """
    Runs a repository "verb" (e.g. squash/aci export) against a tag,
    returning the derived image — either directly from storage if already
    built, or by streaming it to the client while it is generated in a
    worker process.

    Args:
        namespace: repository namespace.
        repository: repository name.
        tag_name: name of the tag the verb runs against.
        verb: verb identifier string.
        formatter: formatter object used to build the derived image stream.
        sign: if True (and a signer is configured), also sign the output.
        checker: optional (tag, schema1_manifest) predicate; a False result
            aborts with 404.

    Returns:
        A Flask response: a torrent response, a redirect to a direct
        download URL, or a streamed file of the derived image.
    """
    # Verify that the image exists and that we have access to it.
    logger.debug(
        'Verifying repo verb %s for repository %s/%s with user %s with mimetype %s',
        verb, namespace, repository, get_authenticated_user(),
        request.accept_mimetypes.best)
    tag, manifest, schema1_manifest = _verify_repo_verb(
        storage, namespace, repository, tag_name, verb, checker)

    # Load the repository for later.
    repo = model.repository.get_repository(namespace, repository)
    if repo is None:
        abort(404)

    # Check for torrent. If found, we return a torrent for the repo verb image (if the derived
    # image already exists).
    if request.accept_mimetypes.best == 'application/x-bittorrent':
        metric_queue.repository_pull.Inc(
            labelvalues=[namespace, repository, verb + '+torrent', True])
        return _torrent_repo_verb(repo, tag, manifest, verb, **kwargs)

    # Log the action.
    track_and_log('repo_verb', wrap_repository(repo), tag=tag.name, verb=verb,
                  **kwargs)
    metric_queue.repository_pull.Inc(
        labelvalues=[namespace, repository, verb, True])

    is_readonly = app.config.get('REGISTRY_STATE', 'normal') == 'readonly'

    # Lookup/create the derived image for the verb and repo image.
    # In read-only mode we may only look up an existing derived image.
    if is_readonly:
        derived_image = registry_model.lookup_derived_image(
            manifest, verb, storage, varying_metadata={'tag': tag.name},
            include_placements=True)
    else:
        derived_image = registry_model.lookup_or_create_derived_image(
            manifest, verb, storage.preferred_locations[0], storage,
            varying_metadata={'tag': tag.name}, include_placements=True)
        if derived_image is None:
            logger.error(
                'Could not create or lookup a derived image for manifest %s',
                manifest)
            abort(400)

    # Fast path: the derived image is already fully built in storage.
    if derived_image is not None and not derived_image.blob.uploading:
        logger.debug('Derived %s image %s exists in storage', verb,
                     derived_image)
        is_head_request = request.method == 'HEAD'

        metric_queue.pull_byte_count.Inc(derived_image.blob.compressed_size,
                                         labelvalues=[verb])

        download_url = storage.get_direct_download_url(
            derived_image.blob.placements, derived_image.blob.storage_path,
            head=is_head_request)
        if download_url:
            logger.debug('Redirecting to download URL for derived %s image %s',
                         verb, derived_image)
            return redirect(download_url)

        # Close the database handle here for this process before we send the long download.
        database.close_db_filter(None)

        logger.debug('Sending cached derived %s image %s', verb, derived_image)
        return send_file(
            storage.stream_read_file(derived_image.blob.placements,
                                     derived_image.blob.storage_path),
            mimetype=LAYER_MIMETYPE)

    logger.debug('Building and returning derived %s image', verb)

    # Close the database connection before any process forking occurs. This is important because
    # the Postgres driver does not react kindly to forking, so we need to make sure it is closed
    # so that each process will get its own unique connection.
    database.close_db_filter(None)

    def _cleanup():
        # Close any existing DB connection once the process has exited.
        database.close_db_filter(None)

    # Hashes the stream into torrent pieces as it is generated.
    hasher = PieceHasher(app.config['BITTORRENT_PIECE_SIZE'])

    def _store_metadata_and_cleanup():
        # Persist torrent piece hashes and the derived image size once
        # generation finishes; skipped entirely in read-only mode.
        if is_readonly:
            return

        with database.UseThenDisconnect(app.config):
            registry_model.set_torrent_info(
                derived_image.blob, app.config['BITTORRENT_PIECE_SIZE'],
                hasher.final_piece_hashes())
            registry_model.set_derived_image_size(derived_image,
                                                  hasher.hashed_bytes)

    # Create a queue process to generate the data. The queue files will read from the process
    # and send the results to the client and storage.
    unique_id = (derived_image.unique_id if derived_image is not None else
                 hashlib.sha256('%s:%s' % (verb, uuid.uuid4())).hexdigest())
    handlers = [hasher.update]
    reporter = VerbReporter(verb)
    args = (formatter, tag, schema1_manifest, unique_id, handlers, reporter)
    queue_process = QueueProcess(
        _open_stream,
        8 * 1024,
        10 * 1024 * 1024,  # 8K/10M chunk/max
        args,
        finished=_store_metadata_and_cleanup)

    client_queue_file = QueueFile(queue_process.create_queue(), 'client')

    # In read-only mode there is no storage writer, so no storage queue.
    if not is_readonly:
        storage_queue_file = QueueFile(queue_process.create_queue(), 'storage')

    # If signing is required, add a QueueFile for signing the image as we stream it out.
    signing_queue_file = None
    if sign and signer.name:
        signing_queue_file = QueueFile(queue_process.create_queue(), 'signing')

    # Start building.
    queue_process.run()

    # Start the storage saving.
    if not is_readonly:
        storage_args = (verb, derived_image, storage_queue_file)
        QueueProcess.run_process(_write_derived_image_to_storage, storage_args,
                                 finished=_cleanup)

    if sign and signer.name:
        signing_args = (verb, derived_image, signing_queue_file)
        QueueProcess.run_process(_sign_derived_image, signing_args,
                                 finished=_cleanup)

    # Close the database handle here for this process before we send the long download.
    database.close_db_filter(None)

    # Return the client's data.
    return send_file(client_queue_file, mimetype=LAYER_MIMETYPE)
def get_authorized_user(self): return get_authenticated_user()
def get(self, prefix, parsed_args):
    """
    Get a list of entities (teams, database users, orgs and external users)
    that match the specified prefix, scoped to the given namespace.
    """
    # Ensure we don't have any unicode characters in the search, as it breaks the search. Nothing
    # being searched can have unicode in it anyway, so this is a safe operation.
    # NOTE(review): relies on a project-registered 'unidecode' codec/error handler — confirm.
    prefix = prefix.encode('unidecode', 'ignore').replace(' ', '').lower()

    teams = []
    org_data = []

    namespace_name = parsed_args['namespace']
    robot_namespace = None
    organization = None

    try:
        organization = model.organization.get_organization(namespace_name)

        # namespace name was an org
        permission = OrganizationMemberPermission(namespace_name)
        if permission.can():
            robot_namespace = namespace_name

            if parsed_args['includeTeams']:
                teams = model.team.get_matching_teams(prefix, organization)

        # FIX: the permission object itself is always truthy, so the original
        # `and AdministerOrganizationPermission(namespace_name)` never gated
        # anything; `.can()` is required to actually check the permission.
        if (parsed_args['includeOrgs'] and
                AdministerOrganizationPermission(namespace_name).can() and
                namespace_name.startswith(prefix)):
            org_data = [{
                'name': namespace_name,
                'kind': 'org',
                'is_org_member': True,
                'avatar': avatar.get_data_for_org(organization),
            }]
    except model.organization.InvalidOrganizationException:
        # namespace name was a user
        user = get_authenticated_user()
        if user and user.username == namespace_name:
            # Check if there is admin user permissions (login only)
            admin_permission = UserAdminPermission(user.username)
            if admin_permission.can():
                robot_namespace = namespace_name

    # Lookup users in the database for the prefix query.
    users = model.user.get_matching_users(
        prefix, robot_namespace, organization, limit=10,
        exact_matches_only=not features.PARTIAL_USER_AUTOCOMPLETE)

    # Lookup users via the user system for the prefix query. We'll filter out any users that
    # already exist in the database.
    external_users, federated_id, _ = authentication.query_users(prefix, limit=10)
    filtered_external_users = []
    if external_users and federated_id is not None:
        users = list(users)
        user_ids = [user.id for user in users]

        # Filter the users if any are already found via the database. We do so by looking up
        # all the found users in the federated user system.
        federated_query = model.user.get_federated_logins(user_ids, federated_id)
        found = {result.service_ident for result in federated_query}
        filtered_external_users = [
            user for user in external_users if user.username not in found
        ]

    def entity_team_view(team):
        # View model for a matching team.
        result = {
            'name': team.name,
            'kind': 'team',
            'is_org_member': True,
            'avatar': avatar.get_data_for_team(team)
        }
        return result

    def user_view(user):
        # View model for a matching database user (or robot).
        user_json = {
            'name': user.username,
            'kind': 'user',
            'is_robot': user.robot,
            'avatar': avatar.get_data_for_user(user)
        }

        if organization is not None:
            user_json['is_org_member'] = user.robot or user.is_org_member

        return user_json

    def external_view(user):
        # View model for a user found only in the external auth system.
        result = {
            'name': user.username,
            'kind': 'external',
            'title': user.email or '',
            'avatar': avatar.get_data_for_external_user(user)
        }
        return result

    team_data = [entity_team_view(team) for team in teams]
    user_data = [user_view(user) for user in users]
    external_data = [
        external_view(user) for user in filtered_external_users
    ]

    return {'results': team_data + user_data + org_data + external_data}
def user_view(user, previous_username=None):
    """
    Builds the API view model for the given user, including organizations,
    federated logins, analytics metadata and (for the user themselves)
    private account details.

    Args:
        user: the user model row being rendered.
        previous_username: if set, the username to use for the admin
            permission check (e.g. after a rename).

    Returns:
        A dict of user data; fields vary with the caller's permissions.
    """
    def org_view(o, user_admin=True):
        # View model for one organization the user belongs to.
        admin_org = AdministerOrganizationPermission(o.username)
        org_response = {
            'name': o.username,
            'avatar': avatar.get_data_for_org(o),
            'can_create_repo': CreateRepositoryPermission(o.username).can(),
            'public': o.username in app.config.get('PUBLIC_NAMESPACES', []),
        }

        if user_admin:
            org_response.update({
                'is_org_admin': admin_org.can(),
                'preferred_namespace': not (o.stripe_id is None),
            })

        return org_response

    # Retrieve the organizations for the user.
    organizations = {
        o.username: o
        for o in model.organization.get_user_organizations(user.username)
    }

    # Add any public namespaces.
    public_namespaces = app.config.get('PUBLIC_NAMESPACES', [])
    if public_namespaces:
        organizations.update({
            ns: model.user.get_namespace_user(ns)
            for ns in public_namespaces
        })

    def login_view(login):
        # View model for one federated login; tolerate missing/corrupt
        # metadata JSON rather than failing the whole user view.
        try:
            metadata = json.loads(login.metadata_json)
        except (TypeError, ValueError):
            # FIX: was a bare `except:` that swallowed every exception,
            # including KeyboardInterrupt; only JSON decode failures
            # (ValueError) and a None/non-string payload (TypeError) are
            # expected here.
            metadata = {}

        return {
            'service': login.service.name,
            'service_identifier': login.service_ident,
            'metadata': metadata
        }

    logins = model.user.list_federated_logins(user)

    user_response = {
        'anonymous': False,
        'username': user.username,
        'avatar': avatar.get_data_for_user(user),
    }

    user_admin = UserAdminPermission(
        previous_username if previous_username else user.username)
    if user_admin.can():
        # Private details are only included for the user themselves.
        user_response.update({
            'can_create_repo': True,
            'is_me': True,
            'verified': user.verified,
            'email': user.email,
            'logins': [login_view(login) for login in logins],
            'invoice_email': user.invoice_email,
            'invoice_email_address': user.invoice_email_address,
            'preferred_namespace': not (user.stripe_id is None),
            'tag_expiration_s': user.removed_tag_expiration_s,
            'prompts': model.user.get_user_prompts(user),
            'company': user.company,
            'family_name': user.family_name,
            'given_name': user.given_name,
            'location': user.location,
            'is_free_account': user.stripe_id is None,
            'has_password_set': authentication.has_password_set(user.username),
        })

        analytics_metadata = user_analytics.get_user_analytics_metadata(user)

        # This is a sync call, but goes through the async wrapper interface and
        # returns a Future. By calling with timeout 0 immediately after the method
        # call, we ensure that if it ever accidentally becomes async it will raise
        # a TimeoutError.
        user_response.update(analytics_metadata.result(timeout=0))

    user_view_perm = UserReadPermission(user.username)
    if user_view_perm.can():
        user_response.update({
            'organizations': [
                org_view(o, user_admin=user_admin.can())
                for o in organizations.values()
            ],
        })

    if features.SUPER_USERS and SuperUserPermission().can():
        user_response.update({
            'super_user':
                user and user == get_authenticated_user() and
                SuperUserPermission().can()
        })

    return user_response
def get(self): access_tokens = model.oauth.list_access_tokens_for_user(get_authenticated_user()) return {"authorizations": [authorization_view(token) for token in access_tokens]}
def put_image_json(namespace, repository, image_id): logger.debug("Checking repo permissions") permission = ModifyRepositoryPermission(namespace, repository) if not permission.can(): abort(403) repository_ref = registry_model.lookup_repository(namespace, repository, kind_filter="image") if repository_ref is None: abort(403) builder = lookup_manifest_builder(repository_ref, session.get("manifest_builder"), store, docker_v2_signing_key) if builder is None: abort(400) logger.debug("Parsing image JSON") try: uploaded_metadata = request.data data = json.loads(uploaded_metadata.decode("utf8")) except ValueError: pass if not data or not isinstance(data, dict): abort( 400, "Invalid JSON for image: %(image_id)s\nJSON: %(json)s", issue="invalid-request", image_id=image_id, json=request.data, ) if "id" not in data: abort( 400, "Missing key `id` in JSON for image: %(image_id)s", issue="invalid-request", image_id=image_id, ) if image_id != data["id"]: abort( 400, "JSON data contains invalid id for image: %(image_id)s", issue="invalid-request", image_id=image_id, ) logger.debug("Looking up repo image") location_pref = store.preferred_locations[0] username = get_authenticated_user() and get_authenticated_user().username layer = builder.start_layer( image_id, uploaded_metadata, location_pref, username, app.config["PUSH_TEMP_TAG_EXPIRATION_SEC"], ) if layer is None: abort( 400, "Image %(image_id)s has invalid metadata", issue="invalid-request", image_id=image_id, ) return make_response("true", 200)
def get(self, uuid): note = model.notification.lookup_notification(get_authenticated_user(), uuid) if not note: raise NotFound() return notification_view(note)
def get(self): """ Get user information for the authenticated user. """ user = get_authenticated_user() return user_view(user)
def user_view(user, previous_username=None):
    """
    Builds the API view model for the given user, including organizations,
    federated logins and (for the user themselves) private account details.

    Args:
        user: the user model row being rendered.
        previous_username: if set, the username to use for the admin
            permission check (e.g. after a rename).

    Returns:
        A dict of user data; fields vary with the caller's permissions.
    """
    def org_view(o, user_admin=True):
        # View model for one organization the user belongs to.
        admin_org = AdministerOrganizationPermission(o.username)
        org_response = {
            "name": o.username,
            "avatar": avatar.get_data_for_org(o),
            "can_create_repo": CreateRepositoryPermission(o.username).can(),
            "public": o.username in app.config.get("PUBLIC_NAMESPACES", []),
        }

        if user_admin:
            org_response.update({
                "is_org_admin": admin_org.can(),
                "preferred_namespace": not (o.stripe_id is None),
            })

        return org_response

    # Retrieve the organizations for the user.
    organizations = {
        o.username: o
        for o in model.organization.get_user_organizations(user.username)
    }

    # Add any public namespaces.
    public_namespaces = app.config.get("PUBLIC_NAMESPACES", [])
    if public_namespaces:
        organizations.update({
            ns: model.user.get_namespace_user(ns)
            for ns in public_namespaces
        })

    def login_view(login):
        # View model for one federated login; tolerate missing/corrupt
        # metadata JSON rather than failing the whole user view.
        try:
            metadata = json.loads(login.metadata_json)
        except (TypeError, ValueError):
            # FIX: was a bare `except:` that swallowed every exception,
            # including KeyboardInterrupt; only JSON decode failures
            # (ValueError) and a None/non-string payload (TypeError) are
            # expected here.
            metadata = {}

        return {
            "service": login.service.name,
            "service_identifier": login.service_ident,
            "metadata": metadata,
        }

    logins = model.user.list_federated_logins(user)

    user_response = {
        "anonymous": False,
        "username": user.username,
        "avatar": avatar.get_data_for_user(user),
    }

    user_admin = UserAdminPermission(
        previous_username if previous_username else user.username)
    if user_admin.can():
        # Private details are only included for the user themselves.
        user_response.update({
            "can_create_repo": True,
            "is_me": True,
            "verified": user.verified,
            "email": user.email,
            "logins": [login_view(login) for login in logins],
            "invoice_email": user.invoice_email,
            "invoice_email_address": user.invoice_email_address,
            "preferred_namespace": not (user.stripe_id is None),
            "tag_expiration_s": user.removed_tag_expiration_s,
            "prompts": model.user.get_user_prompts(user),
            "company": user.company,
            "family_name": user.family_name,
            "given_name": user.given_name,
            "location": user.location,
            "is_free_account": user.stripe_id is None,
            "has_password_set": authentication.has_password_set(user.username),
        })

    user_view_perm = UserReadPermission(user.username)
    if user_view_perm.can():
        user_response.update({
            "organizations": [
                org_view(o, user_admin=user_admin.can())
                for o in list(organizations.values())
            ],
        })

    if features.SUPER_USERS and SuperUserPermission().can():
        user_response.update({
            "super_user":
                user and user == get_authenticated_user() and
                SuperUserPermission().can()
        })

    return user_response
def generate_registry_jwt(auth_result):
    """
    This endpoint will generate a JWT conforming to the Docker Registry v2 Auth Spec:

    https://docs.docker.com/registry/spec/auth/token/

    Authorizes (or downscopes) each requested scope for the current auth
    context, publishes a user event for the resulting action, and returns
    a signed bearer token as JSON: {"token": ...}.

    Raises:
        InvalidLogin: when credentials were sent but are invalid.
        Unauthorized: when no credentials and no scopes were provided.
    """
    audience_param = request.args.get("service")
    logger.debug("Request audience: %s", audience_param)

    scope_params = request.args.getlist("scope") or []
    logger.debug("Scope request: %s", scope_params)

    auth_header = request.headers.get("authorization", "")
    auth_credentials_sent = bool(auth_header)

    # Load the auth context and verify that we've directly received credentials.
    has_valid_auth_context = False
    if get_authenticated_context():
        has_valid_auth_context = not get_authenticated_context().is_anonymous

    if auth_credentials_sent and not has_valid_auth_context:
        # The auth credentials sent for the user are invalid.
        raise InvalidLogin(auth_result.error_message)

    if not has_valid_auth_context and len(scope_params) == 0:
        # In this case, we are doing an auth flow, and it's not an anonymous pull.
        logger.debug("No user and no token sent for empty scope list")
        raise Unauthorized()

    # Build the access list for the authenticated context.
    # Scopes that cannot be authorized at all are dropped (None result);
    # others may come back downscoped.
    access = []
    scope_results = []
    for scope_param in scope_params:
        scope_result = _authorize_or_downscope_request(scope_param, has_valid_auth_context)
        if scope_result is None:
            continue

        scope_results.append(scope_result)
        access.append({
            "type": "repository",
            "name": scope_result.registry_and_repo,
            "actions": scope_result.actions,
        })

    # Issue user events.
    user_event_data = {
        "action": "login",
    }

    # Set the user event data for when authed.
    # NOTE: the event reflects only the *first* granted scope result.
    if len(scope_results) > 0:
        if "push" in scope_results[0].actions:
            user_action = "push_start"
        elif "pull" in scope_results[0].actions:
            user_action = "pull_start"
        else:
            user_action = "login"

        user_event_data = {
            "action": user_action,
            "namespace": scope_results[0].namespace,
            "repository": scope_results[0].repository,
        }

    # Send the user event.
    if get_authenticated_user() is not None:
        event = userevents.get_event(get_authenticated_user().username)
        event.publish_event_data("docker-cli", user_event_data)

    # Build the signed JWT.
    tuf_roots = {
        "%s/%s" % (scope_result.namespace, scope_result.repository): scope_result.tuf_root
        for scope_result in scope_results
    }
    context, subject = build_context_and_subject(get_authenticated_context(),
                                                 tuf_roots=tuf_roots)
    token = generate_bearer_token(audience_param, subject, context, access,
                                  TOKEN_VALIDITY_LIFETIME_S, instance_keys)
    return jsonify({"token": token})
def delete(self, robot_shortname): """ Delete an existing robot. """ parent = get_authenticated_user() model.delete_robot(format_robot_username(parent.username, robot_shortname)) log_action("delete_robot", parent.username, {"robot": robot_shortname}) return "", 204