Example #1
File: web.py Project: ynnt/quay
def robots():
    robots_txt = make_response(
        render_template("robots.txt", baseurl=get_app_url()))
    robots_txt.headers["Content-Type"] = "text/plain"
    return robots_txt
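For context, a view like this is normally registered as a Flask route. A minimal, self-contained sketch, assuming a standard Flask app; the route path and the get_app_url() stub below are illustrative, not taken from the source:

from flask import Flask, make_response, render_template

app = Flask(__name__)

def get_app_url():
    # Stand-in for Quay's helper that returns the externally visible base URL.
    return "https://quay.example.com"

@app.route("/robots.txt")  # hypothetical route registration
def robots():
    # The robots.txt template can interpolate baseurl, e.g. into a Sitemap line.
    robots_txt = make_response(
        render_template("robots.txt", baseurl=get_app_url()))
    robots_txt.headers["Content-Type"] = "text/plain"
    return robots_txt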
Example #2
def _try_to_mount_blob(repository_ref, mount_blob_digest):
    """ Attempts to mount a blob requested by the user from another repository. """
    logger.debug('Got mount request for blob `%s` into `%s`',
                 mount_blob_digest, repository_ref)
    from_repo = request.args.get('from', None)
    if from_repo is None:
        raise InvalidRequest(message='Missing `from` repository argument')

    # Ensure the user has access to the repository.
    logger.debug(
        'Got mount request for blob `%s` under repository `%s` into `%s`',
        mount_blob_digest, from_repo, repository_ref)
    from_namespace, from_repo_name = parse_namespace_repository(
        from_repo, app.config['LIBRARY_NAMESPACE'], include_tag=False)

    from_repository_ref = registry_model.lookup_repository(
        from_namespace, from_repo_name)
    if from_repository_ref is None:
        logger.debug('Could not find from repo: `%s/%s`', from_namespace,
                     from_repo_name)
        return None

    # First check permission.
    read_permission = ReadRepositoryPermission(from_namespace,
                                               from_repo_name).can()
    if not read_permission:
        # If no direct permission, check if the repository is public.
        if not from_repository_ref.is_public:
            logger.debug(
                'No permission to mount blob `%s` under repository `%s` into `%s`',
                mount_blob_digest, from_repo, repository_ref)
            return None

    # Lookup if the mount blob's digest exists in the repository.
    mount_blob = registry_model.get_cached_repo_blob(model_cache,
                                                     from_namespace,
                                                     from_repo_name,
                                                     mount_blob_digest)
    if mount_blob is None:
        logger.debug('Blob `%s` under repository `%s` not found',
                     mount_blob_digest, from_repo)
        return None

    logger.debug('Mounting blob `%s` under repository `%s` into `%s`',
                 mount_blob_digest, from_repo, repository_ref)

    # Mount the blob into the current repository and return that we've completed the operation.
    expiration_sec = app.config['PUSH_TEMP_TAG_EXPIRATION_SEC']
    mounted = registry_model.mount_blob_into_repository(
        mount_blob, repository_ref, expiration_sec)
    if not mounted:
        logger.debug('Could not mount blob `%s` under repository `%s`',
                     mount_blob_digest, from_repo)
        return None

    # Return the response for the blob indicating that it was mounted, and including its content
    # digest.
    logger.debug('Mounted blob `%s` under repository `%s` into `%s`',
                 mount_blob_digest, from_repo, repository_ref)

    namespace_name = repository_ref.namespace_name
    repo_name = repository_ref.name

    return Response(
        status=201,
        headers={
            'Docker-Content-Digest':
            mount_blob_digest,
            'Location':
            get_app_url() + url_for('v2.download_blob',
                                    repository='%s/%s' %
                                    (namespace_name, repo_name),
                                    digest=mount_blob_digest),
        },
    )
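On the wire, this handler implements the cross-repository blob mount of the Docker Registry HTTP API V2: the client POSTs to the uploads endpoint with `mount` and `from` query parameters. A minimal client-side sketch using requests; the host, repository names, and digest are illustrative:

import requests

registry = "https://quay.example.com"  # illustrative host
digest = "sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"

# Ask the registry to mount a blob from myorg/source into myorg/target.
resp = requests.post(
    "%s/v2/%s/blobs/uploads/" % (registry, "myorg/target"),
    params={"mount": digest, "from": "myorg/source"},
)

# 201 Created: the blob was mounted and Location points at it.
# 202 Accepted: the mount was not possible; a regular upload session was started.
print(resp.status_code, resp.headers.get("Location"))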
Example #3
def _current_request_url():
    return '{0}{1}{2}'.format(get_app_url(), request.script_root, request.path)
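A short sketch of what this helper composes, using a stub for get_app_url() and Flask's test request context; the URL values are illustrative:

from flask import Flask, request

app = Flask(__name__)

def get_app_url():
    # Stand-in for Quay's helper (illustrative value).
    return "https://quay.example.com"

def _current_request_url():
    return '{0}{1}{2}'.format(get_app_url(), request.script_root, request.path)

with app.test_request_context('/v2/_catalog'):
    print(_current_request_url())  # https://quay.example.com/v2/_catalog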
Example #4
    def _process_queue_item(self, job_details, storage):
        logger.info("Got export actions logs queue item: %s", job_details)

        # job_details block (as defined in the logs.py API endpoint):
        # {
        #  'export_id': export_id,
        #  'repository_id': repository.id or None,
        #  'namespace_id': namespace.id,
        #  'namespace_name': namespace.username,
        #  'repository_name': repository.name or None,
        #  'start_time': start_time,
        #  'end_time': end_time,
        #  'callback_url': callback_url or None,
        #  'callback_email': callback_email or None,
        # }
        export_id = job_details["export_id"]

        start_time = _parse_time(job_details["start_time"])
        end_time = _parse_time(job_details["end_time"])

        if start_time is None or end_time is None:
            self._report_results(job_details, ExportResult.INVALID_REQUEST)
            return

        # Make sure the end time covers the whole day.
        end_time = end_time + timedelta(days=1) - timedelta(milliseconds=1)

        # Select the minimum and maximum IDs for the logs for the repository/namespace
        # over the time range.
        namespace_id = job_details["namespace_id"]
        repository_id = job_details["repository_id"]
        max_query_time = timedelta(seconds=MAXIMUM_QUERY_TIME_SECONDS)

        # Generate a file key so that if we return an API URL, it cannot simply be constructed from
        # just the export ID.
        file_key = str(uuid.uuid4())
        exported_filename = "%s-%s" % (export_id, file_key)

        # Start a chunked upload for the logs and stream them.
        upload_id, upload_metadata = storage.initiate_chunked_upload(
            storage.preferred_locations)
        export_storage_path = os.path.join(EXPORT_LOGS_STORAGE_PATH,
                                           exported_filename)
        logger.debug("Starting chunked upload to path `%s`",
                     export_storage_path)

        # Start with a 'json' header that contains the opening bracket, as well as basic
        # information and the start of the `logs` array.
        details = {
            "start_time": format_date(start_time),
            "end_time": format_date(end_time),
            "namespace": job_details["namespace_name"],
            "repository": job_details["repository_name"],
        }

        prefix_data = """{
      "export_id": "%s",
      "details": %s,
      "logs": [
    """ % (
            export_id,
            json.dumps(details),
        )

        prefix_bytes = prefix_data.encode("utf-8")
        _, new_metadata, upload_error = storage.stream_upload_chunk(
            storage.preferred_locations,
            upload_id,
            0,
            -1,
            BytesIO(prefix_bytes),
            upload_metadata,
        )
        uploaded_byte_count = len(prefix_bytes)
        if upload_error is not None:
            logger.error("Got an error when writing chunk for `%s`: %s",
                         export_id, upload_error)
            storage.cancel_chunked_upload(storage.preferred_locations,
                                          upload_id, upload_metadata)
            self._report_results(job_details, ExportResult.FAILED_EXPORT)
            raise IOError(upload_error)

        upload_metadata = new_metadata
        logs_iterator = logs_model.yield_logs_for_export(
            start_time, end_time, repository_id, namespace_id, max_query_time)

        try:
            # Stream the logs to storage as chunks.
            new_metadata, uploaded_byte_count = self._stream_logs(
                upload_id, upload_metadata, uploaded_byte_count, logs_iterator,
                job_details, storage)
            if uploaded_byte_count is None:
                logger.error("Failed to upload streamed logs for `%s`",
                             export_id)
                storage.cancel_chunked_upload(storage.preferred_locations,
                                              upload_id, upload_metadata)
                self._report_results(job_details, ExportResult.FAILED_EXPORT)
                raise IOError("Export failed to upload")

            upload_metadata = new_metadata

            # Close the JSON block.
            suffix_data = """
        {"terminator": true}]
      }"""

            _, new_metadata, upload_error = storage.stream_upload_chunk(
                storage.preferred_locations,
                upload_id,
                0,
                -1,
                BytesIO(suffix_data.encode("utf-8")),
                upload_metadata,
            )
            if upload_error is not None:
                logger.error("Got an error when writing chunk for `%s`: %s",
                             export_id, upload_error)
                storage.cancel_chunked_upload(storage.preferred_locations,
                                              upload_id, upload_metadata)
                self._report_results(job_details, ExportResult.FAILED_EXPORT)
                raise IOError(upload_error)

            # Complete the upload.
            upload_metadata = new_metadata
            storage.complete_chunked_upload(storage.preferred_locations,
                                            upload_id, export_storage_path,
                                            upload_metadata)
        except:
            logger.exception("Exception when exporting logs for `%s`",
                             export_id)
            storage.cancel_chunked_upload(storage.preferred_locations,
                                          upload_id, upload_metadata)
            self._report_results(job_details, ExportResult.FAILED_EXPORT)
            raise

        # Invoke the callbacks.
        export_url = storage.get_direct_download_url(
            storage.preferred_locations,
            export_storage_path,
            expires_in=EXPORTED_LOGS_EXPIRATION_SECONDS,
        )
        if export_url is None:
            export_url = "%s/exportedlogs/%s" % (get_app_url(),
                                                 exported_filename)

        self._report_results(job_details, ExportResult.SUCCESSFUL_EXPORT,
                             export_url)
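Reading the prefix chunk, the streamed entries, and the suffix chunk together, the finished export is a single JSON document with the following shape (reconstructed from the code above; all values are placeholders):

# Shape of the completed export file; the trailing {"terminator": true}
# entry comes from the suffix chunk and marks the end of the logs array.
exported_document = {
    "export_id": "<export id>",
    "details": {
        "start_time": "<formatted start time>",
        "end_time": "<formatted end time>",
        "namespace": "<namespace name>",
        "repository": "<repository name or null>",
    },
    "logs": [
        # ...one object per exported log entry...
        {"terminator": True},
    ],
}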
Example #5
def start_blob_upload(namespace_name, repo_name):
    repository_ref = registry_model.lookup_repository(namespace_name,
                                                      repo_name)
    if repository_ref is None:
        raise NameUnknown()

    # Check for mounting of a blob from another repository.
    mount_blob_digest = request.args.get("mount", None)
    if mount_blob_digest is not None:
        response = _try_to_mount_blob(repository_ref, mount_blob_digest)
        if response is not None:
            return response

    # Begin the blob upload process.
    blob_uploader = create_blob_upload(repository_ref, storage,
                                       _upload_settings())
    if blob_uploader is None:
        logger.debug("Could not create a blob upload for `%s/%s`",
                     namespace_name, repo_name)
        raise InvalidRequest(
            message="Unable to start blob upload for unknown repository")

    # Check if the blob will be uploaded now or in followup calls. If the `digest` is given, then
    # the upload will occur as a monolithic chunk in this call. Otherwise, we return a redirect
    # for the client to upload the chunks as distinct operations.
    digest = request.args.get("digest", None)
    if digest is None:
        # Short-circuit because the user will send the blob data in another request.
        return Response(
            status=202,
            headers={
                "Docker-Upload-UUID":
                blob_uploader.blob_upload_id,
                "Range":
                _render_range(0),
                "Location":
                get_app_url() + url_for(
                    "v2.upload_chunk",
                    repository="%s/%s" % (namespace_name, repo_name),
                    upload_uuid=blob_uploader.blob_upload_id,
                ),
            },
        )

    # Upload the data sent and commit it to a blob.
    with complete_when_uploaded(blob_uploader):
        _upload_chunk(blob_uploader, digest)

    # Write the response to the client.
    return Response(
        status=201,
        headers={
            "Docker-Content-Digest":
            digest,
            "Location":
            get_app_url() + url_for("v2.download_blob",
                                    repository="%s/%s" %
                                    (namespace_name, repo_name),
                                    digest=digest),
        },
    )
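The `digest` query parameter selects between the two flows described in the comment above: without it the client gets a 202 and uploads chunks at the returned Location; with it the blob body is committed in this single call. A minimal sketch of the monolithic variant with requests; the host, repository, and blob contents are illustrative:

import hashlib
import requests

registry = "https://quay.example.com"  # illustrative host
repo = "myorg/myrepo"                  # illustrative repository
blob = b"example blob contents"
digest = "sha256:" + hashlib.sha256(blob).hexdigest()

# Monolithic upload: digest is supplied up front, so the handler above
# uploads and commits the blob within this same request (201 Created).
resp = requests.post(
    "%s/v2/%s/blobs/uploads/" % (registry, repo),
    params={"digest": digest},
    data=blob,
)
print(resp.status_code, resp.headers.get("Docker-Content-Digest"))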
Example #6
def icon_path(icon_name):
    return '%s/static/img/icons/%s.png' % (get_app_url(), icon_name)
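For illustration, with a stubbed get_app_url() (the host and icon name are hypothetical):

def get_app_url():
    # Stand-in for Quay's helper (illustrative value).
    return "https://quay.example.com"

def icon_path(icon_name):
    return '%s/static/img/icons/%s.png' % (get_app_url(), icon_name)

print(icon_path('database'))  # https://quay.example.com/static/img/icons/database.png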
Example #7
def _try_to_mount_blob(repository_ref, mount_blob_digest):
    """
    Attempts to mount a blob requested by the user from another repository.
    """
    logger.debug("Got mount request for blob `%s` into `%s`", mount_blob_digest, repository_ref)
    from_repo = request.args.get("from", None)
    if from_repo is None:
        # If we cannot mount the blob, fall back to the standard upload behavior,
        # since we don't support automatic mount origin discovery across all repos.
        return None

    # Ensure the user has access to the repository.
    logger.debug(
        "Got mount request for blob `%s` under repository `%s` into `%s`",
        mount_blob_digest,
        from_repo,
        repository_ref,
    )
    from_namespace, from_repo_name = parse_namespace_repository(
        from_repo, app.config["LIBRARY_NAMESPACE"], include_tag=False
    )

    from_repository_ref = registry_model.lookup_repository(from_namespace, from_repo_name)
    if from_repository_ref is None:
        logger.debug("Could not find from repo: `%s/%s`", from_namespace, from_repo_name)
        return None

    # First check permission.
    read_permission = ReadRepositoryPermission(from_namespace, from_repo_name).can()
    if not read_permission:
        # If no direct permission, check if the repository is public.
        if not from_repository_ref.is_public:
            logger.debug(
                "No permission to mount blob `%s` under repository `%s` into `%s`",
                mount_blob_digest,
                from_repo,
                repository_ref,
            )
            return None

    # Lookup if the mount blob's digest exists in the repository.
    mount_blob = registry_model.get_cached_repo_blob(
        model_cache, from_namespace, from_repo_name, mount_blob_digest
    )
    if mount_blob is None:
        logger.debug("Blob `%s` under repository `%s` not found", mount_blob_digest, from_repo)
        return None

    logger.debug(
        "Mounting blob `%s` under repository `%s` into `%s`",
        mount_blob_digest,
        from_repo,
        repository_ref,
    )

    # Mount the blob into the current repository and return that we've completed the operation.
    expiration_sec = app.config["PUSH_TEMP_TAG_EXPIRATION_SEC"]
    mounted = registry_model.mount_blob_into_repository(mount_blob, repository_ref, expiration_sec)
    if not mounted:
        logger.debug(
            "Could not mount blob `%s` under repository `%s`",
            mount_blob_digest,
            from_repo,
        )
        return None

    # Return the response for the blob indicating that it was mounted, and including its content
    # digest.
    logger.debug(
        "Mounted blob `%s` under repository `%s` into `%s`",
        mount_blob_digest,
        from_repo,
        repository_ref,
    )

    namespace_name = repository_ref.namespace_name
    repo_name = repository_ref.name

    return Response(
        status=201,
        headers={
            "Docker-Content-Digest": mount_blob_digest,
            "Location": get_app_url()
            + url_for(
                "v2.download_blob",
                repository="%s/%s" % (namespace_name, repo_name),
                digest=mount_blob_digest,
            ),
        },
    )