Code Example #1
File: test_blobuploader.py Project: wjjmjh/quay
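Uploads a configurable number of random 100-byte chunks (optionally re-sending the second half of each chunk at an explicit offset), commits the upload, and verifies the blob's size, digest, and stored contents.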
def test_basic_upload_blob(chunk_count, subchunk, registry_model):
    repository_ref = registry_model.lookup_repository("devtable", "complex")
    storage = DistributedStorage({"local_us": FakeStorage(None)}, ["local_us"])
    settings = BlobUploadSettings("2M", 3600)
    app_config = {"TESTING": True}

    data = b""
    with upload_blob(repository_ref, storage, settings) as manager:
        assert manager
        assert manager.blob_upload_id

        for index in range(0, chunk_count):
            chunk_data = os.urandom(100)
            data += chunk_data

            if subchunk:
                manager.upload_chunk(app_config, BytesIO(chunk_data))
                manager.upload_chunk(app_config, BytesIO(chunk_data),
                                     (index * 100) + 50)
            else:
                manager.upload_chunk(app_config, BytesIO(chunk_data))

        blob = manager.commit_to_blob(app_config)

    # Check the blob.
    assert blob.compressed_size == len(data)
    assert not blob.uploading
    assert blob.digest == "sha256:" + hashlib.sha256(data).hexdigest()

    # Ensure the blob exists in storage and has the expected data.
    assert storage.get_content(["local_us"], blob.storage_path) == data
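The core upload-and-commit pattern shared by these examples, distilled into a minimal sketch. The import paths below are assumptions about where these names live in the quay codebase (the snippets do not show their imports); the calls themselves (upload_blob, upload_chunk, commit_to_blob) are exactly those used above, and upload_bytes is a hypothetical helper, not part of quay.

from io import BytesIO

# Assumed import locations; not shown in the original snippets.
from data.registry_model.blobuploader import BlobUploadSettings, upload_blob
from storage.distributedstorage import DistributedStorage
from storage.fakestorage import FakeStorage


def upload_bytes(repository_ref, payload, max_size="2M", expiration_sec=3600):
    # Hypothetical helper: upload `payload` as a single chunk and return the
    # committed blob, mirroring the flow exercised by the tests above.
    storage = DistributedStorage({"local_us": FakeStorage(None)}, ["local_us"])
    settings = BlobUploadSettings(max_size, expiration_sec)
    app_config = {"TESTING": True}

    with upload_blob(repository_ref, storage, settings) as manager:
        manager.upload_chunk(app_config, BytesIO(payload))
        return manager.commit_to_blob(app_config)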
Code Example #2
File: test_blobuploader.py Project: wjjmjh/quay
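Passes two extra stream handlers to upload_blob and confirms that each handler observes the complete uploaded byte stream.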
def test_extra_blob_stream_handlers(registry_model):
    handler1_result = []
    handler2_result = []

    def handler1(bytes_data):
        handler1_result.append(bytes_data)

    def handler2(bytes_data):
        handler2_result.append(bytes_data)

    repository_ref = registry_model.lookup_repository("devtable", "complex")
    storage = DistributedStorage({"local_us": FakeStorage(None)}, ["local_us"])
    settings = BlobUploadSettings("1K", 3600)
    app_config = {"TESTING": True}

    with upload_blob(repository_ref,
                     storage,
                     settings,
                     extra_blob_stream_handlers=[handler1,
                                                 handler2]) as manager:
        manager.upload_chunk(app_config, BytesIO(b"hello "))
        manager.upload_chunk(app_config, BytesIO(b"world"))

    assert b"".join(handler1_result) == b"hello world"
    assert b"".join(handler2_result) == b"hello world"
Code Example #3
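Helper that uploads a minimal image-config blob, builds a Docker schema 2 manifest around it with one remote layer, and retargets the given tag name to that manifest.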
def _create_tag(repo, name):
    repo_ref = RepositoryReference.for_repo_obj(repo)

    with upload_blob(repo_ref, storage, BlobUploadSettings(500,
                                                           500)) as upload:
        app_config = {"TESTING": True}
        config_json = json.dumps({
            "config": {
                "author": "Repo Mirror",
            },
            "rootfs": {
                "type": "layers",
                "diff_ids": []
            },
            "history": [
                {
                    "created": "2019-07-30T18:37:09.284840891Z",
                    "created_by": "base",
                    "author": "Repo Mirror",
                },
            ],
        })
        upload.upload_chunk(app_config, BytesIO(config_json.encode("utf-8")))
        blob = upload.commit_to_blob(app_config)
    builder = DockerSchema2ManifestBuilder()
    builder.set_config_digest(blob.digest, blob.compressed_size)
    builder.add_layer("sha256:abcd", 1234, urls=["http://hello/world"])
    manifest = builder.build()

    manifest, tag = registry_model.create_manifest_and_retarget_tag(
        repo_ref, manifest, name, storage)
Code Example #4
File: test_blobuploader.py Project: wjjmjh/quay
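Verifies that uploading a chunk larger than the configured maximum blob size raises BlobTooLargeException.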
def test_too_large(registry_model):
    repository_ref = registry_model.lookup_repository("devtable", "complex")
    storage = DistributedStorage({"local_us": FakeStorage(None)}, ["local_us"])
    settings = BlobUploadSettings("1K", 3600)
    app_config = {"TESTING": True}

    with upload_blob(repository_ref, storage, settings) as manager:
        with pytest.raises(BlobTooLargeException):
            manager.upload_chunk(app_config,
                                 BytesIO(os.urandom(1024 * 1024 * 2)))
Code Example #5
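Verifies that tags created against a manifest whose config carries a quay.expires-after label receive an end-of-life timestamp, both on the push path (create_manifest_and_retarget_tag) and on the tag-API path (retarget_tag).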
def test_create_manifest_and_retarget_tag_with_labels_with_existing_manifest(
        oci_model):
    # Create a config blob for testing.
    config_json = json.dumps({
        "config": {
            "Labels": {
                "quay.expires-after": "2w",
            },
        },
        "rootfs": {
            "type": "layers",
            "diff_ids": []
        },
        "history": [
            {
                "created": "2018-04-03T18:37:09.284840891Z",
                "created_by": "do something",
            },
        ],
    })

    app_config = {"TESTING": True}
    repository_ref = oci_model.lookup_repository("devtable", "simple")
    with upload_blob(repository_ref, storage,
                     BlobUploadSettings(500, 500)) as upload:
        upload.upload_chunk(app_config, BytesIO(config_json.encode("utf-8")))
        blob = upload.commit_to_blob(app_config)

    # Create the manifest in the repo.
    builder = DockerSchema2ManifestBuilder()
    builder.set_config_digest(blob.digest, blob.compressed_size)
    builder.add_layer("sha256:abcd", 1234, urls=["http://hello/world"])
    manifest = builder.build()

    some_manifest, some_tag = oci_model.create_manifest_and_retarget_tag(
        repository_ref, manifest, "some_tag", storage)
    assert some_manifest is not None
    assert some_tag is not None
    assert some_tag.lifetime_end_ms is not None

    # Create a tag and retarget it to an existing manifest; it should have an end date.
    # This is from a push, so it will attempt to create a manifest first.
    some_other_manifest, some_other_tag = oci_model.create_manifest_and_retarget_tag(
        repository_ref, manifest, "some_other_tag", storage)
    assert some_other_manifest is not None
    assert some_other_manifest == some_manifest
    assert some_other_tag is not None
    assert some_other_tag.lifetime_end_ms is not None

    # Create another tag and retarget it to an existing manifest; it should have an end date.
    # This goes through Quay's tag API, so it will not attempt to create a manifest first.
    yet_another_tag = oci_model.retarget_tag(repository_ref, "yet_another_tag",
                                             some_other_manifest, storage,
                                             docker_v2_signing_key)
    assert yet_another_tag.lifetime_end_ms is not None
Code Example #6
File: test_blobuploader.py Project: wjjmjh/quay
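Uploads a valid gzipped tarball and checks that both the compressed and uncompressed sizes are recorded on the committed blob.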
def test_uncompressed_size(registry_model):
    repository_ref = registry_model.lookup_repository("devtable", "complex")
    storage = DistributedStorage({"local_us": FakeStorage(None)}, ["local_us"])
    settings = BlobUploadSettings("1K", 3600)
    app_config = {"TESTING": True}

    with upload_blob(repository_ref, storage, settings) as manager:
        manager.upload_chunk(app_config, BytesIO(valid_tar_gz(b"hello world")))

        blob = manager.commit_to_blob(app_config)

    assert blob.compressed_size is not None
    assert blob.uncompressed_size is not None
Code Example #7
File: test_blobuploader.py Project: wjjmjh/quay
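Ensures that an upload which is never committed is deleted when the upload_blob context manager exits.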
def test_cancel_upload(registry_model):
    repository_ref = registry_model.lookup_repository("devtable", "complex")
    storage = DistributedStorage({"local_us": FakeStorage(None)}, ["local_us"])
    settings = BlobUploadSettings("2M", 3600)
    app_config = {"TESTING": True}

    blob_upload_id = None
    with upload_blob(repository_ref, storage, settings) as manager:
        blob_upload_id = manager.blob_upload_id
        assert registry_model.lookup_blob_upload(repository_ref,
                                                 blob_upload_id) is not None

        manager.upload_chunk(app_config, BytesIO(b"hello world"))

    # Since the blob was not committed, the upload should be deleted.
    assert blob_upload_id
    assert registry_model.lookup_blob_upload(repository_ref,
                                             blob_upload_id) is None
Code Example #8
File: test_interface.py Project: xzwupeng/quay
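Builds a manifest whose single layer is remote (URL-backed) and verifies that the parsed layer is reported as remote, with its URLs preserved and no local blob.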
def test_manifest_remote_layers(oci_model):
    # Create a config blob for testing.
    config_json = json.dumps({
        'config': {},
        "rootfs": {
            "type": "layers",
            "diff_ids": []
        },
        "history": [
            {
                "created": "2018-04-03T18:37:09.284840891Z",
                "created_by": "do something",
            },
        ],
    })

    app_config = {'TESTING': True}
    repository_ref = oci_model.lookup_repository('devtable', 'simple')
    with upload_blob(repository_ref, storage,
                     BlobUploadSettings(500, 500, 500)) as upload:
        upload.upload_chunk(app_config, BytesIO(config_json.encode('utf-8')))
        blob = upload.commit_to_blob(app_config)

    # Create the manifest in the repo.
    builder = DockerSchema2ManifestBuilder()
    builder.set_config_digest(blob.digest, blob.compressed_size)
    builder.add_layer('sha256:abcd', 1234, urls=['http://hello/world'])
    manifest = builder.build()

    created_manifest, _ = oci_model.create_manifest_and_retarget_tag(
        repository_ref, manifest, 'sometag', storage)
    assert created_manifest

    layers = oci_model.list_parsed_manifest_layers(
        repository_ref, created_manifest.get_parsed_manifest(), storage)
    assert len(layers) == 1
    assert layers[0].layer_info.is_remote
    assert layers[0].layer_info.urls == ['http://hello/world']
    assert layers[0].blob is None
Code Example #9
File: test_interface.py Project: xzwupeng/quay
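Creates an amd64/linux manifest, wraps it in a manifest list, and exercises lookup and creation of the squashed derived image for the list's tag.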
def test_derived_image_for_manifest_list(oci_model):
    # Clear all existing derived storage.
    DerivedStorageForImage.delete().execute()

    # Create a config blob for testing.
    config_json = json.dumps({
        'config': {},
        "rootfs": {
            "type": "layers",
            "diff_ids": []
        },
        "history": [
            {
                "created": "2018-04-03T18:37:09.284840891Z",
                "created_by": "do something",
            },
        ],
    })

    app_config = {'TESTING': True}
    repository_ref = oci_model.lookup_repository('devtable', 'simple')
    with upload_blob(repository_ref, storage,
                     BlobUploadSettings(500, 500, 500)) as upload:
        upload.upload_chunk(app_config, BytesIO(config_json.encode('utf-8')))
        blob = upload.commit_to_blob(app_config)

    # Create the manifest in the repo.
    builder = DockerSchema2ManifestBuilder()
    builder.set_config_digest(blob.digest, blob.compressed_size)
    builder.add_layer(blob.digest, blob.compressed_size)
    amd64_manifest = builder.build()

    oci_model.create_manifest_and_retarget_tag(repository_ref, amd64_manifest,
                                               'submanifest', storage)

    # Create a manifest list, pointing to at least one amd64+linux manifest.
    builder = DockerSchema2ManifestListBuilder()
    builder.add_manifest(amd64_manifest, 'amd64', 'linux')
    manifestlist = builder.build()

    oci_model.create_manifest_and_retarget_tag(repository_ref, manifestlist,
                                               'listtag', storage)
    manifest = oci_model.get_manifest_for_tag(
        oci_model.get_repo_tag(repository_ref, 'listtag'))
    assert manifest
    assert manifest.get_parsed_manifest().is_manifest_list

    # Ensure the squashed image doesn't exist.
    assert oci_model.lookup_derived_image(manifest, 'squash', storage,
                                          {}) is None

    # Create a new one.
    squashed = oci_model.lookup_or_create_derived_image(
        manifest, 'squash', 'local_us', storage, {})
    assert squashed.unique_id
    assert oci_model.lookup_or_create_derived_image(manifest, 'squash',
                                                    'local_us', storage,
                                                    {}) == squashed

    # Perform lookup.
    assert oci_model.lookup_derived_image(manifest, 'squash', storage,
                                          {}) == squashed
Code Example #10
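Drives the legacy manifest builder end to end: starts each layer, uploads and assigns its blob, validates checksums, commits a tag and manifest, and then verifies the resulting legacy image and parsed manifest.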
def test_build_manifest(layers, fake_session, registry_model):
    repository_ref = registry_model.lookup_repository("devtable", "complex")
    storage = DistributedStorage({"local_us": FakeStorage(None)}, ["local_us"])
    settings = BlobUploadSettings("2M", 3600)
    app_config = {"TESTING": True}

    builder = create_manifest_builder(repository_ref, storage, docker_v2_signing_key)
    assert (
        lookup_manifest_builder(repository_ref, "anotherid", storage, docker_v2_signing_key) is None
    )
    assert (
        lookup_manifest_builder(repository_ref, builder.builder_id, storage, docker_v2_signing_key)
        is not None
    )

    blobs_by_layer = {}
    for layer_id, parent_id, layer_bytes in layers:
        # Start a new layer.
        assert builder.start_layer(
            layer_id, json.dumps({"id": layer_id, "parent": parent_id}), "local_us", None, 60
        )

        checksum = hashlib.sha1(layer_bytes).hexdigest()

        # Assign it a blob.
        with upload_blob(repository_ref, storage, settings) as uploader:
            uploader.upload_chunk(app_config, BytesIO(layer_bytes))
            blob = uploader.commit_to_blob(app_config)
            blobs_by_layer[layer_id] = blob
            builder.assign_layer_blob(builder.lookup_layer(layer_id), blob, [checksum])

        # Validate the checksum.
        assert builder.validate_layer_checksum(builder.lookup_layer(layer_id), checksum)

    # Commit the manifest to a tag.
    tag = builder.commit_tag_and_manifest("somenewtag", builder.lookup_layer(layers[-1][0]))
    assert tag
    assert tag in builder.committed_tags

    # Mark the builder as done.
    builder.done()

    # Verify the legacy image for the tag.
    found = registry_model.get_repo_tag(repository_ref, "somenewtag", include_legacy_image=True)
    assert found
    assert found.name == "somenewtag"
    assert found.legacy_image.docker_image_id == layers[-1][0]

    # Verify the blob and manifest.
    manifest = registry_model.get_manifest_for_tag(found)
    assert manifest

    parsed = manifest.get_parsed_manifest()
    assert len(list(parsed.layers)) == len(layers)

    for index, (layer_id, parent_id, layer_bytes) in enumerate(layers):
        assert list(parsed.blob_digests)[index] == blobs_by_layer[layer_id].digest
        assert list(parsed.layers)[index].v1_metadata.image_id == layer_id
        assert list(parsed.layers)[index].v1_metadata.parent_image_id == parent_id

    assert parsed.leaf_layer_v1_image_id == layers[-1][0]
Code Example #11
File: registry.py Project: sabre1041/quay-1
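Registry endpoint that streams a Docker V1 layer into blob storage through upload_blob, using extra stream handlers to compute checksums, then validates the checksum, assigns the blob to the layer, and queues storage replication.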
def put_image_layer(namespace, repository, image_id):
    logger.debug("Checking repo permissions")
    permission = ModifyRepositoryPermission(namespace, repository)
    if not permission.can():
        abort(403)

    repository_ref = registry_model.lookup_repository(namespace,
                                                      repository,
                                                      kind_filter="image")
    if repository_ref is None:
        abort(403)

    logger.debug("Checking for image in manifest builder")
    builder = lookup_manifest_builder(repository_ref,
                                      session.get("manifest_builder"), store,
                                      docker_v2_signing_key)
    if builder is None:
        abort(400)

    layer = builder.lookup_layer(image_id)
    if layer is None:
        abort(404)

    logger.debug("Storing layer data")
    input_stream = request.stream
    if request.headers.get("transfer-encoding") == "chunked":
        # Careful: this may only work with WSGI servers that support chunked
        # encoding (e.g. Gunicorn).
        input_stream = request.environ["wsgi.input"]

    expiration_sec = app.config["PUSH_TEMP_TAG_EXPIRATION_SEC"]
    settings = BlobUploadSettings(
        maximum_blob_size=app.config["MAXIMUM_LAYER_SIZE"],
        committed_blob_expiration=expiration_sec,
    )

    extra_handlers = []

    # Add a handler that copies the data into a temp file. This is used to calculate the tarsum,
    # which is only needed for older versions of Docker.
    requires_tarsum = bool(builder.get_layer_checksums(layer))
    if requires_tarsum:
        tmp, tmp_hndlr = store.temp_store_handler()
        extra_handlers.append(tmp_hndlr)

    # Add a handler which computes the simple Docker V1 checksum.
    h, sum_hndlr = checksums.simple_checksum_handler(layer.v1_metadata_string)
    extra_handlers.append(sum_hndlr)

    uploaded_blob = None
    try:
        with upload_blob(repository_ref,
                         store,
                         settings,
                         extra_blob_stream_handlers=extra_handlers) as manager:
            manager.upload_chunk(app.config, input_stream)
            uploaded_blob = manager.commit_to_blob(app.config)
    except BlobUploadException:
        logger.exception("Exception when writing image data")
        abort(520,
              "Image %(image_id)s could not be written. Please try again.",
              image_id=image_id)

    # Compute the final checksum
    csums = []
    csums.append("sha256:{0}".format(h.hexdigest()))

    try:
        if requires_tarsum:
            tmp.seek(0)
            csums.append(
                checksums.compute_tarsum(tmp, layer.v1_metadata_string))
            tmp.close()
    except (IOError, checksums.TarError) as exc:
        logger.debug("put_image_layer: Error when computing tarsum %s", exc)

    # If there was already a precomputed checksum, validate against it now.
    if builder.get_layer_checksums(layer):
        checksum = builder.get_layer_checksums(layer)[0]
        if not builder.validate_layer_checksum(layer, checksum):
            logger.debug(
                "put_image_checksum: Wrong checksum. Given: %s and expected: %s",
                checksum,
                builder.get_layer_checksums(layer),
            )
            abort(
                400,
                "Checksum mismatch for image: %(image_id)s",
                issue="checksum-mismatch",
                image_id=image_id,
            )

    # Assign the blob to the layer in the manifest.
    if not builder.assign_layer_blob(layer, uploaded_blob, csums):
        abort(500, "Something went wrong")

    # Send a job to the work queue to replicate the image layer.
    # TODO: move this into a better place.
    queue_storage_replication(namespace, uploaded_blob)

    return make_response("true", 200)
Code Example #12
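Variant of the manifest-list derived-image test that is parametrized over manifest and manifest-list builder fixtures and passes raise_on_error=True when creating the manifests.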
def test_derived_image_for_manifest_list(manifest_builder, list_builder,
                                         oci_model):
    # Clear all existing derived storage.
    DerivedStorageForImage.delete().execute()

    # Create a config blob for testing.
    config_json = json.dumps({
        "config": {},
        "architecture":
        "amd64",
        "os":
        "linux",
        "rootfs": {
            "type": "layers",
            "diff_ids": []
        },
        "history": [
            {
                "created": "2018-04-03T18:37:09.284840891Z",
                "created_by": "do something",
            },
        ],
    })

    app_config = {"TESTING": True}
    repository_ref = oci_model.lookup_repository("devtable", "simple")
    with upload_blob(repository_ref, storage,
                     BlobUploadSettings(500, 500)) as upload:
        upload.upload_chunk(app_config, BytesIO(config_json.encode("utf-8")))
        blob = upload.commit_to_blob(app_config)

    # Create the manifest in the repo.
    builder = manifest_builder()
    builder.set_config_digest(blob.digest, blob.compressed_size)
    builder.add_layer(blob.digest, blob.compressed_size)
    amd64_manifest = builder.build()

    oci_model.create_manifest_and_retarget_tag(repository_ref,
                                               amd64_manifest,
                                               "submanifest",
                                               storage,
                                               raise_on_error=True)

    # Create a manifest list, pointing to at least one amd64+linux manifest.
    builder = list_builder()
    builder.add_manifest(amd64_manifest, "amd64", "linux")
    manifestlist = builder.build()

    oci_model.create_manifest_and_retarget_tag(repository_ref,
                                               manifestlist,
                                               "listtag",
                                               storage,
                                               raise_on_error=True)

    manifest = oci_model.get_manifest_for_tag(
        oci_model.get_repo_tag(repository_ref, "listtag"))
    assert manifest
    assert manifest.get_parsed_manifest().is_manifest_list

    # Ensure the squashed image doesn't exist.
    assert oci_model.lookup_derived_image(manifest, "squash", storage,
                                          {}) is None

    # Create a new one.
    squashed = oci_model.lookup_or_create_derived_image(
        manifest, "squash", "local_us", storage, {})
    assert squashed.unique_id
    assert (oci_model.lookup_or_create_derived_image(manifest, "squash",
                                                     "local_us", storage,
                                                     {}) == squashed)

    # Perform lookup.
    assert oci_model.lookup_derived_image(manifest, "squash", storage,
                                          {}) == squashed