def test_privileged_build(caplog, source_params):
    """Run a privileged build of TEST_IMAGE and verify it was pushed and is pullable."""
    if MOCK:
        mock_docker()

    local_name = ImageName(repo=TEST_IMAGE)
    pushed_image = local_name.copy()
    pushed_image.registry = LOCALHOST_REGISTRY
    build_args = {
        "source": source_params,
        "image": pushed_image.to_str(),
        "parent_registry": LOCALHOST_REGISTRY,  # faster
        "target_registries_insecure": True,
        "parent_registry_insecure": True,
    }
    manager = PrivilegedBuildManager("buildroot-fedora", build_args)
    results = manager.build()

    tasker = DockerTasker()
    tasker.pull_image(pushed_image, insecure=True)

    if source_params["provider"] == "path":
        assert_source_from_path_mounted_ok(caplog, manager.temp_dir)

    assert len(results.build_logs) > 0
    # clean up the build container and the pushed image
    tasker.remove_container(results.container_id)
    tasker.remove_image(pushed_image)
def test_pull_base_image_plugin(df_base, parent_registry, expected_w_reg, expected_wo_reg):
    """Verify PullBaseImagePlugin pulls the base image under the expected names.

    Runs the plugin against a mocked builder, then checks whether the
    registry-qualified and unqualified image names exist.

    Fix: the cleanup used a bare ``except:``, which also swallows
    KeyboardInterrupt/SystemExit; narrowed to ``except Exception``.
    """
    if MOCK:
        mock_docker(remember_images=True)

    tasker = DockerTasker()
    workflow = DockerBuildWorkflow(MOCK_SOURCE, 'test-image')
    workflow.builder = MockBuilder()
    workflow.builder.base_image = ImageName.parse(df_base)

    # precondition: neither name is present before the plugin runs
    assert not tasker.image_exists(BASE_IMAGE)
    assert not tasker.image_exists(BASE_IMAGE_W_REGISTRY)

    runner = PreBuildPluginsRunner(
        tasker,
        workflow,
        [{
            'name': PullBaseImagePlugin.key,
            'args': {'parent_registry': parent_registry,
                     'parent_registry_insecure': True}
        }]
    )
    runner.run()

    assert tasker.image_exists(BASE_IMAGE) == expected_wo_reg
    assert tasker.image_exists(BASE_IMAGE_W_REGISTRY) == expected_w_reg

    # Best-effort cleanup: depending on parametrization only some of the
    # images exist, so removal failures are ignored.
    try:
        tasker.remove_image(BASE_IMAGE)
        tasker.remove_image(BASE_IMAGE_W_REGISTRY)
    except Exception:
        pass
def test_hostdocker_build(caplog, source_params):
    """Build via the host-docker build manager and verify the pushed image."""
    if MOCK:
        mock_docker()

    base_name = ImageName(repo="atomic-reactor-test-ssh-image")
    pushed_image = base_name.copy()
    pushed_image.registry = LOCALHOST_REGISTRY
    manager = DockerhostBuildManager("buildroot-dh-fedora", {
        "source": source_params,
        "image": pushed_image.to_str(),
        "parent_registry": LOCALHOST_REGISTRY,  # faster
        "target_registries_insecure": True,
        "parent_registry_insecure": True,
    })
    results = manager.build()

    tasker = DockerTasker()
    tasker.pull_image(pushed_image, insecure=True)

    if source_params['provider'] == 'path':
        assert_source_from_path_mounted_ok(caplog, manager.temp_dir)

    assert len(results.build_logs) > 0
    # clean up the build container and the pushed image
    tasker.remove_container(results.container_id)
    tasker.remove_image(pushed_image)
def test_pull_base_image_plugin(df_base, parent_registry, expected_w_reg, expected_wo_reg):
    """Verify PullBaseImagePlugin pulls the base image under the expected names.

    Fix: the cleanup used a bare ``except:``, which also swallows
    KeyboardInterrupt/SystemExit; narrowed to ``except Exception``.
    """
    if MOCK:
        mock_docker(remember_images=True)

    tasker = DockerTasker()
    workflow = DockerBuildWorkflow(MOCK_SOURCE, 'test-image')
    workflow.builder = MockBuilder()
    workflow.builder.base_image = ImageName.parse(df_base)

    # precondition: neither name is present before the plugin runs
    assert not tasker.image_exists(BASE_IMAGE)
    assert not tasker.image_exists(BASE_IMAGE_W_REGISTRY)

    runner = PreBuildPluginsRunner(tasker, workflow, [{
        'name': PullBaseImagePlugin.key,
        'args': {
            'parent_registry': parent_registry,
            'parent_registry_insecure': True
        }
    }])
    runner.run()

    assert tasker.image_exists(BASE_IMAGE) == expected_wo_reg
    assert tasker.image_exists(BASE_IMAGE_W_REGISTRY) == expected_w_reg

    # Best-effort cleanup: some of the images may not exist.
    try:
        tasker.remove_image(BASE_IMAGE)
        tasker.remove_image(BASE_IMAGE_W_REGISTRY)
    except Exception:
        pass
def test_privileged_build(caplog, source_params):
    """Privileged build of TEST_IMAGE: the result must be pushed and pullable."""
    if MOCK:
        mock_docker()

    registry_image = ImageName(repo=TEST_IMAGE).copy()
    registry_image.registry = LOCALHOST_REGISTRY
    manager = PrivilegedBuildManager(
        "buildroot-fedora",
        {
            "source": source_params,
            "image": registry_image.to_str(),
            "parent_registry": LOCALHOST_REGISTRY,  # faster
            "target_registries_insecure": True,
            "parent_registry_insecure": True,
        })
    results = manager.build()

    tasker = DockerTasker()
    tasker.pull_image(registry_image, insecure=True)

    if source_params['provider'] == 'path':
        assert_source_from_path_mounted_ok(caplog, manager.temp_dir)

    assert len(results.build_logs) > 0
    # clean up the build container and the pushed image
    tasker.remove_container(results.container_id)
    tasker.remove_image(registry_image)
def test_tag_by_labels_plugin(tmpdir, args):
    """TagByLabelsPlugin should derive the image tags from the built image labels."""
    if MOCK:
        mock_docker()

    tasker = DockerTasker()
    workflow = DockerBuildWorkflow({"provider": "git", "uri": "asd"}, "test-image")
    version, release = "1.0", "1"
    workflow.built_image_inspect = {
        INSPECT_CONFIG: {
            "Labels": {"name": TEST_IMAGE, "version": version, "release": release},
        },
    }
    workflow.push_conf.add_docker_registry(LOCALHOST_REGISTRY, insecure=True)
    image = ImageName(repo=TEST_IMAGE,
                      tag="%s_%s" % (version, release),
                      registry=LOCALHOST_REGISTRY)
    setattr(workflow, 'builder', X)

    runner = PostBuildPluginsRunner(tasker, workflow,
                                    [{'name': TagByLabelsPlugin.key, 'args': args}])
    output = runner.run()
    assert TagByLabelsPlugin.key in output.keys()

    images = [i.to_str() for i in workflow.tag_conf.images]
    primary_images = [i.to_str() for i in workflow.tag_conf.primary_images]
    unique_images = [i.to_str() for i in workflow.tag_conf.unique_images]

    if args.get('unique_tag_only'):
        # only the unique tag was applied
        assert len(workflow.tag_conf.images) == 1
        assert len(primary_images) == 0
    else:
        assert len(workflow.tag_conf.images) == 4
        assert len(primary_images) == 3
        assert ("%s:%s-%s" % (TEST_IMAGE, version, release)) in images
        assert ("%s:%s" % (TEST_IMAGE, version)) in images
        assert ("%s:latest" % (TEST_IMAGE, )) in images
        assert ("%s:%s-%s" % (TEST_IMAGE, version, release)) in primary_images
        assert ("%s:%s" % (TEST_IMAGE, version)) in primary_images
        assert ("%s:latest" % (TEST_IMAGE, )) in primary_images

    assert len(unique_images) == 1
    assert ("%s:%s" % (TEST_IMAGE, "unique_tag_123")) in images
    assert ("%s:%s" % (TEST_IMAGE, "unique_tag_123")) in unique_images
    tasker.remove_image(image)
def test_tag_and_push_plugin(tmpdir, image_name, logs, should_raise, use_secret):
    """TagAndPushPlugin pushes the primary image and records its digest.

    Fix: the registry secret was written to an un-cleaned ``mkdtemp()``
    directory even though the ``tmpdir`` fixture is available; use the
    fixture so pytest removes the directory and nothing leaks.
    """
    if MOCK:
        mock_docker()
        flexmock(docker.Client,
                 push=lambda iid, **kwargs: iter(logs),
                 login=lambda username, registry, dockercfg_path: {'Status': 'Login Succeeded'})

    tasker = DockerTasker()
    workflow = DockerBuildWorkflow({"provider": "git", "uri": "asd"}, TEST_IMAGE)
    workflow.tag_conf.add_primary_image(image_name)
    setattr(workflow, 'builder', X)

    secret_path = None
    if use_secret:
        # tmpdir is cleaned up by pytest -- no mkdtemp() leak
        secret_dir = str(tmpdir)
        with open(os.path.join(secret_dir, ".dockercfg"), "w+") as dockerconfig:
            dockerconfig_contents = {
                LOCALHOST_REGISTRY: {
                    "username": "******",
                    "email": "*****@*****.**",
                    "password": "******"
                }
            }
            dockerconfig.write(json.dumps(dockerconfig_contents))
            dockerconfig.flush()
        secret_path = secret_dir

    runner = PostBuildPluginsRunner(tasker, workflow, [{
        'name': TagAndPushPlugin.key,
        'args': {
            'registries': {
                LOCALHOST_REGISTRY: {'insecure': True, 'secret': secret_path}
            }
        },
    }])

    if should_raise:
        with pytest.raises(Exception):
            runner.run()
    else:
        output = runner.run()
        image = output[TagAndPushPlugin.key][0]
        tasker.remove_image(image)
        assert len(workflow.push_conf.docker_registries) > 0

        if MOCK:
            # we only test this when mocking docker because we don't expect
            # running actual docker against v2 registry
            assert workflow.push_conf.docker_registries[0].digests[image_name] == DIGEST1
def test_tag_by_labels_plugin(tmpdir, args):
    """Tags produced by TagByLabelsPlugin must match the built image's labels."""
    if MOCK:
        mock_docker()

    tasker = DockerTasker()
    workflow = DockerBuildWorkflow({"provider": "git", "uri": "asd"}, "test-image")
    version = "1.0"
    release = "1"
    labels = {"name": TEST_IMAGE, "version": version, "release": release}
    workflow.built_image_inspect = {INSPECT_CONFIG: {"Labels": labels}}
    workflow.push_conf.add_docker_registry(LOCALHOST_REGISTRY, insecure=True)
    image = ImageName(repo=TEST_IMAGE,
                      tag="%s_%s" % (version, release),
                      registry=LOCALHOST_REGISTRY)
    setattr(workflow, 'builder', X)

    runner = PostBuildPluginsRunner(tasker, workflow, [{
        'name': TagByLabelsPlugin.key,
        'args': args,
    }])
    output = runner.run()
    assert TagByLabelsPlugin.key in output.keys()

    images = [i.to_str() for i in workflow.tag_conf.images]
    primary_images = [i.to_str() for i in workflow.tag_conf.primary_images]
    unique_images = [i.to_str() for i in workflow.tag_conf.unique_images]

    if args.get('unique_tag_only'):
        # only the unique tag was applied
        assert len(workflow.tag_conf.images) == 1
        assert len(primary_images) == 0
    else:
        assert len(workflow.tag_conf.images) == 4
        assert len(primary_images) == 3
        assert ("%s:%s-%s" % (TEST_IMAGE, version, release)) in images
        assert ("%s:%s" % (TEST_IMAGE, version)) in images
        assert ("%s:latest" % (TEST_IMAGE, )) in images
        assert ("%s:%s-%s" % (TEST_IMAGE, version, release)) in primary_images
        assert ("%s:%s" % (TEST_IMAGE, version)) in primary_images
        assert ("%s:latest" % (TEST_IMAGE, )) in primary_images

    assert len(unique_images) == 1
    assert ("%s:%s" % (TEST_IMAGE, "unique_tag_123")) in images
    assert ("%s:%s" % (TEST_IMAGE, "unique_tag_123")) in unique_images
    tasker.remove_image(image)
def test_build_image_from_git(temp_image_name):
    """Build an image straight from a git Dockerfile and verify it exists."""
    if MOCK:
        mock_docker()

    tasker = DockerTasker()
    build_logs = tasker.build_image_from_git(DOCKERFILE_GIT, temp_image_name,
                                             use_cache=True)
    # the build is lazy -- consume the log generator so it actually runs
    list(build_logs)
    assert build_logs is not None
    assert tasker.image_exists(temp_image_name)
    tasker.remove_image(temp_image_name)
def test_build_image_from_git(temp_image_name):
    """Building from a remote git Dockerfile should yield an existing image."""
    if MOCK:
        mock_docker()

    tasker = DockerTasker()
    stream = tasker.build_image_from_git(DOCKERFILE_GIT, temp_image_name,
                                         use_cache=True)
    list(stream)  # drain the lazy build-log generator so the build completes
    assert stream is not None
    assert tasker.image_exists(temp_image_name)
    tasker.remove_image(temp_image_name)
def test_push_image(temp_image_name):
    """Tag INPUT_IMAGE into the local registry and push it."""
    if MOCK:
        mock_docker()

    tasker = DockerTasker()
    temp_image_name.registry = LOCALHOST_REGISTRY
    temp_image_name.tag = "1"
    tasker.tag_image(INPUT_IMAGE, temp_image_name)
    push_logs = tasker.push_image(temp_image_name, insecure=True)
    assert push_logs is not None
    tasker.remove_image(temp_image_name)
def test_image_creation_local_repo():
    """Build the privileged-builder image from the local checkout."""
    if MOCK:
        mock_docker()

    image_builder = BuildImageBuilder(reactor_local_path=PARENT_DIR)
    dockerfile_dir = os.path.join(PARENT_DIR, 'images', 'privileged-builder')
    image_builder.create_image(dockerfile_dir, TEST_BUILD_IMAGE)

    tasker = DockerTasker()
    assert tasker.image_exists(TEST_BUILD_IMAGE)
    tasker.remove_image(TEST_BUILD_IMAGE)
def test_tag_and_push(temp_image_name):
    """tag_and_push_image should tag and push to the registry in one call."""
    if MOCK:
        mock_docker()

    tasker = DockerTasker()
    temp_image_name.registry = LOCALHOST_REGISTRY
    temp_image_name.tag = "1"
    push_logs = tasker.tag_and_push_image(INPUT_IMAGE, temp_image_name,
                                          insecure=True)
    assert push_logs is not None
    assert tasker.image_exists(temp_image_name)
    tasker.remove_image(temp_image_name)
def test_image_creation_local_repo():
    """Creating the build image from a local repo must produce an existing image."""
    if MOCK:
        mock_docker()

    builder = BuildImageBuilder(reactor_local_path=PARENT_DIR)
    builder.create_image(os.path.join(PARENT_DIR, 'images', 'privileged-builder'),
                         TEST_BUILD_IMAGE)

    tasker = DockerTasker()
    assert tasker.image_exists(TEST_BUILD_IMAGE)
    tasker.remove_image(TEST_BUILD_IMAGE)
def test_tag_and_push(temp_image_name):  # noqa
    """Combined tag-and-push must leave the image present in the registry."""
    if MOCK:
        mock_docker()

    tasker = DockerTasker()
    temp_image_name.registry = LOCALHOST_REGISTRY
    temp_image_name.tag = "1"
    result = tasker.tag_and_push_image(INPUT_IMAGE, temp_image_name, insecure=True)
    assert result is not None
    assert tasker.image_exists(temp_image_name)
    tasker.remove_image(temp_image_name)
def test_tag_and_push_plugin(tmpdir, image_name, logs, should_raise, use_secret):
    """TagAndPushPlugin pushes the primary image and records its digest.

    Fix: the registry secret lived in an un-cleaned ``mkdtemp()`` directory
    although the ``tmpdir`` fixture was already injected; write it under
    ``tmpdir`` so pytest cleans it up and no temp directories leak.
    """
    if MOCK:
        mock_docker()
        flexmock(docker.Client,
                 push=lambda iid, **kwargs: iter(logs),
                 login=lambda username, registry, dockercfg_path: {'Status': 'Login Succeeded'})

    tasker = DockerTasker()
    workflow = DockerBuildWorkflow({"provider": "git", "uri": "asd"}, TEST_IMAGE)
    workflow.tag_conf.add_primary_image(image_name)
    setattr(workflow, 'builder', X)

    secret_path = None
    if use_secret:
        # tmpdir is removed by pytest -- avoids the mkdtemp() leak
        secret_dir = str(tmpdir)
        with open(os.path.join(secret_dir, ".dockercfg"), "w+") as dockerconfig:
            dockerconfig_contents = {
                LOCALHOST_REGISTRY: {
                    "username": "******",
                    "email": "*****@*****.**",
                    "password": "******"}}
            dockerconfig.write(json.dumps(dockerconfig_contents))
            dockerconfig.flush()
        secret_path = secret_dir

    runner = PostBuildPluginsRunner(
        tasker,
        workflow,
        [{
            'name': TagAndPushPlugin.key,
            'args': {
                'registries': {
                    LOCALHOST_REGISTRY: {
                        'insecure': True,
                        'secret': secret_path
                    }
                }
            },
        }]
    )

    if should_raise:
        with pytest.raises(Exception):
            runner.run()
    else:
        output = runner.run()
        image = output[TagAndPushPlugin.key][0]
        tasker.remove_image(image)
        assert len(workflow.push_conf.docker_registries) > 0

        if MOCK:
            # we only test this when mocking docker because we don't expect
            # running actual docker against v2 registry
            assert workflow.push_conf.docker_registries[0].digests[image_name] == DIGEST1
def test_remove_image(temp_image_name):
    """An image committed from a container can be removed afterwards."""
    if MOCK:
        mock_docker(inspect_should_fail=True)

    tasker = DockerTasker()
    container_id = tasker.run(input_image_name, command="id")
    tasker.wait(container_id)
    image_id = tasker.commit_container(container_id, image=temp_image_name)
    try:
        tasker.remove_container(container_id)
    finally:
        # always remove the image, even if container removal failed
        tasker.remove_image(image_id)
    assert not tasker.image_exists(temp_image_name)
def test_build_image(tmpdir, source_params):
    """InsideBuilder should produce an inspectable image from the given source."""
    provided_image = "test-build:test_tag"
    if MOCK:
        mock_docker(provided_image_repotags=provided_image)

    source_params.update({'tmpdir': str(tmpdir)})
    source = get_source_instance_for(source_params)
    tasker = DockerTasker()
    builder = InsideBuilder(source, provided_image)
    build_result = builder.build()
    assert tasker.inspect_image(build_result.image_id)
    # clean
    tasker.remove_image(build_result.image_id)
def test_tag_image(temp_image_name):  # noqa
    """tag_image must create the new tag and return its string representation."""
    if MOCK:
        mock_docker()

    tasker = DockerTasker()
    temp_image_name.registry = "somewhere.example.com"
    temp_image_name.tag = "1"
    tagged = tasker.tag_image(INPUT_IMAGE, temp_image_name)
    try:
        assert tasker.image_exists(temp_image_name)
        assert tagged == temp_image_name.to_str()
    finally:
        tasker.remove_image(temp_image_name)
def test_build_image(tmpdir, source_params):
    """A build run through InsideBuilder yields an image docker can inspect."""
    provided_image = "test-build:test_tag"
    if MOCK:
        mock_docker(provided_image_repotags=provided_image)

    source_params.update({'tmpdir': str(tmpdir)})
    src = get_source_instance_for(source_params)
    tasker = DockerTasker()
    result = InsideBuilder(src, provided_image).build()
    assert tasker.inspect_image(result.image_id)
    # clean up the built image
    tasker.remove_image(result.image_id)
def test_commit_container(temp_image_name):
    """Committing a finished container should yield an existing image."""
    if MOCK:
        mock_docker()

    tasker = DockerTasker()
    container_id = tasker.run(INPUT_IMAGE, command="id")
    tasker.wait(container_id)
    image_id = tasker.commit_container(container_id,
                                       message="test message",
                                       image=temp_image_name)
    try:
        assert tasker.image_exists(image_id)
    finally:
        # remove both artifacts regardless of the assertion outcome
        tasker.remove_container(container_id)
        tasker.remove_image(image_id)
def test_remove_image(temp_image_name):  # noqa
    """remove_image must delete an image committed from a container."""
    if MOCK:
        mock_docker(inspect_should_fail=True)

    tasker = DockerTasker()
    cid = tasker.run(input_image_name, command="id")
    tasker.wait(cid)
    committed = tasker.commit_container(cid, image=temp_image_name)
    try:
        tasker.remove_container(cid)
    finally:
        tasker.remove_image(committed)
    assert not tasker.image_exists(temp_image_name)
def test_pull_image():
    """Push a local image to the registry and pull it back."""
    if MOCK:
        mock_docker()

    tasker = DockerTasker()
    local_img = input_image_name
    remote_img = local_img.copy()
    remote_img.registry = LOCALHOST_REGISTRY
    tasker.tag_and_push_image(local_img, remote_img, insecure=True)
    pulled = tasker.pull_image(remote_img, insecure=True)
    assert remote_img.to_str() == pulled
    assert len(tasker.last_logs) > 0
    tasker.remove_image(remote_img)
def test_tag_image(temp_image_name):
    """A fresh tag must exist and tag_image must return its string form."""
    if MOCK:
        mock_docker()

    tasker = DockerTasker()
    temp_image_name.registry = "somewhere.example.com"
    temp_image_name.tag = "1"
    returned = tasker.tag_image(INPUT_IMAGE, temp_image_name)
    try:
        assert tasker.image_exists(temp_image_name)
        assert returned == temp_image_name.to_str()
    finally:
        tasker.remove_image(temp_image_name)
def test_pull_image():
    """A round-trip push/pull must return the registry-qualified image name."""
    if MOCK:
        mock_docker()

    tasker = DockerTasker()
    registry_img = input_image_name.copy()
    registry_img.registry = LOCALHOST_REGISTRY
    tasker.tag_and_push_image(input_image_name, registry_img, insecure=True)
    got_image = tasker.pull_image(registry_img, insecure=True)
    assert registry_img.to_str() == got_image
    assert len(tasker.last_logs) > 0
    tasker.remove_image(registry_img)
def test_commit_container(temp_image_name):
    """commit_container must produce an image that exists afterwards."""
    if MOCK:
        mock_docker()

    tasker = DockerTasker()
    cid = tasker.run(INPUT_IMAGE, command="id")
    tasker.wait(cid)
    committed = tasker.commit_container(cid, message="test message",
                                        image=temp_image_name)
    try:
        assert tasker.image_exists(committed)
    finally:
        # clean up container and image even if the assertion failed
        tasker.remove_container(cid)
        tasker.remove_image(committed)
def test_build_image_from_path(tmpdir, temp_image_name):
    """Clone the Dockerfile repo and build an image from the checkout path."""
    if MOCK:
        mock_docker()

    checkout = str(tmpdir.realpath())
    clone_git_repo(DOCKERFILE_GIT, checkout)
    dockerfile = tmpdir.join("Dockerfile")
    assert dockerfile.check()

    tasker = DockerTasker()
    build_logs = tasker.build_image_from_path(checkout, temp_image_name,
                                              use_cache=True)
    # drain the lazy build-log generator so the build runs to completion
    list(build_logs)
    assert build_logs is not None
    assert tasker.image_exists(temp_image_name)
    tasker.remove_image(temp_image_name)
def test_build_image_from_path(tmpdir, temp_image_name):
    """Building from a cloned local path must yield an existing image."""
    if MOCK:
        mock_docker()

    workdir = str(tmpdir.realpath())
    clone_git_repo(DOCKERFILE_GIT, workdir)
    assert tmpdir.join("Dockerfile").check()

    tasker = DockerTasker()
    stream = tasker.build_image_from_path(workdir, temp_image_name, use_cache=True)
    list(stream)  # consume the generator; the build is lazy
    assert stream is not None
    assert tasker.image_exists(temp_image_name)
    tasker.remove_image(temp_image_name)
def test_push_image(temp_image_name, should_fail):
    """push_image either raises RuntimeError (mocked failure) or returns logs."""
    if MOCK:
        mock_docker(push_should_fail=should_fail)

    tasker = DockerTasker()
    temp_image_name.registry = LOCALHOST_REGISTRY
    temp_image_name.tag = "1"
    tasker.tag_image(INPUT_IMAGE, temp_image_name)

    if should_fail:
        with pytest.raises(RuntimeError) as exc:
            output = tasker.push_image(temp_image_name, insecure=True)
        assert "Failed to push image" in str(exc)
    else:
        output = tasker.push_image(temp_image_name, insecure=True)
        assert output is not None
    tasker.remove_image(temp_image_name)
def test_push_image(temp_image_name, should_fail):
    """With retries disabled, a failed push surfaces RetryGeneratorException."""
    if MOCK:
        mock_docker(push_should_fail=should_fail)

    tasker = DockerTasker(retry_times=0)
    temp_image_name.registry = LOCALHOST_REGISTRY
    temp_image_name.tag = "1"
    tasker.tag_image(INPUT_IMAGE, temp_image_name)

    if should_fail:
        with pytest.raises(RetryGeneratorException) as exc:
            output = tasker.push_image(temp_image_name, insecure=True)
        assert "Failed to mock_method image" in str(exc)
        assert "connection refused" in str(exc)
    else:
        output = tasker.push_image(temp_image_name, insecure=True)
        assert output is not None
    tasker.remove_image(temp_image_name)
def test_push_image(temp_image_name, should_fail):
    """A push with retry_times=0 either fails loudly or returns push logs."""
    if MOCK:
        mock_docker(push_should_fail=should_fail)

    tasker = DockerTasker(retry_times=0)
    temp_image_name.registry = LOCALHOST_REGISTRY
    temp_image_name.tag = "1"
    tasker.tag_image(INPUT_IMAGE, temp_image_name)

    if not should_fail:
        push_logs = tasker.push_image(temp_image_name, insecure=True)
        assert push_logs is not None
    else:
        with pytest.raises(RetryGeneratorException) as exc:
            push_logs = tasker.push_image(temp_image_name, insecure=True)
        assert "Failed to mock_method image" in str(exc)
        assert "connection refused" in str(exc)
    tasker.remove_image(temp_image_name)
def test_pull_base_image(tmpdir, source_params):
    """pull_base_image should pull the base image under two registry tags."""
    if MOCK:
        mock_docker()

    source_params.update({"tmpdir": str(tmpdir)})
    source = get_source_instance_for(source_params)
    tasker = DockerTasker()
    builder = InsideBuilder(source, "")

    pulled_tags = builder.pull_base_image(LOCALHOST_REGISTRY, insecure=True)
    assert isinstance(pulled_tags, set)
    assert len(pulled_tags) == 2
    for tag in pulled_tags:
        pulled = ImageName.parse(tag)
        assert tasker.inspect_image(pulled) is not None
        assert pulled.repo == git_base_image.repo
        assert pulled.tag == git_base_image.tag
    # clean
    tasker.remove_image(git_base_image)
def test_pull_base_image_plugin(parent_registry, df_base, expected, not_expected):
    """PullBaseImagePlugin must pull exactly the expected image names.

    Verifies presence/absence of image names after the plugin runs, that all
    pulled names are tracked in ``workflow.pulled_base_images``, and that the
    builder's base image is left unchanged.

    Fix: the cleanup loop used a bare ``except:``, which also swallows
    KeyboardInterrupt/SystemExit; narrowed to ``except Exception``.
    """
    if MOCK:
        mock_docker(remember_images=True)

    tasker = DockerTasker(retry_times=0)
    workflow = DockerBuildWorkflow(MOCK_SOURCE, 'test-image')
    workflow.builder = MockBuilder()
    workflow.builder.base_image = ImageName.parse(df_base)

    expected = set(expected)
    expected.add(UNIQUE_ID)
    expected.add(df_base)
    all_images = set(expected).union(not_expected)
    # precondition: nothing is present yet
    for image in all_images:
        assert not tasker.image_exists(image)

    runner = PreBuildPluginsRunner(
        tasker,
        workflow,
        [{
            'name': PullBaseImagePlugin.key,
            'args': {'parent_registry': parent_registry,
                     'parent_registry_insecure': True}
        }]
    )
    runner.run()

    for image in expected:
        assert tasker.image_exists(image)
        assert image in workflow.pulled_base_images

    for image in not_expected:
        assert not tasker.image_exists(image)

    # everything the workflow recorded as pulled must really exist
    for image in workflow.pulled_base_images:
        assert tasker.image_exists(image)

    # best-effort cleanup; some of the names were never created
    for image in all_images:
        try:
            tasker.remove_image(image)
        except Exception:
            pass

    assert workflow.builder.base_image == df_base
def test_tag_and_push_plugin(tmpdir):
    """TagAndPushPlugin with no args pushes the primary image."""
    if MOCK:
        mock_docker()

    tasker = DockerTasker()
    workflow = DockerBuildWorkflow({"provider": "git", "uri": "asd"}, TEST_IMAGE)
    workflow.tag_conf.add_primary_image(TEST_IMAGE)
    workflow.push_conf.add_docker_registry(LOCALHOST_REGISTRY, insecure=True)
    setattr(workflow, 'builder', X)

    runner = PostBuildPluginsRunner(tasker, workflow,
                                    [{'name': TagAndPushPlugin.key}])
    output = runner.run()
    pushed = output[TagAndPushPlugin.key][0]
    tasker.remove_image(pushed)
def test_tag_and_push_plugin(tmpdir):
    """Running TagAndPushPlugin without args must push the primary image."""
    if MOCK:
        mock_docker()

    tasker = DockerTasker()
    workflow = DockerBuildWorkflow({"provider": "git", "uri": "asd"}, TEST_IMAGE)
    workflow.tag_conf.add_primary_image(TEST_IMAGE)
    workflow.push_conf.add_docker_registry(LOCALHOST_REGISTRY, insecure=True)
    setattr(workflow, 'builder', X)

    plugin_conf = [{'name': TagAndPushPlugin.key}]
    output = PostBuildPluginsRunner(tasker, workflow, plugin_conf).run()
    # the plugin reports the pushed image as its first result
    tasker.remove_image(output[TagAndPushPlugin.key][0])
def test_privileged_gitrepo_build(caplog, source_params):
    """Privileged build from a git repo: the image must be pushed and pullable."""
    if MOCK:
        mock_docker()

    base_name = ImageName(repo="atomic-reactor-test-ssh-image")
    pushed_image = base_name.copy()
    pushed_image.registry = LOCALHOST_REGISTRY
    manager = PrivilegedBuildManager("buildroot-fedora", {
        "source": source_params,
        "image": pushed_image.to_str(),
        "parent_registry": LOCALHOST_REGISTRY,  # faster
        "target_registries_insecure": True,
        "parent_registry_insecure": True,
    })
    results = manager.build()

    tasker = DockerTasker()
    tasker.pull_image(pushed_image, insecure=True)
    assert len(results.build_logs) > 0
    tasker.remove_container(results.container_id)
    tasker.remove_image(pushed_image)
def test_privileged_gitrepo_build(caplog, source_params):
    """A git-repo privileged build must produce logs and a pullable image."""
    if MOCK:
        mock_docker()

    registry_image = ImageName(repo="atomic-reactor-test-ssh-image").copy()
    registry_image.registry = LOCALHOST_REGISTRY
    build_conf = {
        "source": source_params,
        "image": registry_image.to_str(),
        "parent_registry": LOCALHOST_REGISTRY,  # faster
        "target_registries_insecure": True,
        "parent_registry_insecure": True,
    }
    results = PrivilegedBuildManager("buildroot-fedora", build_conf).build()

    tasker = DockerTasker()
    tasker.pull_image(registry_image, insecure=True)
    assert len(results.build_logs) > 0
    # clean up the build container and the pushed image
    tasker.remove_container(results.container_id)
    tasker.remove_image(registry_image)
def test_tag_and_push_plugin(tmpdir, image_name, logs, should_raise):
    """Push the primary image via TagAndPushPlugin and verify its digest."""
    if MOCK:
        mock_docker()
        flexmock(docker.Client, push=lambda iid, **kwargs: logs)

    tasker = DockerTasker()
    workflow = DockerBuildWorkflow({"provider": "git", "uri": "asd"}, TEST_IMAGE)
    workflow.tag_conf.add_primary_image(image_name)
    setattr(workflow, 'builder', X)

    runner = PostBuildPluginsRunner(tasker, workflow, [{
        'name': TagAndPushPlugin.key,
        'args': {'registries': {LOCALHOST_REGISTRY: {'insecure': True}}},
    }])

    if should_raise:
        with pytest.raises(Exception):
            runner.run()
    else:
        output = runner.run()
        pushed = output[TagAndPushPlugin.key][0]
        tasker.remove_image(pushed)
        assert len(workflow.push_conf.docker_registries) > 0

        if MOCK:
            # we only test this when mocking docker because we don't expect
            # running actual docker against v2 registry
            assert workflow.push_conf.docker_registries[0].digests[image_name] == DIGEST1
def test_pull_image(tmpdir, insecure, dockercfg):
    """Round-trip push/pull, optionally authenticating via a .dockercfg file."""
    if MOCK:
        mock_docker()

    dockercfg_path = None
    if dockercfg:
        # write the credentials under tmpdir and point the tasker at the dir
        dockercfg_path = str(tmpdir.realpath())
        secret_file = os.path.join(dockercfg_path, '.dockercfg')
        with open(secret_file, "w+") as dockerconfig:
            dockerconfig.write(json.dumps(dockercfg))
            dockerconfig.flush()

    tasker = DockerTasker()
    local_img = input_image_name
    remote_img = local_img.copy()
    remote_img.registry = LOCALHOST_REGISTRY
    tasker.tag_and_push_image(local_img, remote_img, insecure=insecure,
                              dockercfg=dockercfg_path)
    pulled = tasker.pull_image(remote_img, insecure=insecure,
                               dockercfg_path=dockercfg_path)
    assert remote_img.to_str() == pulled
    assert len(tasker.last_logs) > 0
    tasker.remove_image(remote_img)
def test_tag_and_push_plugin(tmpdir, image_name, logs, should_raise):
    """TagAndPushPlugin must push the primary image and record DIGEST1."""
    if MOCK:
        mock_docker()
        flexmock(docker.Client, push=lambda iid, **kwargs: iter(logs))

    tasker = DockerTasker()
    workflow = DockerBuildWorkflow({"provider": "git", "uri": "asd"}, TEST_IMAGE)
    workflow.tag_conf.add_primary_image(image_name)
    setattr(workflow, 'builder', X)

    plugin_conf = [{
        'name': TagAndPushPlugin.key,
        'args': {
            'registries': {
                LOCALHOST_REGISTRY: {'insecure': True}
            }
        },
    }]
    runner = PostBuildPluginsRunner(tasker, workflow, plugin_conf)

    if should_raise:
        with pytest.raises(Exception):
            runner.run()
    else:
        output = runner.run()
        image = output[TagAndPushPlugin.key][0]
        tasker.remove_image(image)
        assert len(workflow.push_conf.docker_registries) > 0

        if MOCK:
            # we only test this when mocking docker because we don't expect
            # running actual docker against v2 registry
            assert workflow.push_conf.docker_registries[0].digests[image_name] == DIGEST1
def test_hostdocker_build(caplog, source_params):
    """Host-docker build: logs are produced and the image is pullable."""
    if MOCK:
        mock_docker()

    registry_image = ImageName(repo="atomic-reactor-test-ssh-image").copy()
    registry_image.registry = LOCALHOST_REGISTRY
    build_conf = {
        "source": source_params,
        "image": registry_image.to_str(),
        "parent_registry": LOCALHOST_REGISTRY,  # faster
        "target_registries_insecure": True,
        "parent_registry_insecure": True,
    }
    manager = DockerhostBuildManager("buildroot-dh-fedora", build_conf)
    results = manager.build()

    tasker = DockerTasker()
    tasker.pull_image(registry_image, insecure=True)

    if source_params['provider'] == 'path':
        assert_source_from_path_mounted_ok(caplog, manager.temp_dir)

    assert len(results.build_logs) > 0
    # clean up the build container and the pushed image
    tasker.remove_container(results.container_id)
    tasker.remove_image(registry_image)
def test_tag_and_push_plugin_oci(tmpdir, monkeypatch, use_secret, fail_push, caplog,
                                 reactor_config_map):
    """Exercise TagAndPushPlugin's skopeo push path for OCI images.

    Builds a stub OCI image layout plus its tar export, mocks the skopeo
    subprocess call and the registry HTTP responses, then runs the plugin and
    checks that only the OCI digest is recorded and that the secret password
    never appears in the logs.
    """
    # For now, we don't want to require having a skopeo and an OCI-supporting
    # registry in the test environment
    if MOCK:
        mock_docker()
    else:
        return

    tasker = DockerTasker()
    workflow = DockerBuildWorkflow({
        "provider": "git",
        "uri": "asd"
    }, TEST_IMAGE)
    setattr(workflow, 'builder', X)

    secret_path = None
    if use_secret:
        # NOTE(review): mkdtemp() is never cleaned up here -- consider tmpdir
        temp_dir = mkdtemp()
        with open(os.path.join(temp_dir, ".dockercfg"), "w+") as dockerconfig:
            dockerconfig_contents = {
                LOCALHOST_REGISTRY: {
                    "username": "******",
                    "email": "*****@*****.**",
                    "password": "******"
                }
            }
            dockerconfig.write(json.dumps(dockerconfig_contents))
            dockerconfig.flush()
            secret_path = temp_dir

    CONFIG_DIGEST = 'sha256:b79482f7dcab2a326c1e8c7025a4336d900e99f50db8b35a659fda67b5ebb3c2'
    MEDIA_TYPE = 'application/vnd.oci.image.manifest.v1+json'
    REF_NAME = "app/org.gnome.eog/x86_64/master"

    # Canned OCI manifest the mocked registry will serve back.
    manifest_json = {
        "schemaVersion": 2,
        "mediaType": "application/vnd.oci.image.manifest.v1+json",
        "config": {
            "mediaType": MEDIA_TYPE,
            "digest": CONFIG_DIGEST,
            "size": 314
        },
        "layers": [{
            "mediaType": "application/vnd.oci.image.layer.v1.tar+gzip",
            "digest": "sha256:fd2b341d2ff3751ecdee8d8daacaa650d8a1703360c85d4cfc452d6ec32e147f",
            "size": 1863477
        }],
        "annotations": {
            "org.flatpak.commit-metadata.xa.ref": "YXBwL29yZy5nbm9tZS5lb2cveDg2XzY0L21hc3RlcgAAcw==",  # noqa
            "org.flatpak.body": "Name: org.gnome.eog\nArch: x86_64\nBranch: master\nBuilt with: Flatpak 0.9.7\n",  # noqa
            "org.flatpak.commit-metadata.xa.metadata": "W0FwcGxpY2F0aW9uXQpuYW1lPW9yZy5nbm9tZS5lb2cKcnVudGltZT1vcmcuZmVkb3JhcHJvamVjdC5QbGF0Zm9ybS94ODZfNjQvMjYKc2RrPW9yZy5mZWRvcmFwcm9qZWN0LlBsYXRmb3JtL3g4Nl82NC8yNgpjb21tYW5kPWVvZwoKW0NvbnRleHRdCnNoYXJlZD1pcGM7CnNvY2tldHM9eDExO3dheWxhbmQ7c2Vzc2lvbi1idXM7CmZpbGVzeXN0ZW1zPXhkZy1ydW4vZGNvbmY7aG9zdDt+Ly5jb25maWcvZGNvbmY6cm87CgpbU2Vzc2lvbiBCdXMgUG9saWN5XQpjYS5kZXNydC5kY29uZj10YWxrCgpbRW52aXJvbm1lbnRdCkRDT05GX1VTRVJfQ09ORklHX0RJUj0uY29uZmlnL2Rjb25mCgAAcw==",  # noqa
            "org.flatpak.download-size": "1863477",
            "org.flatpak.commit-metadata.xa.download-size": "AAAAAAAdF/IAdA==",
            "org.flatpak.commit-metadata.xa.installed-size": "AAAAAABDdgAAdA==",
            "org.flatpak.subject": "Export org.gnome.eog",
            "org.flatpak.installed-size": "4421120",
            "org.flatpak.commit": "d7b8789350660724b20643ebb615df466566b6d04682fa32800d3f10116eec54",  # noqa
            "org.flatpak.metadata": "[Application]\nname=org.gnome.eog\nruntime=org.fedoraproject.Platform/x86_64/26\nsdk=org.fedoraproject.Platform/x86_64/26\ncommand=eog\n\n[Context]\nshared=ipc;\nsockets=x11;wayland;session-bus;\nfilesystems=xdg-run/dconf;host;~/.config/dconf:ro;\n\n[Session Bus Policy]\nca.desrt.dconf=talk\n\n[Environment]\nDCONF_USER_CONFIG_DIR=.config/dconf\n",  # noqa
            "org.opencontainers.image.ref.name": REF_NAME,
            "org.flatpak.timestamp": "1499376525"
        }
    }

    # Canned image config blob matching CONFIG_DIGEST above.
    config_json = {
        "created": "2017-07-06T21:28:45Z",
        "architecture": "arm64",
        "os": "linux",
        "config": {
            "Memory": 0,
            "MemorySwap": 0,
            "CpuShares": 0
        },
        "rootfs": {
            "type": "layers",
            "diff_ids": [
                "sha256:4c5160fea65110aa1eb8ca022e2693bb868367c2502855887f21c77247199339"
            ]
        }
    }

    # Add a mock OCI image to exported_image_sequence; this forces the tag_and_push
    # plugin to push with skopeo rather than with 'docker push'

    # Since we are always mocking the push for now, we can get away with a stub image
    oci_dir = os.path.join(str(tmpdir), 'oci-image')
    os.mkdir(oci_dir)
    with open(os.path.join(oci_dir, "index.json"), "w") as f:
        f.write('"Not a real index.json"')
    with open(os.path.join(oci_dir, "oci-layout"), "w") as f:
        f.write('{"imageLayoutVersion": "1.0.0"}')
    os.mkdir(os.path.join(oci_dir, 'blobs'))

    metadata = get_exported_image_metadata(oci_dir, IMAGE_TYPE_OCI)
    metadata['ref_name'] = REF_NAME
    workflow.exported_image_sequence.append(metadata)

    # Also export the layout as a tarball (second entry in the sequence).
    oci_tarpath = os.path.join(str(tmpdir), 'oci-image.tar')
    with open(oci_tarpath, "wb") as f:
        with tarfile.TarFile(mode="w", fileobj=f) as tf:
            # NOTE(review): the loop variable shadows the outer file handle
            # ``f``; harmless here because the file object is not used again.
            for f in os.listdir(oci_dir):
                tf.add(os.path.join(oci_dir, f), f)

    metadata = get_exported_image_metadata(oci_tarpath, IMAGE_TYPE_OCI_TAR)
    metadata['ref_name'] = REF_NAME
    workflow.exported_image_sequence.append(metadata)

    # Mock the subprocess call to skopeo
    def check_check_output(args, **kwargs):
        if fail_push:
            raise subprocess.CalledProcessError(returncode=1, cmd=args, output="Failed")
        assert args[0] == 'skopeo'
        if use_secret:
            assert '--dest-creds=user:mypassword' in args
        assert '--dest-tls-verify=false' in args
        assert args[-2] == 'oci:' + oci_dir + ':' + REF_NAME
        assert args[-1] == 'docker://' + LOCALHOST_REGISTRY + '/' + TEST_IMAGE_NAME
        return ''

    (flexmock(subprocess)
     .should_receive("check_output")
     .once()
     .replace_with(check_check_output))

    # Mock out the response from the registry once the OCI image is uploaded
    manifest_latest_url = "https://{}/v2/{}/manifests/latest".format(
        LOCALHOST_REGISTRY, TEST_IMAGE)
    manifest_url = "https://{}/v2/{}/manifests/{}".format(
        LOCALHOST_REGISTRY, TEST_IMAGE, DIGEST_OCI)
    config_blob_url = "https://{}/v2/{}/blobs/{}".format(
        LOCALHOST_REGISTRY, TEST_IMAGE, CONFIG_DIGEST)

    manifest_response = requests.Response()
    (flexmock(manifest_response,
              raise_for_status=lambda: None,
              json=manifest_json,
              headers={
                  'Content-Type': MEDIA_TYPE,
                  'Docker-Content-Digest': DIGEST_OCI
              }))

    manifest_unacceptable_response = requests.Response()
    (flexmock(manifest_unacceptable_response,
              status_code=404,
              json={"errors": [{
                  "code": "MANIFEST_UNKNOWN"
              }]}))

    config_blob_response = requests.Response()
    (flexmock(config_blob_response,
              raise_for_status=lambda: None,
              json=config_json))

    # Route registry URLs to the canned responses above; honour the Accept
    # header for the "latest" manifest lookup.
    def custom_get(method, url, headers, **kwargs):
        if url == manifest_latest_url:
            if headers['Accept'] == MEDIA_TYPE:
                return manifest_response
            else:
                return manifest_unacceptable_response
        if url == manifest_url:
            return manifest_response
        if url == config_blob_url:
            return config_blob_response

    mock_get_retry_session()
    (flexmock(
        requests.Session).should_receive('request').replace_with(custom_get))

    if reactor_config_map:
        workflow.plugin_workspace[ReactorConfigPlugin.key] = {}
        workflow.plugin_workspace[ReactorConfigPlugin.key][WORKSPACE_CONF_KEY] =\
            ReactorConfig({'version': 1,
                           'registries': [{
                               'url': LOCALHOST_REGISTRY,
                               'insecure': True,
                               'auth': {'cfg_path': secret_path},
                           }]})

    runner = PostBuildPluginsRunner(tasker, workflow, [{
        'name': TagAndPushPlugin.key,
        'args': {
            'registries': {
                LOCALHOST_REGISTRY: {
                    'insecure': True,
                    'secret': secret_path
                }
            }
        },
    }])

    with caplog.at_level(logging.DEBUG):
        if fail_push:
            with pytest.raises(PluginFailedException):
                output = runner.run()
        else:
            output = runner.run()

    # the secret must never leak into the log output
    for r in caplog.records:
        assert 'mypassword' not in r.getMessage()

    if not fail_push:
        image = output[TagAndPushPlugin.key][0]
        tasker.remove_image(image)
        assert len(workflow.push_conf.docker_registries) > 0

        # only the OCI digest should be recorded; v1/v2 stay unset
        assert workflow.push_conf.docker_registries[0].digests[
            TEST_IMAGE_NAME].v1 is None
        assert workflow.push_conf.docker_registries[0].digests[
            TEST_IMAGE_NAME].v2 is None
        assert workflow.push_conf.docker_registries[0].digests[
            TEST_IMAGE_NAME].oci == DIGEST_OCI

        assert workflow.push_conf.docker_registries[0].config is config_json
def test_tag_and_push_plugin(
        tmpdir, monkeypatch, image_name, logs, should_raise, has_config,
        use_secret, reactor_config_map, file_name, dockerconfig_contents):
    """Exercise TagAndPushPlugin end-to-end against a mocked docker daemon and registry.

    The docker push/login calls and the registry HTTP API (manifest + config blob
    endpoints) are all mocked; afterwards the digests recorded in
    workflow.push_conf are checked against the mocked v1/v2 responses.

    Parameters are presumably pytest fixtures/parametrize values — TODO confirm:
    image_name/logs/should_raise/has_config/use_secret/file_name/
    dockerconfig_contents drive the scenario; reactor_config_map switches
    registry configuration between plugin args and reactor config workspace.
    """
    if MOCK:
        mock_docker()
        # Simulate a successful docker push/login regardless of credentials.
        flexmock(docker.APIClient,
                 push=lambda iid, **kwargs: iter(logs),
                 login=lambda username, registry, dockercfg_path: {'Status': 'Login Succeeded'})

    tasker = DockerTasker(retry_times=0)
    workflow = DockerBuildWorkflow({
        "provider": "git",
        "uri": "asd"
    }, TEST_IMAGE)
    workflow.tag_conf.add_primary_image(image_name)
    setattr(workflow, 'builder', X)

    secret_path = None
    if use_secret:
        # Write a throwaway docker auth file for the plugin's 'secret' option.
        temp_dir = mkdtemp()
        with open(os.path.join(temp_dir, file_name), "w+") as dockerconfig:
            dockerconfig.write(json.dumps(dockerconfig_contents))
            dockerconfig.flush()
            secret_path = temp_dir

    CONFIG_DIGEST = 'sha256:2c782e3a93d34d89ea4cf54052768be117caed54803263dd1f3798ce42aac14e'
    media_type = 'application/vnd.docker.distribution.manifest.v2+json'

    # Canned v2 schema 2 manifest returned by the mocked registry.
    manifest_json = {
        'config': {
            'digest': CONFIG_DIGEST,
            'mediaType': 'application/octet-stream',
            'size': 4132
        },
        'layers': [{
            'digest': 'sha256:16dc1f96e3a1bb628be2e00518fec2bb97bd5933859de592a00e2eb7774b6ecf',
            'mediaType': 'application/vnd.docker.image.rootfs.diff.tar.gzip',
            'size': 71907148
        }, {
            'digest': 'sha256:cebc0565e1f096016765f55fde87a6f60fdb1208c0b5017e35a856ff578f5ccb',
            'mediaType': 'application/vnd.docker.image.rootfs.diff.tar.gzip',
            'size': 3945724
        }],
        'mediaType': media_type,
        'schemaVersion': 2
    }

    # Canned image config blob (as served from the blobs/ endpoint).
    config_json = {
        'config': {
            'Size': 12509448,
            'architecture': 'amd64',
            'author': 'Red Hat, Inc.',
            'config': {
                'Cmd': ['/bin/rsyslog.sh'],
                'Entrypoint': None,
                'Image': 'c3fb36aafd5692d2a45115d32bb120edb6edf6c0c3c783ed6592a8dab969fb88',
                'Labels': {
                    'Architecture': 'x86_64',
                    'Authoritative_Registry': 'registry.access.redhat.com',
                    'BZComponent': 'rsyslog-docker',
                    'Name': 'rhel7/rsyslog',
                    'Release': '28.vrutkovs.31',
                    'Vendor': 'Red Hat, Inc.',
                    'Version': '7.2',
                },
            },
            'created': '2016-10-07T10:20:05.38595Z',
            'docker_version': '1.9.1',
            'id': '1ca220fbc2aed7c141b236c8729fe59db5771b32bf2da74e4a663407f32ec2a2',
            'os': 'linux',
            'parent': '47eed7a8ea3f131e9281ae09fcbfb0041872fd8b74a048f1c739302c8148505d'
        },
        'container_config': {
            'foo': 'bar',
            'spam': 'maps'
        },
        'id': '1ca220fbc2aed7c141b236c8729fe59db5771b32bf2da74e4a663407f32ec2a2',
        'parent_id': 'c3fb36aafd5692d2a45115d32bb120edb6edf6c0c3c783ed6592a8dab969fb88'
    }

    # To test out the lack of a config, we really should be testing what happens
    # when we only return a v1 response and not a v2 response at all; what are
    # doing now is simply testing that if we return a None instead of json for the
    # config blob, that None is stored rather than json
    if not has_config:
        config_json = None

    manifest_latest_url = "https://{}/v2/{}/manifests/latest".format(
        LOCALHOST_REGISTRY, TEST_IMAGE)
    manifest_url = "https://{}/v2/{}/manifests/{}".format(
        LOCALHOST_REGISTRY, TEST_IMAGE, DIGEST_V2)
    config_blob_url = "https://{}/v2/{}/blobs/{}".format(
        LOCALHOST_REGISTRY, TEST_IMAGE, CONFIG_DIGEST)

    # We return our v2 manifest in the mocked v1 response as a placeholder - only the
    # digest matters anyways
    manifest_response_v1 = requests.Response()
    (flexmock(manifest_response_v1,
              status_code=200,
              json=manifest_json,
              headers={
                  'Content-Type': 'application/vnd.docker.distribution.manifest.v1+json',
                  'Docker-Content-Digest': DIGEST_V1
              }))

    manifest_response_v2 = requests.Response()
    (flexmock(manifest_response_v2,
              status_code=200,
              json=manifest_json,
              headers={
                  'Content-Type': 'application/vnd.docker.distribution.manifest.v2+json',
                  'Docker-Content-Digest': DIGEST_V2
              }))

    manifest_response_v2_list = requests.Response()
    (flexmock(manifest_response_v2_list,
              raise_for_status=lambda: None,
              json=manifest_json,
              headers={
                  'Content-Type': 'application/vnd.docker.distribution.manifest.list.v2+json',
              }))

    config_blob_response = requests.Response()
    (flexmock(config_blob_response, status_code=200, json=config_json))

    def custom_get(method, url, headers, **kwargs):
        # Stand-in for requests.Session.request, dispatching on URL/Accept header.
        if url == manifest_latest_url:
            # For a manifest stored as v2 or v1, the docker registry defaults to
            # returning a v1 manifest if a v2 manifest is not explicitly requested
            if headers['Accept'] == 'application/vnd.docker.distribution.manifest.v2+json':
                return manifest_response_v2
            else:
                return manifest_response_v1

            # NOTE(review): both branches above return, so this manifest-list check
            # looks unreachable — confirm intended placement.
            if headers['Accept'] == 'application/vnd.docker.distribution.manifest.list.v2+json':
                return manifest_response_v2_list

        if url == manifest_url:
            return manifest_response_v2

        if url == config_blob_url:
            return config_blob_response

    mock_get_retry_session()

    (flexmock(
        requests.Session).should_receive('request').replace_with(custom_get))

    if reactor_config_map:
        # Registry configuration supplied via reactor config workspace instead
        # of (in addition to) plugin args.
        workflow.plugin_workspace[ReactorConfigPlugin.key] = {}
        workflow.plugin_workspace[ReactorConfigPlugin.key][WORKSPACE_CONF_KEY] =\
            ReactorConfig({'version': 1, 'registries': [{
                'url': LOCALHOST_REGISTRY,
                'insecure': True,
                'auth': {'cfg_path': secret_path},
            }]})

    runner = PostBuildPluginsRunner(tasker, workflow, [{
        'name': TagAndPushPlugin.key,
        'args': {
            'registries': {
                LOCALHOST_REGISTRY: {
                    'insecure': True,
                    'secret': secret_path
                }
            }
        },
    }])

    if should_raise:
        with pytest.raises(PluginFailedException):
            runner.run()
    else:
        output = runner.run()
        image = output[TagAndPushPlugin.key][0]
        tasker.remove_image(image)
        assert len(workflow.push_conf.docker_registries) > 0

        if MOCK:
            # we only test this when mocking docker because we don't expect
            # running actual docker against v2 registry
            expected_digest = ManifestDigest(v1=DIGEST_V1, v2=DIGEST_V2, oci=None)
            assert workflow.push_conf.docker_registries[0].digests[image_name].v1 == \
                expected_digest.v1
            assert workflow.push_conf.docker_registries[0].digests[image_name].v2 == \
                expected_digest.v2
            assert workflow.push_conf.docker_registries[0].digests[image_name].oci == \
                expected_digest.oci

            if has_config:
                assert isinstance(
                    workflow.push_conf.docker_registries[0].config, dict)
            else:
                assert workflow.push_conf.docker_registries[0].config is None
class InsideBuilder(LastLogger, BuilderStateMachine):
    """
    This is expected to run within container
    """

    def __init__(self, source, image, **kwargs):
        """
        :param source: source object; must provide get_dockerfile_path()
        :param image: str, tag for the image to be built
        """
        LastLogger.__init__(self)
        BuilderStateMachine.__init__(self)

        self.tasker = DockerTasker()

        # arguments for build
        self.source = source
        self.base_image_id = None
        self.image_id = None
        self.built_image_info = None
        self.image = ImageName.parse(image)

        # get info about base image from dockerfile
        self.df_path, self.df_dir = self.source.get_dockerfile_path()
        self.base_image = ImageName.parse(DockerfileParser(self.df_path).baseimage)
        logger.debug("base image specified in dockerfile = '%s'", self.base_image)
        if not self.base_image.tag:
            # an explicit tag is needed for pulls/pushes later on
            self.base_image.tag = 'latest'

    def build(self):
        """
        build image inside current environment;
        it's expected this may run within (privileged) docker container

        :return: image string (e.g. fedora-python:34)
        """
        logger.info("building image '%s' inside current environment", self.image)
        self._ensure_not_built()
        logger.debug("using dockerfile:\n%s", DockerfileParser(self.df_path).content)
        logs_gen = self.tasker.build_image_from_path(
            self.df_dir,
            self.image,
        )
        logger.debug("build is submitted, waiting for it to finish")
        command_result = wait_for_command(logs_gen)  # wait for build to finish
        # fixed typo: "succesful" -> "successful"
        logger.info("build was %ssuccessful!", 'un' if command_result.is_failed() else '')
        self.is_built = True
        if not command_result.is_failed():
            self.built_image_info = self.get_built_image_info()
            # self.base_image_id = self.built_image_info['ParentId']  # parent id is not base image!
            self.image_id = self.built_image_info['Id']
        build_result = BuildResult(command_result, self.image_id)
        return build_result

    def push_built_image(self, registry, insecure=False):
        """
        push built image to provided registry

        :param registry: str
        :param insecure: bool, allow connecting to registry over plain http
        :return: str, image
        """
        logger.info("pushing built image '%s' to registry '%s'", self.image, registry)
        self._ensure_is_built()
        if not registry:
            logger.warning("no registry specified; skipping")
            return

        if self.image.registry and self.image.registry != registry:
            logger.error("registry in image name doesn't match provided target registry, "
                         "image registry = '%s', target = '%s'",
                         self.image.registry, registry)
            raise RuntimeError(
                "Registry in image name doesn't match target registry. Image: '%s', Target: '%s'"
                % (self.image.registry, registry))

        target_image = self.image.copy()
        target_image.registry = registry
        response = self.tasker.tag_and_push_image(self.image, target_image, insecure=insecure)
        # the registry-qualified tag is only needed for the push itself
        self.tasker.remove_image(target_image)
        return response

    def inspect_base_image(self):
        """
        inspect base image

        :return: dict
        """
        logger.info("inspecting base image '%s'", self.base_image)
        inspect_data = self.tasker.inspect_image(self.base_image)
        return inspect_data

    def inspect_built_image(self):
        """
        inspect built image

        :return: dict
        """
        logger.info("inspecting built image '%s'", self.image_id)
        self._ensure_is_built()
        # dict with lots of data, see man docker-inspect
        inspect_data = self.tasker.inspect_image(self.image_id)
        return inspect_data

    def get_base_image_info(self):
        """
        query docker about base image

        :return dict
        """
        logger.info("getting information about base image '%s'", self.base_image)
        image_info = self.tasker.get_image_info_by_image_name(self.base_image)
        items_count = len(image_info)
        if items_count == 1:
            return image_info[0]
        elif items_count <= 0:
            logger.error("image '%s' not found", self.base_image)
            # bugfix: the image name was previously passed as a second exception
            # argument ("image '%s' not found", image) instead of %-formatted,
            # producing an unformatted message (cf. get_built_image_info below)
            raise RuntimeError("image '%s' not found" % self.base_image)
        else:
            logger.error("multiple (%d) images found for image '%s'",
                         items_count, self.base_image)
            raise RuntimeError("multiple (%d) images found for image '%s'"
                               % (items_count, self.base_image))

    def get_built_image_info(self):
        """
        query docker about built image

        :return dict
        """
        logger.info("getting information about built image '%s'", self.image)
        self._ensure_is_built()
        image_info = self.tasker.get_image_info_by_image_name(self.image)
        items_count = len(image_info)
        if items_count == 1:
            return image_info[0]
        elif items_count <= 0:
            logger.error("image '%s' not found", self.image)
            raise RuntimeError("image '%s' not found" % self.image)
        else:
            logger.error("multiple (%d) images found for image '%s'",
                         items_count, self.image)
            raise RuntimeError("multiple (%d) images found for image '%s'"
                               % (items_count, self.image))
def test_tag_and_push_plugin_oci(
        tmpdir, monkeypatch, use_secret, fail_push, caplog, reactor_config_map):
    """Exercise TagAndPushPlugin's OCI (skopeo) push path with all externals mocked.

    A stub OCI image layout (and a tarred copy) is appended to
    workflow.exported_image_sequence, which makes the plugin push via skopeo
    instead of 'docker push'. The skopeo subprocess call and the registry HTTP
    API are mocked; the recorded OCI digest and config blob are then verified.

    Parameters are presumably pytest fixtures/parametrize values — TODO confirm.
    """
    # For now, we don't want to require having a skopeo and an OCI-supporting
    # registry in the test environment
    if MOCK:
        mock_docker()
    else:
        return

    tasker = DockerTasker()
    workflow = DockerBuildWorkflow({"provider": "git", "uri": "asd"}, TEST_IMAGE)
    setattr(workflow, 'builder', X)

    secret_path = None
    if use_secret:
        # Write a throwaway .dockercfg for the plugin's 'secret' option.
        temp_dir = mkdtemp()
        with open(os.path.join(temp_dir, ".dockercfg"), "w+") as dockerconfig:
            dockerconfig_contents = {
                LOCALHOST_REGISTRY: {
                    "username": "******",
                    "email": "*****@*****.**",
                    "password": "******"}}
            dockerconfig.write(json.dumps(dockerconfig_contents))
            dockerconfig.flush()
            secret_path = temp_dir

    CONFIG_DIGEST = 'sha256:b79482f7dcab2a326c1e8c7025a4336d900e99f50db8b35a659fda67b5ebb3c2'
    MEDIA_TYPE = 'application/vnd.oci.image.manifest.v1+json'
    REF_NAME = "app/org.gnome.eog/x86_64/master"

    # Canned OCI manifest (flatpak-style annotations) served by the mocked registry.
    manifest_json = {
        "schemaVersion": 2,
        "mediaType": "application/vnd.oci.image.manifest.v1+json",
        "config": {
            "mediaType": MEDIA_TYPE,
            "digest": CONFIG_DIGEST,
            "size": 314
        },
        "layers": [
            {
                "mediaType": "application/vnd.oci.image.layer.v1.tar+gzip",
                "digest": "sha256:fd2b341d2ff3751ecdee8d8daacaa650d8a1703360c85d4cfc452d6ec32e147f",
                "size": 1863477
            }
        ],
        "annotations": {
            "org.flatpak.commit-metadata.xa.ref": "YXBwL29yZy5nbm9tZS5lb2cveDg2XzY0L21hc3RlcgAAcw==",  # noqa
            "org.flatpak.body": "Name: org.gnome.eog\nArch: x86_64\nBranch: master\nBuilt with: Flatpak 0.9.7\n",  # noqa
            "org.flatpak.commit-metadata.xa.metadata": "W0FwcGxpY2F0aW9uXQpuYW1lPW9yZy5nbm9tZS5lb2cKcnVudGltZT1vcmcuZmVkb3JhcHJvamVjdC5QbGF0Zm9ybS94ODZfNjQvMjYKc2RrPW9yZy5mZWRvcmFwcm9qZWN0LlBsYXRmb3JtL3g4Nl82NC8yNgpjb21tYW5kPWVvZwoKW0NvbnRleHRdCnNoYXJlZD1pcGM7CnNvY2tldHM9eDExO3dheWxhbmQ7c2Vzc2lvbi1idXM7CmZpbGVzeXN0ZW1zPXhkZy1ydW4vZGNvbmY7aG9zdDt+Ly5jb25maWcvZGNvbmY6cm87CgpbU2Vzc2lvbiBCdXMgUG9saWN5XQpjYS5kZXNydC5kY29uZj10YWxrCgpbRW52aXJvbm1lbnRdCkRDT05GX1VTRVJfQ09ORklHX0RJUj0uY29uZmlnL2Rjb25mCgAAcw==",  # noqa
            "org.flatpak.download-size": "1863477",
            "org.flatpak.commit-metadata.xa.download-size": "AAAAAAAdF/IAdA==",
            "org.flatpak.commit-metadata.xa.installed-size": "AAAAAABDdgAAdA==",
            "org.flatpak.subject": "Export org.gnome.eog",
            "org.flatpak.installed-size": "4421120",
            "org.flatpak.commit": "d7b8789350660724b20643ebb615df466566b6d04682fa32800d3f10116eec54",  # noqa
            "org.flatpak.metadata": "[Application]\nname=org.gnome.eog\nruntime=org.fedoraproject.Platform/x86_64/26\nsdk=org.fedoraproject.Platform/x86_64/26\ncommand=eog\n\n[Context]\nshared=ipc;\nsockets=x11;wayland;session-bus;\nfilesystems=xdg-run/dconf;host;~/.config/dconf:ro;\n\n[Session Bus Policy]\nca.desrt.dconf=talk\n\n[Environment]\nDCONF_USER_CONFIG_DIR=.config/dconf\n",  # noqa
            "org.opencontainers.image.ref.name": REF_NAME,
            "org.flatpak.timestamp": "1499376525"
        }
    }

    # Canned OCI image config blob.
    config_json = {
        "created": "2017-07-06T21:28:45Z",
        "architecture": "arm64",
        "os": "linux",
        "config": {
            "Memory": 0,
            "MemorySwap": 0,
            "CpuShares": 0
        },
        "rootfs": {
            "type": "layers",
            "diff_ids": [
                "sha256:4c5160fea65110aa1eb8ca022e2693bb868367c2502855887f21c77247199339"
            ]
        }
    }

    # Add a mock OCI image to exported_image_sequence; this forces the tag_and_push
    # plugin to push with skopeo rather than with 'docker push'

    # Since we are always mocking the push for now, we can get away with a stub image
    oci_dir = os.path.join(str(tmpdir), 'oci-image')
    os.mkdir(oci_dir)
    with open(os.path.join(oci_dir, "index.json"), "w") as f:
        f.write('"Not a real index.json"')
    with open(os.path.join(oci_dir, "oci-layout"), "w") as f:
        f.write('{"imageLayoutVersion": "1.0.0"}')
    os.mkdir(os.path.join(oci_dir, 'blobs'))

    metadata = get_exported_image_metadata(oci_dir, IMAGE_TYPE_OCI)
    metadata['ref_name'] = REF_NAME
    workflow.exported_image_sequence.append(metadata)

    # Also register a tarred copy of the stub layout.
    oci_tarpath = os.path.join(str(tmpdir), 'oci-image.tar')
    with open(oci_tarpath, "wb") as f:
        with tarfile.TarFile(mode="w", fileobj=f) as tf:
            # NOTE(review): the loop variable shadows the open file handle 'f';
            # harmless here since TarFile holds its own fileobj reference, but
            # worth renaming.
            for f in os.listdir(oci_dir):
                tf.add(os.path.join(oci_dir, f), f)

    metadata = get_exported_image_metadata(oci_tarpath, IMAGE_TYPE_OCI_TAR)
    metadata['ref_name'] = REF_NAME
    workflow.exported_image_sequence.append(metadata)

    # Mock the subprocess call to skopeo

    def check_check_output(args, **kwargs):
        # Verifies the skopeo command line; raises when simulating a failed push.
        if fail_push:
            raise subprocess.CalledProcessError(returncode=1, cmd=args, output="Failed")
        assert args[0] == 'skopeo'
        if use_secret:
            assert '--authfile=' + os.path.join(secret_path, '.dockercfg') in args
        assert '--dest-tls-verify=false' in args
        assert args[-2] == 'oci:' + oci_dir + ':' + REF_NAME
        assert args[-1] == 'docker://' + LOCALHOST_REGISTRY + '/' + TEST_IMAGE_NAME
        return ''

    (flexmock(subprocess)
     .should_receive("check_output")
     .once()
     .replace_with(check_check_output))

    # Mock out the response from the registry once the OCI image is uploaded

    manifest_latest_url = "https://{}/v2/{}/manifests/latest".format(LOCALHOST_REGISTRY, TEST_IMAGE)
    manifest_url = "https://{}/v2/{}/manifests/{}".format(
        LOCALHOST_REGISTRY, TEST_IMAGE, DIGEST_OCI)
    config_blob_url = "https://{}/v2/{}/blobs/{}".format(
        LOCALHOST_REGISTRY, TEST_IMAGE, CONFIG_DIGEST)

    manifest_response = requests.Response()
    (flexmock(manifest_response,
              raise_for_status=lambda: None,
              json=manifest_json,
              headers={
                  'Content-Type': MEDIA_TYPE,
                  'Docker-Content-Digest': DIGEST_OCI
              }))

    manifest_unacceptable_response = requests.Response()
    (flexmock(manifest_unacceptable_response,
              status_code=404,
              json={
                  "errors": [{"code": "MANIFEST_UNKNOWN"}]
              }))

    config_blob_response = requests.Response()
    (flexmock(config_blob_response, raise_for_status=lambda: None, json=config_json))

    def custom_get(method, url, headers, **kwargs):
        # Stand-in for requests.Session.request; only the OCI media type is
        # acceptable for the 'latest' manifest.
        if url == manifest_latest_url:
            if headers['Accept'] == MEDIA_TYPE:
                return manifest_response
            else:
                return manifest_unacceptable_response

        if url == manifest_url:
            return manifest_response

        if url == config_blob_url:
            return config_blob_response

    mock_get_retry_session()

    (flexmock(requests.Session)
     .should_receive('request')
     .replace_with(custom_get))

    if reactor_config_map:
        # Registry configuration supplied via reactor config workspace.
        workflow.plugin_workspace[ReactorConfigPlugin.key] = {}
        workflow.plugin_workspace[ReactorConfigPlugin.key][WORKSPACE_CONF_KEY] =\
            ReactorConfig({'version': 1, 'registries': [{
                'url': LOCALHOST_REGISTRY,
                'insecure': True,
                'auth': {'cfg_path': secret_path},
            }]})

    runner = PostBuildPluginsRunner(
        tasker,
        workflow,
        [{
            'name': TagAndPushPlugin.key,
            'args': {
                'registries': {
                    LOCALHOST_REGISTRY: {
                        'insecure': True,
                        'secret': secret_path
                    }
                }
            },
        }]
    )

    with caplog.at_level(logging.DEBUG):
        if fail_push:
            with pytest.raises(PluginFailedException):
                output = runner.run()
        else:
            output = runner.run()

    if not fail_push:
        image = output[TagAndPushPlugin.key][0]
        tasker.remove_image(image)

        # Only the OCI digest should be recorded; v1/v2 stay unset for this path.
        assert len(workflow.push_conf.docker_registries) > 0
        assert workflow.push_conf.docker_registries[0].digests[TEST_IMAGE_NAME].v1 is None
        assert workflow.push_conf.docker_registries[0].digests[TEST_IMAGE_NAME].v2 is None
        assert workflow.push_conf.docker_registries[0].digests[TEST_IMAGE_NAME].oci == DIGEST_OCI

        assert workflow.push_conf.docker_registries[0].config is config_json
def test_tag_and_push_plugin(
        tmpdir, monkeypatch, caplog, image_name, logs, should_raise, has_config, missing_v2,
        use_secret, reactor_config_map, file_name, dockerconfig_contents):
    """Exercise TagAndPushPlugin against a mocked docker daemon and v2 registry,
    including the case where the registry has no v2 schema 2 manifest (missing_v2).

    When missing_v2 is set the mocked registry answers v2-manifest requests with
    404 MANIFEST_UNKNOWN, and (with reactor_config_map and group_manifests) the
    plugin is expected to log a retry message.

    Parameters are presumably pytest fixtures/parametrize values — TODO confirm.
    """
    if MOCK:
        mock_docker()
        # Simulate a successful docker push/login regardless of credentials.
        flexmock(docker.APIClient,
                 push=lambda iid, **kwargs: iter(logs),
                 login=lambda username, registry, dockercfg_path: {'Status': 'Login Succeeded'})

    tasker = DockerTasker(retry_times=0)
    workflow = DockerBuildWorkflow({"provider": "git", "uri": "asd"}, TEST_IMAGE)
    workflow.tag_conf.add_primary_image(image_name)
    setattr(workflow, 'builder', X)

    secret_path = None
    if use_secret:
        # Write a throwaway docker auth file for the plugin's 'secret' option.
        temp_dir = mkdtemp()
        with open(os.path.join(temp_dir, file_name), "w+") as dockerconfig:
            dockerconfig.write(json.dumps(dockerconfig_contents))
            dockerconfig.flush()
            secret_path = temp_dir

    CONFIG_DIGEST = 'sha256:2c782e3a93d34d89ea4cf54052768be117caed54803263dd1f3798ce42aac14e'
    media_type = 'application/vnd.docker.distribution.manifest.v2+json'

    # Canned v2 schema 2 manifest returned by the mocked registry.
    manifest_json = {
        'config': {
            'digest': CONFIG_DIGEST,
            'mediaType': 'application/octet-stream',
            'size': 4132
        },
        'layers': [
            {
                'digest': 'sha256:16dc1f96e3a1bb628be2e00518fec2bb97bd5933859de592a00e2eb7774b6ecf',
                'mediaType': 'application/vnd.docker.image.rootfs.diff.tar.gzip',
                'size': 71907148
            },
            {
                'digest': 'sha256:cebc0565e1f096016765f55fde87a6f60fdb1208c0b5017e35a856ff578f5ccb',
                'mediaType': 'application/vnd.docker.image.rootfs.diff.tar.gzip',
                'size': 3945724
            }
        ],
        'mediaType': media_type,
        'schemaVersion': 2
    }

    # Canned image config blob (as served from the blobs/ endpoint).
    config_json = {
        'config': {
            'Size': 12509448,
            'architecture': 'amd64',
            'author': 'Red Hat, Inc.',
            'config': {
                'Cmd': ['/bin/rsyslog.sh'],
                'Entrypoint': None,
                'Image': 'c3fb36aafd5692d2a45115d32bb120edb6edf6c0c3c783ed6592a8dab969fb88',
                'Labels': {
                    'Architecture': 'x86_64',
                    'Authoritative_Registry': 'registry.access.redhat.com',
                    'BZComponent': 'rsyslog-docker',
                    'Name': 'rhel7/rsyslog',
                    'Release': '28.vrutkovs.31',
                    'Vendor': 'Red Hat, Inc.',
                    'Version': '7.2',
                },
            },
            'created': '2016-10-07T10:20:05.38595Z',
            'docker_version': '1.9.1',
            'id': '1ca220fbc2aed7c141b236c8729fe59db5771b32bf2da74e4a663407f32ec2a2',
            'os': 'linux',
            'parent': '47eed7a8ea3f131e9281ae09fcbfb0041872fd8b74a048f1c739302c8148505d'
        },
        'container_config': {
            'foo': 'bar',
            'spam': 'maps'
        },
        'id': '1ca220fbc2aed7c141b236c8729fe59db5771b32bf2da74e4a663407f32ec2a2',
        'parent_id': 'c3fb36aafd5692d2a45115d32bb120edb6edf6c0c3c783ed6592a8dab969fb88'
    }

    # To test out the lack of a config, we really should be testing what happens
    # when we only return a v1 response and not a v2 response at all; what are
    # doing now is simply testing that if we return a None instead of json for the
    # config blob, that None is stored rather than json
    if not has_config:
        config_json = None

    manifest_latest_url = "https://{}/v2/{}/manifests/latest".format(LOCALHOST_REGISTRY, TEST_IMAGE)
    manifest_url = "https://{}/v2/{}/manifests/{}".format(LOCALHOST_REGISTRY, TEST_IMAGE, DIGEST_V2)
    config_blob_url = "https://{}/v2/{}/blobs/{}".format(
        LOCALHOST_REGISTRY, TEST_IMAGE, CONFIG_DIGEST)

    # We return our v2 manifest in the mocked v1 response as a placeholder - only the
    # digest matters anyways
    manifest_response_v1 = requests.Response()
    (flexmock(manifest_response_v1,
              status_code=200,
              json=manifest_json,
              headers={
                  'Content-Type': 'application/vnd.docker.distribution.manifest.v1+json',
                  'Docker-Content-Digest': DIGEST_V1
              }))

    manifest_response_v2 = requests.Response()
    (flexmock(manifest_response_v2,
              status_code=200,
              json=manifest_json,
              headers={
                  'Content-Type': 'application/vnd.docker.distribution.manifest.v2+json',
                  'Docker-Content-Digest': DIGEST_V2
              }))

    manifest_response_v2_list = requests.Response()
    (flexmock(manifest_response_v2_list,
              raise_for_status=lambda: None,
              json=manifest_json,
              headers={
                  'Content-Type': 'application/vnd.docker.distribution.manifest.list.v2+json',
              }))

    config_blob_response = requests.Response()
    (flexmock(config_blob_response, status_code=200, json=config_json))

    manifest_unknown_response = requests.Response()
    (flexmock(manifest_unknown_response,
              status_code=404,
              json={
                  "errors": [{"code": "MANIFEST_UNKNOWN"}]
              }))

    def custom_get(method, url, headers, **kwargs):
        # Stand-in for requests.Session.request, dispatching on URL/Accept header;
        # missing_v2 turns every v2-manifest answer into 404 MANIFEST_UNKNOWN.
        if url == manifest_latest_url:
            # For a manifest stored as v2 or v1, the docker registry defaults to
            # returning a v1 manifest if a v2 manifest is not explicitly requested
            if headers['Accept'] == 'application/vnd.docker.distribution.manifest.v2+json':
                if missing_v2:
                    return manifest_unknown_response
                else:
                    return manifest_response_v2
            else:
                return manifest_response_v1

            # NOTE(review): both branches above return, so this manifest-list check
            # looks unreachable — confirm intended placement.
            if headers['Accept'] == 'application/vnd.docker.distribution.manifest.list.v2+json':
                return manifest_response_v2_list

        if url == manifest_url:
            if missing_v2:
                return manifest_unknown_response
            else:
                return manifest_response_v2

        if url == config_blob_url:
            return config_blob_response

    mock_get_retry_session()

    (flexmock(requests.Session)
     .should_receive('request')
     .replace_with(custom_get))

    # Neutralize retry back-off sleeps so the test stays fast.
    (flexmock(time)
     .should_receive('sleep')
     .and_return(None))

    if reactor_config_map:
        # Registry configuration supplied via reactor config workspace;
        # group_manifests mirrors missing_v2 to trigger the retry path.
        workflow.plugin_workspace[ReactorConfigPlugin.key] = {}
        workflow.plugin_workspace[ReactorConfigPlugin.key][WORKSPACE_CONF_KEY] =\
            ReactorConfig({'version': 1,
                           'registries': [{
                               'url': LOCALHOST_REGISTRY,
                               'insecure': True,
                               'auth': {'cfg_path': secret_path}}],
                           'group_manifests': missing_v2})

    runner = PostBuildPluginsRunner(
        tasker,
        workflow,
        [{
            'name': TagAndPushPlugin.key,
            'args': {
                'registries': {
                    LOCALHOST_REGISTRY: {
                        'insecure': True,
                        'secret': secret_path
                    }
                }
            },
        }]
    )

    if should_raise:
        with pytest.raises(PluginFailedException):
            runner.run()
    else:
        output = runner.run()
        image = output[TagAndPushPlugin.key][0]
        tasker.remove_image(image)
        assert len(workflow.push_conf.docker_registries) > 0

        if MOCK:
            # we only test this when mocking docker because we don't expect
            # running actual docker against v2 registry
            if missing_v2:
                expected_digest = ManifestDigest(v1=DIGEST_V1, v2=None, oci=None)
                if reactor_config_map:
                    assert "Retrying push because V2 schema 2" in caplog.text
                else:
                    assert "Retrying push because V2 schema 2" not in caplog.text
            else:
                expected_digest = ManifestDigest(v1=DIGEST_V1, v2=DIGEST_V2, oci=None)
                assert workflow.push_conf.docker_registries[0].digests[image_name].v2 == \
                    expected_digest.v2

            assert workflow.push_conf.docker_registries[0].digests[image_name].v1 == \
                expected_digest.v1
            assert workflow.push_conf.docker_registries[0].digests[image_name].oci == \
                expected_digest.oci

            if has_config:
                assert isinstance(workflow.push_conf.docker_registries[0].config, dict)
            else:
                assert workflow.push_conf.docker_registries[0].config is None
def test_pull_base_image_plugin(parent_registry, df_base, expected, not_expected,
                                reactor_config_map, workflow_callback=None,
                                check_platforms=False):
    """Run PullBaseImagePlugin against a mocked docker and verify pulled tags.

    :param parent_registry: registry the base image should be pulled from
                            (None triggers a failure when reactor_config_map is set)
    :param df_base: base image string as it would appear in the Dockerfile
    :param expected: iterable of image names that must exist after the run
    :param not_expected: iterable of image names that must NOT exist after the run
    :param reactor_config_map: bool, configure source_registry via reactor config
    :param workflow_callback: optional hook to adjust the workflow before running
    :param check_platforms: bool, forwarded to the plugin's check_platforms arg
    """
    if MOCK:
        mock_docker(remember_images=True)

    tasker = DockerTasker(retry_times=0)
    buildstep_plugin = [{
        'name': PLUGIN_BUILD_ORCHESTRATE_KEY,
        'args': {'platforms': ['x86_64']},
    }]
    workflow = DockerBuildWorkflow(
        MOCK_SOURCE,
        'test-image',
        buildstep_plugins=buildstep_plugin,
    )
    workflow.builder = MockBuilder()
    workflow.builder.base_image = ImageName.parse(df_base)

    # The plugin always tags the pulled base image with the unique build id.
    expected = set(expected)
    expected.add(UNIQUE_ID)
    all_images = set(expected).union(not_expected)
    # Sanity check: none of the images exist before the plugin runs.
    for image in all_images:
        assert not tasker.image_exists(image)

    if reactor_config_map:
        workflow.plugin_workspace[ReactorConfigPlugin.key] = {}
        workflow.plugin_workspace[ReactorConfigPlugin.key][WORKSPACE_CONF_KEY] =\
            ReactorConfig({'version': 1,
                           'source_registry': {'url': parent_registry,
                                               'insecure': True}})

    if workflow_callback:
        workflow = workflow_callback(workflow)

    runner = PreBuildPluginsRunner(tasker, workflow, [{
        'name': PullBaseImagePlugin.key,
        'args': {'parent_registry': parent_registry,
                 'parent_registry_insecure': True,
                 'check_platforms': check_platforms}
    }])

    if parent_registry is None and reactor_config_map:
        # A reactor config with no usable source registry must fail the plugin.
        with pytest.raises(PluginFailedException):
            runner.run()
        return

    runner.run()

    for image in expected:
        assert tasker.image_exists(image)
        assert image in workflow.pulled_base_images

    for image in not_expected:
        assert not tasker.image_exists(image)

    for image in workflow.pulled_base_images:
        assert tasker.image_exists(image)

    # Best-effort cleanup; some images in all_images were never created.
    for image in all_images:
        try:
            tasker.remove_image(image)
        # bugfix: was a bare 'except:', which also swallows KeyboardInterrupt
        # and SystemExit; Exception keeps the best-effort behavior safely.
        except Exception:
            pass
def test_tag_and_push_plugin(
        tmpdir, monkeypatch, image_name, logs, should_raise, has_config, use_secret):
    """Exercise TagAndPushPlugin against a mocked docker daemon and v2 registry
    (older variant: docker.Client API and plain requests.get mocking).

    Parameters are presumably pytest fixtures/parametrize values — TODO confirm.
    """
    if MOCK:
        mock_docker()
        # Simulate a successful docker push/login regardless of credentials.
        flexmock(docker.Client,
                 push=lambda iid, **kwargs: iter(logs),
                 login=lambda username, registry, dockercfg_path: {'Status': 'Login Succeeded'})

    tasker = DockerTasker()
    workflow = DockerBuildWorkflow({"provider": "git", "uri": "asd"}, TEST_IMAGE)
    workflow.tag_conf.add_primary_image(image_name)
    setattr(workflow, 'builder', X)

    secret_path = None
    if use_secret:
        # Write a throwaway .dockercfg for the plugin's 'secret' option.
        temp_dir = mkdtemp()
        with open(os.path.join(temp_dir, ".dockercfg"), "w+") as dockerconfig:
            dockerconfig_contents = {
                LOCALHOST_REGISTRY: {
                    "username": "******",
                    "email": "*****@*****.**",
                    "password": "******"}}
            dockerconfig.write(json.dumps(dockerconfig_contents))
            dockerconfig.flush()
            secret_path = temp_dir

    CONFIG_DIGEST = 'sha256:2c782e3a93d34d89ea4cf54052768be117caed54803263dd1f3798ce42aac14e'
    media_type = 'application/vnd.docker.distribution.manifest.v2+json'

    # Canned v2 schema 2 manifest returned by the mocked registry.
    response_config_json = {
        'config': {
            'digest': CONFIG_DIGEST,
            'mediaType': 'application/octet-stream',
            'size': 4132
        },
        'layers': [
            {
                'digest': 'sha256:16dc1f96e3a1bb628be2e00518fec2bb97bd5933859de592a00e2eb7774b6ecf',
                'mediaType': 'application/vnd.docker.image.rootfs.diff.tar.gzip',
                'size': 71907148
            },
            {
                'digest': 'sha256:cebc0565e1f096016765f55fde87a6f60fdb1208c0b5017e35a856ff578f5ccb',
                'mediaType': 'application/vnd.docker.image.rootfs.diff.tar.gzip',
                'size': 3945724
            }
        ],
        'mediaType': media_type,
        'schemaVersion': 2
    }

    # Canned image config blob (as served from the blobs/ endpoint).
    response_json = {
        'config': {
            'Size': 12509448,
            'architecture': 'amd64',
            'author': 'Red Hat, Inc.',
            'config': {
                'Cmd': ['/bin/rsyslog.sh'],
                'Entrypoint': None,
                'Image': 'c3fb36aafd5692d2a45115d32bb120edb6edf6c0c3c783ed6592a8dab969fb88',
                'Labels': {
                    'Architecture': 'x86_64',
                    'Authoritative_Registry': 'registry.access.redhat.com',
                    'BZComponent': 'rsyslog-docker',
                    'Name': 'rhel7/rsyslog',
                    'Release': '28.vrutkovs.31',
                    'Vendor': 'Red Hat, Inc.',
                    'Version': '7.2',
                },
            },
            'created': '2016-10-07T10:20:05.38595Z',
            'docker_version': '1.9.1',
            'id': '1ca220fbc2aed7c141b236c8729fe59db5771b32bf2da74e4a663407f32ec2a2',
            'os': 'linux',
            'parent': '47eed7a8ea3f131e9281ae09fcbfb0041872fd8b74a048f1c739302c8148505d'
        },
        'container_config': {
            'foo': 'bar',
            'spam': 'maps'
        },
        'id': '1ca220fbc2aed7c141b236c8729fe59db5771b32bf2da74e4a663407f32ec2a2',
        'parent_id': 'c3fb36aafd5692d2a45115d32bb120edb6edf6c0c3c783ed6592a8dab969fb88'
    }

    # Returning None for the config blob checks that None is stored as-is.
    if not has_config:
        response_json = None

    config_latest_url = "https://{}/v2/{}/manifests/latest".format(LOCALHOST_REGISTRY, TEST_IMAGE,)
    config_url = "https://{}/v2/{}/manifests/{}".format(LOCALHOST_REGISTRY, TEST_IMAGE, DIGEST_V2)
    blob_url = "https://{}/v2/{}/blobs/{}".format(
        LOCALHOST_REGISTRY, TEST_IMAGE, CONFIG_DIGEST)

    config_response_config_v1 = requests.Response()
    (flexmock(config_response_config_v1,
              raise_for_status=lambda: None,
              json=response_config_json,
              headers={
                  'Content-Type': 'application/vnd.docker.distribution.manifest.v1+json',
                  'Docker-Content-Digest': DIGEST_V1
              }))

    config_response_config_v2 = requests.Response()
    (flexmock(config_response_config_v2,
              raise_for_status=lambda: None,
              json=response_config_json,
              headers={
                  'Content-Type': 'application/vnd.docker.distribution.manifest.v2+json',
                  'Docker-Content-Digest': DIGEST_V2
              }))

    blob_config = requests.Response()
    (flexmock(blob_config, raise_for_status=lambda: None, json=response_json))

    def custom_get(url, headers, **kwargs):
        # Stand-in for requests.get, dispatching on URL and Accept header.
        if url == config_latest_url:
            if headers['Accept'] == 'application/vnd.docker.distribution.manifest.v1+json':
                return config_response_config_v1

            if headers['Accept'] == 'application/vnd.docker.distribution.manifest.v2+json':
                return config_response_config_v2

        if url == config_url:
            return config_response_config_v2

        if url == blob_url:
            return blob_config

    (flexmock(requests)
     .should_receive('get')
     .replace_with(custom_get))

    runner = PostBuildPluginsRunner(
        tasker,
        workflow,
        [{
            'name': TagAndPushPlugin.key,
            'args': {
                'registries': {
                    LOCALHOST_REGISTRY: {
                        'insecure': True,
                        'secret': secret_path
                    }
                }
            },
        }]
    )

    if should_raise:
        with pytest.raises(Exception):
            runner.run()
    else:
        output = runner.run()
        image = output[TagAndPushPlugin.key][0]
        tasker.remove_image(image)
        assert len(workflow.push_conf.docker_registries) > 0

        if MOCK:
            # we only test this when mocking docker because we don't expect
            # running actual docker against v2 registry
            expected_digest = ManifestDigest(v1=DIGEST_V1, v2=DIGEST_V2)
            assert workflow.push_conf.docker_registries[0].digests[image_name].v1 == \
                expected_digest.v1
            assert workflow.push_conf.docker_registries[0].digests[image_name].v2 == \
                expected_digest.v2

            if has_config:
                assert isinstance(workflow.push_conf.docker_registries[0].config, dict)
            else:
                assert workflow.push_conf.docker_registries[0].config is None
class InsideBuilder(LastLogger, BuilderStateMachine):
    """
    This is expected to run within container
    """

    def __init__(self, source, image, **kwargs):
        """
        :param source: source object; must provide get_dockerfile_path()
        :param image: str, tag for the image to be built
        """
        LastLogger.__init__(self)
        BuilderStateMachine.__init__(self)

        print_version_of_tools()

        self.tasker = DockerTasker()
        # log docker daemon details up front for easier debugging of builds
        info, version = self.tasker.get_info(), self.tasker.get_version()
        logger.debug(json.dumps(info, indent=2))
        logger.info(json.dumps(version, indent=2))

        # arguments for build
        self.source = source
        self.base_image_id = None
        self.image_id = None
        self.built_image_info = None
        self.image = ImageName.parse(image)

        # get info about base image from dockerfile
        self.df_path, self.df_dir = self.source.get_dockerfile_path()
        self.base_image = ImageName.parse(DockerfileParser(self.df_path).baseimage)
        logger.debug("base image specified in dockerfile = '%s'", self.base_image)
        if not self.base_image.tag:
            # an explicit tag is needed for pulls/pushes later on
            self.base_image.tag = 'latest'

    def build(self):
        """
        build image inside current environment;
        it's expected this may run within (privileged) docker container

        :return: image string (e.g. fedora-python:34)
        """
        logger.info("building image '%s' inside current environment", self.image)
        self._ensure_not_built()
        logger.debug("using dockerfile:\n%s", DockerfileParser(self.df_path).content)
        logs_gen = self.tasker.build_image_from_path(
            self.df_dir,
            self.image,
        )
        logger.debug("build is submitted, waiting for it to finish")
        command_result = wait_for_command(logs_gen)  # wait for build to finish
        # fixed typo: "succesful" -> "successful"
        logger.info("build was %ssuccessful!", 'un' if command_result.is_failed() else '')
        self.is_built = True
        if not command_result.is_failed():
            self.built_image_info = self.get_built_image_info()
            # self.base_image_id = self.built_image_info['ParentId']  # parent id is not base image!
            self.image_id = self.built_image_info['Id']
        build_result = BuildResult(command_result, self.image_id)
        return build_result

    def push_built_image(self, registry, insecure=False):
        """
        push built image to provided registry

        :param registry: str
        :param insecure: bool, allow connecting to registry over plain http
        :return: str, image
        """
        logger.info("pushing built image '%s' to registry '%s'", self.image, registry)
        self._ensure_is_built()
        if not registry:
            logger.warning("no registry specified; skipping")
            return

        if self.image.registry and self.image.registry != registry:
            logger.error("registry in image name doesn't match provided target registry, "
                         "image registry = '%s', target = '%s'",
                         self.image.registry, registry)
            raise RuntimeError(
                "Registry in image name doesn't match target registry. Image: '%s', Target: '%s'"
                % (self.image.registry, registry))

        target_image = self.image.copy()
        target_image.registry = registry
        response = self.tasker.tag_and_push_image(self.image, target_image, insecure=insecure)
        # the registry-qualified tag is only needed for the push itself
        self.tasker.remove_image(target_image)
        return response

    def inspect_base_image(self):
        """
        inspect base image

        :return: dict
        """
        logger.info("inspecting base image '%s'", self.base_image)
        inspect_data = self.tasker.inspect_image(self.base_image)
        return inspect_data

    def inspect_built_image(self):
        """
        inspect built image

        :return: dict
        """
        logger.info("inspecting built image '%s'", self.image_id)
        self._ensure_is_built()
        # dict with lots of data, see man docker-inspect
        inspect_data = self.tasker.inspect_image(self.image_id)
        return inspect_data

    def get_base_image_info(self):
        """
        query docker about base image

        :return dict
        """
        logger.info("getting information about base image '%s'", self.base_image)
        image_info = self.tasker.get_image_info_by_image_name(self.base_image)
        items_count = len(image_info)
        if items_count == 1:
            return image_info[0]
        elif items_count <= 0:
            logger.error("image '%s' not found", self.base_image)
            # bugfix: the image name was previously passed as a second exception
            # argument ("image '%s' not found", image) instead of %-formatted,
            # producing an unformatted message (cf. get_built_image_info below)
            raise RuntimeError("image '%s' not found" % self.base_image)
        else:
            logger.error("multiple (%d) images found for image '%s'",
                         items_count, self.base_image)
            raise RuntimeError("multiple (%d) images found for image '%s'"
                               % (items_count, self.base_image))

    def get_built_image_info(self):
        """
        query docker about built image

        :return dict
        """
        logger.info("getting information about built image '%s'", self.image)
        self._ensure_is_built()
        image_info = self.tasker.get_image_info_by_image_name(self.image)
        items_count = len(image_info)
        if items_count == 1:
            return image_info[0]
        elif items_count <= 0:
            logger.error("image '%s' not found", self.image)
            raise RuntimeError("image '%s' not found" % self.image)
        else:
            logger.error("multiple (%d) images found for image '%s'",
                         items_count, self.image)
            raise RuntimeError("multiple (%d) images found for image '%s'"
                               % (items_count, self.image))
def test_tag_and_push_plugin(
        tmpdir, monkeypatch, image_name, logs, should_raise, has_config, use_secret):
    """
    Exercise TagAndPushPlugin against a mocked docker client and v2 registry
    and verify the digests/config recorded on the workflow's push_conf.

    :param tmpdir: pytest fixture, scratch directory for the registry secret
    :param monkeypatch: pytest fixture (kept for parametrized callers)
    :param image_name: str, primary image tag to push
    :param logs: list, push output lines the mocked docker client will emit
    :param should_raise: bool, whether the plugin run is expected to fail
    :param has_config: bool, whether the registry serves a config blob
    :param use_secret: bool, whether to authenticate via a .dockercfg secret
    """
    if MOCK:
        mock_docker()
        flexmock(docker.Client, push=lambda iid, **kwargs: iter(logs),
                 login=lambda username, registry, dockercfg_path: {'Status': 'Login Succeeded'})

    tasker = DockerTasker()
    workflow = DockerBuildWorkflow({"provider": "git", "uri": "asd"}, TEST_IMAGE)
    workflow.tag_conf.add_primary_image(image_name)
    setattr(workflow, 'builder', X)

    secret_path = None
    if use_secret:
        # FIX: write the secret under the pytest-managed tmpdir instead of a
        # leaked mkdtemp() directory (the tmpdir fixture was previously unused)
        secret_path = str(tmpdir)
        with open(os.path.join(secret_path, ".dockercfg"), "w+") as dockerconfig:
            dockerconfig_contents = {
                LOCALHOST_REGISTRY: {
                    "username": "******",
                    "email": "*****@*****.**",
                    "password": "******"}}
            dockerconfig.write(json.dumps(dockerconfig_contents))
            dockerconfig.flush()

    CONFIG_DIGEST = 'sha256:2c782e3a93d34d89ea4cf54052768be117caed54803263dd1f3798ce42aac14e'
    media_type = 'application/vnd.docker.distribution.manifest.v2+json'

    # manifest (v2 schema 2) returned by the mocked registry
    response_config_json = {
        'config': {
            'digest': CONFIG_DIGEST,
            'mediaType': 'application/octet-stream',
            'size': 4132
        },
        'layers': [
            {
                'digest': 'sha256:16dc1f96e3a1bb628be2e00518fec2bb97bd5933859de592a00e2eb7774b6ecf',
                'mediaType': 'application/vnd.docker.image.rootfs.diff.tar.gzip',
                'size': 71907148
            },
            {
                'digest': 'sha256:cebc0565e1f096016765f55fde87a6f60fdb1208c0b5017e35a856ff578f5ccb',
                'mediaType': 'application/vnd.docker.image.rootfs.diff.tar.gzip',
                'size': 3945724
            }
        ],
        'mediaType': media_type,
        'schemaVersion': 2
    }

    # image config blob served at CONFIG_DIGEST's blob URL
    response_json = {
        'config': {
            'Size': 12509448,
            'architecture': 'amd64',
            'author': 'Red Hat, Inc.',
            'config': {
                'Cmd': ['/bin/rsyslog.sh'],
                'Entrypoint': None,
                'Image': 'c3fb36aafd5692d2a45115d32bb120edb6edf6c0c3c783ed6592a8dab969fb88',
                'Labels': {
                    'Architecture': 'x86_64',
                    'Authoritative_Registry': 'registry.access.redhat.com',
                    'BZComponent': 'rsyslog-docker',
                    'Name': 'rhel7/rsyslog',
                    'Release': '28.vrutkovs.31',
                    'Vendor': 'Red Hat, Inc.',
                    'Version': '7.2',
                },
            },
            'created': '2016-10-07T10:20:05.38595Z',
            'docker_version': '1.9.1',
            'id': '1ca220fbc2aed7c141b236c8729fe59db5771b32bf2da74e4a663407f32ec2a2',
            'os': 'linux',
            'parent': '47eed7a8ea3f131e9281ae09fcbfb0041872fd8b74a048f1c739302c8148505d'
        },
        'container_config': {
            'foo': 'bar',
            'spam': 'maps'
        },
        'id': '1ca220fbc2aed7c141b236c8729fe59db5771b32bf2da74e4a663407f32ec2a2',
        'parent_id': 'c3fb36aafd5692d2a45115d32bb120edb6edf6c0c3c783ed6592a8dab969fb88'
    }

    if not has_config:
        response_json = None

    config_latest_url = "https://{}/v2/{}/manifests/latest".format(LOCALHOST_REGISTRY, TEST_IMAGE,)
    config_url = "https://{}/v2/{}/manifests/{}".format(LOCALHOST_REGISTRY, TEST_IMAGE, DIGEST_V2)
    blob_url = "https://{}/v2/{}/blobs/{}".format(
        LOCALHOST_REGISTRY, TEST_IMAGE, CONFIG_DIGEST)

    config_response_config_v1 = requests.Response()
    (flexmock(config_response_config_v1,
              raise_for_status=lambda: None,
              json=response_config_json,
              headers={
                  'Content-Type': 'application/vnd.docker.distribution.manifest.v1+json',
                  'Docker-Content-Digest': DIGEST_V1
              }))

    config_response_config_v2 = requests.Response()
    (flexmock(config_response_config_v2,
              raise_for_status=lambda: None,
              json=response_config_json,
              headers={
                  'Content-Type': 'application/vnd.docker.distribution.manifest.v2+json',
                  'Docker-Content-Digest': DIGEST_V2
              }))

    blob_config = requests.Response()
    (flexmock(blob_config, raise_for_status=lambda: None, json=response_json))

    def custom_get(url, headers, **kwargs):
        # serve the v1 or v2 manifest depending on the Accept header,
        # and the config blob for its digest URL; unknown URLs return None
        if url == config_latest_url:
            if headers['Accept'] == 'application/vnd.docker.distribution.manifest.v1+json':
                return config_response_config_v1
            if headers['Accept'] == 'application/vnd.docker.distribution.manifest.v2+json':
                return config_response_config_v2
        if url == config_url:
            return config_response_config_v2
        if url == blob_url:
            return blob_config

    (flexmock(requests)
        .should_receive('get')
        .replace_with(custom_get))

    runner = PostBuildPluginsRunner(
        tasker,
        workflow,
        [{
            'name': TagAndPushPlugin.key,
            'args': {
                'registries': {
                    LOCALHOST_REGISTRY: {
                        'insecure': True,
                        'secret': secret_path
                    }
                }
            },
        }]
    )

    if should_raise:
        with pytest.raises(Exception):
            runner.run()
    else:
        output = runner.run()
        image = output[TagAndPushPlugin.key][0]
        tasker.remove_image(image)
        assert len(workflow.push_conf.docker_registries) > 0

        if MOCK:
            # we only test this when mocking docker because we don't expect
            # running actual docker against v2 registry
            expected_digest = ManifestDigest(v1=DIGEST_V1, v2=DIGEST_V2)
            assert workflow.push_conf.docker_registries[0].digests[image_name].v1 == \
                expected_digest.v1
            assert workflow.push_conf.docker_registries[0].digests[image_name].v2 == \
                expected_digest.v2

            if has_config:
                assert isinstance(workflow.push_conf.docker_registries[0].config, dict)
            else:
                assert workflow.push_conf.docker_registries[0].config is None