def prepare(insecure_registry=None, retry_delay=None, namespace=None):
    """
    Boiler-plate test set-up

    Builds a DockerTasker/DockerBuildWorkflow pair with stubbed builder
    attributes, mocks osbs.conf.Configuration, and returns a
    PostBuildPluginsRunner configured to run ImportImagePlugin.
    """
    if MOCK:
        mock_docker()
    tasker = DockerTasker()
    workflow = DockerBuildWorkflow(SOURCE, "test-image")
    setattr(workflow, 'builder', X())
    setattr(workflow.builder, 'image_id', 'asd123')
    setattr(workflow.builder, 'source', X())
    setattr(workflow.builder.source, 'dockerfile_path', None)
    setattr(workflow.builder.source, 'path', None)
    # Real Configuration instance that the mocked constructor below returns.
    fake_conf = osbs.conf.Configuration(conf_file=None, openshift_uri='/')

    expectation = flexmock(osbs.conf).should_receive('Configuration').and_return(fake_conf)
    if namespace:
        # When a namespace is requested, also pin the exact kwargs the
        # plugin is expected to pass to Configuration().
        expectation.with_args(namespace=namespace, verify_ssl=False, openshift_url="",
                              openshift_uri="", use_auth=False, build_json_dir="")

    runner = PostBuildPluginsRunner(tasker, workflow, [{
        'name': ImportImagePlugin.key,
        'args': {
            'imagestream': TEST_IMAGESTREAM,
            'docker_image_repo': TEST_REPO,
            'url': '',
            'build_json_dir': "",
            'verify_ssl': False,
            'use_auth': False,
            'insecure_registry': insecure_registry,
            # retry_delay of None (or 0) disables the delay between retries
            'retry_delay': retry_delay or 0,
        }}])

    return runner
# ===== Example 2 =====
def test_pulp(tmpdir, check_repo_retval, should_raise):
    """
    Exercise PulpPushPlugin against a fully mocked dockpulp client.

    check_repo_retval drives dockpulp.imgutils.check_repo; should_raise
    says whether the plugin run is expected to fail as a result.
    """
    tasker = DockerTasker()
    workflow = DockerBuildWorkflow(SOURCE, "test-image")
    setattr(workflow, 'builder', X())
    setattr(workflow.builder, 'source', X())
    setattr(workflow.builder.source, 'dockerfile_path', None)
    setattr(workflow.builder.source, 'path', None)
    setattr(workflow, 'tag_conf', X())
    # Three tag variants: plain repo, namespaced repo, and explicit tag.
    setattr(workflow.tag_conf, 'images', [ImageName(repo="image-name1"),
                                          ImageName(namespace="prefix",
                                                    repo="image-name2"),
                                          ImageName(repo="image-name3", tag="asd")])

    # Mock dockpulp and docker
    dockpulp.Pulp = flexmock(dockpulp.Pulp)
    dockpulp.Pulp.registry='registry.example.com'
    (flexmock(dockpulp.imgutils).should_receive('get_metadata')
     .with_args(object)
     .and_return([{'id': 'foo'}]))
    (flexmock(dockpulp.imgutils).should_receive('get_versions')
     .with_args(object)
     .and_return({'id': '1.6.0'}))
    (flexmock(dockpulp.imgutils).should_receive('check_repo')
     .and_return(check_repo_retval))
    (flexmock(dockpulp.Pulp)
     .should_receive('set_certs')
     .with_args(object, object))
    (flexmock(dockpulp.Pulp)
     .should_receive('push_tar_to_pulp')
     .with_args(object, object)
     .and_return([1, 2, 3]))
    (flexmock(dockpulp.Pulp)
     .should_receive('watch_tasks')
     .with_args(list))
    mock_docker()

    # The plugin reads its client certificate/key from SOURCE_SECRET_PATH.
    os.environ['SOURCE_SECRET_PATH'] = str(tmpdir)
    with open(os.path.join(str(tmpdir), "pulp.cer"), "wt") as cer:
        cer.write("pulp certificate\n")
    with open(os.path.join(str(tmpdir), "pulp.key"), "wt") as key:
        key.write("pulp key\n")

    runner = PostBuildPluginsRunner(tasker, workflow, [{
        'name': PulpPushPlugin.key,
        'args': {
            'pulp_registry_name': 'test'
        }}])

    if should_raise:
        with pytest.raises(Exception) as exc:
            runner.run()

        return

    runner.run()
    assert PulpPushPlugin.key is not None
    # Every configured tag should come back rewritten onto the pulp registry.
    images = [i.to_str() for i in workflow.postbuild_results[PulpPushPlugin.key]]
    assert "registry.example.com/image-name1" in images
    assert "registry.example.com/prefix/image-name2" in images
    assert "registry.example.com/image-name3:asd" in images
def mock_environment(tmpdir, session=None, build_process_failed=False,
                     koji_build_id=None, use_import=False):
    """
    Build a (tasker, workflow) pair for koji-related plugin tests.

    session -- koji client session stub; a MockedClientSession is created
               when omitted
    build_process_failed -- make the workflow's BuildResult a failure
    koji_build_id -- when set, record it as the result of the koji import
                     (use_import=True) or koji promote plugin
    """
    if session is None:
        session = MockedClientSession('')

    if MOCK:
        mock_docker()
    tasker = DockerTasker()
    workflow = DockerBuildWorkflow(SOURCE, 'test-image')
    setattr(workflow, 'builder', X())
    setattr(workflow.builder, 'image_id', '123456imageid')
    setattr(workflow.builder, 'base_image', ImageName(repo='Fedora', tag='22'))
    setattr(workflow.builder, 'source', X())
    setattr(workflow.builder.source, 'dockerfile_path', None)
    setattr(workflow.builder.source, 'path', None)

    # Any koji.ClientSession(hub, opts) call gets the stubbed session.
    flexmock(koji, ClientSession=lambda hub, opts: session)

    if build_process_failed:
        workflow.build_result = BuildResult(fail_reason="not built")
    else:
        workflow.build_result = BuildResult(image_id="id1234")

    if koji_build_id:
        if use_import:
            workflow.exit_results[KojiImportPlugin.key] = koji_build_id
        else:
            workflow.exit_results[KojiPromotePlugin.key] = koji_build_id

    # Make any retry sleeps in the code under test instantaneous.
    (flexmock(time)
        .should_receive('sleep')
        .and_return(None))

    return tasker, workflow
def test_add_labels_equal_aliases(tmpdir, docker_tasker, caplog,
                                  base_l, df_l, expected, expected_log,
                                  reactor_config_map):
    """
    Check that AddLabelsPlugin keeps 'description' and
    'io.k8s.description' in sync (equal_labels), whichever of the
    Dockerfile (df_l) or the base image (base_l) each label comes from.
    """
    if MOCK:
        mock_docker()

    df_content = "FROM fedora\n"
    plugin_labels = {}
    # Add each label to the Dockerfile only when the parametrized value is set.
    if df_l[0]:
        df_content += 'LABEL description="{0}"\n'.format(df_l[0])
    if df_l[1]:
        df_content += 'LABEL io.k8s.description="{0}"\n'.format(df_l[1])

    # Same for labels inherited from the base image's inspect data.
    base_labels = {INSPECT_CONFIG: {"Labels": {}}}
    if base_l[0]:
        base_labels[INSPECT_CONFIG]["Labels"]["description"] = base_l[0]
    if base_l[1]:
        base_labels[INSPECT_CONFIG]["Labels"]["io.k8s.description"] = base_l[1]

    df = df_parser(str(tmpdir))
    df.content = df_content

    workflow = DockerBuildWorkflow(MOCK_SOURCE, 'test-image')
    setattr(workflow, 'builder', X())
    setattr(workflow.builder, 'df_path', df.dockerfile_path)
    setattr(workflow.builder, 'base_image_inspect', base_labels)

    # With reactor_config_map the same settings come via the reactor config.
    if reactor_config_map:
        make_and_store_reactor_config_map(
            workflow,
            {
                'image_labels': plugin_labels,
                'image_equal_labels': [['description', 'io.k8s.description']]})

    runner = PreBuildPluginsRunner(
        docker_tasker,
        workflow,
        [{
            'name': AddLabelsPlugin.key,
            'args': {
                'labels': plugin_labels,
                'dont_overwrite': [],
                'auto_labels': [],
                'aliases': {},
                'equal_labels': [['description', 'io.k8s.description']]
            }
        }]
    )

    runner.run()
    assert AddLabelsPlugin.key is not None
    # Effective value: Dockerfile label wins, base-image label is the fallback.
    result_fst = df.labels.get("description") or \
        base_labels[INSPECT_CONFIG]["Labels"].get("description")
    result_snd = df.labels.get("io.k8s.description") or \
        base_labels[INSPECT_CONFIG]["Labels"].get("io.k8s.description")
    assert result_fst == expected[0]
    assert result_snd == expected[1]

    if expected_log:
        assert expected_log in caplog.text
def test_add_labels_plugin_explicit(tmpdir, docker_tasker, auto_label, labels_docker, labels_base):
    """
    An explicitly supplied label must win over the auto-generated value
    for the same key (auto_label).
    """
    df = df_parser(str(tmpdir))
    df.content = labels_docker

    if MOCK:
        mock_docker()

    workflow = DockerBuildWorkflow(MOCK_SOURCE, 'test-image')
    setattr(workflow, 'builder', X)
    flexmock(workflow, source=MockSource())
    flexmock(workflow, base_image_inspect=labels_base)
    setattr(workflow.builder, 'df_path', df.dockerfile_path)

    # Explicit value for a key that is also listed in auto_labels.
    prov_labels = {}
    prov_labels[auto_label] = 'explicit_value'

    runner = PreBuildPluginsRunner(
        docker_tasker,
        workflow,
        [{
            'name': AddLabelsPlugin.key,
            'args': {'labels': prov_labels, "dont_overwrite": [], "auto_labels": [auto_label],
                     'aliases': {'Build_Host': 'com.redhat.build-host'}}
        }]
    )

    runner.run()

    assert df.labels[auto_label] == 'explicit_value'
# ===== Example 6 =====
def test_hostdocker_build(caplog, source_params):
    """
    End-to-end build via DockerhostBuildManager against a local registry;
    pull the result back, check logs, then clean up.
    """
    if MOCK:
        mock_docker()

    image_name = ImageName(repo="dock-test-ssh-image")
    remote_image = image_name.copy()
    remote_image.registry = LOCALHOST_REGISTRY
    m = DockerhostBuildManager("buildroot-dh-fedora", {
        "source": source_params,
        "image": remote_image.to_str(),
        "parent_registry": LOCALHOST_REGISTRY,  # faster
        "target_registries_insecure": True,
        "parent_registry_insecure": True,
    })
    results = m.build()
    dt = DockerTasker()
    dt.pull_image(remote_image, insecure=True)

    # Path-based sources are bind-mounted into the build container.
    if source_params['provider'] == 'path':
        assert_source_from_path_mounted_ok(caplog, m.temp_dir)

    assert len(results.build_logs) > 0
    #assert re.search(r'build json mounted in container .+"uri": %s' %
    #        os.path.join(dconstants.CONTAINER_SHARE_PATH, 'source'))
    # assert isinstance(results.built_img_inspect, dict)
    # assert len(results.built_img_inspect.items()) > 0
    # assert isinstance(results.built_img_info, dict)
    # assert len(results.built_img_info.items()) > 0
    # assert isinstance(results.base_img_info, dict)
    # assert len(results.base_img_info.items()) > 0
    # assert len(results.base_plugins_output) > 0
    # assert len(results.built_img_plugins_output) > 0
    dt.remove_container(results.container_id)
    dt.remove_image(remote_image)
def test_add_labels_plugin_generated(tmpdir, docker_tasker, auto_label, value_re_part):
    """
    AddLabelsPlugin should auto-generate the requested label; its value is
    checked against value_re_part (a regex) and, for build-date, against
    the recorded reactor start time.
    """
    df = df_parser(str(tmpdir))
    df.content = DF_CONTENT

    if MOCK:
        mock_docker()

    workflow = DockerBuildWorkflow(MOCK_SOURCE, 'test-image')
    setattr(workflow, 'builder', X)
    flexmock(workflow, source=MockSource())
    flexmock(workflow, base_image_inspect=LABELS_CONF_BASE)
    setattr(workflow.builder, 'df_path', df.dockerfile_path)

    runner = PreBuildPluginsRunner(
        docker_tasker,
        workflow,
        [{
            'name': AddLabelsPlugin.key,
            'args': {'labels': {}, "dont_overwrite": [], "auto_labels": [auto_label],
                     'aliases': {'Build_Host': 'com.redhat.build-host'}}
        }]
    )

    runner.run()
    if value_re_part:
        assert re.match(value_re_part, df.labels[auto_label])

    if auto_label == "build-date":
        # build-date must be the ISO-8601 UTC timestamp of reactor start.
        utc_dt = datetime.datetime.utcfromtimestamp(atomic_reactor_start_time).isoformat()
        assert df.labels[auto_label] == utc_dt
    def prepare(self, key, value, set_labels_args=None, set_labels_kwargs=None):
        """
        Boiler-plate set-up for CheckAndSetRebuildPlugin tests.

        key/value -- the label the plugin should check and set
        set_labels_args/set_labels_kwargs -- when given, pin the exact
            arguments expected on OSBS.set_labels_on_build_config
        Returns (workflow, runner).
        """
        if MOCK:
            mock_docker()
        tasker = DockerTasker()
        workflow = DockerBuildWorkflow(SOURCE, "test-image")
        setattr(workflow, 'builder', X())
        setattr(workflow.builder, 'image_id', 'asd123')
        setattr(workflow.builder, 'base_image', ImageName(repo='Fedora',
                                                          tag='22'))
        setattr(workflow.builder, 'source', X())
        setattr(workflow.builder.source, 'path', '/tmp')
        setattr(workflow.builder.source, 'dockerfile_path', None)
        expectation = (flexmock(OSBS)
                       .should_receive('set_labels_on_build_config'))
        if set_labels_args is not None:
            if set_labels_kwargs is None:
                set_labels_kwargs = {}

            expectation.with_args(*set_labels_args, **set_labels_kwargs)

        runner = PreBuildPluginsRunner(tasker, workflow, [
            {
                'name': CheckAndSetRebuildPlugin.key,
                'args': {
                    'label_key': key,
                    'label_value': value,
                    'url': '',
                },
            }
        ])
        return workflow, runner
# ===== Example 9 =====
def test_privileged_build(caplog, source_params):
    """
    End-to-end privileged build against a local registry; pull the result
    back, check logs, then clean up.
    """
    if MOCK:
        mock_docker()

    image_name = ImageName(repo=TEST_IMAGE)
    remote_image = image_name.copy()
    remote_image.registry = LOCALHOST_REGISTRY
    m = PrivilegedBuildManager("buildroot-fedora", {
        "source": source_params,
        "image": remote_image.to_str(),
        "parent_registry": LOCALHOST_REGISTRY,  # faster
        "target_registries_insecure": True,
        "parent_registry_insecure": True,
    })
    results = m.build()
    dt = DockerTasker()
    dt.pull_image(remote_image, insecure=True)

    # Path-based sources are bind-mounted into the build container.
    if source_params['provider'] == 'path':
        assert_source_from_path_mounted_ok(caplog, m.temp_dir)

    assert len(results.build_logs) > 0
    # assert isinstance(results.built_img_inspect, dict)
    # assert len(results.built_img_inspect.items()) > 0
    # assert isinstance(results.built_img_info, dict)
    # assert len(results.built_img_info.items()) > 0
    # assert isinstance(results.base_img_info, dict)
    # assert len(results.base_img_info.items()) > 0
    # assert len(results.base_plugins_output) > 0
    # assert len(results.built_img_plugins_output) > 0
    dt.remove_container(results.container_id)
    dt.remove_image(remote_image)
# ===== Example 10 =====
def test_add_labels_plugin(tmpdir, docker_tasker,
                           df_content, labels_conf_base, labels_conf, dont_overwrite, aliases,
                           expected_output, caplog):
    """
    Run AddLabelsPlugin over df_content and verify either the logged
    plugin failure (when expected_output is a RuntimeError) or that the
    resulting Dockerfile content is one of the expected outputs.
    """
    df = DockerfileParser(str(tmpdir))
    df.content = df_content

    if MOCK:
        mock_docker()

    workflow = DockerBuildWorkflow(MOCK_SOURCE, 'test-image')
    setattr(workflow, 'builder', X)
    flexmock(workflow, base_image_inspect=labels_conf_base)
    setattr(workflow.builder, 'df_path', df.dockerfile_path)

    runner = PreBuildPluginsRunner(
        docker_tasker,
        workflow,
        [{
            'name': AddLabelsPlugin.key,
            'args': {
                'labels': labels_conf,
                'dont_overwrite': dont_overwrite,
                'auto_labels': [],
                'aliases': aliases,
            }
        }]
    )

    runner.run()
    if isinstance(expected_output, RuntimeError):
        # caplog.text is a property (see its use elsewhere in this file);
        # calling it like a method raised TypeError instead of checking logs.
        assert "plugin 'add_labels_in_dockerfile' raised an exception: RuntimeError" in caplog.text

    else:
        assert AddLabelsPlugin.key is not None
        assert df.content in expected_output
def test_tarball_generation_local_repo(tmpdir):
    """Building from the local repo checkout should produce exactly one tarball."""
    if MOCK:
        mock_docker()
    builder = BuildImageBuilder(reactor_local_path=PARENT_DIR)
    target_dir = str(tmpdir)
    tarball_path = builder.get_reactor_tarball_path(target_dir)
    assert os.path.exists(tarball_path)
    archives = glob(os.path.join(target_dir, 'atomic-reactor-*.tar.gz'))
    assert len(archives) == 1
def test_tarball_generation_upstream_repo(tmpdir):
    """Building from the official upstream git repo should produce exactly one tarball."""
    if MOCK:
        mock_docker()
    builder = BuildImageBuilder(use_official_reactor_git=True)
    target_dir = str(tmpdir)
    tarball_path = builder.get_reactor_tarball_path(target_dir)
    assert os.path.exists(tarball_path)
    archives = glob(os.path.join(target_dir, 'atomic-reactor-*.tar.gz'))
    assert len(archives) == 1
def test_pull_base_image_plugin(df_base, parent_registry, expected_w_reg, expected_wo_reg):
    """
    Run PullBaseImagePlugin for the given base image / parent registry and
    check which variants of the base image end up available locally.
    """
    if MOCK:
        mock_docker(remember_images=True)

    tasker = DockerTasker()
    workflow = DockerBuildWorkflow(MOCK_SOURCE, 'test-image')
    workflow.builder = MockBuilder()
    workflow.builder.base_image = ImageName.parse(df_base)

    # Sanity: neither variant is present before the plugin runs.
    assert not tasker.image_exists(BASE_IMAGE)
    assert not tasker.image_exists(BASE_IMAGE_W_REGISTRY)

    runner = PreBuildPluginsRunner(
        tasker,
        workflow,
        [{
            'name': PullBaseImagePlugin.key,
            'args': {'parent_registry': parent_registry, 'parent_registry_insecure': True}
        }]
    )

    runner.run()

    assert tasker.image_exists(BASE_IMAGE) == expected_wo_reg
    assert tasker.image_exists(BASE_IMAGE_W_REGISTRY) == expected_w_reg

    # Best-effort cleanup: either image may legitimately be absent.
    # Was a single bare 'except:' wrapping both removals, which swallowed
    # KeyboardInterrupt/SystemExit and skipped the second removal whenever
    # the first one failed.
    try:
        tasker.remove_image(BASE_IMAGE)
    except Exception:
        pass
    try:
        tasker.remove_image(BASE_IMAGE_W_REGISTRY)
    except Exception:
        pass
# ===== Example 14 =====
def test_get_image_info_by_id_nonexistent():
    """Querying image info for an unknown image ID should yield None."""
    if MOCK:
        mock_docker()

    tasker = DockerTasker()
    assert tasker.get_image_info_by_image_id("asd") is None
# ===== Example 15 =====
def test_get_image_info_by_name_tag_in_name_nonexisten(temp_image_name):
    """An unknown image name should produce an empty result list."""
    if MOCK:
        mock_docker()

    tasker = DockerTasker()
    matches = tasker.get_image_info_by_image_name(temp_image_name)
    assert len(matches) == 0
# ===== Example 16 =====
def test_image_doesnt_exist():
    """image_exists() must return False when docker inspect fails for the image."""
    missing_image = "lerknglekrnglekrnglekrnglekrng"
    if MOCK:
        mock_docker(should_raise_error={'inspect_image': [missing_image]})

    tasker = DockerTasker()
    assert tasker.image_exists(missing_image) is False
# ===== Example 17 — file: test_cli.py (project: pbabinca/dock) =====
    def test_simple_privileged_build(self, is_registry_running, temp_image_name,
            source_provider, uri):
        """
        Run a privileged CLI build for the given source provider/uri and
        expect the CLI to exit with code 0.
        """
        if MOCK:
            mock_docker()

        temp_image = temp_image_name
        command = [
            "main.py",
            "--verbose",
            "build",
            source_provider,
            "--method", "privileged",
            "--build-image", PRIV_BUILD_IMAGE,
            "--image", temp_image.to_str(),
            "--uri", uri,
        ]
        if is_registry_running:
            logger.info("registry is running")
            command += ["--source-registry", LOCALHOST_REGISTRY]
        else:
            logger.info("registry is NOT running")
        # The CLI always terminates via sys.exit(); success is exit code 0.
        with pytest.raises(SystemExit) as excinfo:
            self.exec_cli(command)

        assert excinfo.value.code == 0
# ===== Example 18 =====
def test_get_image_info_by_name_tag_in_name():
    """Looking up an existing image (tag embedded in the name) returns one record."""
    if MOCK:
        mock_docker()

    tasker = DockerTasker()
    matches = tasker.get_image_info_by_image_name(input_image_name)
    assert len(matches) == 1
# ===== Example 19 =====
def test_get_version():
    """get_info() should return the docker version/info payload as a dict."""
    if MOCK:
        mock_docker()

    tasker = DockerTasker()
    info = tasker.get_info()
    assert isinstance(info, dict)
def prepare():
    """
    Boiler-plate test set-up: build a PostBuildPluginsRunner for
    ImportImagePlugin with a stubbed builder and a mocked osbs
    Configuration.
    """
    if MOCK:
        mock_docker()
    docker_tasker = DockerTasker()
    workflow = DockerBuildWorkflow(SOURCE, "test-image")
    workflow.builder = X()
    workflow.builder.image_id = 'asd123'
    workflow.builder.source = X()
    workflow.builder.source.dockerfile_path = None
    workflow.builder.source.path = None
    # Every Configuration() the plugin constructs gets this real instance.
    fake_conf = osbs.conf.Configuration(conf_file=None, openshift_uri='/')
    flexmock(osbs.conf).should_receive('Configuration').and_return(fake_conf)

    plugin_conf = {
        'name': ImportImagePlugin.key,
        'args': {
            'imagestream': TEST_IMAGESTREAM,
            'docker_image_repo': TEST_REPO,
            'url': '',
            'verify_ssl': False,
            'use_auth': False
        },
    }
    return PostBuildPluginsRunner(docker_tasker, workflow, [plugin_conf])
def test_tag_and_push_plugin(tmpdir):
    """Push TEST_IMAGE to the local registry via TagAndPushPlugin, then clean up."""
    if MOCK:
        mock_docker()

    tasker = DockerTasker()
    workflow = DockerBuildWorkflow("asd", "test-image")
    workflow.builder = X

    registry_mapping = {
        LOCALHOST_REGISTRY: {
            "insecure": True,
            "image_names": [TEST_IMAGE],
        }
    }
    runner = PostBuildPluginsRunner(
        tasker,
        workflow,
        [{
            'name': TagAndPushPlugin.key,
            'args': {"mapping": registry_mapping},
        }],
    )
    output = runner.run()
    pushed_image = output[TagAndPushPlugin.key][0]
    tasker.remove_image(pushed_image)
# ===== Example 22 — file: test_cli.py (project: pbabinca/dock) =====
    def test_building_from_json_source_provider(self, is_registry_running, temp_image_name):
        """
        Build from example-build.json via the CLI (hostdocker method),
        expect a zero exit code, then clean up the built image.
        """
        if MOCK:
            mock_docker()

        temp_image = temp_image_name
        command = [
            "main.py",
            "--verbose",
            "build",
            "json",
            "--method", "hostdocker",
            "--build-image", DH_BUILD_IMAGE,
            os.path.join(FILES, 'example-build.json'),
            "--overrides", "image={0}".format(temp_image),
            "source.uri={0}".format(DOCKERFILE_OK_PATH)
        ]
        if is_registry_running:
            logger.info("registry is running")
            command += ["--source-registry", LOCALHOST_REGISTRY]
        else:
            logger.info("registry is NOT running")
        # The CLI always terminates via sys.exit(); success is exit code 0.
        with pytest.raises(SystemExit) as excinfo:
            self.exec_cli(command)
        assert excinfo.value.code == 0
        # 'dt' was previously referenced without ever being created,
        # raising NameError during cleanup; instantiate a tasker here.
        dt = DockerTasker()
        dt.remove_image(temp_image, noprune=True)
# ===== Example 23 =====
def test_inspect_image():
    """inspect_image() on an existing image should return its metadata dict."""
    if MOCK:
        mock_docker()

    tasker = DockerTasker()
    metadata = tasker.inspect_image(input_image_name)
    assert isinstance(metadata, dict)
def mock_environment(tmpdir, primary_images=None,
                     annotations=None):
    """
    Build a (tasker, workflow) pair with a completed BuildResult.

    primary_images -- iterable of image names; names whose tag contains a
                      '-' are registered as primary tags, and the first
                      entry always becomes the unique image
    annotations -- annotations to attach to the BuildResult
    """
    # Was 'annotations={}': a mutable default argument shared across calls.
    if annotations is None:
        annotations = {}
    if MOCK:
        mock_docker()
    tasker = DockerTasker()
    workflow = DockerBuildWorkflow(SOURCE, "test-image")
    base_image_id = '123456parent-id'
    setattr(workflow, '_base_image_inspect', {'Id': base_image_id})
    setattr(workflow, 'builder', X())
    setattr(workflow.builder, 'image_id', '123456imageid')
    setattr(workflow.builder, 'base_image', ImageName(repo='Fedora', tag='22'))
    setattr(workflow.builder, 'source', X())
    setattr(workflow.builder, 'built_image_info', {'ParentId': base_image_id})
    setattr(workflow.builder.source, 'dockerfile_path', None)
    setattr(workflow.builder.source, 'path', None)
    setattr(workflow, 'tag_conf', TagConf())
    if primary_images:
        for image in primary_images:
            # Only tags containing '-' count as primary (e.g. version-release).
            if '-' in ImageName.parse(image).tag:
                workflow.tag_conf.add_primary_image(image)
        workflow.tag_conf.add_unique_image(primary_images[0])

    workflow.build_result = BuildResult(image_id='123456', annotations=annotations)

    return tasker, workflow
# ===== Example 25 =====
def test_wait_for_command():
    """wait_for_command() should consume a docker logs generator and return a result."""
    if MOCK:
        mock_docker()

    client = docker.Client()
    logs_gen = client.pull(INPUT_IMAGE, stream=True)
    assert wait_for_command(logs_gen) is not None
# ===== Example 26 =====
def test_layer_sizes():
    """
    After a full mocked build, workflow.layer_sizes should list each
    layer's diff_id with its size, oldest layer first.
    """
    flexmock(DockerfileParser, content='df_content')
    this_file = inspect.getfile(PreRaises)
    mock_docker()
    fake_builder = MockInsideBuilder()
    flexmock(InsideBuilder).new_instances(fake_builder)
    watch_exit = Watcher()
    watch_buildstep = Watcher()
    workflow = DockerBuildWorkflow(SOURCE, 'test-image',
                                   exit_plugins=[{'name': 'uses_source',
                                                  'args': {
                                                      'watcher': watch_exit,
                                                  }}],
                                   buildstep_plugins=[{'name': 'buildstep_watched',
                                                       'args': {
                                                           'watcher': watch_buildstep,
                                                       }}],
                                   plugin_files=[this_file])

    workflow.build_docker_image()

    # Sizes come from the mocked builder's layer history (see MockInsideBuilder).
    expected = [
        {'diff_id': u'sha256:diff_id1-oldest', 'size': 4},
        {'diff_id': u'sha256:diff_id2', 'size': 3},
        {'diff_id': u'sha256:diff_id3', 'size': 2},
        {'diff_id': u'sha256:diff_id4-newest', 'size': 1}
    ]

    assert workflow.layer_sizes == expected
def test_adddockerfile_nvr_from_labels2(tmpdir, docker_tasker):
    """
    AddDockerfilePlugin should name the embedded Dockerfile after the
    Name-Version-Release labels that AddLabelsPlugin set just before it.
    """
    df_content = """
FROM fedora
RUN yum install -y python-django
CMD blabla"""
    df = df_parser(str(tmpdir))
    df.content = df_content

    if MOCK:
        mock_docker()

    workflow = DockerBuildWorkflow(MOCK_SOURCE, 'test-image')
    flexmock(workflow, base_image_inspect={INSPECT_CONFIG: {"Labels": {}}})
    workflow.builder = X
    workflow.builder.df_path = df.dockerfile_path
    workflow.builder.df_dir = str(tmpdir)

    # AddLabelsPlugin runs first so the NVR labels exist when
    # AddDockerfilePlugin computes the embedded file name.
    runner = PreBuildPluginsRunner(
        docker_tasker,
        workflow,
        [{
            'name': AddLabelsPlugin.key,
            'args': {'labels': {'Name': 'jboss-eap-6-docker',
                                'Version': '6.4',
                                'Release': '77'},
                     'auto_labels': []}
         },
         {
            'name': AddDockerfilePlugin.key
        }]
    )
    runner.run()
    assert AddDockerfilePlugin.key is not None

    assert "ADD Dockerfile-jboss-eap-6-docker-6.4-77 /root/buildinfo/Dockerfile-jboss-eap-6-docker-6.4-77" in df.content
# ===== Example 28 =====
def test_workflow_plugin_results():
    """
    Verifies the results of plugins in different phases
    are stored properly.
    """

    this_file = inspect.getfile(PreRaises)
    mock_docker()
    flexmock(InsideBuilder).new_instances(MockInsideBuilder())

    workflow = DockerBuildWorkflow(
        MOCK_SOURCE, 'test-image',
        prebuild_plugins=[{'name': 'pre_build_value'}],
        prepublish_plugins=[{'name': 'pre_publish_value'}],
        postbuild_plugins=[{'name': 'post_build_value'}],
        exit_plugins=[{'name': 'exit_value'}],
        plugin_files=[this_file])

    workflow.build_docker_image()

    # Each phase's runner should record its plugin's return value
    # under the plugin name.
    assert workflow.prebuild_results == {'pre_build_value': 'pre_build_value_result'}
    assert workflow.postbuild_results == {'post_build_value': 'post_build_value_result'}
    assert workflow.prepub_results == {'pre_publish_value': 'pre_publish_value_result'}
    assert workflow.exit_results == {'exit_value': 'exit_value_result'}
# ===== Example 29 =====
def test_workflow_docker_build_error():
    """
    This is a test for what happens when the docker build fails.
    """

    this_file = inspect.getfile(PreRaises)
    mock_docker()
    # failed=True makes the mocked builder report a failed docker build.
    fake_builder = MockInsideBuilder(failed=True)
    flexmock(InsideBuilder).new_instances(fake_builder)
    watch_prepub = Watcher()
    watch_post = Watcher()
    watch_exit = Watcher()

    workflow = DockerBuildWorkflow(MOCK_SOURCE, 'test-image',
                                   prepublish_plugins=[{'name': 'prepub_watched',
                                                        'args': {
                                                            'watcher': watch_prepub,
                                                        }}],
                                   postbuild_plugins=[{'name': 'post_watched',
                                                       'args': {
                                                           'watcher': watch_post
                                                       }}],
                                   exit_plugins=[{'name': 'exit_watched',
                                                  'args': {
                                                      'watcher': watch_exit
                                                  }}],
                                   plugin_files=[this_file])

    assert workflow.build_docker_image().is_failed()

    # No subsequent build phases should have run except 'exit'
    assert not watch_prepub.was_called()
    assert not watch_post.was_called()
    assert watch_exit.was_called()
# ===== Example 30 =====
def test_workflow_compat(request):
    """
    Some of our plugins have changed from being run post-build to
    being run at exit. Let's test what happens when we try running an
    exit plugin as a post-build plugin.
    """

    this_file = inspect.getfile(PreWatched)
    mock_docker()
    fake_builder = MockInsideBuilder()
    flexmock(InsideBuilder).new_instances(fake_builder)
    watch_exit = Watcher()
    # Capture plugin-module logging so we can assert an error was emitted;
    # the original logger is restored via the test finalizer.
    fake_logger = FakeLogger()
    existing_logger = atomic_reactor.plugin.logger

    def restore_logger():
        atomic_reactor.plugin.logger = existing_logger

    request.addfinalizer(restore_logger)
    atomic_reactor.plugin.logger = fake_logger

    workflow = DockerBuildWorkflow(MOCK_SOURCE, 'test-image',
                                   postbuild_plugins=[{'name': 'store_logs_to_file',
                                                       'args': {
                                                           'watcher': watch_exit
                                                       }}],
                                   plugin_files=[this_file])

    workflow.build_docker_image()
    # The plugin must still run, but a compatibility error should be logged.
    assert watch_exit.was_called()
    assert len(fake_logger.errors) > 0
# ===== Example 31 =====
def prepare(
        tmpdir,
        insecure_registry=None,
        namespace=None,  # noqa:F811
        primary_images_tag_conf=DEFAULT_TAGS_AMOUNT,
        primary_images_annotations=DEFAULT_TAGS_AMOUNT,
        build_process_failed=False,
        reactor_config_map=False):
    """
    Boiler-plate test set-up

    Builds a workflow whose primary images come from build-result
    annotations and/or tag_conf, mocks osbs.conf.Configuration, and
    returns a PostBuildPluginsRunner for ImportImagePlugin.  With
    reactor_config_map=True the plugin settings are delivered via the
    reactor-config workspace instead of plugin args.
    """
    if MOCK:
        mock_docker()
    tasker = DockerTasker()
    workflow = DockerBuildWorkflow(SOURCE, "test-image")
    setattr(workflow, 'builder', X())
    flexmock(workflow, build_process_failed=build_process_failed)
    setattr(workflow.builder, 'image_id', 'asd123')
    setattr(workflow.builder, 'source', X())
    setattr(workflow.builder.source, 'dockerfile_path', None)
    setattr(workflow.builder.source, 'path', None)

    df = tmpdir.join('Dockerfile')
    # Write both lines in one call: py.path.local.write() truncates, so the
    # previous pair of write() calls lost the FROM line.
    df.write('FROM base\n'
             'LABEL name={}'.format(TEST_NAME_LABEL))
    setattr(workflow.builder, 'df_path', str(df))

    version_release_primary_image = 'registry.example.com/fedora:version-release'

    annotations = None
    if primary_images_annotations:
        primary_images = [
            'registry.example.com/fedora:annotation_{}'.format(x)
            for x in range(primary_images_annotations)
        ]
        primary_images.append(version_release_primary_image)
        annotations = {'repositories': {'primary': primary_images}}
        # (a stray no-op 'annotations' expression statement was removed here)
    build_result = BuildResult(annotations=annotations, image_id='foo')
    setattr(workflow, 'build_result', build_result)

    if primary_images_tag_conf:
        primary_images = [
            'registry.example.com/fedora:tag_conf_{}'.format(x)
            for x in range(primary_images_tag_conf)
        ]
        primary_images.append(version_release_primary_image)
        workflow.tag_conf.add_primary_images(primary_images)

    fake_conf = osbs.conf.Configuration(conf_file=None, openshift_url='/')

    expectation = flexmock(
        osbs.conf).should_receive('Configuration').and_return(fake_conf)
    if namespace:
        # Pin the exact kwargs the plugin must pass to Configuration().
        expectation.with_args(conf_file=None,
                              namespace=namespace,
                              verify_ssl=not insecure_registry,
                              openshift_url="/",
                              use_auth=False,
                              build_json_dir="/var/json_dir")

    plugin_args = {'imagestream': TEST_IMAGESTREAM}

    if reactor_config_map:
        openshift_map = {
            'url': '/',
            'auth': {
                'enable': False
            },
            'insecure': insecure_registry,
            'build_json_dir': '/var/json_dir',
        }
        source_registry_map = {
            'url': TEST_REGISTRY,
            'insecure': insecure_registry
        }
        workflow.plugin_workspace[ReactorConfigPlugin.key] = {}
        workflow.plugin_workspace[ReactorConfigPlugin.key][WORKSPACE_CONF_KEY] =\
            ReactorConfig({
                'version': 1,
                'openshift': openshift_map,
                'source_registry': source_registry_map
            })
    else:
        plugin_args.update({
            'docker_image_repo': TEST_REPO,
            'url': '/',
            'build_json_dir': "/var/json_dir",
            'verify_ssl': not insecure_registry,
            'use_auth': False,
            'insecure_registry': insecure_registry,
        })

    runner = PostBuildPluginsRunner(tasker, workflow,
                                    [{
                                        'name': ImportImagePlugin.key,
                                        'args': plugin_args
                                    }])

    return runner
def mock_environment(tmpdir,
                     session=None,
                     name=None,
                     component=None,
                     version=None,
                     release=None,
                     source=None,
                     build_process_failed=False,
                     docker_registry=True,
                     pulp_registries=0,
                     blocksize=None,
                     task_states=None,
                     additional_tags=None,
                     has_config=None,
                     prefer_schema1_digest=True):
    """
    Build a mocked (tasker, workflow) pair for koji-promotion plugin tests.

    Writes a Dockerfile into *tmpdir*, wires a stub builder/source into a
    DockerBuildWorkflow, populates tag and push configuration, records a
    fake exported image archive and a BuildResult, and mocks subprocess
    and the koji client session.

    :param tmpdir: directory to hold the Dockerfile and image archive
    :param session: pre-made koji session mock; created here if None
    :param name: image name used when building tag_conf entries
    :param component: value for the BZComponent/com.redhat.component labels
    :param version: value for the Version/version labels
    :param release: value for the Release/release labels
    :param source: workflow source; defaults to a mocked GitSource
    :param build_process_failed: if True, record a failed BuildResult
    :param docker_registry: if True, add a docker registry with fake digests
    :param pulp_registries: number of pulp registries to add to push_conf
    :param blocksize: accepted for caller compatibility; unused in this body
    :param task_states: NOTE(review): ignored — the session created below is
        always given task_states=None; confirm whether passing the argument
        through was intended
    :param additional_tags: extra primary tags ("name:tag") to register
    :param has_config: if True, attach a config blob to the docker registry
    :param prefer_schema1_digest: with pulp registries, store the fake digest
        as the schema-1 (v1) digest rather than the v2 digest
    :return: (DockerTasker, DockerBuildWorkflow)
    """
    if session is None:
        session = MockedClientSession('', task_states=None)
    if source is None:
        source = GitSource('git', 'git://hostname/path')

    if MOCK:
        mock_docker()
    tasker = DockerTasker()
    workflow = DockerBuildWorkflow(SOURCE, "test-image")
    base_image_id = '123456parent-id'
    workflow.source = StubSource()
    workflow.builder = StubInsideBuilder().for_workflow(workflow)
    workflow.builder.image_id = '123456imageid'
    workflow.builder.set_inspection_data({'Id': base_image_id})
    setattr(workflow, 'tag_conf', TagConf())
    # Dockerfile carries both old-style (BZComponent/Version/Release) and
    # new-style label names so label-compatibility code paths are exercised.
    with open(os.path.join(str(tmpdir), 'Dockerfile'), 'wt') as df:
        df.write(
            'FROM base\n'
            'LABEL BZComponent={component} com.redhat.component={component}\n'
            'LABEL Version={version} version={version}\n'
            'LABEL Release={release} release={release}\n'.format(
                component=component, version=version, release=release))
        workflow.builder.set_df_path(df.name)
    if name and version:
        workflow.tag_conf.add_unique_image(
            'user/test-image:{v}-timestamp'.format(v=version))
    if name and version and release:
        workflow.tag_conf.add_primary_images([
            "{0}:{1}-{2}".format(name, version, release),
            "{0}:{1}".format(name, version), "{0}:latest".format(name)
        ])

    if additional_tags:
        workflow.tag_conf.add_primary_images(
            ["{0}:{1}".format(name, tag) for tag in additional_tags])

    # Any koji.ClientSession(...) the plugin opens gets our mocked session.
    flexmock(subprocess, Popen=fake_Popen)
    flexmock(koji, ClientSession=lambda hub, opts: session)
    flexmock(GitSource)
    setattr(workflow, 'source', source)
    setattr(workflow.source, 'lg', X())
    setattr(workflow.source.lg, 'commit_id', '123456')
    setattr(workflow, 'push_conf', PushConf())
    if docker_registry:
        docker_reg = workflow.push_conf.add_docker_registry(
            'docker.example.com')

        for image in workflow.tag_conf.images:
            tag = image.to_str(registry=False)

            # The "real" digest goes into the slot (v1 vs v2) the test
            # scenario expects the plugin to pick; the other is a filler.
            if pulp_registries and prefer_schema1_digest:
                docker_reg.digests[tag] = ManifestDigest(v1=fake_digest(image),
                                                         v2='sha256:not-used')
            else:
                docker_reg.digests[tag] = ManifestDigest(v1='sha256:not-used',
                                                         v2=fake_digest(image))

            if has_config:
                docker_reg.config = {
                    'config': {
                        'architecture': LOCAL_ARCH
                    },
                    'container_config': {}
                }

    for pulp_registry in range(pulp_registries):
        workflow.push_conf.add_pulp_registry('env', 'pulp.example.com')

    # Fake exported image archive; content just needs to exist on disk.
    with open(os.path.join(str(tmpdir), 'image.tar.xz'), 'wt') as fp:
        fp.write('x' * 2**12)
        setattr(workflow, 'exported_image_sequence',
                [{
                    'path': fp.name,
                    'type': IMAGE_TYPE_DOCKER_ARCHIVE
                }])

    if build_process_failed:
        workflow.build_result = BuildResult(
            logs=["docker build log - \u2018 \u2017 \u2019 \n'"],
            fail_reason="not built")
    else:
        workflow.build_result = BuildResult(
            logs=["docker build log - \u2018 \u2017 \u2019 \n'"],
            image_id="id1234")
    workflow.prebuild_plugins_conf = {}

    # NOTE(review): the second entry has no ';' between "24000" and "RSA/..."
    # (the first entry has ";23000;") — verify whether this malformed field
    # separator is intentional test data or a typo.
    workflow.image_components = parse_rpm_output([
        "name1;1.0;1;" + LOCAL_ARCH + ";0;2000;" + FAKE_SIGMD5.decode() +
        ";23000;"
        "RSA/SHA256, Tue 30 Aug 2016 00:00:00, Key ID 01234567890abc;(none)",
        "name2;2.0;1;" + LOCAL_ARCH + ";0;3000;" + FAKE_SIGMD5.decode() +
        ";24000"
        "RSA/SHA256, Tue 30 Aug 2016 00:00:00, Key ID 01234567890abd;(none)",
    ])

    return tasker, workflow
示例#33
0
def test_workflow_plugin_error(fail_at):
    """
    Verify plugin-failure semantics of the build workflow.

    A failing plugin (without is_allowed_to_fail=True) in any pre-exit
    phase must fail the whole build and prevent later phases from
    running; exit plugins must always run regardless.
    """
    flexmock(DockerfileParser, content='df_content')
    this_file = inspect.getfile(PreRaises)
    mock_docker()
    fake_builder = MockInsideBuilder()
    flexmock(InsideBuilder).new_instances(fake_builder)

    watch_pre = Watcher()
    watch_prepub = Watcher()
    watch_buildstep = Watcher()
    watch_post = Watcher()
    watch_exit = Watcher()

    def watched(plugin_name, watcher):
        # One-plugin phase configuration recording calls via *watcher*.
        return [{'name': plugin_name, 'args': {'watcher': watcher}}]

    prebuild_plugins = watched('pre_watched', watch_pre)
    buildstep_plugins = watched('buildstep_watched', watch_buildstep)
    prepublish_plugins = watched('prepub_watched', watch_prepub)
    postbuild_plugins = watched('post_watched', watch_post)
    exit_plugins = watched('exit_watched', watch_exit)

    # Map each failing-plugin name to the phase list it belongs in.
    phase_for_failure = {
        'pre_raises': prebuild_plugins,
        'buildstep_raises': buildstep_plugins,
        'prepub_raises': prepublish_plugins,
        'post_raises': postbuild_plugins,
        'exit_raises': exit_plugins,
        'exit_raises_allowed': exit_plugins,
    }
    # Unknown fail_at means a typo in the parameter list.
    assert fail_at in phase_for_failure
    phase_for_failure[fail_at].insert(0, {'name': fail_at, 'args': {}})

    workflow = DockerBuildWorkflow(MOCK_SOURCE,
                                   'test-image',
                                   prebuild_plugins=prebuild_plugins,
                                   buildstep_plugins=buildstep_plugins,
                                   prepublish_plugins=prepublish_plugins,
                                   postbuild_plugins=postbuild_plugins,
                                   exit_plugins=exit_plugins,
                                   plugin_files=[this_file])

    # Most failures abort the build; an exit plugin explicitly allowed
    # to fail does not.
    if fail_at == 'exit_raises_allowed':
        workflow.build_docker_image()
        assert not workflow.plugins_errors
    else:
        with pytest.raises(PluginFailedException):
            workflow.build_docker_image()
        assert fail_at in workflow.plugins_errors

    # Each phase's watched plugin runs only if no earlier (or same-phase)
    # failure happened; accumulate the abort set phase by phase.
    aborts_pre = {'pre_raises'}
    aborts_buildstep = aborts_pre | {'buildstep_raises'}
    aborts_prepub = aborts_buildstep | {'prepub_raises'}
    aborts_post = aborts_prepub | {'post_raises'}

    assert watch_pre.was_called() == (fail_at not in aborts_pre)
    assert watch_buildstep.was_called() == (fail_at not in aborts_buildstep)
    assert watch_prepub.was_called() == (fail_at not in aborts_prepub)
    assert watch_post.was_called() == (fail_at not in aborts_post)

    # Exit plugins run unconditionally, even when one of them raises.
    assert watch_exit.was_called()
def test_add_labels_equal_aliases2(tmpdir, docker_tasker, workflow, caplog,
                                   base_l, df_l, expected, expected_log,
                                   reactor_config_map):
    """
    Test AddLabelsPlugin reconciliation of three labels declared equal
    ('description', 'io.k8s.description', 'description_third').

    base_l/df_l give per-label values in the base image / Dockerfile
    (falsy means absent); *expected* is the resulting value for each of
    the three labels, and *expected_log* is either a substring that must
    appear in the log or a RuntimeError meaning the plugin must fail.
    """
    if MOCK:
        mock_docker()

    # Build the Dockerfile with only the labels this scenario sets.
    df_content = "FROM fedora\n"
    plugin_labels = {}
    if df_l[0]:
        df_content += 'LABEL description="{0}"\n'.format(df_l[0])
    if df_l[1]:
        df_content += 'LABEL io.k8s.description="{0}"\n'.format(df_l[1])
    if df_l[2]:
        df_content += 'LABEL description_third="{0}"\n'.format(df_l[2])

    # Fake base-image inspection data with the base-image label values.
    base_labels = {INSPECT_CONFIG: {"Labels": {}}}
    if base_l[0]:
        base_labels[INSPECT_CONFIG]["Labels"]["description"] = base_l[0]
    if base_l[1]:
        base_labels[INSPECT_CONFIG]["Labels"]["io.k8s.description"] = base_l[1]
    if base_l[2]:
        base_labels[INSPECT_CONFIG]["Labels"]["description_third"] = base_l[2]

    df = df_parser(str(tmpdir))
    df.content = df_content

    setattr(workflow, 'builder', X(parent_images=df.parent_images))
    setattr(workflow.builder, 'df_path', df.dockerfile_path)
    setattr(workflow.builder, 'base_image_inspect', base_labels)
    flexmock(workflow, source=MockSource())

    if reactor_config_map:
        make_and_store_reactor_config_map(
            workflow, {
                'image_labels':
                plugin_labels,
                'image_equal_labels':
                [['description', 'io.k8s.description', 'description_third']]
            })

    runner = PreBuildPluginsRunner(
        docker_tasker, workflow, [{
            'name': AddLabelsPlugin.key,
            'args': {
                'labels':
                plugin_labels,
                'dont_overwrite': [],
                'auto_labels': [],
                'aliases': {},
                'equal_labels':
                [['description', 'io.k8s.description', 'description_third']]
            }
        }])

    if isinstance(expected_log, RuntimeError):
        # Conflicting equal-label values must make the plugin fail.
        with pytest.raises(PluginFailedException):
            runner.run()
    else:
        runner.run()
        assert AddLabelsPlugin.key is not None
        # Effective value: Dockerfile label wins, else base-image label.
        result_fst = df.labels.get("description") or \
            base_labels[INSPECT_CONFIG]["Labels"].get("description")
        result_snd = df.labels.get("io.k8s.description") or \
            base_labels[INSPECT_CONFIG]["Labels"].get("io.k8s.description")
        result_trd = df.labels.get("description_third") or \
            base_labels[INSPECT_CONFIG]["Labels"].get("description_third")
        assert result_fst == expected[0]
        assert result_snd == expected[1]
        assert result_trd == expected[2]

        if expected_log:
            assert expected_log in caplog.text
def test_delete_from_registry_failures(tmpdir, status_code, reactor_config_map):
    """
    Exercise DeleteFromRegistryPlugin error handling per HTTP status.

    One registry with one saved digest is configured and the registry's
    DELETE endpoint is mocked to answer *status_code*: 520 must make the
    runner raise PluginFailedException, 202 ACCEPTED must report the
    deleted digest, and any other code is tolerated with an empty result.
    """
    if MOCK:
        mock_docker()
        mock_get_retry_session()

    req_registries = {DOCKER0_REGISTRY: True}
    saved_digests = {DOCKER0_REGISTRY: {'foo/bar:latest': DIGEST1}}

    tasker = DockerTasker()
    workflow = DockerBuildWorkflow({"provider": "git", "uri": "asd"}, TEST_IMAGE)
    setattr(workflow, 'builder', X)

    # Build per-registry plugin args and the reactor-config registry list,
    # writing a .dockercfg secret for registries that require one.
    args_registries = {}
    config_map_registries = []
    for reg, use_secret in req_registries.items():
        cm_reg = {'url': reg}
        if use_secret:
            temp_dir = mkdtemp(dir=str(tmpdir))
            with open(os.path.join(temp_dir, ".dockercfg"), "w+") as dockerconfig:
                dockerconfig_contents = {
                    reg: {
                        "username": "******", "password": reg
                    }
                }
                dockerconfig.write(json.dumps(dockerconfig_contents))
                dockerconfig.flush()
                args_registries[reg] = {'secret': temp_dir}
                cm_reg['auth'] = {'cfg_path': temp_dir}
        else:
            args_registries[reg] = {}
        # Append inside the loop so every requested registry lands in the
        # config map (previously this ran after the loop, only recording
        # the last registry and raising NameError for an empty dict; the
        # sibling test_delete_from_registry_plugin does it per-iteration).
        config_map_registries.append(cm_reg)

    for reg, digests in saved_digests.items():
        r = DockerRegistry(reg)
        for tag, dig in digests.items():
            r.digests[tag] = ManifestDigest(v1='not-used', v2=dig)
        workflow.push_conf._registries['docker'].append(r)

    if reactor_config_map:
        workflow.plugin_workspace[ReactorConfigPlugin.key] = {}
        workflow.plugin_workspace[ReactorConfigPlugin.key][WORKSPACE_CONF_KEY] =\
            ReactorConfig({'version': 1, 'registries': config_map_registries})

    runner = ExitPluginsRunner(
        tasker,
        workflow,
        [{
            'name': DeleteFromRegistryPlugin.key,
            'args': {
                'registries': args_registries
            },
        }]
    )

    # Mock one DELETE per distinct digest to return the parametrized code.
    deleted_digests = set()
    for reg, digests in saved_digests.items():
        if reg not in req_registries:
            continue

        for tag, dig in digests.items():
            if dig in deleted_digests:
                continue
            url = "https://" + reg + "/v2/" + tag.split(":")[0] + "/manifests/" + dig
            auth_type = HTTPRegistryAuth

            response = requests.Response()
            response.status_code = status_code

            (flexmock(requests.Session)
                .should_receive('delete')
                .with_args(url, verify=bool, auth=auth_type)
                .and_return(response))

            deleted_digests.add(dig)

    if status_code == 520:
        # Unexpected server error: the plugin must fail the runner.
        with pytest.raises(PluginFailedException):
            result = runner.run()
            assert result[DeleteFromRegistryPlugin.key] == set([])
    else:
        result = runner.run()

        if status_code == requests.codes.ACCEPTED:
            assert result[DeleteFromRegistryPlugin.key] == deleted_digests
        else:
            assert result[DeleteFromRegistryPlugin.key] == set([])
示例#36
0
 def test_sha256_prefix(self, new_id, expected_id):
     """
     Check sha256-prefix normalization of the squash result.

     Squashing is mocked to yield *new_id*; the builder's image_id must
     afterwards equal *expected_id* (i.e. with the expected sha256:
     prefix handling applied).
     """
     if MOCK:
         mock_docker()
     self.should_squash_with_kwargs(new_id=new_id)
     self.run_plugin_with_args({})
     assert self.workflow.builder.image_id == expected_id
示例#37
0
def prepare(check_repo_retval=0,
            existing_layers=[],
            subprocess_exceptions=False,
            conf=None):
    """
    Build a (tasker, workflow) pair with dockpulp fully mocked for
    pulp-push plugin tests.

    :param check_repo_retval: value the mocked dockpulp check_repo returns
        (non-zero simulates a broken repo)
    :param existing_layers: layer ids the mocked getImageIdsExist reports
        as already present in pulp; pass None to skip mocking that call
        entirely.  NOTE(review): mutable default — harmless here since the
        list is only read, never mutated, but a linter (B006) will flag it.
    :param subprocess_exceptions: if True, subprocess.check_call raises
    :param conf: postbuild plugin configuration for the workflow
    :return: (DockerTasker, DockerBuildWorkflow)
    """
    if MOCK:
        mock_docker()
    tasker = DockerTasker()
    workflow = DockerBuildWorkflow(SOURCE,
                                   "test-image",
                                   postbuild_plugins=conf)
    setattr(workflow, 'builder', X())
    setattr(workflow.builder, 'source', X())
    setattr(workflow.builder.source, 'dockerfile_path', None)
    setattr(workflow.builder.source, 'path', None)
    setattr(workflow, 'tag_conf', X())
    setattr(workflow.tag_conf, 'images', [
        ImageName(repo="image-name1"),
        ImageName(namespace="prefix", repo="image-name2"),
        ImageName(repo="image-name3", tag="asd")
    ])

    # Mock dockpulp and docker
    dockpulp.Pulp = flexmock(dockpulp.Pulp)
    dockpulp.Pulp.registry = 'registry.example.com'
    (flexmock(dockpulp.imgutils).should_receive('get_metadata').with_args(
        object).and_return([{
            'id': 'foo'
        }]))
    (flexmock(dockpulp.imgutils).should_receive('get_manifest').with_args(
        object).and_return([{
            'id': 'foo'
        }]))
    (flexmock(dockpulp.imgutils).should_receive('get_versions').with_args(
        object).and_return({'id': '1.6.0'}))
    (flexmock(dockpulp.imgutils).should_receive('check_repo').and_return(
        check_repo_retval))
    (flexmock(dockpulp.Pulp).should_receive('set_certs').with_args(
        object, object))
    # getRepos is mocked to report repos for the first two tag_conf images.
    (flexmock(dockpulp.Pulp).should_receive('getRepos').with_args(
        list, fields=list).and_return([{
            "id": "redhat-image-name1"
        }, {
            "id": "redhat-prefix-image-name2"
        }]))
    (flexmock(dockpulp.Pulp).should_receive('createRepo'))
    # 'unicode' here implies this test targets Python 2.
    (flexmock(dockpulp.Pulp).should_receive('upload').with_args(unicode)
     ).at_most().once()
    (flexmock(dockpulp.Pulp).should_receive('copy').with_args(
        unicode, unicode))
    (flexmock(dockpulp.Pulp).should_receive('updateRepo').with_args(
        unicode, dict))
    (flexmock(dockpulp.Pulp).should_receive('crane').with_args(
        list, wait=True).and_return([2, 3, 4]))
    # NOTE(review): should_receive('') with an empty method name looks like
    # a lost method name (possibly a scraping artifact) — verify upstream.
    (flexmock(dockpulp.Pulp).should_receive('').with_args(
        object, object).and_return([1, 2, 3]))
    (flexmock(dockpulp.Pulp).should_receive('watch_tasks').with_args(list))
    if existing_layers is not None:
        (flexmock(dockpulp.Pulp).should_receive('getImageIdsExist').with_args(
            list).and_return(existing_layers))
    if subprocess_exceptions:
        (flexmock(subprocess).should_receive("check_call").and_raise(Exception)
         )

    # NOTE(review): mock_docker() may already have run above when MOCK is
    # set; this second unconditional call appears redundant — confirm.
    mock_docker()
    return tasker, workflow
def test_pull_base_image_special(add_another_parent, special_image, change_base, skip_parent,
                                 reactor_config_map, monkeypatch):
    """
    Test PullBaseImagePlugin with "special" base images (e.g. scratch or
    custom bases) that may not be pulled/retagged like normal parents.

    :param add_another_parent: also register a normal sha-pinned parent
    :param special_image: the special base image name under test
    :param change_base: whether the plugin is expected to retag the base
        to a unique build-local name
    :param skip_parent: number of parents expected NOT to get a unique tag
    """
    # The plugin derives unique local tags from the build name in $BUILD.
    monkeypatch.setenv("BUILD", json.dumps({
        'metadata': {
            'name': UNIQUE_ID,
        },
    }))

    if MOCK:
        mock_docker(remember_images=True)

    tasker = DockerTasker(retry_times=0)
    buildstep_plugin = [{
        'name': PLUGIN_BUILD_ORCHESTRATE_KEY,
        'args': {'platforms': ['x86_64']},
    }]
    workflow = DockerBuildWorkflow(MOCK_SOURCE, special_image, buildstep_plugins=buildstep_plugin,)
    builder = workflow.builder = MockBuilder()
    builder.parent_images = {}
    builder.set_base_image(special_image)
    if add_another_parent:
        builder.parent_images[BASE_IMAGE_NAME_W_SHA] = None
    parent_images = builder.parent_images

    # Each non-skipped parent should get a unique local tag UNIQUE_ID:<n>;
    # none of them may exist before the plugin runs.
    expected = set([])
    for nonce in range(len(parent_images) - skip_parent):
        expected.add("{}:{}".format(UNIQUE_ID, nonce))
    for image in expected:
        assert not tasker.image_exists(image)

    if reactor_config_map:
        workflow.plugin_workspace[ReactorConfigPlugin.key] = {}
        workflow.plugin_workspace[ReactorConfigPlugin.key][WORKSPACE_CONF_KEY] =\
            ReactorConfig({'version': 1,
                           'source_registry': {'url': LOCALHOST_REGISTRY,
                                               'insecure': True},
                           'registries_organization': None})

    runner = PreBuildPluginsRunner(
        tasker,
        workflow,
        [{
            'name': PullBaseImagePlugin.key,
            'args': {'parent_registry': LOCALHOST_REGISTRY,
                     'parent_registry_insecure': True,
                     }
        }]
    )

    runner.run()
    # Special images either keep their name or get the unique local tag.
    if change_base:
        assert workflow.builder.base_image.to_str().startswith(UNIQUE_ID)
    else:
        assert workflow.builder.base_image.to_str().startswith(special_image)
    for image in expected:
        assert tasker.image_exists(image)
        assert image in workflow.pulled_base_images

    # Everything recorded as pulled must actually exist locally.
    for image in workflow.pulled_base_images:
        assert tasker.image_exists(image)

    # Every parent got tagged, and all tags are distinct.
    for df, tagged in workflow.builder.parent_images.items():
        assert tagged is not None, "Did not tag parent image " + str(df)
    assert len(set(workflow.builder.parent_images.values())) == len(workflow.builder.parent_images)
示例#39
0
def prepare(success=True, v1_image_ids=None):
    """
    Build a (tasker, workflow) pair with dockpulp mocked and per-platform
    orchestrator build info populated, for pulp-sync/tag plugin tests.

    :param success: if True, record a successful BuildResult; otherwise a
        failed one carrying per-platform worker-build annotations
    :param v1_image_ids: optional mapping platform -> v1 image id; each
        entry overrides that platform's BuildInfo.  Defaults to no
        overrides.
    :return: (DockerTasker, DockerBuildWorkflow)
    """
    # Sentinel default instead of a mutable {} default argument (B006);
    # behavior is unchanged for every existing call pattern.
    if v1_image_ids is None:
        v1_image_ids = {}
    if MOCK:
        mock_docker()
    tasker = DockerTasker()
    workflow = DockerBuildWorkflow(SOURCE, "test-image")
    setattr(workflow, 'builder', X())
    setattr(workflow.builder, 'source', X())
    setattr(workflow.builder.source, 'dockerfile_path', None)
    setattr(workflow.builder.source, 'path', None)
    setattr(workflow, 'tag_conf', X())
    setattr(workflow.tag_conf, 'images', [
        ImageName(repo="image-name1"),
        ImageName(repo="image-name1", tag="2"),
        ImageName(namespace="namespace", repo="image-name2"),
        ImageName(repo="image-name3", tag="asd")
    ])

    # Mock dockpulp and docker
    dockpulp.Pulp = flexmock(dockpulp.Pulp)
    dockpulp.Pulp.registry = 'registry.example.com'
    (flexmock(dockpulp.imgutils).should_receive('check_repo').and_return(0))
    (flexmock(dockpulp.Pulp).should_receive('set_certs').with_args(
        object, object))
    (flexmock(dockpulp.Pulp).should_receive('getRepos').with_args(
        list, fields=list).and_return([{
            "id": "redhat-image-name1"
        }, {
            "id": "redhat-namespace-image-name2"
        }]))
    (flexmock(dockpulp.Pulp).should_receive('createRepo'))
    # 'unicode' here implies this test targets Python 2.
    (flexmock(dockpulp.Pulp).should_receive('copy').with_args(
        unicode, unicode))
    (flexmock(dockpulp.Pulp).should_receive('updateRepo').with_args(
        unicode, dict))
    # NOTE(review): should_receive('') with an empty method name looks like
    # a lost method name (possibly a scraping artifact) — verify upstream.
    (flexmock(dockpulp.Pulp).should_receive('').with_args(
        object, object).and_return([1, 2, 3]))

    # Worker-build annotations attached to a failed build; 'bogus' has no
    # build metadata at all (regression data for OSBS-5262).
    annotations = {
        'worker-builds': {
            'x86_64': {
                'build': {
                    'build-name': 'build-1-x64_64',
                },
                'metadata_fragment': 'configmap/build-1-x86_64-md',
                'metadata_fragment_key': 'metadata.json',
            },
            'ppc64le': {
                'build': {
                    'build-name': 'build-1-ppc64le',
                },
                'metadata_fragment': 'configmap/build-1-ppc64le-md',
                'metadata_fragment_key': 'metadata.json',
            },
            'bogus': {},
        },
    }

    if success:
        workflow.build_result = BuildResult(image_id='12345')
    else:
        workflow.build_result = BuildResult(fail_reason="not built",
                                            annotations=annotations)

    build_info = {}
    build_info['x86_64'] = BuildInfo()
    build_info['ppc64le'] = BuildInfo()
    build_info['bogus'] = BuildInfo(unset_annotations=True)  # OSBS-5262

    # Caller-supplied v1 image ids replace the default BuildInfo entries.
    for platform, v1_image_id in v1_image_ids.items():
        build_info[platform] = BuildInfo(v1_image_id)

    workflow.plugin_workspace = {
        OrchestrateBuildPlugin.key: {
            WORKSPACE_KEY_BUILD_INFO: build_info
        }
    }

    mock_docker()
    return tasker, workflow
示例#40
0
def test_pull_base_image_plugin(parent_registry,
                                df_base,
                                expected,
                                not_expected,
                                reactor_config_map,
                                workflow_callback=None,
                                check_platforms=False,
                                parent_images=None):
    """
    Core test helper for PullBaseImagePlugin.

    Runs the plugin against *df_base* (plus optional extra parents) and
    verifies that exactly the *expected* images (plus one unique local
    tag per parent) exist afterwards, that *not_expected* images do not,
    and that every parent image received a distinct local tag.

    :param parent_registry: registry passed to the plugin; with the
        reactor config map enabled, None must make the plugin fail
    :param workflow_callback: optional hook to tweak the workflow before
        the runner is created
    :param check_platforms: forwarded to the plugin's check_platforms arg
    """
    if MOCK:
        mock_docker(remember_images=True)

    tasker = DockerTasker(retry_times=0)
    buildstep_plugin = [{
        'name': PLUGIN_BUILD_ORCHESTRATE_KEY,
        'args': {
            'platforms': ['x86_64']
        },
    }]
    parent_images = parent_images or {df_base: None}
    workflow = DockerBuildWorkflow(
        MOCK_SOURCE,
        'test-image',
        buildstep_plugins=buildstep_plugin,
    )
    builder = workflow.builder = MockBuilder()
    builder.base_image = builder.original_base_image = ImageName.parse(df_base)
    builder.parent_images = parent_images

    # Each parent additionally gets a unique local tag UNIQUE_ID:<n>;
    # nothing we expect (or explicitly don't expect) may pre-exist.
    expected = set(expected)
    for nonce in range(len(parent_images)):
        expected.add("{}:{}".format(UNIQUE_ID, nonce))
    all_images = set(expected).union(not_expected)
    for image in all_images:
        assert not tasker.image_exists(image)

    if reactor_config_map:
        workflow.plugin_workspace[ReactorConfigPlugin.key] = {}
        workflow.plugin_workspace[ReactorConfigPlugin.key][WORKSPACE_CONF_KEY] =\
            ReactorConfig({'version': 1,
                           'source_registry': {'url': parent_registry,
                                               'insecure': True}})

    if workflow_callback:
        workflow = workflow_callback(workflow)

    runner = PreBuildPluginsRunner(tasker, workflow,
                                   [{
                                       'name': PullBaseImagePlugin.key,
                                       'args': {
                                           'parent_registry': parent_registry,
                                           'parent_registry_insecure': True,
                                           'check_platforms': check_platforms
                                       }
                                   }])

    # With a reactor config map a missing source registry URL is fatal.
    if parent_registry is None and reactor_config_map:
        with pytest.raises(PluginFailedException):
            runner.run()
        return

    runner.run()

    for image in expected:
        assert tasker.image_exists(image)
        assert image in workflow.pulled_base_images

    for image in not_expected:
        assert not tasker.image_exists(image)

    # Everything recorded as pulled must actually exist locally.
    for image in workflow.pulled_base_images:
        assert tasker.image_exists(image)

    for df, tagged in parent_images.items():
        assert tagged is not None, "Did not tag parent image " + df
    # tags should all be unique
    assert len(set(parent_images.values())) == len(parent_images)
def test_add_labels_aliases(tmpdir, docker_tasker, workflow, caplog,
                            df_old_as_plugin_arg, df_new_as_plugin_arg,
                            base_old, base_new, df_old, df_new, exp_old,
                            exp_new, exp_log, reactor_config_map):
    """
    Test AddLabelsPlugin alias resolution of label_old -> label_new.

    Each of the two labels may come from the base image, the Dockerfile,
    or the plugin's own 'labels' argument; exp_old/exp_new are the values
    expected after the plugin runs, exp_log an optional log substring.
    """
    if MOCK:
        mock_docker()

    # Route each provided label value either into the plugin's label
    # argument or into a LABEL line of the generated Dockerfile.
    plugin_labels = {}
    df_lines = ["FROM fedora\n"]
    for label_name, value, as_plugin_arg in (
            ("label_old", df_old, df_old_as_plugin_arg),
            ("label_new", df_new, df_new_as_plugin_arg)):
        if not value:
            continue
        if as_plugin_arg:
            plugin_labels[label_name] = value
        else:
            df_lines.append('LABEL {0}="{1}"\n'.format(label_name, value))
    df_content = "".join(df_lines)

    # Fake base-image inspection data.
    inspect_labels = {}
    if base_old:
        inspect_labels["label_old"] = base_old
    if base_new:
        inspect_labels["label_new"] = base_new
    base_labels = {INSPECT_CONFIG: {"Labels": inspect_labels}}

    df = df_parser(str(tmpdir))
    df.content = df_content

    setattr(workflow, 'builder', X(parent_images=df.parent_images))
    setattr(workflow.builder, 'df_path', df.dockerfile_path)
    setattr(workflow.builder, 'base_image_inspect', base_labels)
    flexmock(workflow, source=MockSource())

    if reactor_config_map:
        make_and_store_reactor_config_map(workflow,
                                          {'image_labels': plugin_labels})

    runner = PreBuildPluginsRunner(docker_tasker, workflow,
                                   [{
                                       'name': AddLabelsPlugin.key,
                                       'args': {
                                           'labels': plugin_labels,
                                           'dont_overwrite': [],
                                           'auto_labels': [],
                                           'aliases': {
                                               "label_old": "label_new"
                                           },
                                       }
                                   }])

    runner.run()
    assert AddLabelsPlugin.key is not None

    def effective(label_name):
        # Dockerfile label wins over the base-image label.
        return (df.labels.get(label_name) or
                base_labels[INSPECT_CONFIG]["Labels"].get(label_name))

    assert effective("label_old") == exp_old
    assert effective("label_new") == exp_new

    if exp_log:
        assert exp_log in caplog.text
def mock_environment(tmpdir, session=None, name=None,
                     component=None, version=None, release=None,
                     source=None, build_process_failed=False,
                     is_rebuild=True, docker_registry=True,
                     pulp_registries=0, blocksize=None,
                     task_states=None, additional_tags=None,
                     has_config=None):
    """
    Build a mocked (tasker, workflow) pair for koji plugin tests that
    need OSBS build-log/pod mocks and rebuild/rpm-qa plugin results.

    Writes a Dockerfile into *tmpdir*, wires a stubbed builder into a
    DockerBuildWorkflow, populates tag/push configuration, a fake
    exported image archive and a BuildResult, and mocks subprocess,
    the koji client session and OSBS log/pod queries.

    :param is_rebuild: value recorded as the CheckAndSetRebuildPlugin result
    :param blocksize: accepted for caller compatibility; unused in this body
    :param task_states: NOTE(review): ignored — the session created below is
        always given task_states=None; confirm whether passing the argument
        through was intended
    :return: (DockerTasker, DockerBuildWorkflow)
    """
    if session is None:
        session = MockedClientSession('', task_states=None)
    if source is None:
        source = GitSource('git', 'git://hostname/path')

    if MOCK:
        mock_docker()
    tasker = DockerTasker()
    workflow = DockerBuildWorkflow(SOURCE, "test-image")
    base_image_id = '123456parent-id'
    setattr(workflow, '_base_image_inspect', {'Id': base_image_id})
    setattr(workflow, 'builder', X())
    setattr(workflow.builder, 'image_id', '123456imageid')
    setattr(workflow.builder, 'base_image', ImageName(repo='Fedora', tag='22'))
    setattr(workflow.builder, 'source', X())
    setattr(workflow.builder, 'built_image_info', {'ParentId': base_image_id})
    setattr(workflow.builder.source, 'dockerfile_path', None)
    setattr(workflow.builder.source, 'path', None)
    setattr(workflow, 'tag_conf', TagConf())
    # Dockerfile carries both old- and new-style label names so
    # label-compatibility code paths are exercised.
    with open(os.path.join(str(tmpdir), 'Dockerfile'), 'wt') as df:
        df.write('FROM base\n'
                 'LABEL BZComponent={component} com.redhat.component={component}\n'
                 'LABEL Version={version} version={version}\n'
                 'LABEL Release={release} release={release}\n'
                 .format(component=component, version=version, release=release))
        setattr(workflow.builder, 'df_path', df.name)
    if name and version:
        workflow.tag_conf.add_unique_image('user/test-image:{v}-timestamp'
                                           .format(v=version))
    if name and version and release:
        workflow.tag_conf.add_primary_images(["{0}:{1}-{2}".format(name,
                                                                   version,
                                                                   release),
                                              "{0}:{1}".format(name, version),
                                              "{0}:latest".format(name)])

    if additional_tags:
        workflow.tag_conf.add_primary_images(["{0}:{1}".format(name, tag)
                                              for tag in additional_tags])

    # Any koji.ClientSession(...) the plugin opens gets our mocked session;
    # OSBS log and pod queries return canned responses.
    flexmock(subprocess, Popen=fake_Popen)
    flexmock(koji, ClientSession=lambda hub, opts: session)
    flexmock(GitSource)
    (flexmock(OSBS)
        .should_receive('get_build_logs')
        .with_args(BUILD_ID)
        .and_return('build logs - \u2018 \u2017 \u2019'))
    (flexmock(OSBS)
        .should_receive('get_pod_for_build')
        .with_args(BUILD_ID)
        .and_return(MockedPodResponse()))
    setattr(workflow, 'source', source)
    setattr(workflow.source, 'lg', X())
    setattr(workflow.source.lg, 'commit_id', '123456')
    setattr(workflow, 'push_conf', PushConf())
    if docker_registry:
        docker_reg = workflow.push_conf.add_docker_registry('docker.example.com')

        for image in workflow.tag_conf.images:
            tag = image.to_str(registry=False)
            # With pulp, the "real" digest sits in the v1 (schema 1) slot;
            # otherwise in the v2 slot.
            if pulp_registries:
                docker_reg.digests[tag] = ManifestDigest(v1=fake_digest(image),
                                                         v2='sha256:not-used')
            else:
                docker_reg.digests[tag] = ManifestDigest(v1='sha256:not-used',
                                                         v2=fake_digest(image))

            if has_config:
                docker_reg.config = {
                    'config': {'architecture': 'x86_64'},
                    'container_config': {}
                }

    for pulp_registry in range(pulp_registries):
        workflow.push_conf.add_pulp_registry('env', 'pulp.example.com')

    # Fake exported image archive; content just needs to exist on disk.
    with open(os.path.join(str(tmpdir), 'image.tar.xz'), 'wt') as fp:
        fp.write('x' * 2**12)
        setattr(workflow, 'exported_image_sequence', [{'path': fp.name}])

    if build_process_failed:
        workflow.build_result = BuildResult(logs=["docker build log - \u2018 \u2017 \u2019 \n'"],
                                            fail_reason="not built")
    else:
        workflow.build_result = BuildResult(logs=["docker build log - \u2018 \u2017 \u2019 \n'"],
                                            image_id="id1234")
    workflow.prebuild_plugins_conf = {}
    workflow.prebuild_results[CheckAndSetRebuildPlugin.key] = is_rebuild
    # NOTE(review): the second entry has no ';' between "24000" and "RSA/..."
    # (the first has ";23000;") — verify whether this malformed separator is
    # intentional test data or a typo.
    workflow.postbuild_results[PostBuildRPMqaPlugin.key] = [
        "name1;1.0;1;x86_64;0;2000;" + FAKE_SIGMD5.decode() + ";23000;"
        "RSA/SHA256, Tue 30 Aug 2016 00:00:00, Key ID 01234567890abc;(none)",
        "name2;2.0;1;x86_64;0;3000;" + FAKE_SIGMD5.decode() + ";24000"
        "RSA/SHA256, Tue 30 Aug 2016 00:00:00, Key ID 01234567890abd;(none)",
    ]

    return tasker, workflow
def test_delete_from_registry_plugin(saved_digests, req_registries, tmpdir, orchestrator,
                                     manifest_list_digests, reactor_config_map):
    """
    DeleteFromRegistryPlugin must delete every digest that was pushed to a
    requested registry (and nothing else), issuing exactly one DELETE per
    unique digest; its result must be the set of deleted digests.
    """
    if MOCK:
        mock_docker()
        mock_get_retry_session()

    # In the orchestrator case the digests come from worker-build
    # annotations instead of workflow.push_conf (see below).
    buildstep_plugin = None
    if orchestrator:
        ann_digests = []
        buildstep_plugin = [{
            'name': OrchestrateBuildPlugin.key,
            'args': {
                'platforms': "x86_64"
            },
        }]

    tasker = DockerTasker()
    workflow = DockerBuildWorkflow({"provider": "git", "uri": "asd"}, TEST_IMAGE,
                                   buildstep_plugins=buildstep_plugin, )
    setattr(workflow, 'builder', X)

    # Build both the plugin's 'registries' argument and the
    # reactor-config-map registry list; registries that use a secret get a
    # temporary .dockercfg written to disk.
    args_registries = {}
    config_map_regiestries = []
    for reg, use_secret in req_registries.items():
        cm_reg = {'url': reg}
        if use_secret:
            temp_dir = mkdtemp(dir=str(tmpdir))
            with open(os.path.join(temp_dir, ".dockercfg"), "w+") as dockerconfig:
                dockerconfig_contents = {
                    reg: {
                        "username": "******", "password": reg
                    }
                }
                dockerconfig.write(json.dumps(dockerconfig_contents))
                dockerconfig.flush()
                args_registries[reg] = {'secret': temp_dir}
                cm_reg['auth'] = {'cfg_path': temp_dir}
        else:
            args_registries[reg] = {}
        config_map_regiestries.append(cm_reg)

    # Record the digests the build "pushed": as worker-build annotation
    # entries when orchestrating, as docker-registry push_conf entries
    # otherwise.
    for reg, digests in saved_digests.items():
        if orchestrator:
            for tag, dig in digests.items():
                repo = tag.split(':')[0]
                t = tag.split(':')[1]
                ann_digests.append({
                    'digest': dig,
                    'tag': t,
                    'repository': repo,
                    'registry': reg,
                })
        else:
            r = DockerRegistry(reg)
            for tag, dig in digests.items():
                r.digests[tag] = ManifestDigest(v1='not-used', v2=dig)
            workflow.push_conf._registries['docker'].append(r)

    group_manifest_digests = {}
    if orchestrator:
        build_annotations = {'digests': ann_digests}
        annotations = {'worker-builds': {'x86_64': build_annotations}}
        setattr(workflow, 'build_result', Y)
        setattr(workflow.build_result, 'annotations', annotations)

        # group_manifest digest should be added only
        # if there are worker builds and images are pushed to one registry
        if len(req_registries) == 1 and len(saved_digests.keys()) == 1 and \
           all(saved_digests.values()):
            workflow.postbuild_results[PLUGIN_GROUP_MANIFESTS_KEY] = manifest_list_digests
            for ml_repo, ml_digest in manifest_list_digests.items():
                for reg in req_registries:
                    if reg in saved_digests:
                        group_manifest_digests.setdefault(reg, {})
                        group_manifest_digests[reg] = saved_digests[reg].copy()
                        group_manifest_digests[reg][ml_repo] = ml_digest.default

    # Full set of digests the plugin is expected to consider for deletion.
    result_digests = saved_digests.copy()
    result_digests.update(group_manifest_digests)

    if reactor_config_map:
        workflow.plugin_workspace[ReactorConfigPlugin.key] = {}
        workflow.plugin_workspace[ReactorConfigPlugin.key][WORKSPACE_CONF_KEY] =\
            ReactorConfig({'version': 1, 'registries': config_map_regiestries})

    runner = ExitPluginsRunner(
        tasker,
        workflow,
        [{
            'name': DeleteFromRegistryPlugin.key,
            'args': {
                'registries': args_registries
            },
        }]
    )

    # Expect exactly one DELETE per unique digest in a requested registry;
    # flexmock's .once() enforces the call count when the runner executes.
    deleted_digests = set()
    for reg, digests in result_digests.items():
        if reg not in req_registries:
            continue

        for tag, dig in digests.items():
            if dig in deleted_digests:
                continue
            url = "https://" + reg + "/v2/" + tag.split(":")[0] + "/manifests/" + dig
            auth_type = HTTPRegistryAuth
            (flexmock(requests.Session)
                .should_receive('delete')
                .with_args(url, verify=bool, auth=auth_type)
                .once()
                .and_return(flexmock(status_code=202, ok=True, raise_for_status=lambda: None)))
            deleted_digests.add(dig)

    result = runner.run()
    assert result[DeleteFromRegistryPlugin.key] == deleted_digests
示例#44
0
def mock_environment(tmpdir,
                     session=None,
                     name=None,
                     component=None,
                     version=None,
                     release=None,
                     source=None,
                     build_process_failed=False,
                     is_rebuild=True,
                     pulp_registries=0,
                     blocksize=None,
                     task_states=None,
                     additional_tags=None):
    """
    Set up a mocked koji/docker build environment for the tests.

    :param tmpdir: temp directory; the Dockerfile and exported image go here
    :param session: koji client session; MockedClientSession created if None
    :param name: image name used for primary tags
    :param component: BZComponent / com.redhat.component label value
    :param version: Version / version label value
    :param release: Release / release label value
    :param source: workflow source; defaults to a git source
    :param build_process_failed: whether to mark the build as failed
    :param is_rebuild: result recorded for CheckAndSetRebuildPlugin
    :param pulp_registries: number of pulp registries to add to push_conf
    :param blocksize: accepted for caller compatibility; not used here
    :param task_states: koji task state sequence for the mocked session
    :param additional_tags: extra primary tags to add for `name`
    :return: (tasker, workflow) pair
    """
    if session is None:
        # Bug fix: forward task_states to the mocked session.  It was
        # hard-coded to None, silently ignoring the caller's requested
        # task state sequence.
        session = MockedClientSession('', task_states=task_states)
    if source is None:
        source = GitSource('git', 'git://hostname/path')

    if MOCK:
        mock_docker()
    tasker = DockerTasker()
    workflow = DockerBuildWorkflow(SOURCE, "test-image")
    base_image_id = '123456parent-id'
    setattr(workflow, '_base_image_inspect', {'Id': base_image_id})
    setattr(workflow, 'builder', X())
    setattr(workflow.builder, 'image_id', '123456imageid')
    setattr(workflow.builder, 'base_image', ImageName(repo='Fedora', tag='22'))
    setattr(workflow.builder, 'source', X())
    setattr(workflow.builder, 'built_image_info', {'ParentId': base_image_id})
    setattr(workflow.builder.source, 'dockerfile_path', None)
    setattr(workflow.builder.source, 'path', None)
    setattr(workflow, 'tag_conf', TagConf())
    # Write a Dockerfile carrying the component/version/release labels so
    # label-reading plugins find them.
    with open(os.path.join(str(tmpdir), 'Dockerfile'), 'wt') as df:
        df.write(
            'FROM base\n'
            'LABEL BZComponent={component} com.redhat.component={component}\n'
            'LABEL Version={version} version={version}\n'
            'LABEL Release={release} release={release}\n'.format(
                component=component, version=version, release=release))
        setattr(workflow.builder, 'df_path', df.name)
    if name and version:
        workflow.tag_conf.add_unique_image(
            'user/test-image:{v}-timestamp'.format(v=version))
    if name and version and release:
        workflow.tag_conf.add_primary_images([
            "{0}:{1}-{2}".format(name, version, release),
            "{0}:{1}".format(name, version), "{0}:latest".format(name)
        ])

    if additional_tags:
        workflow.tag_conf.add_primary_images(
            ["{0}:{1}".format(name, tag) for tag in additional_tags])

    # Mock out the external services: subprocess, koji, git, and OSBS.
    flexmock(subprocess, Popen=fake_Popen)
    flexmock(koji, ClientSession=lambda hub: session)
    flexmock(GitSource)
    (flexmock(OSBS).should_receive('get_build_logs').with_args(
        BUILD_ID).and_return('build logs'))
    (flexmock(OSBS).should_receive('get_pod_for_build').with_args(
        BUILD_ID).and_return(MockedPodResponse()))
    setattr(workflow, 'source', source)
    setattr(workflow.source, 'lg', X())
    setattr(workflow.source.lg, 'commit_id', '123456')
    setattr(workflow, 'build_logs', ['docker build log\n'])
    setattr(workflow, 'push_conf', PushConf())
    docker_reg = workflow.push_conf.add_docker_registry('docker.example.com')

    for image in workflow.tag_conf.images:
        tag = image.to_str(registry=False)
        docker_reg.digests[tag] = fake_digest(image)

    for pulp_registry in range(pulp_registries):
        workflow.push_conf.add_pulp_registry('env', 'pulp.example.com')

    # A small fake exported image tarball on disk.
    with open(os.path.join(str(tmpdir), 'image.tar.xz'), 'wt') as fp:
        fp.write('x' * 2**12)
        setattr(workflow, 'exported_image_sequence', [{'path': fp.name}])

    setattr(workflow, 'build_failed', build_process_failed)
    workflow.prebuild_results[CheckAndSetRebuildPlugin.key] = is_rebuild
    workflow.postbuild_results[PostBuildRPMqaPlugin.key] = [
        "name1,1.0,1,x86_64,0,2000," + FAKE_SIGMD5.decode() + ",23000",
        "name2,2.0,1,x86_64,0,3000," + FAKE_SIGMD5.decode() + ",24000",
    ]

    return tasker, workflow
示例#45
0
def docker_tasker():
    """Provide a DockerTasker with retries disabled (docker is mocked when MOCK)."""
    if MOCK:
        mock_docker()
    tasker = DockerTasker(retry_times=0)
    return tasker
示例#46
0
from atomic_reactor.plugin import InputPluginsRunner
import atomic_reactor.cli.main

from tests.fixtures import is_registry_running, temp_image_name, get_uuid  # noqa
from tests.constants import LOCALHOST_REGISTRY, DOCKERFILE_GIT, DOCKERFILE_OK_PATH, FILES, MOCK

# Pull in the docker mock only when mocking is enabled for this test run.
if MOCK:
    from tests.docker_mock import mock_docker

# Unique build-image names; populated by setup_module() below.
PRIV_BUILD_IMAGE = None
DH_BUILD_IMAGE = None

logger = logging.getLogger('atomic_reactor.tests')

if MOCK:
    mock_docker()
# Module-level tasker shared by the tests in this file.
dt = DockerTasker()
reactor_root = os.path.dirname(os.path.dirname(__file__))

# Decorator to parametrize a test over both supported source providers.
with_all_sources = pytest.mark.parametrize('source_provider, uri', [
    ('git', DOCKERFILE_GIT),
    ('path', DOCKERFILE_OK_PATH),
])

# TEST-SUITE SETUP


def setup_module(module):
    """Allocate fresh, unique names for the build images used by this module."""
    global PRIV_BUILD_IMAGE, DH_BUILD_IMAGE
    PRIV_BUILD_IMAGE, DH_BUILD_IMAGE = get_uuid(), get_uuid()
示例#47
0
def test_cancel_build(request, fail_at):
    """
    Verifies that exit plugins are executed when the build is canceled
    """
    # Make the phase we're testing send us SIGTERM
    phase_signal = defaultdict(lambda: None)
    phase_signal[fail_at] = signal.SIGTERM
    flexmock(DockerfileParser, content='df_content')
    this_file = inspect.getfile(PreRaises)
    mock_docker()
    fake_builder = MockInsideBuilder()
    flexmock(InsideBuilder).new_instances(fake_builder)
    # One watcher per build phase; only the phase named by fail_at gets a
    # signal, the rest just record that they ran.
    watch_pre = WatcherWithSignal(phase_signal['pre'])
    watch_prepub = WatcherWithSignal(phase_signal['prepub'])
    watch_buildstep = WatcherWithSignal(phase_signal['buildstep'])
    watch_post = WatcherWithSignal(phase_signal['post'])
    watch_exit = WatcherWithSignal(phase_signal['exit'])

    # Capture plugin-module log output so the cancellation message can be
    # asserted on; the finalizer restores the real logger afterwards.
    fake_logger = FakeLogger()
    existing_logger = atomic_reactor.plugin.logger

    def restore_logger():
        atomic_reactor.plugin.logger = existing_logger

    request.addfinalizer(restore_logger)
    atomic_reactor.plugin.logger = fake_logger

    workflow = DockerBuildWorkflow(MOCK_SOURCE,
                                   'test-image',
                                   prebuild_plugins=[{
                                       'name': 'pre_watched',
                                       'args': {
                                           'watcher': watch_pre
                                       }
                                   }],
                                   prepublish_plugins=[{
                                       'name': 'prepub_watched',
                                       'args': {
                                           'watcher': watch_prepub,
                                       }
                                   }],
                                   buildstep_plugins=[{
                                       'name': 'buildstep_watched',
                                       'args': {
                                           'watcher': watch_buildstep
                                       }
                                   }],
                                   postbuild_plugins=[{
                                       'name': 'post_watched',
                                       'args': {
                                           'watcher': watch_post
                                       }
                                   }],
                                   exit_plugins=[{
                                       'name': 'exit_watched',
                                       'args': {
                                           'watcher': watch_exit
                                       }
                                   }],
                                   plugin_files=[this_file])
    # Cancellation during the buildstep phase raises; cancellation in other
    # phases is absorbed and only logged as a warning.
    if fail_at == 'buildstep':
        with pytest.raises(PluginFailedException):
            workflow.build_docker_image()
        assert workflow.build_canceled
        assert ("plugin '%s_watched' raised an exception:" % fail_at +
                " BuildCanceledException('Build was canceled',)",
                ) in fake_logger.errors
    else:
        workflow.build_docker_image()

        if fail_at != 'exit':
            assert workflow.build_canceled
            assert ("plugin '%s_watched' raised an exception:" % fail_at +
                    " BuildCanceledException('Build was canceled',)",
                    ) in fake_logger.warnings
        else:
            assert not workflow.build_canceled

    # Exit plugins (and the pre phase, which always starts) must run even
    # when the build was canceled.
    assert watch_exit.was_called()
    assert watch_pre.was_called()

    if fail_at not in ['pre', 'buildstep']:
        assert watch_prepub.was_called()

    if fail_at not in ['pre', 'prepub', 'buildstep']:
        assert watch_post.was_called()
示例#48
0
def test_image_exists():
    """The tasker must report an existing image as present."""
    if MOCK:
        mock_docker()

    tasker = DockerTasker()
    assert tasker.image_exists(input_image_name) is True
def test_tag_and_push_plugin(tmpdir, monkeypatch, image_name, logs,
                             should_raise, has_config, use_secret,
                             reactor_config_map):
    """
    TagAndPushPlugin must push the image and record the v1/v2 manifest
    digests (and the config blob, when one is available) in
    workflow.push_conf; failures surface as PluginFailedException.
    """
    if MOCK:
        mock_docker()
        # Mock push/login at the docker client level; `logs` drives the
        # push output the plugin will see.
        flexmock(docker.APIClient,
                 push=lambda iid, **kwargs: iter(logs),
                 login=lambda username, registry, dockercfg_path:
                 {'Status': 'Login Succeeded'})

    tasker = DockerTasker(retry_times=0)
    workflow = DockerBuildWorkflow({
        "provider": "git",
        "uri": "asd"
    }, TEST_IMAGE)
    workflow.tag_conf.add_primary_image(image_name)
    setattr(workflow, 'builder', X)

    # Optionally write a .dockercfg secret for registry authentication.
    secret_path = None
    if use_secret:
        temp_dir = mkdtemp()
        with open(os.path.join(temp_dir, ".dockercfg"), "w+") as dockerconfig:
            dockerconfig_contents = {
                LOCALHOST_REGISTRY: {
                    "username": "******",
                    "email": "*****@*****.**",
                    "password": "******"
                }
            }
            dockerconfig.write(json.dumps(dockerconfig_contents))
            dockerconfig.flush()
            secret_path = temp_dir

    CONFIG_DIGEST = 'sha256:2c782e3a93d34d89ea4cf54052768be117caed54803263dd1f3798ce42aac14e'
    media_type = 'application/vnd.docker.distribution.manifest.v2+json'

    # Canned v2 manifest returned by the mocked registry.
    manifest_json = {
        'config': {
            'digest': CONFIG_DIGEST,
            'mediaType': 'application/octet-stream',
            'size': 4132
        },
        'layers': [{
            'digest':
            'sha256:16dc1f96e3a1bb628be2e00518fec2bb97bd5933859de592a00e2eb7774b6ecf',
            'mediaType': 'application/vnd.docker.image.rootfs.diff.tar.gzip',
            'size': 71907148
        }, {
            'digest':
            'sha256:cebc0565e1f096016765f55fde87a6f60fdb1208c0b5017e35a856ff578f5ccb',
            'mediaType': 'application/vnd.docker.image.rootfs.diff.tar.gzip',
            'size': 3945724
        }],
        'mediaType':
        media_type,
        'schemaVersion':
        2
    }

    # Canned image config blob served for CONFIG_DIGEST.
    config_json = {
        'config': {
            'Size':
            12509448,
            'architecture':
            'amd64',
            'author':
            'Red Hat, Inc.',
            'config': {
                'Cmd': ['/bin/rsyslog.sh'],
                'Entrypoint': None,
                'Image':
                'c3fb36aafd5692d2a45115d32bb120edb6edf6c0c3c783ed6592a8dab969fb88',
                'Labels': {
                    'Architecture': 'x86_64',
                    'Authoritative_Registry': 'registry.access.redhat.com',
                    'BZComponent': 'rsyslog-docker',
                    'Name': 'rhel7/rsyslog',
                    'Release': '28.vrutkovs.31',
                    'Vendor': 'Red Hat, Inc.',
                    'Version': '7.2',
                },
            },
            'created':
            '2016-10-07T10:20:05.38595Z',
            'docker_version':
            '1.9.1',
            'id':
            '1ca220fbc2aed7c141b236c8729fe59db5771b32bf2da74e4a663407f32ec2a2',
            'os':
            'linux',
            'parent':
            '47eed7a8ea3f131e9281ae09fcbfb0041872fd8b74a048f1c739302c8148505d'
        },
        'container_config': {
            'foo': 'bar',
            'spam': 'maps'
        },
        'id':
        '1ca220fbc2aed7c141b236c8729fe59db5771b32bf2da74e4a663407f32ec2a2',
        'parent_id':
        'c3fb36aafd5692d2a45115d32bb120edb6edf6c0c3c783ed6592a8dab969fb88'
    }

    # To test out the lack of a config, we really should be testing what happens
    # when we only return a v1 response and not a v2 response at all; what are
    # doing now is simply testing that if we return a None instead of json for the
    # config blob, that None is stored rather than json
    if not has_config:
        config_json = None

    manifest_latest_url = "https://{}/v2/{}/manifests/latest".format(
        LOCALHOST_REGISTRY, TEST_IMAGE)
    manifest_url = "https://{}/v2/{}/manifests/{}".format(
        LOCALHOST_REGISTRY, TEST_IMAGE, DIGEST_V2)
    config_blob_url = "https://{}/v2/{}/blobs/{}".format(
        LOCALHOST_REGISTRY, TEST_IMAGE, CONFIG_DIGEST)

    # We return our v2 manifest in the mocked v1 response as a placeholder - only the
    # digest matters anyways
    manifest_response_v1 = requests.Response()
    (flexmock(manifest_response_v1,
              status_code=200,
              json=manifest_json,
              headers={
                  'Content-Type':
                  'application/vnd.docker.distribution.manifest.v1+json',
                  'Docker-Content-Digest': DIGEST_V1
              }))

    manifest_response_v2 = requests.Response()
    (flexmock(manifest_response_v2,
              status_code=200,
              json=manifest_json,
              headers={
                  'Content-Type':
                  'application/vnd.docker.distribution.manifest.v2+json',
                  'Docker-Content-Digest': DIGEST_V2
              }))
    manifest_response_v2_list = requests.Response()
    (flexmock(manifest_response_v2_list,
              raise_for_status=lambda: None,
              json=manifest_json,
              headers={
                  'Content-Type':
                  'application/vnd.docker.distribution.manifest.list.v2+json',
              }))

    config_blob_response = requests.Response()
    (flexmock(config_blob_response, status_code=200, json=config_json))

    # Route the three registry URLs to the canned responses above.
    def custom_get(method, url, headers, **kwargs):
        if url == manifest_latest_url:
            # For a manifest stored as v2 or v1, the docker registry defaults to
            # returning a v1 manifest if a v2 manifest is not explicitly requested
            if headers[
                    'Accept'] == 'application/vnd.docker.distribution.manifest.v2+json':
                return manifest_response_v2
            else:
                return manifest_response_v1

            # NOTE(review): the block below is unreachable -- both branches
            # of the if/else above already returned, so a manifest-list
            # Accept header falls into the v1 branch and
            # manifest_response_v2_list is never served.  Moving this check
            # above the v2/v1 fallback would change the mocked registry's
            # responses (the list response carries no Docker-Content-Digest
            # header), so confirm intent against the plugin before fixing.
            if headers[
                    'Accept'] == 'application/vnd.docker.distribution.manifest.list.v2+json':
                return manifest_response_v2_list

        if url == manifest_url:
            return manifest_response_v2

        if url == config_blob_url:
            return config_blob_response

    mock_get_retry_session()

    (flexmock(
        requests.Session).should_receive('request').replace_with(custom_get))

    if reactor_config_map:
        workflow.plugin_workspace[ReactorConfigPlugin.key] = {}
        workflow.plugin_workspace[ReactorConfigPlugin.key][WORKSPACE_CONF_KEY] =\
            ReactorConfig({'version': 1, 'registries': [{
                'url': LOCALHOST_REGISTRY,
                'insecure': True,
                'auth': {'cfg_path': secret_path},
            }]})

    runner = PostBuildPluginsRunner(tasker, workflow,
                                    [{
                                        'name': TagAndPushPlugin.key,
                                        'args': {
                                            'registries': {
                                                LOCALHOST_REGISTRY: {
                                                    'insecure': True,
                                                    'secret': secret_path
                                                }
                                            }
                                        },
                                    }])

    if should_raise:
        with pytest.raises(PluginFailedException):
            runner.run()
    else:
        output = runner.run()
        image = output[TagAndPushPlugin.key][0]
        tasker.remove_image(image)
        assert len(workflow.push_conf.docker_registries) > 0

        if MOCK:
            # we only test this when mocking docker because we don't expect
            # running actual docker against v2 registry
            expected_digest = ManifestDigest(v1=DIGEST_V1,
                                             v2=DIGEST_V2,
                                             oci=None)
            assert workflow.push_conf.docker_registries[0].digests[image_name].v1 == \
                expected_digest.v1
            assert workflow.push_conf.docker_registries[0].digests[image_name].v2 == \
                expected_digest.v2
            assert workflow.push_conf.docker_registries[0].digests[image_name].oci == \
                expected_digest.oci

            if has_config:
                assert isinstance(
                    workflow.push_conf.docker_registries[0].config, dict)
            else:
                assert workflow.push_conf.docker_registries[0].config is None
    def test_group_manifests_false(self, tmpdir, use_secret, goarch,
                                   worker_annotations, version, valid,
                                   respond):
        """
        With group=False the plugin must not create a manifest list: its
        result is empty and the per-tag digests come straight from the
        single worker build.  Invalid/unresponsive registries must make the
        runner raise.
        """
        if MOCK:
            mock_docker()

        # v1-only registries cannot serve the needed manifests.
        if version == '1':
            valid = False

        test_images = [
            'registry.example.com/namespace/httpd:2.4',
            'registry.example.com/namespace/httpd:latest'
        ]

        registries = {
            DOCKER0_REGISTRY: {
                'version': 'v2'
            },
            V1_REGISTRY: {
                'version': 'v1'
            },
        }
        # Optionally attach a .dockercfg secret to the v2 registry.
        if use_secret:
            temp_dir = mkdtemp(dir=str(tmpdir))
            with open(os.path.join(temp_dir, ".dockercfg"),
                      "w+") as dockerconfig:
                dockerconfig_contents = {
                    DOCKER0_REGISTRY: {
                        "username": "******",
                        "password": DOCKER0_REGISTRY
                    }
                }
                dockerconfig.write(json.dumps(dockerconfig_contents))
                dockerconfig.flush()
                registries[DOCKER0_REGISTRY]['secret'] = temp_dir

        plugins_conf = [{
            'name': GroupManifestsPlugin.key,
            'args': {
                'registries': registries,
                'group': False,
                'goarch': goarch,
            },
        }]
        tasker, workflow = mock_environment(
            tmpdir,
            primary_images=test_images,
            worker_annotations=worker_annotations)
        mock_url_responses([DOCKER0_REGISTRY], test_images, [X86_DIGESTS],
                           version, respond)

        runner = PostBuildPluginsRunner(tasker, workflow, plugins_conf)
        if valid and respond:
            result = runner.run()
            # No manifest list was grouped, so the plugin result is empty...
            assert result['group_manifests'] == []
            # ...and each primary tag maps to the lone x86_64 worker digest.
            expected_digests = {}
            for image in workflow.tag_conf.primary_images:
                expected_digests[
                    image.tag] = "sha256:worker-build-x86_64-digest"
            assert workflow.push_conf.docker_registries
            assert expected_digests == workflow.push_conf.docker_registries[
                0].digests
        else:
            with pytest.raises(PluginFailedException):
                runner.run()
示例#51
0
 def prepare(self):
     """Return a (tasker, workflow) pair backed by a mocked docker daemon."""
     mock_docker()
     tasker = DockerTasker()
     build_workflow = DockerBuildWorkflow(
         {'provider': 'git', 'uri': 'asd'}, TEST_IMAGE)
     return tasker, build_workflow
def test_add_labels_equal_aliases2(tmpdir, docker_tasker, caplog, base_fst,
                                   base_snd, base_trd, df_fst, df_snd, df_trd,
                                   expected, expected_log):
    """
    test with 3 equal labels
    """
    if MOCK:
        mock_docker()

    # Assemble the Dockerfile content: one LABEL line per provided value.
    content_lines = ["FROM fedora\n"]
    if df_fst:
        content_lines.append('LABEL description="{0}"\n'.format(df_fst))
    if df_snd:
        content_lines.append('LABEL io.k8s.description="{0}"\n'.format(df_snd))
    if df_trd:
        content_lines.append('LABEL description_third="{0}"\n'.format(df_trd))
    df_content = "".join(content_lines)
    plugin_labels = {}

    # Base-image inspect labels, filled only for the values provided.
    base_labels = {INSPECT_CONFIG: {"Labels": {}}}
    for label_key, label_value in (("description", base_fst),
                                   ("io.k8s.description", base_snd),
                                   ("description_third", base_trd)):
        if label_value:
            base_labels[INSPECT_CONFIG]["Labels"][label_key] = label_value

    df = df_parser(str(tmpdir))
    df.content = df_content

    workflow = DockerBuildWorkflow(MOCK_SOURCE, 'test-image')
    setattr(workflow, 'builder', X)
    flexmock(workflow, base_image_inspect=base_labels)
    setattr(workflow.builder, 'df_path', df.dockerfile_path)

    runner = PreBuildPluginsRunner(
        docker_tasker, workflow, [{
            'name': AddLabelsPlugin.key,
            'args': {
                'labels':
                plugin_labels,
                'dont_overwrite': [],
                'auto_labels': [],
                'aliases': {},
                'equal_labels':
                [['description', 'io.k8s.description', 'description_third']]
            }
        }])

    # A RuntimeError expectation means the plugin run itself must fail.
    if isinstance(expected_log, RuntimeError):
        with pytest.raises(PluginFailedException):
            runner.run()
        return

    runner.run()
    assert AddLabelsPlugin.key is not None
    # Every label in the equal-labels group must resolve (Dockerfile value
    # first, base-image value as fallback) to the expected value.
    inspected = base_labels[INSPECT_CONFIG]["Labels"]
    for label in ("description", "io.k8s.description", "description_third"):
        assert (df.labels.get(label) or inspected.get(label)) == expected

    if expected_log:
        assert expected_log in caplog.text()
def test_tag_and_push_plugin_oci(tmpdir, monkeypatch, use_secret, fail_push,
                                 caplog, reactor_config_map):
    """
    Push an OCI (flatpak-style) image with the tag_and_push plugin.

    The skopeo invocation and the registry HTTP endpoints are fully mocked;
    the test verifies the skopeo command line, the recorded OCI digest and
    config blob on the push_conf, and that the registry password never
    appears in the captured log output.
    """

    # For now, we don't want to require having a skopeo and an OCI-supporting
    # registry in the test environment
    if MOCK:
        mock_docker()
    else:
        return

    tasker = DockerTasker()
    workflow = DockerBuildWorkflow({
        "provider": "git",
        "uri": "asd"
    }, TEST_IMAGE)
    setattr(workflow, 'builder', X)

    # Optionally write a .dockercfg secret the plugin will read credentials
    # from; check_check_output below asserts it was turned into --dest-creds.
    secret_path = None
    if use_secret:
        temp_dir = mkdtemp()
        with open(os.path.join(temp_dir, ".dockercfg"), "w+") as dockerconfig:
            dockerconfig_contents = {
                LOCALHOST_REGISTRY: {
                    "username": "******",
                    "email": "*****@*****.**",
                    "password": "******"
                }
            }
            dockerconfig.write(json.dumps(dockerconfig_contents))
            dockerconfig.flush()
            secret_path = temp_dir

    CONFIG_DIGEST = 'sha256:b79482f7dcab2a326c1e8c7025a4336d900e99f50db8b35a659fda67b5ebb3c2'
    MEDIA_TYPE = 'application/vnd.oci.image.manifest.v1+json'
    REF_NAME = "app/org.gnome.eog/x86_64/master"

    # Canned OCI manifest returned by the mocked registry after the "push".
    manifest_json = {
        "schemaVersion":
        2,
        "mediaType":
        "application/vnd.oci.image.manifest.v1+json",
        "config": {
            "mediaType": MEDIA_TYPE,
            "digest": CONFIG_DIGEST,
            "size": 314
        },
        "layers": [{
            "mediaType": "application/vnd.oci.image.layer.v1.tar+gzip",
            "digest":
            "sha256:fd2b341d2ff3751ecdee8d8daacaa650d8a1703360c85d4cfc452d6ec32e147f",
            "size": 1863477
        }],
        "annotations": {
            "org.flatpak.commit-metadata.xa.ref":
            "YXBwL29yZy5nbm9tZS5lb2cveDg2XzY0L21hc3RlcgAAcw==",  # noqa
            "org.flatpak.body":
            "Name: org.gnome.eog\nArch: x86_64\nBranch: master\nBuilt with: Flatpak 0.9.7\n",  # noqa
            "org.flatpak.commit-metadata.xa.metadata":
            "W0FwcGxpY2F0aW9uXQpuYW1lPW9yZy5nbm9tZS5lb2cKcnVudGltZT1vcmcuZmVkb3JhcHJvamVjdC5QbGF0Zm9ybS94ODZfNjQvMjYKc2RrPW9yZy5mZWRvcmFwcm9qZWN0LlBsYXRmb3JtL3g4Nl82NC8yNgpjb21tYW5kPWVvZwoKW0NvbnRleHRdCnNoYXJlZD1pcGM7CnNvY2tldHM9eDExO3dheWxhbmQ7c2Vzc2lvbi1idXM7CmZpbGVzeXN0ZW1zPXhkZy1ydW4vZGNvbmY7aG9zdDt+Ly5jb25maWcvZGNvbmY6cm87CgpbU2Vzc2lvbiBCdXMgUG9saWN5XQpjYS5kZXNydC5kY29uZj10YWxrCgpbRW52aXJvbm1lbnRdCkRDT05GX1VTRVJfQ09ORklHX0RJUj0uY29uZmlnL2Rjb25mCgAAcw==",  # noqa
            "org.flatpak.download-size": "1863477",
            "org.flatpak.commit-metadata.xa.download-size": "AAAAAAAdF/IAdA==",
            "org.flatpak.commit-metadata.xa.installed-size":
            "AAAAAABDdgAAdA==",
            "org.flatpak.subject": "Export org.gnome.eog",
            "org.flatpak.installed-size": "4421120",
            "org.flatpak.commit":
            "d7b8789350660724b20643ebb615df466566b6d04682fa32800d3f10116eec54",  # noqa
            "org.flatpak.metadata":
            "[Application]\nname=org.gnome.eog\nruntime=org.fedoraproject.Platform/x86_64/26\nsdk=org.fedoraproject.Platform/x86_64/26\ncommand=eog\n\n[Context]\nshared=ipc;\nsockets=x11;wayland;session-bus;\nfilesystems=xdg-run/dconf;host;~/.config/dconf:ro;\n\n[Session Bus Policy]\nca.desrt.dconf=talk\n\n[Environment]\nDCONF_USER_CONFIG_DIR=.config/dconf\n",  # noqa
            "org.opencontainers.image.ref.name": REF_NAME,
            "org.flatpak.timestamp": "1499376525"
        }
    }

    # Canned image config blob; the test asserts push_conf ends up holding
    # this exact object.
    config_json = {
        "created": "2017-07-06T21:28:45Z",
        "architecture": "arm64",
        "os": "linux",
        "config": {
            "Memory": 0,
            "MemorySwap": 0,
            "CpuShares": 0
        },
        "rootfs": {
            "type":
            "layers",
            "diff_ids": [
                "sha256:4c5160fea65110aa1eb8ca022e2693bb868367c2502855887f21c77247199339"
            ]
        }
    }

    # Add a mock OCI image to exported_image_sequence; this forces the tag_and_push
    # plugin to push with skopeo rather than with 'docker push'

    # Since we are always mocking the push for now, we can get away with a stub image
    oci_dir = os.path.join(str(tmpdir), 'oci-image')
    os.mkdir(oci_dir)
    with open(os.path.join(oci_dir, "index.json"), "w") as f:
        f.write('"Not a real index.json"')
    with open(os.path.join(oci_dir, "oci-layout"), "w") as f:
        f.write('{"imageLayoutVersion": "1.0.0"}')
    os.mkdir(os.path.join(oci_dir, 'blobs'))

    metadata = get_exported_image_metadata(oci_dir, IMAGE_TYPE_OCI)
    metadata['ref_name'] = REF_NAME
    workflow.exported_image_sequence.append(metadata)

    # Also provide a tarred copy of the OCI layout.
    # (Fix: the loop variable used to be named 'f', shadowing the open file
    # object 'f' above; renamed to avoid the shadowing.)
    oci_tarpath = os.path.join(str(tmpdir), 'oci-image.tar')
    with open(oci_tarpath, "wb") as outfile:
        with tarfile.TarFile(mode="w", fileobj=outfile) as tf:
            for entry in os.listdir(oci_dir):
                tf.add(os.path.join(oci_dir, entry), entry)

    metadata = get_exported_image_metadata(oci_tarpath, IMAGE_TYPE_OCI_TAR)
    metadata['ref_name'] = REF_NAME
    workflow.exported_image_sequence.append(metadata)

    # Mock the subprocess call to skopeo

    def check_check_output(args, **kwargs):
        # Either simulate a push failure, or validate the skopeo command line
        # the plugin constructed.
        if fail_push:
            raise subprocess.CalledProcessError(returncode=1,
                                                cmd=args,
                                                output="Failed")
        assert args[0] == 'skopeo'
        if use_secret:
            assert '--dest-creds=user:mypassword' in args
        assert '--dest-tls-verify=false' in args
        assert args[-2] == 'oci:' + oci_dir + ':' + REF_NAME
        assert args[-1] == 'docker://' + LOCALHOST_REGISTRY + '/' + TEST_IMAGE
        return ''

    (flexmock(subprocess).should_receive("check_output").once().replace_with(
        check_check_output))

    # Mock out the response from the registry once the OCI image is uploaded

    manifest_latest_url = "https://{}/v2/{}/manifests/latest".format(
        LOCALHOST_REGISTRY, TEST_IMAGE)
    manifest_url = "https://{}/v2/{}/manifests/{}".format(
        LOCALHOST_REGISTRY, TEST_IMAGE, DIGEST_OCI)
    config_blob_url = "https://{}/v2/{}/blobs/{}".format(
        LOCALHOST_REGISTRY, TEST_IMAGE, CONFIG_DIGEST)

    manifest_response = requests.Response()
    (flexmock(manifest_response,
              raise_for_status=lambda: None,
              json=manifest_json,
              headers={
                  'Content-Type': MEDIA_TYPE,
                  'Docker-Content-Digest': DIGEST_OCI
              }))

    # Returned when the client does not send an OCI-capable Accept header.
    manifest_unacceptable_response = requests.Response()
    (flexmock(manifest_unacceptable_response,
              status_code=404,
              json={"errors": [{
                  "code": "MANIFEST_UNKNOWN"
              }]}))

    config_blob_response = requests.Response()
    (flexmock(config_blob_response,
              raise_for_status=lambda: None,
              json=config_json))

    def custom_get(method, url, headers, **kwargs):
        # Route the mocked registry requests to the canned responses above;
        # unknown URLs implicitly return None.
        if url == manifest_latest_url:
            if headers['Accept'] == MEDIA_TYPE:
                return manifest_response
            else:
                return manifest_unacceptable_response

        if url == manifest_url:
            return manifest_response

        if url == config_blob_url:
            return config_blob_response

    mock_get_retry_session()

    (flexmock(
        requests.Session).should_receive('request').replace_with(custom_get))

    if reactor_config_map:
        workflow.plugin_workspace[ReactorConfigPlugin.key] = {}
        workflow.plugin_workspace[ReactorConfigPlugin.key][WORKSPACE_CONF_KEY] =\
            ReactorConfig({'version': 1, 'registries': [{
                'url': LOCALHOST_REGISTRY,
                'insecure': True,
                'auth': {'cfg_path': secret_path},
            }]})

    runner = PostBuildPluginsRunner(tasker, workflow,
                                    [{
                                        'name': TagAndPushPlugin.key,
                                        'args': {
                                            'registries': {
                                                LOCALHOST_REGISTRY: {
                                                    'insecure': True,
                                                    'secret': secret_path
                                                }
                                            }
                                        },
                                    }])

    # NOTE(review): caplog.atLevel()/records()/text() are the legacy
    # pytest-catchlog API; modern pytest uses caplog.at_level() and the
    # records/text properties -- confirm the pinned pytest version before
    # modernizing.
    with caplog.atLevel(logging.DEBUG):
        if fail_push:
            with pytest.raises(PluginFailedException):
                output = runner.run()
        else:
            output = runner.run()

    # The credentials must never be leaked into the logs.
    for r in caplog.records():
        assert 'mypassword' not in r.getMessage()

    if not fail_push:
        image = output[TagAndPushPlugin.key][0]
        tasker.remove_image(image)
        assert len(workflow.push_conf.docker_registries) > 0

        # Only the OCI digest should be recorded; v1/v2 stay unset.
        assert workflow.push_conf.docker_registries[0].digests[
            TEST_IMAGE].v1 is None
        assert workflow.push_conf.docker_registries[0].digests[
            TEST_IMAGE].v2 is None
        assert workflow.push_conf.docker_registries[0].digests[
            TEST_IMAGE].oci == DIGEST_OCI

        assert workflow.push_conf.docker_registries[0].config is config_json
    def test_group_manifests_true(self, tmpdir, goarch, worker_annotations,
                                  valid):
        """
        Run the group_manifests plugin with group=True against two mocked
        v2 registries.

        When `valid`, assert the set of returned v2_list digests matches one
        manifest-list digest per registry for the primary image; otherwise
        expect the run to fail with PluginFailedException.
        """
        if MOCK:
            mock_docker()

        # Two tags of the same repository on the same registry.
        test_images = [
            'registry.example.com/namespace/httpd:2.4',
            'registry.example.com/namespace/httpd:latest'
        ]
        expected_results = set()

        # Both target registries speak v2 and are marked insecure so the
        # plugin falls back from https to the mocked http endpoints below.
        registries = {
            DOCKER0_REGISTRY: {
                'version': 'v2',
                'insecure': True
            },
            V1_REGISTRY: {
                'version': 'v2',
                'insecure': True
            },
        }

        plugins_conf = [{
            'name': GroupManifestsPlugin.key,
            'args': {
                'registries': registries,
                'group': True,
                'goarch': goarch,
            },
        }]
        tasker, workflow = mock_environment(
            tmpdir,
            primary_images=test_images,
            worker_annotations=worker_annotations)

        def request_callback(request):
            # Fabricate a deterministic manifest-list digest from the URL;
            # only manifest-list media types are expected here.
            media_type = request.headers['Accept']
            if media_type.endswith('list.v2+json'):
                digest = 'v2_list-digest:{0}'.format(request.url)
            else:
                raise ValueError('Unexpected media type {}'.format(media_type))

            media_type_prefix = media_type.split('+')[0]
            headers = {
                'Content-Type': '{}+jsonish'.format(media_type_prefix),
                'Docker-Content-Digest': digest
            }
            return (200, headers, '')

        for registry in registries:
            if valid:
                # The expected digest corresponds to the first (primary)
                # image's manifest URL on this registry.
                repo_and_tag = workflow.tag_conf.images[0].to_str(
                    registry=False).split(':')
                url = 'http://{0}/v2/{1}/manifests/{2}'.format(
                    registry, repo_and_tag[0], repo_and_tag[1])
                expected_results.add('v2_list-digest:{0}'.format(url))
            for image in workflow.tag_conf.images:
                repo_and_tag = image.to_str(registry=False).split(':')
                path = '/v2/{0}/manifests/{1}'.format(repo_and_tag[0],
                                                      repo_and_tag[1])
                # https always fails, forcing the insecure http fallback;
                # http succeeds only in the valid case.
                https_url = 'https://' + registry + path
                responses.add(responses.GET, https_url, body=ConnectionError())
                url = 'http://' + registry + path
                if valid:
                    responses.add_callback(responses.GET,
                                           url,
                                           callback=request_callback)

        # Stub out any skopeo/command-line invocations.
        (flexmock(subprocess).should_receive("check_output"))

        runner = PostBuildPluginsRunner(tasker, workflow, plugins_conf)
        if valid:
            result = runner.run()
            test_results = set()
            for digest in result['group_manifests']:
                test_results.add(digest.v2_list)
            assert test_results == expected_results
        else:
            with pytest.raises(PluginFailedException):
                runner.run()
# Example 55
 def test_missing_base_image_id(self):
     """
     When inspecting the base image fails, running the squash plugin with
     from_layer=None must surface the failure as a PluginFailedException.
     """
     if MOCK:
         # Simulate 'docker inspect' failing for the base image.
         mock_docker(inspect_should_fail=True)
     # Expect the squash call to be attempted with no base layer id.
     self.should_squash_with_kwargs(from_layer=None)
     with pytest.raises(PluginFailedException):
         self.run_plugin_with_args({'from_layer': None})
# Example 56
def test_floating_tags_push(tmpdir, workflow, test_name, registries, manifest_results,
                            schema_version, floating_tags, workers, expected_exception,
                            caplog):
    """
    Exercise the push_floating_tags exit plugin against mocked registries.

    On success, every floating tag must end up pointing at the same manifest
    list as the primary image, and the plugin result must map each affected
    repo to its digest.  On failure, the plugin result is empty and the
    expected error message appears in the logs.
    """
    if MOCK:
        mock_docker()

    primary_images = ['namespace/httpd:2.4', 'namespace/httpd:primary']

    # platform -> architecture mapping for the reactor platform descriptors.
    goarch = {
        'ppc64le': 'powerpc',
        'x86_64': 'amd64',
    }

    all_registry_conf = {
        REGISTRY_V2: {'version': 'v2', 'insecure': True},
        OTHER_V2: {'version': 'v2', 'insecure': False},
    }

    # Write a .dockercfg secret for REGISTRY_V2 only; the other registry
    # falls back to the same directory path below.
    temp_dir = mkdtemp(dir=str(tmpdir))
    with open(os.path.join(temp_dir, ".dockercfg"), "w+") as dockerconfig:
        dockerconfig_contents = {
            REGISTRY_V2: {
                "username": "******", "password": DOCKER0_REGISTRY
            }
        }
        dockerconfig.write(json.dumps(dockerconfig_contents))
        dockerconfig.flush()
        all_registry_conf[REGISTRY_V2]['secret'] = temp_dir

    # Restrict the configuration to the registries this parametrization uses.
    registry_conf = {
        k: v for k, v in all_registry_conf.items() if k in registries
    }

    for registry, opts in registry_conf.items():
        kwargs = {}
        if 'insecure' in opts:
            kwargs['insecure'] = opts['insecure']
        workflow.push_conf.add_docker_registry(registry, **kwargs)

    plugins_conf = [{
        'name': PushFloatingTagsPlugin.key,
    }]

    mocked_registries, annotations = mock_registries(registry_conf, workers,
                                                     primary_images=primary_images,
                                                     manifest_results=manifest_results,
                                                     schema_version=schema_version)
    tasker, workflow = mock_environment(tmpdir, workflow, primary_images=primary_images,
                                        floating_images=floating_tags,
                                        manifest_results=manifest_results,
                                        annotations=annotations)

    if workers:
        # Pretend this was an orchestrated (multi-worker) build.
        workflow.buildstep_plugins_conf = [{'name': PLUGIN_BUILD_ORCHESTRATE_KEY}]

    # Translate registry_conf into the reactor-config registries list.
    registries_list = []

    for docker_uri in registry_conf:
        reg_ver = registry_conf[docker_uri]['version']
        reg_secret = None
        if 'secret' in registry_conf[docker_uri]:
            reg_secret = registry_conf[docker_uri]['secret']

        new_reg = {}
        if reg_secret:
            new_reg['auth'] = {'cfg_path': reg_secret}
        else:
            # Registries without their own secret reuse the temp dir path.
            new_reg['auth'] = {'cfg_path': str(temp_dir)}
        new_reg['url'] = 'https://' + docker_uri + '/' + reg_ver

        registries_list.append(new_reg)

    platform_descriptors_list = []
    for platform in goarch:
        new_plat = {
            'platform': platform,
            'architecture': goarch[platform],
        }
        platform_descriptors_list.append(new_plat)

    workflow.plugin_workspace[ReactorConfigPlugin.key] = {}
    workflow.plugin_workspace[ReactorConfigPlugin.key][WORKSPACE_CONF_KEY] =\
        ReactorConfig({'version': 1,
                       'registries': registries_list,
                       'platform_descriptors': platform_descriptors_list})

    runner = ExitPluginsRunner(tasker, workflow, plugins_conf)
    results = runner.run()
    plugin_result = results[PushFloatingTagsPlugin.key]

    if expected_exception is None:
        primary_name, primary_tag = primary_images[0].split(':')
        for registry in registry_conf:
            target_registry = mocked_registries[registry]
            primary_manifest_list = target_registry.get_manifest(primary_name, primary_tag)

            # Every floating tag must resolve to the primary manifest list.
            for image in floating_tags:
                name, tag = image.split(':')

                assert tag in target_registry.get_repo(name)['tags']
                assert target_registry.get_manifest(name, tag) == primary_manifest_list

        # Check that plugin returns ManifestDigest object
        assert isinstance(plugin_result, dict)
        # Check that plugin returns correct list of repos
        actual_repos = sorted(plugin_result.keys())
        expected_repos = sorted(set([x.get_repo() for x in workflow.tag_conf.images]))
        assert expected_repos == actual_repos
    else:
        assert not plugin_result
        assert expected_exception in caplog.text
# Example 57
def test_workflow_docker_build_error_reports(steps_to_fail, step_reported):
    """
    Test if first error is reported properly. (i.e. exit plugins are not
    hiding the original root cause)
    """
    def exc_string(step):
        return 'test_workflow_docker_build_error_reports.{}'.format(step)

    def make_watcher(step):
        # Watchers for steps listed in steps_to_fail raise; others don't.
        exc = Exception(exc_string(step)) if step in steps_to_fail else None
        return Watcher(raise_exc=exc)

    flexmock(DockerfileParser, content='df_content')
    this_file = inspect.getfile(PreRaises)
    mock_docker()
    fake_builder = MockInsideBuilder(failed=True)
    flexmock(InsideBuilder).new_instances(fake_builder)

    # Build one watched plugin configuration per build phase.
    phase_kwargs = {
        'pre': 'prebuild_plugins',
        'buildstep': 'buildstep_plugins',
        'prepub': 'prepublish_plugins',
        'post': 'postbuild_plugins',
        'exit': 'exit_plugins',
    }
    plugin_conf = {
        kwarg: [{
            'name': '{}_watched'.format(step),
            'is_allowed_to_fail': False,
            'args': {
                'watcher': make_watcher(step),
            },
        }]
        for step, kwarg in phase_kwargs.items()
    }

    workflow = DockerBuildWorkflow(MOCK_SOURCE,
                                   'test-image',
                                   plugin_files=[this_file],
                                   **plugin_conf)

    with pytest.raises(Exception) as exc:
        workflow.build_docker_image()
    # The first failing step's message must be the one reported.
    assert exc_string(step_reported) in str(exc)
# Example 58
def docker_tasker():
    """Return a DockerTasker, backed by a mocked docker client when MOCK."""
    if MOCK:
        mock_docker()
    tasker = DockerTasker()
    return tasker
# Example 59
def test_get_version(docker_tasker):
    """Smoke test: get_info() must return the daemon info as a dict."""
    if MOCK:
        mock_docker()

    info = docker_tasker.get_info()
    assert isinstance(info, dict)
# Example 60
def prepare(v1_image_ids=None):
    """
    Boiler-plate test set-up: build a (tasker, workflow) pair with a mocked
    docker client and a fully stubbed-out dockpulp API, plus a fake
    orchestrated build result for x86_64 and ppc64le workers.

    :param v1_image_ids: optional mapping of platform name to v1 image id;
                         each entry replaces the default BuildInfo() for
                         that platform in the orchestrator workspace
    :return: (DockerTasker, DockerBuildWorkflow) tuple
    """
    # Fix: the default used to be a mutable `{}` argument, which is shared
    # across all calls of the function; use the None sentinel instead.
    if v1_image_ids is None:
        v1_image_ids = {}

    if MOCK:
        mock_docker()
    tasker = DockerTasker()
    workflow = DockerBuildWorkflow(SOURCE, "test-image")
    setattr(workflow, 'builder', X())
    setattr(workflow.builder, 'source', X())
    setattr(workflow.builder.source, 'dockerfile_path', None)
    setattr(workflow.builder.source, 'path', None)
    setattr(workflow, 'tag_conf', X())
    setattr(workflow.tag_conf, 'images', [ImageName(repo="image-name1")])

    # Mock dockpulp and docker
    dockpulp.Pulp = flexmock(dockpulp.Pulp)
    dockpulp.Pulp.registry = 'registry.example.com'
    (flexmock(dockpulp.imgutils).should_receive('get_metadata').with_args(
        object).and_return([{
            'id': 'foo'
        }]))
    (flexmock(dockpulp.imgutils).should_receive('get_manifest').with_args(
        object).and_return([{
            'id': 'foo'
        }]))
    (flexmock(dockpulp.imgutils).should_receive('get_versions').with_args(
        object).and_return({'id': '1.6.0'}))
    (flexmock(dockpulp.imgutils).should_receive('check_repo').and_return(0))
    (flexmock(dockpulp.Pulp).should_receive('set_certs').with_args(
        object, object))
    (flexmock(dockpulp.Pulp).should_receive('getRepos').with_args(
        list, fields=list).and_return([{
            "id": "redhat-image-name1"
        }, {
            "id": "redhat-prefix-image-name2"
        }]))
    (flexmock(dockpulp.Pulp).should_receive('createRepo'))
    # 'unicode' below implies this test still targets Python 2.
    (flexmock(dockpulp.Pulp).should_receive('upload').with_args(unicode)
     ).at_most().once()
    (flexmock(dockpulp.Pulp).should_receive('copy').with_args(
        unicode, unicode))
    (flexmock(dockpulp.Pulp).should_receive('updateRepo').with_args(
        'redhat-image-name1', {'tag': 'latest:ppc64le_v1_image_id'}))
    (flexmock(dockpulp.Pulp).should_receive('crane').with_args(
        list, wait=True).and_return([2, 3, 4]))
    # NOTE(review): the empty method name below looks like a transcription
    # error -- confirm which dockpulp.Pulp method was meant to be stubbed.
    (flexmock(dockpulp.Pulp).should_receive('').with_args(
        object, object).and_return([1, 2, 3]))
    (flexmock(dockpulp.Pulp).should_receive('watch_tasks').with_args(list))

    # Fake annotations as produced by an orchestrated two-platform build.
    annotations = {
        'worker-builds': {
            'x86_64': {
                'build': {
                    'build-name': 'build-1-x64_64',
                },
                'metadata_fragment': 'configmap/build-1-x86_64-md',
                'metadata_fragment_key': 'metadata.json',
            },
            'ppc64le': {
                'build': {
                    'build-name': 'build-1-ppc64le',
                },
                'metadata_fragment': 'configmap/build-1-ppc64le-md',
                'metadata_fragment_key': 'metadata.json',
            }
        }
    }

    workflow.build_result = BuildResult(
        logs=["docker build log - \u2018 \u2017 \u2019 \n'"],
        image_id="id1234",
        annotations=annotations)
    build_info = {}
    build_info['x86_64'] = BuildInfo()
    build_info['ppc64le'] = BuildInfo()

    # Platforms listed in v1_image_ids override the plain defaults above.
    for platform, v1_image_id in v1_image_ids.items():
        build_info[platform] = BuildInfo(v1_image_id)

    workflow.plugin_workspace = {
        OrchestrateBuildPlugin.key: {
            WORKSPACE_KEY_BUILD_INFO: build_info
        }
    }

    mock_docker()
    return tasker, workflow