Example 1
def test_add_labels_plugin(tmpdir, docker_tasker,
                           df_content, labels_conf_base, labels_conf, dont_overwrite, aliases,
                           expected_output, caplog):
    df = DockerfileParser(str(tmpdir))
    df.content = df_content

    if MOCK:
        mock_docker()

    workflow = DockerBuildWorkflow(MOCK_SOURCE, 'test-image')
    setattr(workflow, 'builder', X)
    flexmock(workflow, base_image_inspect=labels_conf_base)
    setattr(workflow.builder, 'df_path', df.dockerfile_path)

    runner = PreBuildPluginsRunner(
        docker_tasker,
        workflow,
        [{
            'name': AddLabelsPlugin.key,
            'args': {
                'labels': labels_conf,
                'dont_overwrite': dont_overwrite,
                'auto_labels': [],
                'aliases': aliases,
            }
        }]
    )

    runner.run()
    if isinstance(expected_output, RuntimeError):
        assert "plugin 'add_labels_in_dockerfile' raised an exception: RuntimeError" in caplog.text()

    else:
        assert AddLabelsPlugin.key is not None
        assert df.content in expected_output
    def __init__(self, tasker, workflow, nvr=None, destdir="/root/buildinfo/",
                 use_final_dockerfile=False):
        """
        constructor

        :param tasker: DockerTasker instance
        :param workflow: DockerBuildWorkflow instance
        :param nvr: name-version-release, appended to the copied Dockerfile's name ("Dockerfile-<nvr>").
                    If not specified, it is derived from the Name, Version and Release labels.
        :param destdir: directory in the image to put Dockerfile-N-V-R into
        :param use_final_dockerfile: bool, when True, use the final version of the processed Dockerfile;
                                     when False, use the Dockerfile as it was when this plugin executed
        """
        # call parent constructor
        super(AddDockerfilePlugin, self).__init__(tasker, workflow)

        self.use_final_dockerfile = use_final_dockerfile

        if nvr is None:
            labels = DockerfileParser(self.workflow.builder.df_path).labels
            name = labels.get('Name')
            version = labels.get('Version')
            release = labels.get('Release')
            if name is None or version is None or release is None:
                raise ValueError("You have to specify either nvr arg or Name/Version/Release labels.")
            nvr = "{0}-{1}-{2}".format(name, version, release)
            nvr = nvr.replace("/", "-")
        self.df_name = '{0}-{1}'.format(DOCKERFILE_FILENAME, nvr)
        self.df_dir = destdir
        self.df_path = os.path.join(self.df_dir, self.df_name)

        # we are not using final dockerfile, so let's copy current snapshot
        if not self.use_final_dockerfile:
            local_df_path = os.path.join(self.workflow.builder.df_dir, self.df_name)
            shutil.copy2(self.workflow.builder.df_path, local_df_path)
def test_adddockerfile_plugin(tmpdir):
    df_content = """
FROM fedora
RUN yum install -y python-django
CMD blabla"""
    df = DockerfileParser(str(tmpdir))
    df.content = df_content

    tasker = DockerTasker()
    workflow = DockerBuildWorkflow(MOCK_SOURCE, 'test-image')
    workflow.builder = X
    workflow.builder.df_path = df.dockerfile_path
    workflow.builder.df_dir = str(tmpdir)

    runner = PreBuildPluginsRunner(
        tasker,
        workflow,
        [{
            'name': AddDockerfilePlugin.key,
            'args': {'nvr': 'rhel-server-docker-7.1-20'}
        }]
    )
    runner.run()
    assert AddDockerfilePlugin.key is not None

    expected_output = """
FROM fedora
RUN yum install -y python-django
ADD Dockerfile-rhel-server-docker-7.1-20 /root/buildinfo/Dockerfile-rhel-server-docker-7.1-20
CMD blabla"""
    assert df.content == expected_output
Example 4
def test_add_labels_plugin(tmpdir, labels_conf_base, labels_conf, dont_overwrite, expected_output):
    df = DockerfileParser(str(tmpdir))
    df.content = DF_CONTENT

    tasker = DockerTasker()
    workflow = DockerBuildWorkflow(MOCK_SOURCE, 'test-image')
    setattr(workflow, 'builder', X)
    flexmock(workflow, base_image_inspect=labels_conf_base)
    setattr(workflow.builder, 'df_path', df.dockerfile_path)

    runner = PreBuildPluginsRunner(
        tasker,
        workflow,
        [{
            'name': AddLabelsPlugin.key,
            'args': {'labels': labels_conf, "dont_overwrite": dont_overwrite}
        }]
    )

    if isinstance(expected_output, RuntimeError):
        with pytest.raises(RuntimeError):
            runner.run()
    else:
        runner.run()
        assert AddLabelsPlugin.key is not None
        assert df.content in expected_output
    def run(self):
        """
        run the plugin
        """
        # dict comprehensions are a syntax error on Python 2.6
        yum_repos = {}
        for key, value in self.workflow.files.items():
            if key.startswith(YUM_REPOS_DIR):
                yum_repos[key] = value
        if self.wrap_commands:
            wrap_yum_commands(yum_repos, self.workflow.builder.df_path)
        else:
            if not yum_repos:
                return
            # absolute path in containers -> relative path within context
            repos_host_cont_mapping = {}
            host_repos_path = os.path.join(self.workflow.builder.df_dir, RELATIVE_REPOS_PATH)
            self.log.info("creating directory for yum repos: %s", host_repos_path)
            os.mkdir(host_repos_path)

            for repo, repo_content in self.workflow.files.items():
                repo_basename = os.path.basename(repo)
                repo_relative_path = os.path.join(RELATIVE_REPOS_PATH, repo_basename)
                repo_host_path = os.path.join(host_repos_path, repo_basename)
                self.log.info("writing repo to '%s'", repo_host_path)
                with open(repo_host_path, "wb") as fp:
                    fp.write(repo_content.encode("utf-8"))
                repos_host_cont_mapping[repo] = repo_relative_path

            # Find out the USER inherited from the base image
            inspect = self.workflow.builder.inspect_base_image()
            inherited_user = inspect['Config'].get('User', '')
            df = DockerfileParser(self.workflow.builder.df_path)
            df.lines = add_yum_repos_to_dockerfile(repos_host_cont_mapping,
                                                   df, inherited_user)
Example 6
    def __init__(self, filename):
        DockerfileParser.__init__(self, cache_content=True)

        if self.is_url(filename) is not None:
            response = urllib.request.urlopen(filename)
            if self.is_content_type_plain_text(response):
                self.content = response.read().decode('utf-8')

                App._dockerfile['is_remote'] = True
                App._dockerfile['url']       = filename
            else:
                print('ERROR: file format not supported. Plain text expected\n')
                exit(-1)

        elif os.path.isfile(filename):
            self.dockerfile_path = filename
            self.content = open(filename, encoding='utf-8').read()
            App._dockerfile['is_remote'] = False

        elif self.is_github_repo(filename):
            filename     = 'https://raw.githubusercontent.com/' + filename + '/master/Dockerfile'
            self.content = urllib.request.urlopen(filename).read().decode('utf-8')

            App._dockerfile['is_remote'] = True
            App._dockerfile['url']       = filename

        else:
            print('ERROR: file format not supported\n')
            exit(-1)

        self.commands = self.dict_to_command_object(self.structure)
def test_returndockerfile_plugin(tmpdir):
    df_content = """
FROM fedora
RUN yum install -y python-django
CMD blabla"""
    df = DockerfileParser(str(tmpdir))
    df.content = df_content

    tasker = DockerTasker()
    workflow = DockerBuildWorkflow(MOCK_SOURCE, 'test-image')
    workflow.builder = X
    workflow.builder.df_path = df.dockerfile_path
    workflow.builder.df_dir = str(tmpdir)

    runner = PreBuildPluginsRunner(
        tasker,
        workflow,
        [{
            'name': CpDockerfilePlugin.key
        }]
    )
    runner.run()
    assert CpDockerfilePlugin.key is not None

    assert workflow.prebuild_results.get(CpDockerfilePlugin.key, "") == df_content
def test_adddockerfile_nvr_from_labels2(tmpdir):
    df_content = """
FROM fedora
RUN yum install -y python-django
CMD blabla"""
    df = DockerfileParser(str(tmpdir))
    df.content = df_content

    tasker = DockerTasker()
    workflow = DockerBuildWorkflow(MOCK_SOURCE, 'test-image')
    flexmock(workflow, base_image_inspect={"Config": {"Labels": {}}})
    workflow.builder = X
    workflow.builder.df_path = df.dockerfile_path
    workflow.builder.df_dir = str(tmpdir)

    runner = PreBuildPluginsRunner(
        tasker,
        workflow,
        [{
            'name': AddLabelsPlugin.key,
            'args': {'labels': {'Name': 'jboss-eap-6-docker',
                                'Version': '6.4',
                                'Release': '77'}}
         },
         {
            'name': AddDockerfilePlugin.key
        }]
    )
    runner.run()
    assert AddDockerfilePlugin.key is not None

    assert "ADD Dockerfile-jboss-eap-6-docker-6.4-77 /root/buildinfo/Dockerfile-jboss-eap-6-docker-6.4-77" in df.content
def test_yuminject_plugin_notwrapped(tmpdir):
    df_content = """\
FROM fedora
RUN yum install -y python-django
CMD blabla"""
    df = DockerfileParser(str(tmpdir))
    df.content = df_content

    tasker, workflow = prepare(df.dockerfile_path)

    metalink = 'https://mirrors.fedoraproject.org/metalink?repo=fedora-$releasever&arch=$basearch'

    workflow.files[os.path.join(YUM_REPOS_DIR, DEFAULT_YUM_REPOFILE_NAME)] = render_yum_repo(OrderedDict(
        (('name', 'my-repo'),
         ('metalink', metalink),
         ('enabled', 1),
         ('gpgcheck', 0)),
    ))

    runner = PreBuildPluginsRunner(tasker, workflow, [{
        'name': InjectYumRepoPlugin.key,
        'args': {
            "wrap_commands": False
        }
    }])
    runner.run()
    assert InjectYumRepoPlugin.key is not None

    expected_output = r"""FROM fedora
ADD atomic-reactor-repos/* '/etc/yum.repos.d/'
RUN yum install -y python-django
CMD blabla
RUN rm -f '/etc/yum.repos.d/atomic-reactor-injected.repo'
"""
    assert expected_output == df.content
def test_assertlabels_plugin(tmpdir, df_content, req_labels, expected):
    df = DockerfileParser(str(tmpdir))
    df.content = df_content

    tasker = DockerTasker()
    workflow = DockerBuildWorkflow(MOCK_SOURCE, 'test-image')
    workflow.builder = X
    workflow.builder.df_path = df.dockerfile_path
    workflow.builder.df_dir = str(tmpdir)

    runner = PreBuildPluginsRunner(
        tasker,
        workflow,
        [{
            'name': AssertLabelsPlugin.key,
            'args': {'required_labels': req_labels}
        }]
    )

    assert AssertLabelsPlugin.key is not None

    if isinstance(expected, PluginFailedException):
        with pytest.raises(PluginFailedException):
            runner.run()
    else:
        runner.run()
    def run(self):
        dockerfile = DockerfileParser(self.workflow.builder.df_path)

        image_name = ImageName.parse(dockerfile.baseimage)
        if image_name.namespace != 'koji' or image_name.repo != 'image-build':
            self.log.info('Base image not supported: %s', dockerfile.baseimage)
            return
        image_build_conf = image_name.tag or 'image-build.conf'

        self.session = create_koji_session(self.koji_hub, self.koji_auth_info)

        task_id, filesystem_regex = self.build_filesystem(image_build_conf)

        task = TaskWatcher(self.session, task_id, self.poll_interval)
        task.wait()
        if task.failed():
            raise RuntimeError('Create filesystem task failed: {}'
                               .format(task_id))

        filesystem = self.download_filesystem(task_id, filesystem_regex)

        base_image = self.import_base_image(filesystem)
        dockerfile.baseimage = base_image

        return base_image
Example 12
def test_adddockerfile_nvr_from_labels(tmpdir):
    df_content = """
FROM fedora
RUN yum install -y python-django
LABEL Name="jboss-eap-6-docker" "Version"="6.4" "Release"=77
CMD blabla"""
    df = DockerfileParser(str(tmpdir))
    df.content = df_content

    tasker = DockerTasker()
    workflow = DockerBuildWorkflow(MOCK_SOURCE, 'test-image')
    workflow.builder = X
    workflow.builder.df_path = df.dockerfile_path
    workflow.builder.df_dir = str(tmpdir)

    runner = PreBuildPluginsRunner(
        tasker,
        workflow,
        [{
            'name': AddDockerfilePlugin.key
        }]
    )
    runner.run()
    assert AddDockerfilePlugin.key is not None

    assert "ADD Dockerfile-jboss-eap-6-docker-6.4-77 /root/buildinfo/Dockerfile-jboss-eap-6-docker-6.4-77" in df.content
Example 13
def test_adddockerfile_todest(tmpdir):
    df_content = """
FROM fedora
RUN yum install -y python-django
CMD blabla"""
    df = DockerfileParser(str(tmpdir))
    df.content = df_content

    tasker = DockerTasker()
    workflow = DockerBuildWorkflow(MOCK_SOURCE, 'test-image')
    workflow.builder = X
    workflow.builder.df_path = df.dockerfile_path
    workflow.builder.df_dir = str(tmpdir)

    runner = PreBuildPluginsRunner(
        tasker,
        workflow,
        [{
            'name': AddDockerfilePlugin.key,
            'args': {'nvr': 'jboss-eap-6-docker-6.4-77',
                     'destdir': '/usr/share/doc/'}
        }]
    )
    runner.run()
    assert AddDockerfilePlugin.key is not None

    expected_output = """
FROM fedora
RUN yum install -y python-django
ADD Dockerfile-jboss-eap-6-docker-6.4-77 /usr/share/doc/Dockerfile-jboss-eap-6-docker-6.4-77
CMD blabla"""
    assert df.content == expected_output
Example 14
    def test_constructor_cache(self, tmpdir):
        tmpdir_path = str(tmpdir.realpath())
        df1 = DockerfileParser(tmpdir_path)
        df1.lines = ["From fedora:latest\n", "LABEL a b\n"]

        df2 = DockerfileParser(tmpdir_path, True)
        assert df2.cached_content
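A note on the call above: assuming the standard dockerfile-parse signature, the second positional argument is cache_content, so an equivalent and more explicit spelling would be:

    df2 = DockerfileParser(tmpdir_path, cache_content=True)
    assert df2.cached_content  # populated at construction time when caching is enabled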
Example 15
def test_add_labels_plugin_generated(tmpdir, docker_tasker, auto_label, value_re_part):
    df = DockerfileParser(str(tmpdir))
    df.content = DF_CONTENT

    if MOCK:
        mock_docker()

    workflow = DockerBuildWorkflow(MOCK_SOURCE, 'test-image')
    setattr(workflow, 'builder', X)
    flexmock(workflow, source=MockSource())
    flexmock(workflow, base_image_inspect=LABELS_CONF_BASE)
    setattr(workflow.builder, 'df_path', df.dockerfile_path)

    runner = PreBuildPluginsRunner(
        docker_tasker,
        workflow,
        [{
            'name': AddLabelsPlugin.key,
            'args': {'labels': {}, "dont_overwrite": [], "auto_labels": [auto_label],
                     'aliases': {'Build_Host': 'com.redhat.build-host'}}
        }]
    )

    runner.run()
    assert re.match(value_re_part, df.labels[auto_label])
    def test_get_parent_env_from_scratch(self, tmpdir):
        tmpdir_path = str(tmpdir.realpath())
        p_env = {"bar": "baz"}
        df1 = DockerfileParser(tmpdir_path, env_replace=True, parent_env=p_env)
        df1.lines = [
            "FROM scratch\n",
        ]

        assert not df1.envs
    def run(self):
        """
        run the plugin
        """
        dockerfile = DockerfileParser(self.workflow.builder.df_path)
        lines = dockerfile.lines

        # correct syntax is:
        #   LABEL "key"="value" "key2"="value2"

        # Make sure to escape '\' and '"' characters.
        try:
            # py3
            env_trans = str.maketrans({'\\': '\\\\',
                                       '"': '\\"'})
        except AttributeError:
            # py2
            env_trans = None

        def escape(s):
            if env_trans:
                return s.translate(env_trans)
            return s.replace('\\', '\\\\').replace('"', '\\"')

        labels = []
        for key, value in self.labels.items():
            try:
                base_image_value = self.workflow.base_image_inspect["Config"]["Labels"][key]
            except KeyError:
                self.log.info("label %s not present in base image", repr(key))
            except (AttributeError, TypeError):
                self.log.warning("base image was not inspected")
                break
            else:
                if base_image_value == value:
                    self.log.info("label %s is already set to %s", repr(key), repr(value))
                    continue
                else:
                    self.log.info("base image has label %s set to %s", repr(key), repr(base_image_value))
                    if key in self.dont_overwrite:
                        self.log.info("denying overwrite of label %s", repr(key))
                        continue

            label = '"%s"="%s"' % (escape(key), escape(value))
            self.log.info("setting label %s", label)
            labels.append(label)

        content = ""
        if labels:
            content = 'LABEL ' + " ".join(labels)
            # put it before last instruction
            lines.insert(-1, content + '\n')

            dockerfile.lines = lines

        return content
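To make the quoting above concrete (the label values are made up), the plugin emits a single LABEL instruction with both keys and values quoted and with '\' and '"' backslash-escaped, for example:

    # self.labels = {'vendor': 'Red Hat, Inc.', 'description': 'say "hi"'}
    # generated instruction:
    # LABEL "vendor"="Red Hat, Inc." "description"="say \"hi\""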
Example 18
def test_add_labels_aliases(tmpdir, docker_tasker, caplog,
                            df_old_as_plugin_arg, df_new_as_plugin_arg,
                            base_old, base_new, df_old, df_new, expected_old, expected_new, expected_log):
    if MOCK:
        mock_docker()

    df_content = "FROM fedora\n"
    plugin_labels = {}
    if df_old:
        if df_old_as_plugin_arg:
            plugin_labels["label_old"] = df_old
        else:
            df_content += 'LABEL label_old="{0}"\n'.format(df_old)
    if df_new:
        if df_new_as_plugin_arg:
            plugin_labels["label_new"] = df_new
        else:
            df_content += 'LABEL label_new="{0}"\n'.format(df_new)

    base_labels = {INSPECT_CONFIG: {"Labels": {}}}
    if base_old:
        base_labels[INSPECT_CONFIG]["Labels"]["label_old"] = base_old
    if base_new:
        base_labels[INSPECT_CONFIG]["Labels"]["label_new"] = base_new

    df = DockerfileParser(str(tmpdir))
    df.content = df_content

    workflow = DockerBuildWorkflow(MOCK_SOURCE, 'test-image')
    setattr(workflow, 'builder', X)
    flexmock(workflow, base_image_inspect=base_labels)
    setattr(workflow.builder, 'df_path', df.dockerfile_path)

    runner = PreBuildPluginsRunner(
        docker_tasker,
        workflow,
        [{
            'name': AddLabelsPlugin.key,
            'args': {
                'labels': plugin_labels,
                'dont_overwrite': [],
                'auto_labels': [],
                'aliases': {"label_old": "label_new"},
            }
        }]
    )

    runner.run()
    assert AddLabelsPlugin.key is not None
    result_old = df.labels.get("label_old") or base_labels[INSPECT_CONFIG]["Labels"].get("label_old")
    result_new = df.labels.get("label_new") or base_labels[INSPECT_CONFIG]["Labels"].get("label_new")
    assert result_old == expected_old
    assert result_new == expected_new

    if expected_log:
        assert expected_log in caplog.text()
    def run(self):
        """
        run the plugin
        """
        labels = DockerfileParser(self.workflow.builder.df_path).labels
        for label in self.required_labels:
            if labels.get(label) is None:
                msg = "Dockerfile is missing '{0}' label.".format(label)
                self.log.error(msg)
                raise AssertionError(msg)
def mock_workflow(tmpdir, dockerfile):
    workflow = DockerBuildWorkflow(MOCK_SOURCE, 'test-image')
    mock_source = MockSource(tmpdir)
    setattr(workflow, 'builder', X)
    workflow.builder.source = mock_source
    flexmock(workflow, source=mock_source)

    df = DockerfileParser(str(tmpdir))
    df.content = dockerfile
    setattr(workflow.builder, 'df_path', df.dockerfile_path)

    return workflow
    def test_remove_whitespace(self, tmpdir):
        """
        Verify keys are parsed correctly even if there is no final newline.

        """
        with open(os.path.join(str(tmpdir), 'Dockerfile'), 'w') as fp:
            fp.write('FROM scratch')
        tmpdir_path = str(tmpdir.realpath())
        df1 = DockerfileParser(tmpdir_path)
        df1.labels['foo'] = 'bar'

        df2 = DockerfileParser(tmpdir_path, True)
        assert df2.baseimage == 'scratch'
        assert df2.labels['foo'] == 'bar'
def test_yuminject_multiline_wrapped_with_chown(tmpdir):
    df_content = """\
FROM fedora
RUN yum install -y --setopt=tsflags=nodocs bind-utils gettext iproute v8314 mongodb24-mongodb mongodb24 && \
    yum clean all && \
    mkdir -p /var/lib/mongodb/data && chown -R mongodb:mongodb /var/lib/mongodb/ && \
    test "$(id mongodb)" = "uid=184(mongodb) gid=998(mongodb) groups=998(mongodb)" && \
    chmod o+w -R /var/lib/mongodb && chmod o+w -R /opt/rh/mongodb24/root/var/lib/mongodb
CMD blabla"""
    df = DockerfileParser(str(tmpdir))
    df.content = df_content

    tasker = DockerTasker()
    workflow = DockerBuildWorkflow(SOURCE, "test-image")
    setattr(workflow, 'builder', X())

    metalink = r'https://mirrors.fedoraproject.org/metalink?repo=fedora-$releasever&arch=$basearch'

    workflow.files[os.path.join(YUM_REPOS_DIR, DEFAULT_YUM_REPOFILE_NAME)] = render_yum_repo(OrderedDict(
        (('name', 'my-repo'),
         ('metalink', metalink),
         ('enabled', 1),
         ('gpgcheck', 0)),
    ))
    setattr(workflow.builder, 'image_id', "asd123")
    setattr(workflow.builder, 'df_path', df.dockerfile_path)
    setattr(workflow.builder, 'df_dir', str(tmpdir))
    setattr(workflow.builder, 'base_image', ImageName(repo='Fedora', tag='21'))
    setattr(workflow.builder, 'git_dockerfile_path', None)
    setattr(workflow.builder, 'git_path', None)
    setattr(workflow.builder, 'source', X())
    setattr(workflow.builder.source, 'dockerfile_path', None)
    setattr(workflow.builder.source, 'path', '')
    runner = PreBuildPluginsRunner(tasker, workflow,
                                   [{'name': InjectYumRepoPlugin.key, 'args': {
                                       "wrap_commands": True
                                   }}])
    runner.run()
    assert InjectYumRepoPlugin.key is not None

    expected_output = """FROM fedora
RUN printf "[my-repo]\nname=my-repo\nmetalink=https://mirrors.fedoraproject.org/metalink?repo=fedora-\\$releasever&arch=\
\\$basearch\nenabled=1\ngpgcheck=0\n" >/etc/yum.repos.d/atomic-reactor-injected.repo && \
yum install -y --setopt=tsflags=nodocs bind-utils gettext iproute v8314 mongodb24-mongodb mongodb24 &&     \
yum clean all &&     mkdir -p /var/lib/mongodb/data && chown -R mongodb:mongodb /var/lib/mongodb/ &&     \
test "$(id mongodb)" = "uid=184(mongodb) gid=998(mongodb) groups=998(mongodb)" &&     \
chmod o+w -R /var/lib/mongodb && chmod o+w -R /opt/rh/mongodb24/root/var/lib/mongodb && \
yum clean all && rm -f /etc/yum.repos.d/atomic-reactor-injected.repo
CMD blabla"""
    assert df.content == expected_output
def bump_release(df_path, branch):
    parser = DockerfileParser(df_path)
    oldrelease = parser.labels["Release"]
    if not oldrelease:
        raise RuntimeError("Dockerfile has no Release label")

    m = re.match(r"(.*\D)?(\d+)", oldrelease)
    if not m:
        raise RuntimeError("Release does not end with number")

    num = int(m.group(2))
    newrelease = "{0}{1:03d}".format(m.group(1), num+1)

    parser.labels["Release"] = newrelease
    return newrelease
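To make the regex above concrete, here is a small standalone sketch (the release strings are made up); note that bump_release passes m.group(1) straight into format(), so an all-digit release would render a literal "None" prefix, which the sketch guards against:

    import re

    for oldrelease in ["4.el7.12", "77"]:
        m = re.match(r"(.*\D)?(\d+)", oldrelease)
        prefix = m.group(1) or ""   # group(1) is None when the release is all digits
        print(oldrelease, "->", "{0}{1:03d}".format(prefix, int(m.group(2)) + 1))
    # 4.el7.12 -> 4.el7.013
    # 77 -> 078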
Example 24
def test_bump_release_indirect_correct(tmpdir, labelval):
    dflines = ['FROM fedora\n',
               'ENV RELEASE=1\n',
               'LABEL Release={0}\n'.format(labelval)]
    with DFWithRelease(lines=dflines) as (df_path, commit):
        dummy_workflow, dummy_args, runner = prepare(tmpdir, df_path, commit)
        labels_before = DockerfileParser(df_path, env_replace=False).labels

        runner.run()

        parser = DockerfileParser(df_path)
        assert parser.envs['RELEASE'] == '2'

        parser.env_replace = False
        assert parser.labels == labels_before
Example 25
def test_multiple_repourls(tmpdir):
    for df_content in DOCKERFILES.values():
        df = DockerfileParser(str(tmpdir))
        df.lines = df_content.lines_before_add + \
                   df_content.lines_before_remove
        tasker, workflow = prepare(df.dockerfile_path,
                                   df_content.inherited_user)
        filename1 = 'myrepo.repo'
        filename2 = 'repo-2.repo'
        repo_path1 = os.path.join(YUM_REPOS_DIR, filename1)
        repo_path2 = os.path.join(YUM_REPOS_DIR, filename2)
        workflow.files[repo_path1] = repocontent
        workflow.files[repo_path2] = repocontent
        runner = PreBuildPluginsRunner(tasker, workflow, [{
                'name': InjectYumRepoPlugin.key,
                'args': {'wrap_commands': False}}])
        runner.run()

        # Remove the repos/ directory.
        repos_dir = os.path.join(str(tmpdir), RELATIVE_REPOS_PATH)
        for repofile in [filename1, filename2]:
            os.remove(os.path.join(repos_dir, repofile))

        os.rmdir(repos_dir)

        # Examine the Dockerfile.
        newdf = df.lines
        before_add = len(df_content.lines_before_add)
        before_remove = len(df_content.lines_before_remove)

        # Start of file should be unchanged.
        assert newdf[:before_add] == df_content.lines_before_add

        # Should see a single add line.
        after_add = before_add + 1
        assert (newdf[before_add:after_add] ==
                    ["ADD %s* '/etc/yum.repos.d/'\n" % RELATIVE_REPOS_PATH])

        # Lines from there up to the remove line should be unchanged.
        before_remove = after_add + len(df_content.lines_before_remove)
        assert (newdf[after_add:before_remove] ==
                    df_content.lines_before_remove)

        # For the 'rm' line, they could be in either order
        remove = newdf[before_remove:]
        assert remove_lines_match(remove, df_content.remove_lines,
                                  [filename1, filename2])
Example 26
def test_single_repourl(tmpdir):
    for df_content in DOCKERFILES.values():
        df = DockerfileParser(str(tmpdir))
        df.lines = df_content.lines_before_add + \
                   df_content.lines_before_remove
        tasker, workflow = prepare(df.dockerfile_path,
                                   df_content.inherited_user)
        filename = 'test.repo'
        repo_path = os.path.join(YUM_REPOS_DIR, filename)
        workflow.files[repo_path] = repocontent
        runner = PreBuildPluginsRunner(tasker, workflow, [{
                'name': InjectYumRepoPlugin.key,
                'args': {'wrap_commands': False}}])
        runner.run()

        # Was it written correctly?
        repos_dir = os.path.join(str(tmpdir), RELATIVE_REPOS_PATH)
        repofile = os.path.join(repos_dir, filename)
        with open(repofile, "r") as fp:
            assert fp.read() == repocontent

        # Remove the repos/ directory.
        os.remove(repofile)
        os.rmdir(repos_dir)

        # Examine the Dockerfile.
        newdf = df.lines
        before_add = len(df_content.lines_before_add)
        before_remove = len(df_content.lines_before_remove)

        # Start of file should be unchanged.
        assert newdf[:before_add] == df_content.lines_before_add

        # Should see a single add line.
        after_add = before_add + 1
        assert (newdf[before_add:after_add] ==
                    ["ADD %s* '/etc/yum.repos.d/'\n" % RELATIVE_REPOS_PATH])

        # Lines from there up to the remove line should be unchanged.
        before_remove = after_add + len(df_content.lines_before_remove)
        assert (newdf[after_add:before_remove] ==
                df_content.lines_before_remove)

        # The 'rm' lines should match
        # There should be a final 'rm'
        remove = newdf[before_remove:]
        assert remove_lines_match(remove, df_content.remove_lines, [filename])
    def test_get_parent_env(self, tmpdir):
        tmpdir_path = str(tmpdir.realpath())
        p_env = {"bar": "baz"}
        df1 = DockerfileParser(tmpdir_path, env_replace=True, parent_env=p_env)
        df1.lines = [
            "FROM parent\n",
            "ENV foo=\"$bar\"\n",
            "LABEL label=\"$foo $bar\"\n"
        ]

        # Even though we inherit an ENV, this .envs count should only be for the
        # ENVs defined in *this* Dockerfile as we're parsing the Dockerfile and
        # the parent_env is only to satisfy use of inherited ENVs.
        assert len(df1.envs) == 1
        assert df1.envs.get('foo') == 'baz'
        assert len(df1.labels) == 1
        assert df1.labels.get('label') == 'baz baz'
Example 28
def test_no_repourls(tmpdir):
    for df_content in DOCKERFILES.values():
        df = DockerfileParser(str(tmpdir))
        df.lines = df_content.lines_before_add + \
                   df_content.lines_before_remove

        tasker, workflow = prepare(df.dockerfile_path,
                                   df_content.inherited_user)
        runner = PreBuildPluginsRunner(tasker, workflow, [{
            'name': InjectYumRepoPlugin.key,
        }])
        runner.run()
        assert InjectYumRepoPlugin.key is not None

        # Should be unchanged.
        assert df.lines == df_content.lines_before_add + \
                           df_content.lines_before_remove
def wrap_yum_commands(yum_repos, df_path):
    cmd_template = "RUN %(generate_repos)s&& %%(yum_command)s && yum clean all &&%(clean_repos)s"
    generate_repos = ""
    clean_repos = " rm -f"
    for repo, repo_content in yum_repos.items():
        generate_repos += 'printf "%s" >%s ' % (repo_content, repo)
        clean_repos += " %s" % repo

    wrap_cmd = cmd_template % {
        "generate_repos": generate_repos,
        "clean_repos": clean_repos,
    }

    logger.debug("wrap cmd is %s", repr(wrap_cmd))

    df = DockerfileParser(df_path)
    df_content = df.content
    df.content = alter_yum_commands(df_content, wrap_cmd)
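For illustration, with a single hypothetical repo file the template above expands to a one-line wrapper like the following; the %(yum_command)s placeholder is filled in later by alter_yum_commands:

    yum_repos = {'/etc/yum.repos.d/my.repo': '[my-repo]\nenabled=1\n'}
    # RUN printf "[my-repo]\nenabled=1\n" >/etc/yum.repos.d/my.repo && %(yum_command)s && yum clean all && rm -f /etc/yum.repos.d/my.repo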
def main():
    """lets start our task"""
    # clone the repo
    cleanup(LOCAL_WORK_COPY)
    try:
        r = Repo.clone_from(git_url, LOCAL_WORK_COPY)
    except GitCommandError as git_error:
        print(git_error)
        exit(-1)

    d = feedparser.parse(
        'https://github.com/mattermost/mattermost-server/releases.atom')
    release_version = d.entries[0].title[1:]

    # let's read the Dockerfile of the current master
    dfp = DockerfileParser()

    with open('./mattermost-openshift-workdir/Dockerfile') as f:
        dfp.content = f.read()

    if 'MATTERMOST_VERSION' in dfp.envs:
        dockerfile_version = dfp.envs['MATTERMOST_VERSION']

    # Let's check whether there is a new release
    if semver.compare(release_version, dockerfile_version) == 1:
        print("Updating from %s to %s" % (dockerfile_version, release_version))

        target_branch = 'bots-life/update-to-' + release_version

        if not pr_in_progress(target_branch):
            patch_and_push(dfp, r, target_branch, release_version)
            cleanup(LOCAL_WORK_COPY)

            create_pr_to_master(target_branch)
        else:
            print("There is an open PR for %s, aborting..." %
                  (target_branch))

    else:
        print("we are even with Mattermost %s, no need to update" %
              release_version)
Example 31
def get_base_image(dockerfile):
    return DockerfileParser(dockerfile).baseimage
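A usage sketch (the path is hypothetical); DockerfileParser accepts either the path to a Dockerfile or the directory containing one, and .baseimage returns the image referenced by the FROM instruction:

    print(get_base_image('/tmp/build-context/Dockerfile'))   # e.g. 'fedora:33'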
Example 32
def write_wave_home_docker(**kwargs):
    """
        Function to Generate the Dockerfile of the worker nodes
    """
    dfp = DockerfileParser(path='home.Dockerfile')
    dfp.content = template_home.format(**kwargs)
Example 33
    def needs_rebuild(self):
        """
        Check whether the commit that we recorded in the distgit content (according to cgit)
        matches the commit of the source (according to git ls-remote) and has been built
        (according to brew). Nothing is cloned and no existing clones are consulted.
        Returns: (<bool>, message). If True, message describing the details is returned. If False,
                None is returned.
        """
        component_name = self.get_component_name(default='')
        if not component_name:
            # This can happen for RPMs if they have never been rebased into
            # distgit.
            return True, 'Could not find component name; assuming never built'

        latest_build = self.get_latest_build(default='')
        if not latest_build:
            # Truly never built.
            return True, f'Component {component_name} has never been built'

        latest_build_creation_event_id = latest_build['creation_event_id']
        with self.runtime.pooled_koji_client_session() as koji_api:
            # getEvent returns something like {'id': 31825801, 'ts': 1591039601.2667}
            latest_build_creation_ts = int(
                koji_api.getEvent(latest_build_creation_event_id)['ts'])

        dgr = self.distgit_repo()
        with Dir(dgr.distgit_dir):
            ts, _ = exectools.cmd_assert('git show -s --format=%ct HEAD')
            distgit_head_commit_millis = int(ts)

        one_hour = (1 * 60 * 60 * 1000)  # in milliseconds

        if not dgr.has_source():
            if distgit_head_commit_millis > latest_build_creation_ts:
                # Two options:
                # 1. A user has made a commit to this dist-git only branch and there has been no build attempt
                # 2. We've already tried a build and the build failed.
                # To balance these two options, if the diff > 1 hour, request a build.
                if (distgit_head_commit_millis -
                        latest_build_creation_ts) > one_hour:
                    return True, 'Distgit only repo commit is at least one hour older than most recent build'
            return False, 'Distgit only repo commit is older than most recent build'

        # We have source.
        with Dir(dgr.source_path()):
            upstream_commit_hash, _ = exectools.cmd_assert(
                'git rev-parse HEAD', strip=True)

        dgr_path = pathlib.Path(dgr.distgit_dir)
        if self.namespace == 'containers' or self.namespace == 'apbs':
            dockerfile_path = dgr_path.joinpath('Dockerfile')
            if not dockerfile_path.is_file():
                return True, 'Distgit dockerfile not found -- appears that no rebase has ever been performed'
            dfp = DockerfileParser(str(dockerfile_path))
            last_disgit_rebase_hash = dfp.envs.get('SOURCE_GIT_COMMIT', None)
            if last_disgit_rebase_hash != upstream_commit_hash:
                return True, f'Distgit contains SOURCE_GIT_COMMIT hash {last_disgit_rebase_hash} different from upstream HEAD {upstream_commit_hash}'
        elif self.namespace == 'rpms':
            specs = list(dgr_path.glob('*.spec'))
            if not specs:
                return True, 'Distgit .spec file not found -- appears that no rebase has ever been performed'
            with specs[0].open(mode='r', encoding='utf-8') as spec_handle:
                spec_content = spec_handle.read()
                if upstream_commit_hash not in spec_content:
                    return True, f'Distgit spec file does not contain upstream hash {upstream_commit_hash}'
        else:
            raise IOError(f'Unknown namespace type: {self.namespace}')

        if distgit_head_commit_millis > latest_build_creation_ts:
            # Distgit is ahead of the latest build.
            # We've likely made an attempt to rebase and the subsequent build failed.
            # Try again if we are at least 6 hours out from the build to avoid
            # pestering image owners with repeated build failures.
            if distgit_head_commit_millis - latest_build_creation_ts > (
                    6 * one_hour):
                return True, 'It has been 6 hours since last failed build attempt'
            return False, f'Distgit commit ts {distgit_head_commit_millis} ahead of last successful build ts {latest_build_creation_ts}, but holding off for at least 6 hours before rebuild'
        else:
            # The latest build is newer than the latest distgit commit. No change required.
            return False, 'Latest build is newer than latest upstream/distgit commit -- no build required'
Example 34
def get_base_image_from_dockerfile(docker_file_path):
    dockerfile_parser = DockerfileParser(docker_file_path)
    base_image, base_image_tag = dockerfile_parser.baseimage.split(":")
    return base_image, base_image_tag
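Note that the split(':') above raises ValueError when the FROM line carries no explicit tag. A slightly more defensive variant (hypothetical helper built on the same DockerfileParser call) could default the tag instead:

    def get_base_image_and_tag(docker_file_path, default_tag="latest"):
        base_image = DockerfileParser(docker_file_path).baseimage
        name, _, tag = base_image.partition(":")   # still naive for registries with a port
        return name, tag or default_tag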
Example 35
def write_heft_docker(**kwargs):
    """
				Function to Generate the Dockerfile of HEFT
		"""
    dfp = DockerfileParser(path='heft.Dockerfile')
    dfp.content = template_heft.format(**kwargs)
def write_circe_computing_worker_docker(**kwargs):
    """
        Function to Generate the Dockerfile of the worker nodes
    """
    dfp = DockerfileParser(path='computing_worker_node.Dockerfile')
    dfp.content = template_computing_worker.format(**kwargs)
def write_circe_worker_nondag(**kwargs):
    """
      Function to Generate the Dockerfile of the worker nodes of Execution Profiler.
    """
    dfp = DockerfileParser(path='nondag_worker.Dockerfile')
    dfp.content = template_nondag_worker.format(**kwargs)
Example 38
    def test_nonseekable_fileobj(self):
        with pytest.raises(AttributeError):
            DockerfileParser(fileobj=sys.stdin)
Example 39
    def test_path_and_fileobj_together(self):
        with pytest.raises(ValueError):
            DockerfileParser(path='.', fileobj=six.StringIO())
def write_exec_home_docker(**kwargs):
    """
      Function to Generate the Dockerfile of the home/master node
    """
    dfp = DockerfileParser(path='exec_home.Dockerfile')
    dfp.content = template_home.format(**kwargs)
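The write_* helpers above all rely on the same behaviour: assigning to DockerfileParser.content renders the template and writes it to the path given at construction time. A minimal self-contained sketch (template and values are made up):

    from dockerfile_parse import DockerfileParser

    template = 'FROM {base_image}\nCOPY app/ /app\nCMD ["python3", "/app/main.py"]\n'
    dfp = DockerfileParser(path='example.Dockerfile')
    dfp.content = template.format(base_image='ubuntu:20.04')   # writes example.Dockerfile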
Example 41
    def _run_assertions(self, assemblies=False):
        """
        Asserts standard labels are updated
        - version
        - release
        - commit labels
        Asserts that the default doozer injected environment variables are not present
        """
        target_ocp_build_data_commitish = '4c7701c8ad3f045f1fc1be826d55c5205a3e5b76'
        target_version = 'v5.6.777'
        target_release = '999.p0'
        uuid = '0000'
        upstream_commit_oeb = '5397b55f38a11c61749398f0a3759d4eab5b3960'
        upstream_commit_oeb_short = upstream_commit_oeb[:7]
        upstream_commit_ced = '0f7594616f7ea72e28f065ef2c172fa3d852abcf'
        upstream_commit_ced_short = upstream_commit_ced[:7]
        doozer_args = [
            '--assembly',
            'tester',  # This should only have an effect if assemblies=True
            '--group',
            f'openshift-4.6@{target_ocp_build_data_commitish}',
            '-i',
            'openshift-enterprise-base',
            '-i',
            'cluster-etcd-operator',
            '--lock-upstream',
            'openshift-enterprise-base',
            upstream_commit_oeb,
            '--lock-upstream',
            'cluster-etcd-operator',
            upstream_commit_ced,
            '--lock-runtime-uuid',
            uuid,
            'images:rebase',
            '--version',
            target_version,
            '--release',
            '999.p?',
            '-m',
            'test message'
        ]

        if assemblies:
            doozer_args.insert(0, '--enable-assemblies')
            target_release += '.assembly.tester'

        _, _ = self.run_doozer(*doozer_args)

        oeb_dfp = DockerfileParser(
            str(
                self.distgit_image_path('openshift-enterprise-base').joinpath(
                    'Dockerfile')))
        ced_dfp = DockerfileParser(
            str(
                self.distgit_image_path('cluster-etcd-operator').joinpath(
                    'Dockerfile')))

        for dfp in (oeb_dfp, ced_dfp):
            # Check that version /release are being populated
            self.assertEqual(dfp.labels['version'], target_version)
            self.assertEqual(dfp.labels['release'], target_release)
            self.assertEqual(dfp.envs['BUILD_VERSION'], target_version)
            self.assertEqual(dfp.envs['BUILD_RELEASE'], target_release)
            self.assertEqual(dfp.envs['OS_GIT_MAJOR'], '5')
            self.assertEqual(dfp.envs['OS_GIT_MINOR'], '6')
            self.assertEqual(dfp.envs['OS_GIT_PATCH'], '777')

        self.assertEqual(oeb_dfp.envs['OS_GIT_COMMIT'],
                         upstream_commit_oeb_short)
        self.assertEqual(
            oeb_dfp.envs['OS_GIT_VERSION'],
            f'{target_version}-{target_release}-{upstream_commit_oeb_short}'.
            lstrip('v'))
        self.assertEqual(oeb_dfp.envs['SOURCE_GIT_COMMIT'],
                         upstream_commit_oeb)
        self.assertEqual(oeb_dfp.labels['io.openshift.build.commit.id'],
                         upstream_commit_oeb)
        self.assertEqual(oeb_dfp.labels['io.openshift.build.source-location'],
                         'https://github.com/openshift/images')
        self.assertEqual(
            oeb_dfp.labels['io.openshift.build.commit.url'],
            f'https://github.com/openshift/images/commit/{upstream_commit_oeb}'
        )
        self.assertEqual(len(oeb_dfp.parent_images), 1)
        self.assertEqual(oeb_dfp.parent_images[0], 'openshift/ose-base:ubi8')
        self.assertTrue(
            f'{target_version}.{uuid}',
            self.distgit_image_path('openshift-enterprise-base').joinpath(
                'additional-tags').read_text())

        self.assertEqual(ced_dfp.envs['OS_GIT_COMMIT'],
                         upstream_commit_ced_short)
        self.assertEqual(
            ced_dfp.envs['OS_GIT_VERSION'],
            f'{target_version}-{target_release}-{upstream_commit_ced_short}'.
            lstrip('v'))
        self.assertEqual(ced_dfp.envs['SOURCE_GIT_COMMIT'],
                         upstream_commit_ced)
        self.assertEqual(ced_dfp.envs['SOURCE_DATE_EPOCH'], '1603368883')
        self.assertEqual(ced_dfp.labels['io.openshift.build.commit.id'],
                         upstream_commit_ced)
        self.assertEqual(ced_dfp.labels['io.openshift.build.source-location'],
                         'https://github.com/openshift/cluster-etcd-operator')
        self.assertEqual(
            ced_dfp.labels['io.openshift.build.commit.url'],
            f'https://github.com/openshift/cluster-etcd-operator/commit/{upstream_commit_ced}'
        )
        self.assertEqual(ced_dfp.labels['io.openshift.maintainer.product'],
                         'OpenShift Container Platform')
        self.assertEqual(ced_dfp.labels['io.openshift.maintainer.component'],
                         'Etcd')
        self.assertEqual(ced_dfp.labels['com.redhat.component'],
                         'cluster-etcd-operator-container')
        self.assertEqual(ced_dfp.labels['name'],
                         'openshift/ose-cluster-etcd-operator')
        self.assertEqual(ced_dfp.labels['io.openshift.release.operator'],
                         'true')
        self.assertEqual(len(ced_dfp.parent_images), 2)
        self.assertEqual(ced_dfp.parent_images[0],
                         'openshift/golang-builder:rhel_8_golang_1.15')
        self.assertEqual(ced_dfp.parent_images[1],
                         f'openshift/ose-base:{target_version}.{uuid}')
        self.assertTrue(
            f'{target_version}.{uuid}',
            self.distgit_image_path('cluster-etcd-operator').joinpath(
                'additional-tags').read_text())
Example 42
def images_streams_prs(runtime, github_access_token, bug, interstitial, ignore_ci_master, draft_prs, moist_run, add_labels):
    runtime.initialize(clone_distgits=False, clone_source=False)
    g = Github(login_or_token=github_access_token)
    github_user = g.get_user()

    major = runtime.group_config.vars['MAJOR']
    minor = runtime.group_config.vars['MINOR']
    interstitial = int(interstitial)

    master_major, master_minor = extract_version_fields(what_is_in_master(), at_least=2)
    if not ignore_ci_master and (major > master_major or minor > master_minor):
        # ART is building a release before it is in master. Too early to open PRs.
        runtime.logger.warning(f'Target {major}.{minor} has not been in master yet (it is tracking {master_major}.{master_minor}); skipping PRs')
        exit(0)

    prs_in_master = (major == master_major and minor == master_minor) and not ignore_ci_master

    pr_links = {}  # map of distgit_key to PR URLs associated with updates
    new_pr_links = {}
    skipping_dgks = set()  # If a distgit key is skipped, its children will see it in this list and skip themselves.
    for image_meta in runtime.ordered_image_metas():
        dgk = image_meta.distgit_key
        logger = image_meta.logger
        logger.info('Analyzing image')

        alignment_prs_config = image_meta.config.content.source.ci_alignment.streams_prs

        if alignment_prs_config and alignment_prs_config.enabled is not Missing and not alignment_prs_config.enabled:
            # Make sure this is an explicit False. Missing means the default or True.
            logger.info('The image has alignment PRs disabled; ignoring')
            continue

        from_config = image_meta.config['from']
        if not from_config:
            logger.info('Skipping PRs since there is no configured .from')
            continue

        desired_parents = []
        builders = from_config.builder or []
        for builder in builders:
            upstream_image = resolve_upstream_from(runtime, builder)
            if not upstream_image:
                logger.warning(f'Unable to resolve upstream image for: {builder}')
                break
            desired_parents.append(upstream_image)

        parent_upstream_image = resolve_upstream_from(runtime, from_config)
        if len(desired_parents) != len(builders) or not parent_upstream_image:
            logger.warning('Unable to find all ART equivalent upstream images for this image')
            continue

        desired_parents.append(parent_upstream_image)
        desired_parent_digest = calc_parent_digest(desired_parents)
        logger.info(f'Found desired FROM state of: {desired_parents} with digest: {desired_parent_digest}')

        source_repo_url, source_repo_branch = _get_upstream_source(runtime, image_meta)

        if not source_repo_url:
            # No upstream to clone; no PRs to open
            continue

        public_repo_url, public_branch = runtime.get_public_upstream(source_repo_url)
        if not public_branch:
            public_branch = source_repo_branch

        # There are two standard upstream branching styles:
        # release-4.x   : CI fast-forwards from master when appropriate
        # openshift-4.x : Upstream team manages completely.
        # For the former style, we may need to open the PRs against master.
        # For the latter style, always open directly against named branch
        if public_branch.startswith('release-') and prs_in_master:
            # TODO: auto-detect default branch for repo instead of assuming master
            public_branch = 'master'

        _, org, repo_name = split_git_url(public_repo_url)

        public_source_repo = g.get_repo(f'{org}/{repo_name}')

        try:
            fork_repo_name = f'{github_user.login}/{repo_name}'
            fork_repo = g.get_repo(fork_repo_name)
        except UnknownObjectException:
            # Repo doesn't exist; fork it
            fork_repo = github_user.create_fork(public_source_repo)

        fork_branch_name = f'art-consistency-{runtime.group_config.name}-{dgk}'
        fork_branch_head = f'{github_user.login}:{fork_branch_name}'

        fork_branch = None
        try:
            fork_branch = fork_repo.get_branch(fork_branch_name)
        except UnknownObjectException:
            # Doesn't presently exist and will need to be created
            pass
        except GithubException as ge:
            # This API seems to return 404 instead of UnknownObjectException.
            # So allow 404 to pass through as well.
            if ge.status != 404:
                raise

        public_repo_url = convert_remote_git_to_ssh(public_repo_url)
        clone_dir = os.path.join(runtime.working_dir, 'clones', dgk)
        # Clone the private url to make the best possible use of our doozer_cache
        runtime.git_clone(source_repo_url, clone_dir)

        with Dir(clone_dir):
            exectools.cmd_assert(f'git remote add public {public_repo_url}')
            exectools.cmd_assert(f'git remote add fork {convert_remote_git_to_ssh(fork_repo.git_url)}')
            exectools.cmd_assert('git fetch --all')

            # The path to the Dockerfile in the target branch
            if image_meta.config.content.source.dockerfile is not Missing:
                # Be aware that this attribute sometimes contains path elements too.
                dockerfile_name = image_meta.config.content.source.dockerfile
            else:
                dockerfile_name = "Dockerfile"

            df_path = Dir.getpath()
            if image_meta.config.content.source.path:
                dockerfile_name = os.path.join(image_meta.config.content.source.path, dockerfile_name)

            df_path = df_path.joinpath(dockerfile_name)

            fork_branch_parent_digest = None
            fork_branch_parents = None
            if fork_branch:
                # If there is already an art reconciliation branch, get an MD5
                # of the FROM images in the Dockerfile in that branch.
                exectools.cmd_assert(f'git checkout fork/{fork_branch_name}')
                fork_branch_parent_digest, fork_branch_parents = extract_parent_digest(df_path)

            # Now change over to the target branch in the actual public repo
            exectools.cmd_assert(f'git checkout public/{public_branch}')

            source_branch_parent_digest, source_branch_parents = extract_parent_digest(df_path)

            if desired_parent_digest == source_branch_parent_digest:
                green_print('Desired digest and source digest match; Upstream is in a good state')
                continue

            yellow_print(f'Upstream dockerfile does not match desired state in {public_repo_url}/blob/{public_branch}/{dockerfile_name}')
            print(f'Desired parents: {desired_parents} ({desired_parent_digest})')
            print(f'Source parents: {source_branch_parents} ({source_branch_parent_digest})')
            print(f'Fork branch digest: {fork_branch_parents} ({fork_branch_parent_digest})')

            first_commit_line = f"Updating {image_meta.name} builder & base images to be consistent with ART"
            reconcile_info = f"Reconciling with {convert_remote_git_to_https(runtime.gitdata.origin_url)}/tree/{runtime.gitdata.commit_hash}/images/{os.path.basename(image_meta.config_filename)}"

            diff_text = None
            if fork_branch_parent_digest != desired_parent_digest:
                # The fork branch either does not exist, or does not have the desired parent image state
                # Let's create a local branch that will contain the Dockerfile in the state we desire.
                work_branch_name = '__mod'
                exectools.cmd_assert(f'git checkout public/{public_branch}')
                exectools.cmd_assert(f'git checkout -b {work_branch_name}')
                with df_path.open(mode='r+') as handle:
                    dfp = DockerfileParser(cache_content=True, fileobj=io.BytesIO())
                    dfp.content = handle.read()
                    dfp.parent_images = desired_parents
                    handle.truncate(0)
                    handle.seek(0)
                    handle.write(dfp.content)

                diff_text, _ = exectools.cmd_assert(f'git diff {str(df_path)}')

                if not moist_run:
                    exectools.cmd_assert(f'git add {str(df_path)}')
                    commit_prefix = ''
                    if repo_name.startswith('kubernetes'):
                        # A couple of repos have this requirement: openshift/kubernetes & openshift/kubernetes-autoscaler.
                        # This check may suffice for now, but it may eventually need to be in doozer metadata.
                        commit_prefix = 'UPSTREAM: <carry>: '
                    commit_msg = f"""{commit_prefix}{first_commit_line}
{reconcile_info}
"""
                    exectools.cmd_assert(f'git commit -m "{commit_msg}"')  # Add a commit atop the public branch's current state
                    # Create or update the remote fork branch
                    exectools.cmd_assert(f'git push --force fork {work_branch_name}:{fork_branch_name}')

            # At this point, we have a fork branch in the proper state
            pr_body = f"""{first_commit_line}
{reconcile_info}

If you have any questions about this pull request, please reach out to `@art-team` in the `#aos-art` coreos slack channel.
"""

            parent_pr_url = None
            parent_meta = image_meta.resolve_parent()
            if parent_meta:
                if parent_meta.distgit_key in skipping_dgks:
                    skipping_dgks.add(image_meta.distgit_key)
                    yellow_print(f'Image has parent {parent_meta.distgit_key} which was skipped; skipping self: {image_meta.distgit_key}')
                    continue

                parent_pr_url = pr_links.get(parent_meta.distgit_key, None)
                if parent_pr_url:
                    if parent_meta.config.content.source.ci_alignment.streams_prs.merge_first:
                        skipping_dgks.add(image_meta.distgit_key)
                        yellow_print(f'Image has parent {parent_meta.distgit_key} open PR ({parent_pr_url}) and streams_prs.merge_first==True; skipping PR opening for this image {image_meta.distgit_key}')
                        continue

                    # If the parent has an open PR associated with it, make sure the
                    # child PR notes that the parent PR should merge first.
                    pr_body += f'\nDepends on {parent_pr_url} . Allow it to merge and then run `/test all` on this PR.'

            # Let's see if there is a PR opened
            open_prs = list(public_source_repo.get_pulls(state='open', head=fork_branch_head))
            if open_prs:
                existing_pr = open_prs[0]
                # Update the body, but never the title; the upstream team may need to set something like a
                # "Bug XXXX:" prefix there. Don't muck with it.

                if alignment_prs_config.auto_label and add_labels:
                    # If we are to automatically add labels to this upstream PR, do so.
                    existing_pr.set_labels(*alignment_prs_config.auto_label)

                existing_pr.edit(body=pr_body)
                pr_url = existing_pr.html_url
                pr_links[dgk] = pr_url
                yellow_print(f'A PR is already open requesting desired reconciliation with ART: {pr_url}')
                continue

            # Otherwise, we need to create a pull request
            if moist_run:
                pr_links[dgk] = f'MOIST-RUN-PR:{dgk}'
                green_print(f'Would have opened PR against: {public_source_repo.html_url}/blob/{public_branch}/{dockerfile_name}.')
                if parent_pr_url:
                    green_print(f'Would have identified dependency on PR: {parent_pr_url}.')
                if diff_text:
                    yellow_print(diff_text)
                else:
                    yellow_print(f'Fork from which PR would be created ({fork_branch_head}) is populated with desired state.')
            else:
                pr_title = first_commit_line
                if bug:
                    pr_title = f'Bug {bug}: {pr_title}'
                new_pr = public_source_repo.create_pull(title=pr_title, body=pr_body, base=public_branch, head=fork_branch_head, draft=draft_prs)
                if alignment_prs_config.auto_label and add_labels:
                    # If we are to automatically add labels to this upstream PR, do so.
                    new_pr.set_labels(*alignment_prs_config.auto_label)
                pr_msg = f'A new PR has been opened: {new_pr.html_url}'
                pr_links[dgk] = new_pr.html_url
                new_pr_links[dgk] = new_pr.html_url
                logger.info(pr_msg)
                yellow_print(pr_msg)
                print(f'Sleeping {interstitial} seconds before opening another PR to prevent flooding prow...')
                time.sleep(interstitial)

    if new_pr_links:
        print('Newly opened PRs:')
        print(yaml.safe_dump(new_pr_links))

    if pr_links:
        print('Currently open PRs:')
        print(yaml.safe_dump(pr_links))
Example no. 43
0
def extract_parent_digest(dockerfile_path):
    with dockerfile_path.open(mode='r') as handle:
        dfp = DockerfileParser(cache_content=True, fileobj=io.BytesIO())
        dfp.content = handle.read()
    return calc_parent_digest(dfp.parent_images), dfp.parent_images
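# A minimal usage sketch for the helper above. The path is hypothetical and
# calc_parent_digest lives elsewhere in the project; the digest it returns is
# presumably used to detect changes in the Dockerfile's FROM lines.
from pathlib import Path

digest, parent_images = extract_parent_digest(Path('distgit/Dockerfile'))
print(parent_images)   # e.g. ['registry.access.redhat.com/ubi8/ubi:latest']
print(digest)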
Example no. 44
0
def get_df_parser(git_uri, git_ref, git_branch=None):
    with checkout_git_repo(git_uri, git_ref, git_branch) as code_dir:
        dfp = DockerfileParser(os.path.join(code_dir), cache_content=True)
    return dfp
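# A usage sketch with hypothetical repository coordinates. Because the parser
# is created with cache_content=True, its content stays readable even after
# the temporary clone made by checkout_git_repo has been cleaned up.
dfp = get_df_parser('https://github.com/example/app.git', 'main')
print(dfp.baseimage)
print(dfp.labels)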
Example no. 45
0
def run_covscan(cc: CoverityContext) -> bool:

    dg_path = cc.dg_path
    with Dir(dg_path):

        dockerfile_path = dg_path.joinpath('Dockerfile')
        if not dockerfile_path.exists():
            cc.logger.error(
                'Dockerfile does not exist in distgit; not rebased yet?')
            return False

        dfp = DockerfileParser(str(dockerfile_path))

        if cc.are_results_done():
            cc.logger.info(
                f'Scan results already exist for {cc.dg_commit_hash}; skipping scan'
            )
            # Even if it is complete, write a record for Jenkins so that results can be sent to prodsec.
            for i in range(len(dfp.parent_images)):
                records_results(cc,
                                stage_number=i + 1,
                                waived_cov_path_root=None,
                                write_only=True)
            return True

        def compute_parent_tag(parent_image_name):
            parent_sig = hashlib.md5(
                parent_image_name.encode("utf-8")).hexdigest()
            return f'parent-{parent_sig}'

        covscan_df = dg_path.joinpath('Dockerfile.covscan')

        with covscan_df.open(mode='w+') as df_out:

            df_line = 0
            stage_number = 0
            for entry in dfp.structure:

                def append_analysis(stage_number):
                    # We will have monitored compilation processes, but we need to scan for non-compiled code
                    # like python / nodejs.
                    # cov-capture will search for files like .js, typescript, python in the source directory;
                    # cov-analyze will then search for issues within those non-compiled files.
                    # https://community.synopsys.com/s/question/0D52H000054zcvZSAQ/i-would-like-to-know-the-coverity-static-analysis-process-for-node-js-could-you-please-provide-me-the-sample-steps-to-run-coverity-for-node-js

                    # Why cov-manage-emit?
                    # coverity requires a consistent hostname for each tool run. podman does not allow the hostname to be
                    # set and it varies over the course of the build. That is why we reset the hostname in emit before each
                    # tool run.

                    container_stage_cov_dir = str(
                        cc.container_stage_cov_path(stage_number))

                    analysis_script_name = f'_gen_{cc.image.image_name_short}_stage_{stage_number}_analysis.sh'
                    with open(dg_path.joinpath(analysis_script_name),
                              mode='w+',
                              encoding='utf-8') as sh:
                        sh.write(f'''
#!/bin/sh
set -o xtrace
set -eo pipefail

if [[ -f "{container_stage_cov_dir}/all_results.js" ]]; then
    echo "Results have already been analyzed for this this stage -- found all_results.js; skipping analysis"
    exit 0
fi

if [[ "{stage_number}" == "1" ]]; then
    # hostname changes between steps in the Dockerfile; reset to current before running coverity tools.
    # use || true because it is possible nothing has been emitted before this step
    cov-manage-emit --dir={container_stage_cov_dir} reset-host-name || true
    echo "Running un-compiled source search as hostname: $(hostname)"
    timeout 3h cov-capture --dir {container_stage_cov_dir} --source-dir /covscan-src || echo "Error running source detection"
fi

if ls {container_stage_cov_dir}/emit/*/config; then
    echo "Running analysis phase as hostname: $(hostname)"
    # hostname changes between steps in the Dockerfile; reset to current before running coverity tools.
    cov-manage-emit --dir={container_stage_cov_dir} reset-host-name || true
    if timeout 3h cov-analyze  --dir={container_stage_cov_dir} "--wait-for-license" "-co" "ASSERT_SIDE_EFFECT:macro_name_lacks:^assert_(return|se)\\$" "-co" "BAD_FREE:allow_first_field:true" "--include-java" "--fb-max-mem=4096" "--all" "--security" "--concurrency" --allow-unmerged-emits > /tmp/analysis.txt 2>&1 ; then
        echo "Analysis completed successfully"
        cat /tmp/analysis.txt
    else
        # In some cases, no translation units were emitted and analyze will exit with an error; ignore that error
        # if it is because nothing was emitted.
        cat /tmp/analysis.txt
        if cat /tmp/analysis.txt | grep "contains no translation units"; then
            echo "Nothing was emitted; ignoring analyze failure."
            exit 0
        else
            echo "Analysis failed for unknown reason!"
            exit 1
        fi
    fi
    cov-format-errors --json-output-v2 /dev/stdout --dir={container_stage_cov_dir} > {container_stage_cov_dir}/{COVSCAN_ALL_JS_FILENAME}
else
    echo "No units have been emitted for analysis by this stage; skipping analysis"
fi
''')
                    df_out.write(f'''
ADD {analysis_script_name} /
RUN chmod +x /{analysis_script_name}
# Finally, run the analysis step script.
# Route stderr to stdout so everything is in one stream; otherwise, it is hard to correlate a command with its stderr.
RUN /{analysis_script_name} 2>&1
''')

                    # Before running cov-analyze, make sure that all_js doesn't exist (i.e. we haven't already run it
                    # in this workspace) AND that summary.txt exists (i.e. at least one item in this stage emitted results).
                    df_out.write(f'''
# Dockerfile steps run as root; change permissions back to the doozer user before leaving the stage
RUN chown -R {os.getuid()}:{os.getgid()} {container_stage_cov_dir}
''')

                df_line += 1
                content = entry['content']
                instruction = entry['instruction'].upper()

                if instruction == 'USER':
                    # Stay as root
                    continue

                if instruction == 'FROM':
                    stage_number += 1

                    if stage_number > 1:
                        # We are about to transition stages, do the analysis first.
                        append_analysis(stage_number - 1)

                    image_name_components = content.split(
                    )  # [ 'FROM', image-name, (possible 'AS', ...) ]
                    image_name = image_name_components[1]
                    parent_tag = compute_parent_tag(image_name)
                    if not _covscan_prepare_parent(cc, image_name, parent_tag):
                        return False

                    image_name_components[1] = parent_tag
                    df_out.write(' '.join(image_name_components) + '\n')
                    # Label these images so we can find and delete them later
                    df_out.write(
                        f'LABEL DOOZER_COVSCAN_RUNNER={cc.runtime.group_config.name}\n'
                    )
                    df_out.write(
                        f'LABEL DOOZER_COVSCAN_COMPONENT={cc.image.distgit_key}\n'
                    )
                    df_out.write(
                        'ENTRYPOINT []\n'
                    )  # Ensure all invocations use /bin/sh -c, the default docker entrypoint
                    df_out.write(
                        'USER 0\n')  # Just make sure all images are consistent

                    # Each stage will have its own cov output directory
                    df_out.write(f'''
RUN mkdir -p {cc.container_stage_cov_path(stage_number)}
# If we are reusing a workspace, coverity cannot pick up where it left off; clear anything already emitted
RUN rm -rf {cc.container_stage_cov_path(stage_number)}/emit
''')

                    # For each new stage, we also need to make sure we have the appropriate repos enabled for this image
                    df_out.write(f'''
# Ensure that the build process can access the same RPMs that would be available during a brew build
RUN curl {cc.image.cgit_file_url(".oit/" + cc.repo_type + ".repo")} --output /etc/yum.repos.d/oit.repo 2>&1
''')
                    continue

                if instruction in ('ENTRYPOINT', 'CMD'):
                    df_out.write(f'# Disabling: {content}')
                    continue

                if instruction == 'RUN':
                    container_stage_cov_dir = str(
                        cc.container_stage_cov_path(stage_number))

                    # For RUN commands, we need to execute the command under the watchful eye of coverity
                    # tools. Create a batch file that will wrap the command
                    command_to_run = content.strip()[4:]  # Remove 'RUN '
                    temp_script_name = f'_gen_{cc.image.image_name_short}_stage_{stage_number}_line_{df_line}.sh'
                    with open(dg_path.joinpath(temp_script_name),
                              mode='w+',
                              encoding='utf-8') as sh:
                        sh.write(f'''
#!/bin/sh
set -o xtrace
set -eo pipefail
echo "Running build as hostname: $(hostname)"
{command_to_run}
''')
                    df_out.write(f'''
ADD {temp_script_name} .
RUN chmod +x {temp_script_name}
# Finally, run the script while coverity is watching. If there is already a summary file, assume we have already run
# the build in this working directory.
# The hostname changes with each run, so reset-host-name before cov-build.
# Route stderr to stdout so everything is in one stream; otherwise, it is hard to tell which command failed.
RUN cov-manage-emit --dir={container_stage_cov_dir} reset-host-name; timeout 3h cov-build --dir={container_stage_cov_dir} ./{temp_script_name} 2>&1
''')
                else:  # e.g. COPY, ENV, WORKDIR...
                    # Just pass it straight through to the covscan Dockerfile
                    df_out.write(f'{content}\n')

            append_analysis(stage_number)

        # The dockerfile which will run the coverity builds and analysis for each stage has been created.
        # Now, run the build (and execute those steps). The output will be to <cov_path>/<stage_number>
        run_tag = f'{cc.image.image_name_short}_{cc.runtime.group_config.name}'
        rc, stdout, stderr = exectools.cmd_gather(
            f'{cc.podman_cmd} build -v {str(cc.cov_root_path)}:/cov:z -v {str(dg_path)}:/covscan-src:z -t {run_tag} -f {str(covscan_df)} {str(dg_path)}',
            set_env=cc.podman_env)
        cc.logger.info(f'''Output from covscan build for {cc.image.distgit_key}
stdout: {stdout}
stderr: {stderr}

''')

        _, cleanup_out, cleanup_err = exectools.cmd_gather(
            f'{cc.podman_cmd} rmi -f {run_tag}', set_env=cc.podman_env)
        cc.logger.info(f'''Output from image clean up {cc.image.distgit_key}
stdout: {cleanup_out}
stderr: {cleanup_err}
''')

        if rc != 0:
            cc.logger.error(
                f'Error running covscan build for {cc.image.distgit_key} ({str(covscan_df)})'
            )
            # TODO: log this as a record and make sure the pipeline warns artist
            return False

        # For each stage, let's now compute diffs & store results
        waived_cov_path_root = cc.find_nearest_waived_cov_root_path()
        for i in range(len(dfp.parent_images)):
            records_results(cc,
                            stage_number=i + 1,
                            waived_cov_path_root=waived_cov_path_root)

    return True
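# For reference, the parent tag used above is nothing more than an MD5 of the
# FROM image name, so a given parent always maps to the same prepared image.
# A standalone sketch of that helper (the image name is illustrative):
import hashlib

def compute_parent_tag(parent_image_name: str) -> str:
    return f'parent-{hashlib.md5(parent_image_name.encode("utf-8")).hexdigest()}'

print(compute_parent_tag('registry.access.redhat.com/ubi8/ubi:8.4'))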
Example no. 46
0
def df_parser(df_path,
              workflow=None,
              cache_content=False,
              env_replace=True,
              parent_env=None):
    """
    Wrapper for dockerfile_parse's DockerfileParser that takes into account
    parent_env inheritance.

    :param df_path: string, path to Dockerfile (normally in DockerBuildWorkflow instance)
    :param workflow: DockerBuildWorkflow object instance, used to find parent image information
    :param cache_content: bool, tells DockerfileParser to cache Dockerfile content
    :param env_replace: bool, replace ENV declarations as part of DockerfileParser evaluation
    :param parent_env: dict, parent ENV key:value pairs to be inherited

    :return: DockerfileParser object instance
    """

    p_env = {}

    if parent_env:
        # If parent_env passed in, just use that
        p_env = parent_env

    elif workflow:

        # If parent_env is not provided, but workflow is then attempt to inspect
        # the workflow for the parent_env

        try:
            parent_config = workflow.base_image_inspect[INSPECT_CONFIG]
        except (AttributeError, TypeError, KeyError):
            logger.debug("base image unable to be inspected")
        else:
            try:
                tmp_env = parent_config["Env"]
                logger.debug("Parent Config ENV: %s" % tmp_env)

                if isinstance(tmp_env, dict):
                    p_env = tmp_env
                elif isinstance(tmp_env, list):
                    try:
                        for key_val in tmp_env:
                            key, val = key_val.split("=")
                            p_env[key] = val

                    except ValueError:
                        logger.debug(
                            "Unable to parse all of Parent Config ENV")

            except KeyError:
                logger.debug(
                    "Parent Environment not found, not applied to Dockerfile")

    try:
        dfparser = DockerfileParser(df_path,
                                    cache_content=cache_content,
                                    env_replace=env_replace,
                                    parent_env=p_env)
    except TypeError:
        logger.debug(
            "Old version of dockerfile-parse detected, unable to set inherited parent ENVs"
        )
        dfparser = DockerfileParser(
            df_path,
            cache_content=cache_content,
            env_replace=env_replace,
        )

    return dfparser
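# A minimal sketch of calling df_parser directly, assuming a Dockerfile exists
# at ./Dockerfile. Passing parent_env explicitly bypasses the workflow
# inspection branch above; values referencing inherited ENV vars get resolved.
parent_env = {'GOPATH': '/go', 'NODE_ENV': 'production'}
dfp = df_parser('Dockerfile', parent_env=parent_env)
print(dfp.envs)
print(dfp.labels)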
def write_circe_controller_nondag(**kwargs):
    """
        Function to generate the Dockerfile of the non-DAG controller node
    """
    dfp = DockerfileParser(path='controller_nondag_node.Dockerfile')
    dfp.content = template_nondag.format(**kwargs)
def write_profiler_worker_docker(**kwargs):
    """
        Function to Generate the Dockerfile of the worker nodes
    """
    dfp = DockerfileParser(path='profiler_worker.Dockerfile')
    dfp.content = template_worker.format(**kwargs)
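# These template writers lean on a dockerfile_parse behaviour worth noting:
# when DockerfileParser is constructed with a path, assigning to .content
# writes the rendered text to that file. A standalone sketch:
from dockerfile_parse import DockerfileParser

dfp = DockerfileParser(path='example.Dockerfile')
dfp.content = 'FROM fedora\nRUN echo hello\n'   # persisted to example.Dockerfile
print(dfp.baseimage)                            # -> fedora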
Example no. 49
0
    def validate_file(self, f: Path):
        path = f.relative_to(BASE_DIR)
        self._error(f.suffix != '.yaml', f'file {path} has .yaml extension')
        self._error(f.name != '.gitkeep', f'{path} found, should be named .keep')

        if f.name == 'docker-compose.yml':
            with f.open() as file:
                dc = yaml.safe_load(file)

            for opt in DC_REQUIRED_OPTIONS:
                self._error(opt in dc, f'required option {opt} not in {path}')

            dc_version = float(dc['version'])
            self._error(
                2.4 <= dc_version < 3,
                f'invalid version in {path}, need >=2.4 and <3, got {dc_version}',
            )

            for opt in dc:
                self._error(
                    opt in DC_ALLOWED_OPTIONS,
                    f'option {opt} in {path} is not allowed',
                )

            services = []
            databases = []
            proxies = []
            dependencies = defaultdict(list)

            for container, container_conf in dc['services'].items():
                for opt in CONTAINER_REQUIRED_OPTIONS:
                    self._error(
                        opt in container_conf,
                        f'required option {opt} not in {path} for container {container}',
                    )

                for opt in container_conf:
                    self._error(
                        opt in CONTAINER_ALLOWED_OPTIONS,
                        f'option {opt} in {path} is not allowed for container {container}',
                    )

                if self._error(
                        'image' not in container_conf or 'build' not in container_conf,
                        f'both image and build options in {path} for container {container}'):
                    continue

                if 'image' in container_conf:
                    image = container_conf['image']
                else:
                    build = container_conf['build']
                    if isinstance(build, str):
                        dockerfile = f.parent / build / 'Dockerfile'
                    else:
                        context = build['context']
                        if 'dockerfile' in build:
                            dockerfile = f.parent / context / build['dockerfile']
                        else:
                            dockerfile = f.parent / context / 'Dockerfile'

                    with dockerfile.open() as file:
                        dfp = DockerfileParser(fileobj=file)
                        image = dfp.baseimage

                if 'depends_on' in container_conf:
                    for dependency in container_conf['depends_on']:
                        dependencies[container].append(dependency)

                is_service = True
                for database in DATABASES:
                    if database in image:
                        databases.append(container)
                        is_service = False

                for proxy in PROXIES:
                    if proxy in image:
                        proxies.append(container)
                        is_service = False

                for cleaner in CLEANERS:
                    if cleaner in image:
                        is_service = False

                if is_service:
                    services.append(container)
                    for opt in SERVICE_REQUIRED_OPTIONS:
                        self._warning(
                            opt in container_conf,
                            f'required option {opt} not in {path} for service {container}',
                        )

                    for opt in container_conf:
                        self._warning(
                            opt in SERVICE_ALLOWED_OPTIONS,
                            f'option {opt} in {path} is not allowed for service {container}',
                        )

            for service in services:
                for database in databases:
                    self._warning(
                        service in dependencies and database in dependencies[service],
                        f'service {service} may need to depends_on database {database}')

            for proxy in proxies:
                for service in services:
                    self._warning(
                        proxy in dependencies and service in dependencies[proxy],
                        f'proxy {proxy} may need to depends_on service {service}')
def write_profiler_home_docker(**kwargs):
    """
        Function to Generate the Dockerfile of the home/master node of Network Profiler.
    """
    dfp = DockerfileParser(path='profiler_home.Dockerfile')
    dfp.content = template_home.format(**kwargs)
Example no. 51
0
def command_sanity(args):
    "Implements the sanity subcommand"
    # Sanity check should:
    #   - Check docker versions
    #   - Check for existence of entrypoint (that isn't /bin/bash)
    #   - Check over copy commands // make sure they look reasonable.
    #   - Check the entrypoint if it's a shell script for keywords like "sudo"
    #     or chroot.
    if not args.skip_environment:
        logging.info("Checking local docker demon...")
        conn = utils.docker_client(args)
        docker_version = conn.version()
        logging.info("Docker version: %s", docker_version["Version"])

    if "docker_file" in args and args.docker_file is not None:
        try:
            docker_file = DockerfileParser(args.docker_file)
        except:
            logging.error("Could not parse Dockerfile at: %s",
                          args.docker_file)
        else:
            structure = docker_file.structure
            seen_entrypoint = False
            for cmd in structure:
                if cmd['instruction'].lower() == 'copy':
                    if not cmd['value'].startswith('/'):
                        logging.warn(
                            'Line %(lineno)s: Copy destination should always '
                            'start with a /.', {
                                "lineno": cmd['startline'],
                            })
                if cmd['instruction'].lower() == 'from':
                    if "ubuntu" in cmd['value']:
                        logging.info(
                            'Line %(lineno)s: We recommend using '
                            'debian, or other smaller base images.', {
                                "lineno": cmd['startline'],
                            })
                if cmd['instruction'].lower() == 'entrypoint':
                    if seen_entrypoint:
                        logging.warn(
                            'Line %(lineno)s: Re-defining entrypoint '
                            'of container.', {
                                "lineno": cmd['startline'],
                            })
                    seen_entrypoint = True
                    if 'bash' in cmd['value']:
                        logging.warn(
                            'Line %(lineno)s: Please mark your grading script '
                            'or binary as the ENTRYPOINT, and not bash', {
                                'lineno': cmd['startline'],
                            })
                if cmd['instruction'].lower() == 'expose':
                    logging.warn(
                        'Line %(lineno)s: EXPOSE commands do not work for '
                        'graders', {
                            'lineno': cmd['startline'],
                        })
                if cmd['instruction'].lower() == 'env':
                    logging.warn(
                        'Line %(lineno)s: ENV-based environment variables are '
                        'stripped in the production environment for security '
                        'reasons. Please set any environment variables you '
                        'need in your grading script.', {
                            'lineno': cmd['startline'],
                        })
                if cmd['instruction'].lower() == 'volume':
                    logging.warn(
                        'Line %(lineno)s: VOLUME commands are stripped in '
                        'the production environment, and will likely not work '
                        'as expected.', {
                            'lineno': cmd['startline'],
                        })
            if not seen_entrypoint:
                logging.warn('Your Dockerfile must define an ENTRYPOINT.')
    else:
        logging.info("No Dockerfile provided... skipping file checks.")
Example no. 52
0
def get_repo_info(git_uri, git_ref, git_branch=None):
    with checkout_git_repo(git_uri, git_ref, git_branch) as code_dir:
        dfp = DockerfileParser(os.path.join(code_dir), cache_content=True)
        config = RepoConfiguration(dir_path=code_dir)
        tags_config = AdditionalTagsConfig(dir_path=code_dir)
    return RepoInfo(dfp, config, tags_config)
Example no. 53
0
def write_decoupled_pricing_circe_compute_home_docker(**kwargs):
    """
        Function to Generate the Dockerfile of the home/master node of CIRCE
    """
    dfp = DockerfileParser(path='home_compute.Dockerfile')
    dfp.content = template_home_compute.format(**kwargs)
Example no. 54
0
def write_decoupled_pricing_circe_compute_worker_docker(**kwargs):
    """
        Function to Generate the Dockerfile of the worker nodes
    """
    dfp = DockerfileParser(path='worker_compute.Dockerfile')
    dfp.content = template_worker_compute.format(**kwargs)
    def run(self):
        """
        run the plugin
        """
        try:
            config = self.workflow.base_image_inspect["Config"]
        except (AttributeError, TypeError):
            message = "base image was not inspected"
            self.log.error(message)
            raise RuntimeError(message)
        else:
            base_image_labels = config["Labels"] or {}

        dockerfile = DockerfileParser(self.workflow.builder.df_path)
        lines = dockerfile.lines

        self.add_aliases(base_image_labels, dockerfile.labels, self.labels)

        # correct syntax is:
        #   LABEL "key"="value" "key2"="value2"

        # Make sure to escape '\' and '"' characters.
        try:
            # py3
            env_trans = str.maketrans({'\\': '\\\\', '"': '\\"'})
        except AttributeError:
            # py2
            env_trans = None

        def escape(s):
            if env_trans:
                return s.translate(env_trans)
            return s.replace('\\', '\\\\').replace('"', '\\"')

        labels = []
        for key, value in self.labels.items():
            try:
                base_image_value = base_image_labels[key]
            except KeyError:
                self.log.info("label %r not present in base image", key)
            else:
                if base_image_value == value:
                    self.log.info("label %r is already set to %r", key, value)
                    continue
                else:
                    self.log.info("base image has label %r set to %r", key,
                                  base_image_value)
                    if key in self.dont_overwrite:
                        self.log.info("denying overwrite of label %r", key)
                        continue

            label = '"%s"="%s"' % (escape(key), escape(value))
            self.log.info("setting label %r", label)
            labels.append(label)

        content = ""
        if labels:
            content = 'LABEL ' + " ".join(labels)
            # put labels at the end of dockerfile (since they change metadata and do not interact
            # with FS, this should cause no harm)
            lines.append('\n' + content + '\n')
            dockerfile.lines = lines

        return content
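# The quoting rule above in isolation: backslashes and double quotes must be
# escaped before a value can be embedded in a LABEL "key"="value" instruction.
def escape(s):
    return s.replace('\\', '\\\\').replace('"', '\\"')

print('LABEL "%s"="%s"' % (escape('description'), escape('say "hi" \\ bye')))
# -> LABEL "description"="say \"hi\" \\ bye"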
Example no. 56
0
    def extract_kube_env_vars(self) -> Dict[str, str]:
        """
        Analyzes the source_base_dir for the hyperkube Dockerfile in which the release's k8s version
        is defined. Side effect is cloning distgit
        and upstream source if it has not already been done.
        :return: A Dict of environment variables that should be added to the Dockerfile / rpm spec.
                Variables like KUBE_GIT_VERSION, KUBE_GIT_COMMIT, KUBE_GIT_MINOR, ...
                May be empty if there is no kube information in the source dir.
        """
        envs = dict()
        upstream_source_path: pathlib.Path = pathlib.Path(self.runtime.resolve_source(self))
        if not upstream_source_path:
            # distgit only. Return empty.
            return envs

        with Dir(upstream_source_path):
            out, _ = exectools.cmd_assert(["git", "rev-parse", "HEAD"])
            source_full_sha = out

        use_path = None
        path_4x = upstream_source_path.joinpath('openshift-hack/images/hyperkube/Dockerfile.rhel')  # for >= 4.6: https://github.com/openshift/kubernetes/blob/fcff70a54d3f0bde19e879062e8f1489ba5d0cb0/openshift-hack/images/hyperkube/Dockerfile.rhel#L16
        if path_4x.exists():
            use_path = path_4x

        path_3_11 = upstream_source_path.joinpath('images/hyperkube/Dockerfile.rhel')  # for 3.11: https://github.com/openshift/ose/blob/master/images/hyperkube/Dockerfile.rhel
        if not use_path and path_3_11.exists():
            use_path = path_3_11

        kube_version_fields = []
        if use_path:
            dfp = DockerfileParser(cache_content=True, fileobj=io.BytesIO(use_path.read_bytes()))
            build_versions = dfp.labels.get('io.openshift.build.versions', None)
            if not build_versions:
                raise IOError(f'Unable to find io.openshift.build.versions label in {str(use_path)}')

            # Find something like kubernetes=1.22.1 and extract version as group
            m = re.match(r"^.*[^\w]*kubernetes=([\d.]+).*", build_versions)
            if not m:
                raise IOError(f'Unable to find `kubernetes=...` in io.openshift.build.versions label from {str(use_path)}')

            base_kube_version = m.group(1).lstrip('v')
            kube_version_fields = base_kube_version.split('.')  # 1.17.1 => [ '1', '17', '1']

            # upstream kubernetes creates a tag for each version. Go find its sha.
            rc, out, err = exectools.cmd_gather(f'git ls-remote https://github.com/kubernetes/kubernetes v{base_kube_version}')
            out = out.strip()
            if rc == 0 and out:
                # Expecting something like 'a26dc584ac121d68a8684741bce0bcba4e2f2957	refs/tags/v1.19.0-rc.2'
                kube_commit_hash = out.split()[0]
            else:
                # That's strange, but let's not kill the build for it.  Poke in our repo's hash.
                self.logger.warning(f'Unable to find upstream git tag v{base_kube_version} in https://github.com/kubernetes/kubernetes')
                kube_commit_hash = source_full_sha

        if kube_version_fields:
            # For historical consistency with tito's flow, we add +OS_GIT_COMMIT[:7] to the kube version
            envs['KUBE_GIT_VERSION'] = f"v{'.'.join(kube_version_fields)}+{source_full_sha[:7]}"
            envs['KUBE_GIT_MAJOR'] = '0' if len(kube_version_fields) < 1 else kube_version_fields[0]
            godep_kube_minor = '0' if len(kube_version_fields) < 2 else kube_version_fields[1]
            envs['KUBE_GIT_MINOR'] = f'{godep_kube_minor}+'  # For historical reasons, append a '+' since OCP patches its vendored kube.
            envs['KUBE_GIT_COMMIT'] = kube_commit_hash
            envs['KUBE_GIT_TREE_STATE'] = 'clean'
        elif self.name in ('openshift-enterprise-hyperkube', 'openshift', 'atomic-openshift'):
            self.logger.critical(f'Unable to acquire KUBE vars for {self.name}. This must be fixed or platform addons can break: https://bugzilla.redhat.com/show_bug.cgi?id=1861097')
            raise IOError(f'Unable to determine KUBE vars for {self.name}')

        return envs
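# The version extraction above in isolation: pull the kubernetes version out
# of the io.openshift.build.versions label (the label value is illustrative).
import re

build_versions = 'kubernetes=1.22.1 crio=1.22'
m = re.match(r"^.*[^\w]*kubernetes=([\d.]+).*", build_versions)
base_kube_version = m.group(1).lstrip('v')
print(base_kube_version.split('.'))   # ['1', '22', '1']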
def set_initial_release(df_path, branch):
    parser = DockerfileParser(df_path)
    oldrelease = parser.labels.get("Release", "1")
    newrelease = "{}.{}.iteration001".format(oldrelease, branch)
    parser.labels["Release"] = newrelease
    return newrelease
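# A minimal usage sketch (the path and branch name are hypothetical): bump the
# Release label of a rebased Dockerfile and get the new value back.
new_release = set_initial_release('distgit/Dockerfile', 'rhaos-4.9-rhel-8')
print(new_release)   # e.g. '1.rhaos-4.9-rhel-8.iteration001'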
#!/usr/bin/env python

import json
import re
import glob
import os
import markdown
from pprint import pprint
from dockerfile_parse import DockerfileParser

apps = []

dfp = DockerfileParser()

# Find all Dockerfiles

for dockerfile in glob.glob('../docker-images/*/Dockerfile', recursive=True):
    app_name = dockerfile.split("/")[-2]
    print("Found " + app_name)
    readme_file = os.path.dirname(dockerfile) + "/README.md"

    file = open(dockerfile, "r")

    dfp.content = file.read()

    readme = open(readme_file, "r")
    readme_text = readme.read()

    labels = dfp.labels

    app = {}
Example no. 59
0
def images_streams_gen_buildconfigs(runtime, streams, output, as_user, apply, live_test_mode):
    """
    ART has a mandate to make "ART equivalent" images available upstream for CI workloads. This enables
    CI to compile with the same golang version ART is using and use identical UBI8 images, etc. To accomplish
    this, streams.yml contains metadata which is extraneous to the product build, but critical to enable
    a high fidelity CI signal.
    It may seem at first that all we would need to do is mirror the internal brew images we use
    somewhere accessible by CI, but it is not that simple:
    1. When a CI build yum installs, it needs to pull RPMs from an RPM mirroring service that runs in
       CI. That mirroring service subsequently pulls and caches files ART syncs using reposync.
    2. There is a variation of simple golang builders that CI uses to compile test cases. These
       images are configured in ci-operator config's 'build_root' and they are used to build
       and run test cases. Sometimes called 'CI release' image, these images contain tools that
       are not part of the typical golang builder (e.g. tito).
    Both of these differences require us to 'transform' the image ART uses in brew into an image compatible
    for use in CI. A challenge to this transformation is that it must be performed in the CI context
    as it depends on services only available in CI (e.g. base-4-6-rhel8.ocp.svc is used to
    find the current yum repo configuration which should be used).
    To accomplish that, we don't build the images ourselves. We establish OpenShift buildconfigs on the CI
    cluster which process the intermediate images we push into the final, CI-consumable image.
    These buildconfigs are generated dynamically by this sub-verb.
    The verb will also create a daemonset for the group on the CI cluster. This daemonset overcomes
    a bug in OpenShift 3.11 where the kubelet can garbage collect an image that the build process
    is about to use (because the kubelet & build do not communicate). The daemonset forces the kubelet
    to know the image is in use. These daemonsets can likely be eliminated when CI infra moves fully to
    4.x.
    """
    runtime.initialize(clone_distgits=False, clone_source=False)
    runtime.assert_mutation_is_permitted()

    # Record whether this is for all streams or just user specified
    all_streams = not streams

    if not streams:
        # If not specified, use all.
        streams = runtime.get_stream_names()

    transform_rhel_7_base_repos = 'rhel-7/base-repos'
    transform_rhel_8_base_repos = 'rhel-8/base-repos'
    transform_rhel_7_golang = 'rhel-7/golang'
    transform_rhel_8_golang = 'rhel-8/golang'
    transform_rhel_7_ci_build_root = 'rhel-7/ci-build-root'
    transform_rhel_8_ci_build_root = 'rhel-8/ci-build-root'

    # The set of valid transforms
    transforms = set([
        transform_rhel_7_base_repos,
        transform_rhel_8_base_repos,
        transform_rhel_7_golang,
        transform_rhel_8_golang,
        transform_rhel_7_ci_build_root,
        transform_rhel_8_ci_build_root,
    ])

    major = runtime.group_config.vars['MAJOR']
    minor = runtime.group_config.vars['MINOR']

    rpm_repos_conf = runtime.group_config.repos or {}

    group_label = runtime.group_config.name
    if live_test_mode:
        group_label += '.test'

    buildconfig_definitions = []
    ds_container_definitions = []
    streams_config = runtime.streams
    for stream in streams:
        if streams_config[stream] is Missing:
            raise IOError(f'Did not find stream {stream} in streams.yml for this group')

        config = streams_config[stream]

        transform = config.transform
        if transform is Missing:
            # No buildconfig is necessary
            continue

        if transform not in transforms:
            raise IOError(f'Unable to render buildconfig for stream {stream} - transform {transform} not found within {transforms}')

        upstream_dest = config.upstream_image
        upstream_intermediate_image = config.upstream_image_base
        if upstream_dest is Missing or upstream_intermediate_image is Missing:
            raise IOError(f'Unable to render buildconfig for stream {stream} - you must define upstream_image_base AND upstream_image')

        # split a pullspec like registry.svc.ci.openshift.org/ocp/builder:rhel-8-golang-openshift-{MAJOR}.{MINOR}.art
        # into  OpenShift namespace, imagestream, and tag
        _, intermediate_ns, intermediate_istag = upstream_intermediate_image.rsplit('/', maxsplit=2)
        if live_test_mode:
            intermediate_istag += '.test'
        intermediate_imagestream, intermediate_tag = intermediate_istag.split(':')

        _, dest_ns, dest_istag = upstream_dest.rsplit('/', maxsplit=2)
        if live_test_mode:
            dest_istag += '.test'
        dest_imagestream, dest_tag = dest_istag.split(':')

        python_file_dir = os.path.dirname(__file__)

        # should align with files like: doozerlib/cli/streams_transforms/rhel-7/base-repos
        transform_template = os.path.join(python_file_dir, 'streams_transforms', transform, 'Dockerfile')
        with open(transform_template, mode='r', encoding='utf-8') as tt:
            transform_template_content = tt.read()

        dfp = DockerfileParser(cache_content=True, fileobj=io.BytesIO())
        dfp.content = transform_template_content

        # Make sure that upstream images can discern they are building in CI with ART equivalent images
        dfp.envs['OPENSHIFT_CI'] = 'true'

        dfp.labels['io.k8s.display-name'] = f'{dest_imagestream}-{dest_tag}'
        dfp.labels['io.k8s.description'] = f'ART equivalent image {group_label}-{stream} - {transform}'

        def add_localdev_repo_profile(profile):
            """
            The images we push to CI are used in two contexts:
            1. In CI builds, running on the CI clusters.
            2. In local development (e.g. docker build).
            This method enables the latter. If a developer is connected to the VPN,
            they will not be able to resolve RPMs through the RPM mirroring service running
            on CI, but they will be able to pull RPMs directly from the sources ART does.
            Since skip_if_unavailable is True globally, it doesn't matter if they can't be
            accessed via CI.
            """
            for repo_name in rpm_repos_conf.keys():
                repo_desc = rpm_repos_conf[repo_name]
                localdev_repo_name = f'localdev-{repo_name}'
                repo_conf = repo_desc.conf
                ci_alignment = repo_conf.ci_alignment
                if ci_alignment.localdev.enabled and profile in ci_alignment.profiles:
                    # CI only really deals with x86_64 at this time.
                    if repo_conf.baseurl.unsigned:
                        x86_64_url = repo_conf.baseurl.unsigned.x86_64
                    else:
                        x86_64_url = repo_conf.baseurl.x86_64
                    if not x86_64_url:
                        raise IOError(f'Expected x86_64 baseurl for repo {repo_name}')
                    dfp.add_lines(f"RUN echo -e '[{localdev_repo_name}]\\nname = {localdev_repo_name}\\nid = {localdev_repo_name}\\nbaseurl = {x86_64_url}\\nenabled = 1\\ngpgcheck = 0\\n' > /etc/yum.repos.d/{localdev_repo_name}.repo")

        if transform == transform_rhel_8_base_repos or config.transform == transform_rhel_8_golang:
            # The repos transform creates a build config that will layer the base image with CI-appropriate yum
            # repository definitions.
            dfp.add_lines(f'RUN rm -rf /etc/yum.repos.d/*.repo && curl http://base-{major}-{minor}-rhel8.ocp.svc > /etc/yum.repos.d/ci-rpm-mirrors.repo')

            # Allow the base repos to be used BEFORE art begins mirroring 4.x to openshift mirrors.
            # This allows us to establish these locations later -- only disrupting CI for those
            # components that actually need reposync'd RPMs from the mirrors.
            dfp.add_lines('RUN yum config-manager --setopt=skip_if_unavailable=True --save')
            add_localdev_repo_profile('el8')

        if transform == transform_rhel_7_base_repos or config.transform == transform_rhel_7_golang:
            # The repos transform creates a build config that will layer the base image with CI-appropriate yum
            # repository definitions.
            dfp.add_lines(f'RUN rm -rf /etc/yum.repos.d/*.repo && curl http://base-{major}-{minor}.ocp.svc > /etc/yum.repos.d/ci-rpm-mirrors.repo')
            # Allow the base repos to be used BEFORE art begins mirroring 4.x to openshift mirrors.
            # This allows us to establish these locations later -- only disrupting CI for those
            # components that actually need reposync'd RPMs from the mirrors.
            dfp.add_lines("RUN yum-config-manager --save '--setopt=*.skip_if_unavailable=True'")
            add_localdev_repo_profile('el7')

        # We've arrived at a Dockerfile.
        dockerfile_content = dfp.content

        # Now to create a buildconfig for it.
        buildconfig = {
            'apiVersion': 'v1',
            'kind': 'BuildConfig',
            'metadata': {
                'name': f'{dest_imagestream}-{dest_tag}--art-builder',
                'namespace': 'ci',
                'labels': {
                    'art-builder-group': group_label,
                    'art-builder-stream': stream,
                },
                'annotations': {
                    'description': 'Generated by the ART pipeline by doozer. Processes raw ART images into ART equivalent images for CI.'
                }
            },
            'spec': {
                'failedBuildsHistoryLimit': 2,
                'output': {
                    'to': {
                        'kind': 'ImageStreamTag',
                        'namespace': dest_ns,
                        'name': dest_istag
                    }
                },
                'source': {
                    'dockerfile': dockerfile_content,
                    'type': 'Dockerfile'
                },
                'strategy': {
                    'dockerStrategy': {
                        'from': {
                            'kind': 'ImageStreamTag',
                            'name': intermediate_istag,
                            'namespace': intermediate_ns,
                        },
                        'imageOptimizationPolicy': 'SkipLayers',
                    },
                },
                'successfulBuildsHistoryLimit': 2,
                'triggers': [{
                    'imageChange': {},
                    'type': 'ImageChange'
                }]
            }
        }

        buildconfig_definitions.append(buildconfig)

        # define a daemonset container that will keep this image running on all nodes so that it will
        # not be garbage collected by the kubelet in 3.11.
        ds_container_definitions.append({
            "image": f"registry.svc.ci.openshift.org/{dest_ns}/{dest_istag}",
            "command": [
                "/bin/bash",
                "-c",
                "#!/bin/bash\nset -euo pipefail\ntrap 'jobs -p | xargs -r kill || true; exit 0' TERM\nwhile true; do\n  sleep 600 &\n  wait $!\ndone\n"
            ],
            "name": f"{dest_ns}-{dest_imagestream}-{dest_tag}".replace('.', '-'),
            "resources": {
                    "requests": {
                        "cpu": "50m"
                    }
            },
            "imagePullPolicy": "Always"
        })

    ds_name = 'art-managed-' + group_label + '-dont-gc-me-bro'
    daemonset_definition = {
        "kind": "DaemonSet",
        "spec": {
            "revisionHistoryLimit": 1,
            "template": {
                "spec": {
                    "containers": ds_container_definitions
                },
                "metadata": {
                    "labels": {
                        "app": ds_name
                    }
                }
            },
            "selector": {
                "matchLabels": {
                    "app": ds_name
                }
            },
            "templateGeneration": 1,
            "updateStrategy": {
                "rollingUpdate": {
                    "maxUnavailable": "50%"
                },
                "type": "RollingUpdate"
            }
        },
        "apiVersion": "apps/v1",
        "metadata": {
            "labels": {
                "app": ds_name,
                'art-builder-group': runtime.group_config.name,
            },
            "namespace": "ci",
            "name": ds_name
        }
    }

    with open(output, mode='w+', encoding='utf-8') as f:
        objects = list()
        objects.extend(buildconfig_definitions)
        if buildconfig_definitions and all_streams:
            # Don't update the daemonset unless all streams are accounted for
            objects.append(daemonset_definition)
        yaml.dump_all(objects, f, default_flow_style=False)

    if apply:
        if buildconfig_definitions:
            print('Applying buildconfigs...')
            cmd = f'oc apply -f {output}'
            if as_user:
                cmd += f' --as {as_user}'
            exectools.cmd_assert(cmd)
        else:
            print('No buildconfigs were generated; skipping apply.')
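# The pullspec handling above in isolation: an upstream image reference is
# split into OpenShift namespace, imagestream and tag (pullspec illustrative).
pullspec = 'registry.svc.ci.openshift.org/ocp/builder:rhel-8-golang-openshift-4.9.art'
_, namespace, istag = pullspec.rsplit('/', maxsplit=2)
imagestream, tag = istag.split(':')
print(namespace, imagestream, tag)   # ocp builder rhel-8-golang-openshift-4.9.art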
Example no. 60
0
for crt_id in ids:
    #this_tools_labels = list()
    this_tools_labels = {}
    ##Adding the tool's name to the labels
    #this_tools_labels.append(["software",crt_id.split("/")[-1]])
    this_tools_labels["software"] = crt_id.split("/")[-1]
    print("#####Current tool: " + repr(this_tools_labels["software"]) +
          "#####")
    crt_Dockerfile = get_tool_dockerfile(DOCKERFILESROOt, crt_id)
    lines = crt_Dockerfile.split("\n")
    ##Extracting what we can from the comments
    for line in lines:
        if re.search(cmt_line_re, line):
            crt_label = parse_comment_line(line, METADESC)
            if not crt_label == None:
                #this_tools_labels.append(crt_label)
                this_tools_labels[crt_label[0]] = crt_label[1]
    ##Parsing the Dockerfile with DockerfileParser library
    dfp = DockerfileParser()
    dfp.content = crt_Dockerfile
    ##Extracting labels coming from instructions
    for crt_meta in METADESC:
        if not crt_meta.get("context") == "comment":
            crt_label = look_for_this_meta(crt_meta, dfp)
            if not crt_label == None:
                #this_tools_labels.append(crt_label)
                this_tools_labels[crt_label[0]] = crt_label[1]
    #print (this_tools_labels)
    output_dockerfile(this_tools_labels, dfp, OUTROOT)
    print("##########")