Example #1
    def test_select_comments(self):
        c_api = CommentAPI(self.api.apiurl)
        staging_b = 'openSUSE:Factory:Staging:B'
        comments = c_api.get_comments(project_name=staging_b)

        # First select
        self.assertEqual(True, SelectCommand(self.api, staging_b).perform(['gcc', 'wine']))
        first_select_comments = c_api.get_comments(project_name=staging_b)
        last_id = sorted(first_select_comments.keys())[-1]
        first_select_comment = first_select_comments[last_id]
        # Only one comment is added
        self.assertEqual(len(first_select_comments), len(comments) + 1)
        # With the right content
        self.assertTrue('request#123 for package gcc submitted by Admin' in first_select_comment['comment'])

        # Second select
        self.assertEqual(True, SelectCommand(self.api, staging_b).perform(['puppet']))
        second_select_comments = c_api.get_comments(project_name=staging_b)
        last_id = sorted(second_select_comments.keys())[-1]
        second_select_comment = second_select_comments[last_id]
        # The number of comments increased by one
        self.assertEqual(len(second_select_comments) - 1, len(first_select_comments))
        self.assertNotEqual(second_select_comment['comment'], first_select_comment['comment'])
        # The new comment contains the new request, but not the old one
        self.assertFalse('request#123 for package gcc submitted by Admin' in second_select_comment['comment'])
        self.assertTrue('added request#321 for package puppet submitted by Admin' in second_select_comment['comment'])
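The tests above fetch the newest comment by sorting the keys of the dict returned by get_comments(). A minimal helper sketch of that recurring pattern, assuming (as the tests do) that comment ids sort in creation order:

def latest_comment(comments):
    # comments: dict mapping comment id -> comment, as returned by
    # CommentAPI.get_comments()
    last_id = sorted(comments.keys())[-1]
    return comments[last_id]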
Example #2
    def test_select_comments(self):
        self.wf.setup_rings()

        staging_b = self.wf.create_staging('B', freeze=True)

        c_api = CommentAPI(self.wf.api.apiurl)
        comments = c_api.get_comments(project_name=staging_b.name)

        r1 = self.wf.create_submit_request('devel:wine', 'wine')
        r2 = self.wf.create_submit_request('devel:gcc', 'gcc')

        # First select
        self.assertEqual(True, SelectCommand(self.wf.api, staging_b.name).perform(['gcc', 'wine']))
        first_select_comments = c_api.get_comments(project_name=staging_b.name)
        last_id = sorted(first_select_comments.keys())[-1]
        first_select_comment = first_select_comments[last_id]
        # Only one comment is added
        self.assertEqual(len(first_select_comments), len(comments) + 1)
        # With the right content
        expected = 'request#{} for package gcc submitted by Admin'.format(r2.reqid)
        self.assertTrue(expected in first_select_comment['comment'])

        # Second select
        r3 = self.wf.create_submit_request('devel:gcc', 'gcc8')
        self.assertEqual(True, SelectCommand(self.wf.api, staging_b.name).perform(['gcc8']))
        second_select_comments = c_api.get_comments(project_name=staging_b.name)
        last_id = sorted(second_select_comments.keys())[-1]
        second_select_comment = second_select_comments[last_id]
        # The number of comments increased by one
        self.assertEqual(len(second_select_comments) - 1, len(first_select_comments))
        self.assertNotEqual(second_select_comment['comment'], first_select_comment['comment'])
        # The new comment contains the new request, but not the old one
        self.assertFalse('request#{} for package gcc submitted by Admin'.format(r2.reqid) in second_select_comment['comment'])
        self.assertTrue('added request#{} for package gcc8 submitted by Admin'.format(r3.reqid) in second_select_comment['comment'])
Example #3
    def test_select_comments(self):
        c_api = CommentAPI(self.api.apiurl)
        staging_b = 'openSUSE:Factory:Staging:B'
        comments = c_api.get_comments(project_name=staging_b)

        # First select
        self.assertEqual(True, SelectCommand(self.api).perform(staging_b, ['gcc', 'wine']))
        first_select_comments = c_api.get_comments(project_name=staging_b)
        last_id = sorted(first_select_comments.keys())[-1]
        first_select_comment = first_select_comments[last_id]
        # Only one comment is added
        self.assertEqual(len(first_select_comments), len(comments) + 1)
        # With the right content
        self.assertTrue('Request#123 for package gcc submitted by @Admin' in first_select_comment['comment'])

        # Second select
        self.assertEqual(True, SelectCommand(self.api).perform(staging_b, ['puppet']))
        second_select_comments = c_api.get_comments(project_name=staging_b)
        last_id = sorted(second_select_comments.keys())[-1]
        second_select_comment = second_select_comments[last_id]
        # The number of comments remains, but they are different
        self.assertEqual(len(second_select_comments), len(first_select_comments))
        self.assertNotEqual(second_select_comment['comment'], first_select_comment['comment'])
        # The new comment contains both old and new information
        self.assertTrue('Request#123 for package gcc submitted by @Admin' in second_select_comment['comment'])
        self.assertTrue('Request#321 for package puppet submitted by @Admin' in second_select_comment['comment'])
Example #4
    def test_select_comments(self):
        c_api = CommentAPI(self.api.apiurl)
        staging_b = 'openSUSE:Factory:Staging:B'
        comments = c_api.get_comments(project_name=staging_b)

        # First select
        self.assertEqual(
            True,
            SelectCommand(self.api, staging_b).perform(['gcc', 'wine']))
        first_select_comments = c_api.get_comments(project_name=staging_b)
        last_id = sorted(first_select_comments.keys())[-1]
        first_select_comment = first_select_comments[last_id]
        # Only one comment is added
        self.assertEqual(len(first_select_comments), len(comments) + 1)
        # With the right content
        self.assertTrue('Request#123 for package gcc submitted by @Admin' in
                        first_select_comment['comment'])

        # Second select
        self.assertEqual(
            True,
            SelectCommand(self.api, staging_b).perform(['puppet']))
        second_select_comments = c_api.get_comments(project_name=staging_b)
        last_id = sorted(second_select_comments.keys())[-1]
        second_select_comment = second_select_comments[last_id]
        # The number of comments remains, but they are different
        self.assertEqual(len(second_select_comments),
                         len(first_select_comments))
        self.assertNotEqual(second_select_comment['comment'],
                            first_select_comment['comment'])
        # The new comment contains both old and new information
        self.assertTrue('Request#123 for package gcc submitted by @Admin' in
                        second_select_comment['comment'])
        self.assertTrue('Request#321 for package puppet submitted by @Admin' in
                        second_select_comment['comment'])
Example #5
def remind_comment(apiurl, repeat_age, request_id, project, package=None):
    comment_api = CommentAPI(apiurl)
    comments = comment_api.get_comments(request_id=request_id)
    comment, _ = comment_api.comment_find(comments, BOT_NAME)

    if comment:
        delta = datetime.utcnow() - comment['when']
        if delta.days < repeat_age:
            print('  skipping due to previous reminder from {} days ago'.format(delta.days))
            return

        # Repeat notification so remove old comment.
        try:
            comment_api.delete(comment['id'])
        except HTTPError as e:
            if e.code == 403:
                # Gracefully skip when previous reminder was by another user.
                print('  unable to remove previous reminder')
                return
            raise e

    userids = sorted(maintainers_get(apiurl, project, package))
    if len(userids):
        users = ['@' + userid for userid in userids]
        message = '{}: {}'.format(', '.join(users), REMINDER)
    else:
        message = REMINDER
    print('  ' + message)
    message = comment_api.add_marker(message, BOT_NAME)
    comment_api.add_comment(request_id=request_id, comment=message)
Example #6
def remind_comment(apiurl, repeat_age, request_id, project, package=None):
    comment_api = CommentAPI(apiurl)
    comments = comment_api.get_comments(request_id=request_id)
    comment, _ = comment_api.comment_find(comments, BOT_NAME)

    if comment:
        delta = datetime.utcnow() - comment['when']
        if delta.days < repeat_age:
            print(
                '  skipping due to previous reminder from {} days ago'.format(
                    delta.days))
            return

        # Repeat notification so remove old comment.
        try:
            comment_api.delete(comment['id'])
        except HTTPError as e:
            if e.code == 403:
                # Gracefully skip when previous reminder was by another user.
                print('  unable to remove previous reminder')
                return
            raise e

    userids = sorted(maintainers_get(apiurl, project, package))
    if len(userids):
        users = ['@' + userid for userid in userids]
        message = '{}: {}'.format(', '.join(users), REMINDER)
    else:
        message = REMINDER
    print('  ' + message)
    message = comment_api.add_marker(message, BOT_NAME)
    comment_api.add_comment(request_id=request_id, comment=message)
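A hedged usage sketch for remind_comment(); the apiurl, request id, and project below are hypothetical placeholders, while BOT_NAME and REMINDER are the module-level constants the function already relies on:

# Re-post the reminder only if the previous one is at least 7 days old.
remind_comment('https://api.opensuse.org', 7, '123456', 'openSUSE:Factory')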
Example #7
    def test_accept_comments(self):
        c_api = CommentAPI(self.api.apiurl)
        staging_c = 'openSUSE:Factory:Staging:C'
        comments = c_api.get_comments(project_name=staging_c)

        # Accept staging C (containing apparmor and mariadb)
        self.assertEqual(True, AcceptCommand(self.api).perform(staging_c))

        # Comments are cleared up
        accepted_comments = c_api.get_comments(project_name=staging_c)
        self.assertNotEqual(len(comments), 0)
        self.assertEqual(len(accepted_comments), 0)

        # But the comment was written at some point
        self.assertEqual(len(self.obs.comment_bodies), 1)
        comment = self.obs.comment_bodies[0]
        self.assertTrue('The following packages have been submitted to openSUSE:Factory' in comment)
        self.assertTrue('apparmor' in comment)
        self.assertTrue('mariadb' in comment)
Example #8
def check_comment(apiurl, bot, **kwargs):
    if not len(kwargs):
        return False

    api = CommentAPI(apiurl)
    comments = api.get_comments(**kwargs)
    comment = api.comment_find(comments, bot)[0]
    if comment:
        return (datetime.utcnow() - comment['when']).total_seconds()

    return False
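check_comment() returns the age in seconds of the newest comment left by the given bot, or False when no such comment (or no filter) exists. A hypothetical usage sketch:

# Only (re)post if the bot has not commented within the last day.
age = check_comment(apiurl, 'repo-checker', request_id='123456')
if age is False or age > 86400:
    post_comment()  # hypothetical helper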
Example #9
def check_comment(apiurl, bot, **kwargs):
    if not len(kwargs):
        return False

    api = CommentAPI(apiurl)
    comments = api.get_comments(**kwargs)
    comment = api.comment_find(comments, bot)[0]
    if comment:
        return (datetime.utcnow() - comment['when']).total_seconds()

    return False
Example #10
class TestAccept(unittest.TestCase):

    def setup_vcr(self):
        wf = OBSLocal.StagingWorkflow()
        wf.setup_rings()

        self.c_api = CommentAPI(wf.api.apiurl)

        staging_b = wf.create_staging('B', freeze=True)
        self.prj = staging_b.name

        self.winerq = wf.create_submit_request('devel:wine', 'wine', text='Hallo World')
        self.assertEqual(True, SelectCommand(wf.api, self.prj).perform(['wine']))
        self.comments = self.c_api.get_comments(project_name=self.prj)
        self.assertGreater(len(self.comments), 0)
        return wf

    def test_accept_comments(self):
        wf = self.setup_vcr()

        self.assertEqual(True, AcceptCommand(wf.api).perform(self.prj))

        # Comments are cleared up
        accepted_comments = self.c_api.get_comments(project_name=self.prj)
        self.assertEqual(len(accepted_comments), 0)

    def test_accept_final_comment(self):
        wf = self.setup_vcr()

        # snipe out cleanup to see the comments before the final countdown
        wf.api.staging_deactivate = MagicMock(return_value=True)

        self.assertEqual(True, AcceptCommand(wf.api).perform(self.prj))

        comments = self.c_api.get_comments(project_name=self.prj)
        self.assertGreater(len(comments), len(self.comments))

        # check which id was added
        new_id = (set(comments.keys()) - set(self.comments.keys())).pop()
        comment = comments[new_id]['comment']
        self.assertEqual('Project "{}" accepted. The following packages have been submitted to openSUSE:Factory: wine.'.format(self.prj), comment)
Example #11
    def test_accept_comments(self):
        c_api = CommentAPI(self.api.apiurl)
        staging_c = 'openSUSE:Factory:Staging:C'
        comments = c_api.get_comments(project_name=staging_c)

        # Accept staging C (containing apparmor and mariadb)
        self.assertEqual(True, AcceptCommand(self.api).perform(staging_c))

        # Comments are cleared up
        accepted_comments = c_api.get_comments(project_name=staging_c)
        self.assertNotEqual(len(comments), 0)
        self.assertEqual(len(accepted_comments), 0)

        # But the comment was written at some point
        self.assertEqual(len(self.obs.comment_bodies), 1)
        comment = self.obs.comment_bodies[0]
        self.assertTrue(
            'The following packages have been submitted to openSUSE:Factory' in
            comment)
        self.assertTrue('apparmor' in comment)
        self.assertTrue('mariadb' in comment)
Example #12
    def create_comments(self, state):
        comments = dict()
        for source, details in state['check'].items():
            rebuild = dateutil.parser.parse(details["rebuild"])
            if datetime.now() - rebuild < timedelta(days=2):
                self.logger.debug(f"Ignore {source} - problem too recent")
                continue
            _, _, arch, rpm = source.split('/')
            rpm = rpm.split(':')[0]
            comments.setdefault(rpm, {})
            comments[rpm][arch] = details['problem']

        url = makeurl(self.apiurl, ['comments', 'user'])
        root = ET.parse(http_GET(url)).getroot()
        for comment in root.findall('.//comment'):
            if comment.get('project') != self.project:
                continue
            if comment.get('package') in comments:
                continue
            self.logger.info("Removing comment for package {}".format(
                comment.get('package')))
            url = makeurl(self.apiurl, ['comment', comment.get('id')])
            http_DELETE(url)

        commentapi = CommentAPI(self.apiurl)
        MARKER = 'Installcheck'

        for package in comments:
            newcomment = ''
            for arch in sorted(comments[package]):
                newcomment += f"\n\n**Installcheck problems for {arch}**\n\n"
                for problem in sorted(comments[package][arch]):
                    newcomment += "+ " + problem + "\n"

            newcomment = commentapi.add_marker(newcomment.strip(), MARKER)
            oldcomments = commentapi.get_comments(project_name=self.project,
                                                  package_name=package)
            oldcomment, _ = commentapi.comment_find(oldcomments, MARKER)
            if oldcomment and oldcomment['comment'] == newcomment:
                continue

            if oldcomment:
                commentapi.delete(oldcomment['id'])
            self.logger.debug("Adding comment to {}/{}".format(
                self.project, package))
            commentapi.add_comment(project_name=self.project,
                                   package_name=package,
                                   comment=newcomment)
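The marker appended via add_marker() is what lets the bot later find and replace only its own comment. A sketch of the resulting comment body, assuming add_marker() appends an HTML comment carrying the marker string:

    **Installcheck problems for x86_64**

    + nothing provides libfoo needed by bar

    <!-- Installcheck -->

comment_find() then locates this marked comment on the next run, so the comment is deleted and re-posted only when the generated content actually changed.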
Example #13
    def update_status_comments(self, project, command):
        """
        Refresh the status comments, used for notification purposes, based on
        the current list of requests. To ensure that all involved users
        (and nobody else) get notified, old status comments are deleted and
        a new one is created.
        :param project: project name
        :param command: name of the command to include in the message
        """

        # TODO: we need to discuss the best way to keep track of status
        # comments. Right now they are marked with an initial markdown
        # comment. Maybe a cleaner approach would be to store something
        # like 'last_status_comment_id' in the pseudometa. But the current
        # OBS API for adding comments doesn't return the id of the created
        # comment.

        comment_api = CommentAPI(self.apiurl)

        comments = comment_api.get_comments(project_name=project)
        for comment in comments.values():
            # TODO: update the comment, removing just the user mentions,
            # instead of deleting the whole comment. But there is currently
            # no call in the OBS API to update a comment.
            if comment['comment'].startswith('<!--- osc staging'):
                comment_api.delete(comment['id'])
                break  # There can be only one! (if we keep deleting them)

        meta = self.get_prj_pseudometa(project)
        lines = ['<!--- osc staging %s --->' % command]
        lines.append('The list of requests tracked in %s has changed:\n' %
                     project)
        for req in meta['requests']:
            author = req.get('autor', None)
            if not author:
                # Old style metadata
                author = get_request(self.apiurl, str(req['id'])).get_creator()
            lines.append('  * Request#%s for package %s submitted by @%s' %
                         (req['id'], req['package'], author))
        msg = '\n'.join(lines)
        comment_api.add_comment(project_name=project, comment=msg)
Example #14
    def update_status_comments(self, project, command):
        """
        Refresh the status comments, used for notification purposes, based on
        the current list of requests. To ensure that all involved users
        (and nobody else) get notified, old status comments are deleted and
        a new one is created.
        :param project: project name
        :param command: name of the command to include in the message
        """

        # TODO: we need to discuss the best way to keep track of status
        # comments. Right now they are marked with an initial markdown
        # comment. Maybe a cleaner approach would be to store something
        # like 'last_status_comment_id' in the pseudometa. But the current
        # OBS API for adding comments doesn't return the id of the created
        # comment.

        comment_api = CommentAPI(self.apiurl)

        comments = comment_api.get_comments(project_name=project)
        for comment in comments.values():
            # TODO: update the comment, removing just the user mentions,
            # instead of deleting the whole comment. But there is currently
            # no call in the OBS API to update a comment.
            if comment['comment'].startswith('<!--- osc staging'):
                comment_api.delete(comment['id'])
                break  # There can be only one! (if we keep deleting them)

        meta = self.get_prj_pseudometa(project)
        lines = ['<!--- osc staging %s --->' % command]
        lines.append('The list of requests tracked in %s has changed:\n' % project)
        for req in meta['requests']:
            author = req.get('autor', None)
            if not author:
                # Old style metadata
                author = get_request(self.apiurl, str(req['id'])).get_creator()
            lines.append('  * Request#%s for package %s submitted by @%s' % (req['id'], req['package'], author))
        msg = '\n'.join(lines)
        comment_api.add_comment(project_name=project, comment=msg)
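For illustration, the comment produced by update_status_comments() renders roughly as follows (project, request numbers, and user names hypothetical):

    <!--- osc staging select --->
    The list of requests tracked in openSUSE:Factory:Staging:B has changed:

      * Request#123 for package gcc submitted by @Admin
      * Request#321 for package puppet submitted by @Admin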
Example #15
class InstallChecker(object):
    def __init__(self, api, config):
        self.api = api
        self.config = conf.config[api.project]
        self.logger = logging.getLogger('InstallChecker')
        self.commentapi = CommentAPI(api.apiurl)

        self.arch_whitelist = self.config.get('repo_checker-arch-whitelist')
        if self.arch_whitelist:
            self.arch_whitelist = set(self.arch_whitelist.split(' '))

        self.ring_whitelist = set(
            self.config.get('repo_checker-binary-whitelist-ring',
                            '').split(' '))

        self.cycle_packages = self.config.get('repo_checker-allowed-in-cycles')
        self.calculate_allowed_cycles()

        self.ignore_duplicated = set(
            self.config.get('installcheck-ignore-duplicated-binaries',
                            '').split(' '))
        self.ignore_conflicts = set(
            self.config.get('installcheck-ignore-conflicts', '').split(' '))
        self.ignore_deletes = str2bool(
            self.config.get('installcheck-ignore-deletes', 'False'))

    def check_required_by(self, fileinfo, provides, requiredby, built_binaries,
                          comments):
        if requiredby.get('name') in built_binaries:
            return True
        # extract >= and the like
        provide = provides.get('dep')
        provide = provide.split(' ')[0]
        comments.append('{} provides {} required by {}'.format(
            fileinfo.find('name').text, provide, requiredby.get('name')))
        url = self.api.makeurl([
            'build', self.api.project, self.api.cmain_repo, 'x86_64',
            '_repository', requiredby.get('name') + '.rpm'
        ], {'view': 'fileinfo_ext'})
        reverse_fileinfo = ET.parse(osc.core.http_GET(url)).getroot()
        for require in reverse_fileinfo.findall('requires_ext'):
            # extract >= and the like here too
            dep = require.get('dep').split(' ')[0]
            if dep != provide:
                continue
            for provided_by in require.findall('providedby'):
                if provided_by.get('name') in built_binaries:
                    continue
                comments.append('  also provided by {} -> ignoring'.format(
                    provided_by.get('name')))
                return True
        comments.append(
            'Error: missing alternative provides for {}'.format(provide))
        return False

    def check_delete_request(self, req, to_ignore, comments):
        package = req.get('package')
        if package in to_ignore or self.ignore_deletes:
            self.logger.info(
                'Delete request for package {} ignored'.format(package))
            return True

        built_binaries = set([])
        file_infos = []
        for fileinfo in fileinfo_ext_all(self.api.apiurl, self.api.project,
                                         self.api.cmain_repo, 'x86_64',
                                         package):
            built_binaries.add(fileinfo.find('name').text)
            file_infos.append(fileinfo)

        result = True
        for fileinfo in file_infos:
            for provides in fileinfo.findall('provides_ext'):
                for requiredby in provides.findall('requiredby[@name]'):
                    result = result and self.check_required_by(
                        fileinfo, provides, requiredby, built_binaries,
                        comments)

        what_depends_on = depends_on(self.api.apiurl, self.api.project,
                                     self.api.cmain_repo, [package], True)

        # filter out dependency on package itself (happens with eg
        # java bootstrapping itself with previous build)
        if package in what_depends_on:
            what_depends_on.remove(package)

        if len(what_depends_on):
            comments.append(
                '{} is still a build requirement of:\n\n- {}'.format(
                    package, '\n- '.join(sorted(what_depends_on))))
            return False

        return result

    def packages_to_ignore(self, project):
        comments = self.commentapi.get_comments(project_name=project)
        ignore_re = re.compile(r'^installcheck: ignore (?P<args>.*)$',
                               re.MULTILINE)

        # the last wins, for now we don't care who said it
        args = []
        for comment in comments.values():
            match = ignore_re.search(comment['comment'].replace('\r', ''))
            if not match:
                continue
            args = match.group('args').strip()
            # allow space and comma as separators
            args = args.replace(',', ' ').split(' ')
        return set(args)
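    # Illustration (not part of the original code): a project comment such as
    #   installcheck: ignore libfoo, libbar
    # makes packages_to_ignore() return {'libfoo', 'libbar'}; when several
    # such comments exist, the one parsed last wins.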

    def staging(self, project, force=False):
        api = self.api

        repository = self.api.cmain_repo

        # fetch the build ids at the beginning - mirroring takes a while
        buildids = {}
        try:
            architectures = self.target_archs(project, repository)
        except HTTPError as e:
            if e.code == 404:
                # adi disappear all the time, so don't worry
                return False
            raise e

        all_done = True
        for arch in architectures:
            pra = '{}/{}/{}'.format(project, repository, arch)
            buildid = self.buildid(project, repository, arch)
            if not buildid:
                self.logger.error('No build ID in {}'.format(pra))
                return False
            buildids[arch] = buildid
            url = self.report_url(project, repository, arch, buildid)
            try:
                root = ET.parse(osc.core.http_GET(url)).getroot()
                check = root.find('check[@name="installcheck"]/state')
                if check is not None and check.text != 'pending':
                    self.logger.info('{} already "{}", ignoring'.format(
                        pra, check.text))
                else:
                    all_done = False
            except HTTPError:
                self.logger.info('{} has no status report'.format(pra))
                all_done = False

        if all_done and not force:
            return True

        repository_pairs = repository_path_expand(api.apiurl, project,
                                                  repository)
        result_comment = []

        result = True
        to_ignore = self.packages_to_ignore(project)
        status = api.project_status(project)
        if status is None:
            self.logger.error('no project status for {}'.format(project))
            return False

        for req in status.findall('staged_requests/request'):
            if req.get('type') == 'delete':
                result = result and self.check_delete_request(
                    req, to_ignore, result_comment)

        for arch in architectures:
            # hit the first repository in the target project (if existent)
            target_pair = None
            directories = []
            for pair_project, pair_repository in repository_pairs:
                # ignore repositories only inherited for config
                if repository_arch_state(self.api.apiurl, pair_project,
                                         pair_repository, arch):
                    if not target_pair and pair_project == api.project:
                        target_pair = [pair_project, pair_repository]

                    directories.append(
                        mirror(self.api.apiurl, pair_project, pair_repository,
                               arch))

            if not api.is_adi_project(project):
                # For "leaky" ring packages in letter stagings, where the
                # repository setup does not include the target project, that are
                # not intended to to have all run-time dependencies satisfied.
                whitelist = self.ring_whitelist
            else:
                whitelist = set()

            whitelist |= to_ignore
            ignore_conflicts = self.ignore_conflicts | to_ignore

            check = self.cycle_check(project, repository, arch)
            if not check.success:
                self.logger.warning('Cycle check failed')
                result_comment.append(check.comment)
                result = False

            check = self.install_check(directories, arch, whitelist,
                                       ignore_conflicts)
            if not check.success:
                self.logger.warning('Install check failed')
                result_comment.append(check.comment)
                result = False

        duplicates = duplicated_binaries_in_repo(self.api.apiurl, project,
                                                 repository)
        # remove white listed duplicates
        for arch in list(duplicates):
            for binary in self.ignore_duplicated:
                duplicates[arch].pop(binary, None)
            if not len(duplicates[arch]):
                del duplicates[arch]
        if len(duplicates):
            self.logger.warning('Found duplicated binaries')
            result_comment.append(
                yaml.dump(duplicates, default_flow_style=False))
            result = False

        if result:
            self.report_state('success', self.gocd_url(), project, repository,
                              buildids)
        else:
            result_comment.insert(
                0, 'Generated from {}\n'.format(self.gocd_url()))
            self.report_state('failure',
                              self.upload_failure(project, result_comment),
                              project, repository, buildids)
            self.logger.warning('Not accepting {}'.format(project))
            return False

        return result

    def upload_failure(self, project, comment):
        print(project, '\n'.join(comment))
        url = self.api.makeurl(
            ['source', 'home:repo-checker', 'reports', project])
        osc.core.http_PUT(url, data='\n'.join(comment))

        url = self.api.apiurl.replace('api.', 'build.')
        return '{}/package/view_file/home:repo-checker/reports/{}'.format(
            url, project)

    def report_state(self, state, report_url, project, repository, buildids):
        architectures = self.target_archs(project, repository)
        for arch in architectures:
            self.report_pipeline(state, report_url, project, repository, arch,
                                 buildids[arch])

    def gocd_url(self):
        if not os.environ.get('GO_SERVER_URL'):
            # placeholder :)
            return 'http://stephan.kulow.org/'
        report_url = os.environ.get('GO_SERVER_URL').replace(':8154', '')
        return report_url + '/tab/build/detail/{}/{}/{}/{}/{}#tab-console'.format(
            os.environ.get('GO_PIPELINE_NAME'),
            os.environ.get('GO_PIPELINE_COUNTER'),
            os.environ.get('GO_STAGE_NAME'),
            os.environ.get('GO_STAGE_COUNTER'), os.environ.get('GO_JOB_NAME'))

    def buildid(self, project, repository, architecture):
        url = self.api.makeurl(['build', project, repository, architecture],
                               {'view': 'status'})
        root = ET.parse(osc.core.http_GET(url)).getroot()
        buildid = root.find('buildid')
        if buildid is None:
            return False
        return buildid.text

    def report_url(self, project, repository, architecture, buildid):
        return self.api.makeurl([
            'status_reports', 'built', project, repository, architecture,
            'reports', buildid
        ])

    def report_pipeline(self, state, report_url, project, repository,
                        architecture, buildid):
        url = self.report_url(project, repository, architecture, buildid)
        name = 'installcheck'
        xml = self.check_xml(report_url, state, name)
        try:
            osc.core.http_POST(url, data=xml)
        except HTTPError:
            print('failed to post status to ' + url)
            sys.exit(1)

    def check_xml(self, url, state, name):
        check = ET.Element('check')
        if url:
            se = ET.SubElement(check, 'url')
            se.text = url
        se = ET.SubElement(check, 'state')
        se.text = state
        se = ET.SubElement(check, 'name')
        se.text = name
        return ET.tostring(check)
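    # Illustration (not part of the original code): check_xml() serializes to
    # something like
    #   <check><url>REPORT_URL</url><state>success</state><name>installcheck</name></check>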

    def target_archs(self, project, repository):
        archs = target_archs(self.api.apiurl, project, repository)

        # Check for arch whitelist and use intersection.
        if self.arch_whitelist:
            archs = list(self.arch_whitelist.intersection(set(archs)))

        # Trick to prioritize x86_64.
        return sorted(archs, reverse=True)

    def install_check(self, directories, arch, whitelist, ignored_conflicts):
        self.logger.info('install check: start (whitelist:{})'.format(
            ','.join(whitelist)))
        parts = installcheck(directories, arch, whitelist, ignored_conflicts)
        if len(parts):
            header = '### [install check & file conflicts for {}]'.format(arch)
            return CheckResult(
                False,
                header + '\n\n' + ('\n' + ('-' * 80) + '\n\n').join(parts))

        self.logger.info('install check: passed')
        return CheckResult(True, None)

    def calculate_allowed_cycles(self):
        self.allowed_cycles = []
        if self.cycle_packages:
            for comma_list in self.cycle_packages.split(';'):
                self.allowed_cycles.append(comma_list.split(','))

    def cycle_check(self, project, repository, arch):
        self.logger.info('cycle check: start %s/%s/%s' %
                         (project, repository, arch))
        comment = []

        depinfo = builddepinfo(self.api.apiurl,
                               project,
                               repository,
                               arch,
                               order=False)
        for cycle in depinfo.findall('cycle'):
            for package in cycle.findall('package'):
                package = package.text
                allowed = False
                for acycle in self.allowed_cycles:
                    if package in acycle:
                        allowed = True
                        break
                if not allowed:
                    cycled = [p.text for p in cycle.findall('package')]
                    comment.append('Package {} appears in cycle {}'.format(
                        package, '/'.join(cycled)))

        if len(comment):
            # New cycles, post comment.
            self.logger.info('cycle check: failed')
            return CheckResult(False, '\n'.join(comment) + '\n')

        self.logger.info('cycle check: passed')
        return CheckResult(True, None)

    def project_pseudometa_file_name(self, project, repository):
        filename = 'repo_checker'

        main_repo = Config.get(self.api.apiurl, project).get('main-repo')
        if not main_repo:
            filename += '.' + repository

        return filename
Example #16
class OpenQABot(ReviewBot.ReviewBot):
    """ check ABI of library packages
    """
    def __init__(self, *args, **kwargs):
        super(OpenQABot, self).__init__(*args, **kwargs)
        self.tgt_repo = {}
        self.project_settings = {}
        self.api_map = {}
        self.bot_name = 'openqa'
        self.force = False
        self.openqa = None
        self.commentapi = CommentAPI(self.apiurl)

    def gather_test_builds(self):
        for prj, u in self.tgt_repo[self.openqa.baseurl].items():
            buildnr = 0
            cjob = 0
            for j in self.jobs_for_target(u):
                # avoid going backwards in job ID
                if cjob > int(j['id']):
                    continue
                buildnr = j['settings']['BUILD']
                cjob = int(j['id'])
            self.update_test_builds[prj] = buildnr
            jobs = self.jobs_for_target(u, build=buildnr)
            self.openqa_jobs[prj] = jobs
            if self.calculate_qa_status(jobs) == QA_INPROGRESS:
                self.pending_target_repos.add(prj)

    # reimplementation from the base class
    def check_requests(self):

        # to be filled by repos of active
        self.incident_repos = dict()
        self.update_test_builds = {}
        self.pending_target_repos = set()
        self.openqa_jobs = {}

        if self.ibs:
            self.check_suse_incidents()
        else:
            self.check_opensuse_incidents()

        # first calculate the latest build number for current jobs
        self.gather_test_builds()

        super(OpenQABot, self).check_requests()

        # now make sure the jobs are for current repo
        for prj, u in self.tgt_repo[self.openqa.baseurl].items():
            if prj in self.pending_target_repos:
                self.logger.debug("Do not trigger for " + prj)
                continue
            self.trigger_build_for_target(prj, u)

    # check a set of repos for their primary checksums
    @staticmethod
    def calculate_repo_hash(repos, incidents):
        m = hashlib.md5()
        # change this value if you want to force a new repo hash
        m.update(b'b')
        for url in repos:
            url += '/repodata/repomd.xml'
            root = ET.parse(osc.core.http_GET(url)).getroot()
            cs = root.find(
                './/{http://linux.duke.edu/metadata/repo}data[@type="primary"]/{http://linux.duke.edu/metadata/repo}checksum'
            )
            m.update(cs.text.encode('utf-8'))
        # now add the open incidents
        m.update(json.dumps(incidents, sort_keys=True).encode('utf-8'))
        digest = m.hexdigest()
        open_incidents = sorted(incidents.keys())
        if open_incidents:
            digest += ':' + ','.join(open_incidents)
        return digest
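    # Illustration (not part of the original code): the digest returned above
    # is the md5 hexdigest, optionally suffixed with the sorted open incident
    # ids, e.g. 'd41d8cd98f00b204e9800998ecf8427e:12345,12350'.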

    def is_incident_in_testing(self, incident):
        # hardcoded for now, as we only run this code for the SUSE Maintenance workflow
        project = 'SUSE:Maintenance:{}'.format(incident)

        xpath = "(state/@name='review') and (action/source/@project='{}' and action/@type='maintenance_release')".format(
            project)
        res = osc.core.search(self.apiurl, request=xpath)['request']
        # return the one and only (or None)
        return res.find('request')

    def calculate_incidents(self, incidents):
        """
        get incident numbers from SUSE:Maintenance:Test project
        returns dict with openQA var name : string with numbers
        """
        self.logger.debug("calculate_incidents: {}".format(pformat(incidents)))
        l_incidents = []
        for kind, prj in incidents.items():
            packages = osc.core.meta_get_packagelist(self.apiurl, prj)
            incidents = []
            # filter out incidents in staging
            for incident in packages:
                # remove patchinfo. prefix
                incident = incident.replace('_', '.').split('.')[1]
                req = self.is_incident_in_testing(incident)
                # without release request it's in staging
                if not req:
                    continue

                # skip kgraft patches from aggregation
                req_ = osc.core.Request()
                req_.read(req)
                src_prjs = {a.src_project for a in req_.actions}
                if SUSEUpdate.kgraft_target(self.apiurl, src_prjs.pop()):
                    self.logger.debug(
                        "calculate_incidents: Incident is kgraft - {} ".format(
                            incident))
                    continue

                incidents.append(incident)

            l_incidents.append((kind + '_TEST_ISSUES', ','.join(incidents)))
        self.logger.debug("Calculate incidents:{}".format(
            pformat(l_incidents)))
        return l_incidents

    def jobs_for_target(self, data, build=None):
        settings = data['settings']
        values = {
            'distri': settings['DISTRI'],
            'version': settings['VERSION'],
            'arch': settings['ARCH'],
            'flavor': settings['FLAVOR'],
            'scope': 'relevant',
            'latest': '1',
        }
        if build:
            values['build'] = build
        else:
            values['test'] = data['test']
        self.logger.debug("Get jobs: {}".format(pformat(values)))
        return self.openqa.openqa_request('GET', 'jobs', values)['jobs']

    # we don't know the current BUILD and querying all jobs is too expensive
    # so we need to check for one known TEST first
    # if that job doesn't contain the proper hash, we trigger a new one
    # and then we know the build
    def trigger_build_for_target(self, prj, data):
        today = date.today().strftime("%Y%m%d")

        try:
            repohash = self.calculate_repo_hash(
                data['repos'], self.incident_repos.get(prj, {}))
        except HTTPError as e:
            self.logger.debug(
                "REPOHASH not calculated with response {}".format(e))
            return

        buildnr = None
        jobs = self.jobs_for_target(data)
        for job in jobs:
            if job['settings'].get('REPOHASH', '') == repohash:
                # take the last in the row
                buildnr = job['settings']['BUILD']
        self.update_test_builds[prj] = buildnr
        # ignore old build numbers, we want a fresh run every day
        # to find regressions in the tests and to get data about
        # randomly failing tests
        if buildnr and buildnr.startswith(today):
            return

        buildnr = 0

        # not found, then check for the next free build nr
        for job in jobs:
            build = job['settings']['BUILD']
            if build and build.startswith(today):
                try:
                    nr = int(build.split('-')[1])
                    if nr > buildnr:
                        buildnr = nr
                except ValueError:
                    continue

        buildnr = "{!s}-{:d}".format(today, buildnr + 1)

        s = data['settings']
        # now schedule it for real
        if 'incidents' in data.keys():
            for x, y in self.calculate_incidents(data['incidents']):
                s[x] = y
        s['BUILD'] = buildnr
        s['REPOHASH'] = repohash
        s['_OBSOLETE'] = '1'
        self.logger.debug("Prepared: {}".format(pformat(s)))
        if not self.dryrun:
            try:
                self.logger.info("Openqa isos POST {}".format(pformat(s)))
                self.openqa.openqa_request('POST', 'isos', data=s, retries=1)
            except Exception as e:
                self.logger.error(e)
        self.update_test_builds[prj] = buildnr

    def request_get_openqa_status(self, req):
        types = {a.type for a in req.actions}
        if 'maintenance_release' not in types:
            return [], QA_UNKNOWN

        src_prjs = {a.src_project for a in req.actions}
        if len(src_prjs) != 1:
            raise Exception(
                "can't handle maintenance_release from different incidents")
        build = src_prjs.pop()
        incident_id = build.split(':')[-1]
        tgt_prjs = {a.tgt_project for a in req.actions}
        jobs = self.openqa_jobs.get(build, [])
        qa_status = self.calculate_qa_status(jobs)
        if qa_status == QA_UNKNOWN or qa_status == QA_INPROGRESS:
            return jobs, qa_status

        # check if the repo jobs include the incident
        repo_jobs = []
        for prj in sorted(tgt_prjs):
            repo_settings = self.tgt_repo.get(self.openqa.baseurl, {})
            if prj in repo_settings:
                repo_jobs += self.openqa_jobs[prj]
        for job in repo_jobs:
            foundissue = False
            for key, value in job['settings'].items():
                if key.endswith('_TEST_ISSUES'):
                    if incident_id in value.split(','):
                        foundissue = True
            if not foundissue:
                self.logger.info("Repo job {} not for {} - ignoring".format(
                    job['id'], incident_id))
                return jobs, QA_INPROGRESS
            # print(foundissue, incident_id, json.dumps(job['settings'], indent=4))

        jobs += repo_jobs
        return jobs, self.calculate_qa_status(jobs)

    def calculate_qa_status(self, jobs=None):
        if not jobs:
            return QA_UNKNOWN

        j = {}
        has_failed = False
        in_progress = False

        for job in jobs:
            if job['clone_id']:
                continue
            name = job['name']

            if name in j and int(job['id']) < int(j[name]['id']):
                continue
            j[name] = job

            if job['state'] not in ('cancelled', 'done'):
                in_progress = True
            else:
                if job['result'] != 'passed' and job['result'] != 'softfailed':
                    has_failed = True

        if not j:
            return QA_UNKNOWN
        if in_progress:
            return QA_INPROGRESS
        if has_failed:
            return QA_FAILED

        return QA_PASSED

    # escape markdown
    @staticmethod
    def emd(text):
        return text.replace('_', r'\_')

    @staticmethod
    def get_step_url(testurl, modulename):
        failurl = testurl + '/modules/{!s}/fails'.format(
            quote_plus(modulename))
        fails = requests.get(failurl).json()
        failed_step = fails.get('first_failed_step', 1)
        return "[{!s}]({!s}#step/{!s}/{:d})".format(OpenQABot.emd(modulename),
                                                    testurl, modulename,
                                                    failed_step)

    @staticmethod
    def job_test_name(job):
        return "{!s}@{!s}".format(OpenQABot.emd(job['settings']['TEST']),
                                  OpenQABot.emd(job['settings']['MACHINE']))

    def summarize_one_openqa_job(self, job):
        testurl = osc.core.makeurl(self.openqa.baseurl,
                                   ['tests', str(job['id'])])
        if job['result'] not in ['passed', 'failed', 'softfailed']:
            rstring = job['result']
            if rstring == 'none':
                return None
            return '\n- [{!s}]({!s}) is {!s}'.format(self.job_test_name(job),
                                                     testurl, rstring)

        modstrings = []
        for module in job['modules']:
            if module['result'] != 'failed':
                continue
            modstrings.append(self.get_step_url(testurl, module['name']))

        if modstrings:
            return '\n- [{!s}]({!s}) failed in {!s}'.format(
                self.job_test_name(job), testurl, ','.join(modstrings))
        elif job['result'] == 'failed':  # rare case: fail without module fails
            return '\n- [{!s}]({!s}) failed'.format(self.job_test_name(job),
                                                    testurl)
        return ''

    def summarize_openqa_jobs(self, jobs):
        groups = {}
        for job in jobs:
            gl = "{!s}@{!s}".format(self.emd(job['group']),
                                    self.emd(job['settings']['FLAVOR']))
            if gl not in groups:
                groupurl = osc.core.makeurl(
                    self.openqa.baseurl, ['tests', 'overview'], {
                        'version': job['settings']['VERSION'],
                        'groupid': job['group_id'],
                        'flavor': job['settings']['FLAVOR'],
                        'distri': job['settings']['DISTRI'],
                        'build': job['settings']['BUILD'],
                    })
                groups[gl] = {
                    'title': "__Group [{!s}]({!s})__\n".format(gl, groupurl),
                    'passed': 0,
                    'unfinished': 0,
                    'failed': []
                }

            job_summary = self.summarize_one_openqa_job(job)
            if job_summary is None:
                groups[gl]['unfinished'] = groups[gl]['unfinished'] + 1
                continue
            # None vs ''
            if not len(job_summary):
                groups[gl]['passed'] = groups[gl]['passed'] + 1
                continue
            # if there is something to report, hold the request
            # TODO: what is this ?
            # qa_state = QA_FAILED
            # gmsg = groups[gl]

            groups[gl]['failed'].append(job_summary)

        msg = ''
        for group in sorted(groups.keys()):
            msg += "\n\n" + groups[group]['title']
            infos = []
            if groups[group]['passed']:
                infos.append("{:d} tests passed".format(
                    groups[group]['passed']))
            if len(groups[group]['failed']):
                infos.append("{:d} tests failed".format(
                    len(groups[group]['failed'])))
            if groups[group]['unfinished']:
                infos.append("{:d} unfinished tests".format(
                    groups[group]['unfinished']))
            msg += "(" + ', '.join(infos) + ")\n"
            for fail in groups[group]['failed']:
                msg += fail
        return msg.rstrip('\n')

    def check_one_request(self, req):
        try:
            jobs, qa_state = self.request_get_openqa_status(req)
            self.logger.debug("request %s state %s", req.reqid, qa_state)
            msg = None
            if qa_state == QA_UNKNOWN:
                incident_id = req.to_xml().findall('.action/source')[0].get(
                    'project').split(":")[-1]
                if not jobs and incident_id not in self.wait_for_build:
                    msg = "no openQA tests defined"
                    self.comment_write(state='done',
                                       message=msg,
                                       request=req,
                                       result='accepted')
                    return True
                else:
                    self.logger.debug("request {} waits for build".format(
                        req.reqid))
            elif qa_state == QA_FAILED or qa_state == QA_PASSED:
                if qa_state == QA_PASSED:
                    msg = "openQA tests passed\n"
                    result = 'accepted'
                    ret = True
                else:
                    msg = "openQA tests problematic\n"
                    result = 'declined'
                    ret = False

                msg += self.summarize_openqa_jobs(jobs)
                self.comment_write(state='done',
                                   message=msg,
                                   result=result,
                                   request=req)
                return ret
            elif qa_state == QA_INPROGRESS:
                self.logger.info("request %s still in progress", req.reqid)
            else:
                raise Exception("unknown QA state %d", qa_state)

        except Exception:
            import traceback
            self.logger.error("unhandled exception in openQA Bot")
            self.logger.error(traceback.format_exc())
            return None

        return

    def find_obs_request_comment(self, request_id=None, project_name=None):
        """Return previous comments (should be one)."""
        comments = self.commentapi.get_comments(request_id=request_id,
                                                project_name=project_name)
        comment, info = self.commentapi.comment_find(comments, self.bot_name)
        if comment:
            # we only care for two fields
            return {'id': comment['id'], 'revision': info['revision']}

        return {}

    def check_product_arch(self, job, product_prefix, pmap, arch):
        need = False
        settings = {'VERSION': pmap['version']}
        settings['ARCH'] = arch if arch else 'x86_64'
        settings['DISTRI'] = pmap.get('distri', 'sle')
        issues = pmap.get('issues', {})
        issues['OS_TEST_ISSUES'] = issues.get('OS_TEST_ISSUES', product_prefix)
        required_issue = pmap.get('required_issue', False)
        for key, prefix in issues.items():
            # self.logger.debug("KP {} {}".format(key, prefix) + str(job))
            channel = prefix
            if arch:
                channel += arch
            if channel in job['channels']:
                settings[key] = str(job['id'])
                need = True
        if required_issue:
            if required_issue not in settings:
                need = False

        if not need:
            return []

        product_key = product_prefix
        if arch:
            product_key += arch
        update = self.project_settings[product_key]
        update.apiurl = self.apiurl
        update.logger = self.logger
        posts = []
        for j in update.settings(
                update.maintenance_project + ':' + str(job['id']),
                product_key):
            if not job.get('openqa_build'):
                job['openqa_build'] = update.get_max_revision(job)
            if not job.get('openqa_build'):
                self.wait_for_build.add(str(job['id']))
                return []
            self.incident_repos.setdefault(product_key, dict())[str(
                job['id'])] = job.get('openqa_build')
            j['BUILD'] += '.' + str(job['openqa_build'])
            j.update(settings)
            # kGraft jobs can have different version
            if 'real_version' in j:
                j['VERSION'] = j['real_version']
                del j['real_version']
            posts.append(j)
        return posts

    def check_product(self, job, product_prefix):
        pmap = self.api_map[product_prefix]
        posts = []
        if 'archs' in pmap:
            for arch in pmap['archs']:
                posts += self.check_product_arch(job, product_prefix, pmap,
                                                 arch)
        else:
            posts += self.check_product_arch(job, product_prefix, pmap, None)

        self.logger.debug("Pmap: {} Posts: {}".format(pmap, posts))
        return posts

    def incident_openqa_jobs(self, s):
        return self.openqa.openqa_request(
            'GET', 'jobs', {
                'distri': s['DISTRI'],
                'version': s['VERSION'],
                'arch': s['ARCH'],
                'flavor': s['FLAVOR'],
                'build': s['BUILD'],
                'scope': 'relevant',
                'latest': '1'
            })['jobs']

    # for SUSE we use mesh, for openSUSE we limit the jobs to open release requests
    def check_opensuse_incidents(self):
        requests = dict()  # collecting unique requests
        self.wait_for_build = set()
        for prj in self.tgt_repo[self.openqa.baseurl].keys():
            for r in self.ids_project(prj, 'maintenance_release'):
                requests[r.reqid] = r

        # to be stored in settings
        issues = dict()
        for req in sorted(requests.keys()):
            req = requests[req]
            types = {a.type for a in req.actions}
            if 'maintenance_release' not in types:
                continue

            src_prjs = {a.src_project for a in req.actions}
            if len(src_prjs) != 1:
                raise Exception(
                    "can't handle maintenance_release from different incidents"
                )
            build = src_prjs.pop()
            incident_id = build.split(':')[-1]
            tgt_prjs = set()
            for a in req.actions:
                prj = a.tgt_project
                # ignore e.g. Backports
                if prj not in self.project_settings:
                    continue

                issues.setdefault(prj, set()).add(incident_id)
                tgt_prjs.add(prj)

            self.test_job({
                'project': build,
                'id': incident_id,
                'channels': list(tgt_prjs)
            })

        for prj in self.tgt_repo[self.openqa.baseurl].keys():
            s = self.tgt_repo[self.openqa.baseurl][prj]['settings']
            s['OS_TEST_ISSUES'] = ','.join(sorted(issues.get(prj, set())))

    def check_suse_incidents(self):
        self.wait_for_build = set()
        for inc in requests.get(
                'https://maintenance.suse.de/api/incident/active/').json():
            self.logger.info("Incident number: {}".format(inc))

            mesh_job = requests.get(
                'https://maintenance.suse.de/api/incident/' + inc).json()

            if mesh_job['meta']['state'] in ['final', 'gone']:
                continue
            # required in mesh_job: project, id, channels
            self.test_job(mesh_job['base'])

    def test_job(self, mesh_job):
        self.logger.debug("Called test_job with: {}".format(mesh_job))
        incident_project = str(mesh_job['project'])
        try:
            comment_info = self.find_obs_request_comment(
                project_name=incident_project)
        except HTTPError as e:
            self.logger.debug("Couldn't load comments - {}".format(e))
            return
        comment_build = str(comment_info.get('revision', ''))

        openqa_posts = []
        for prod in self.api_map.keys():
            self.logger.debug("{} -- product in apimap".format(prod))
            openqa_posts += self.check_product(mesh_job, prod)
        openqa_jobs = []
        for s in openqa_posts:
            if 'skip_job' in s:
                self.wait_for_build.add(str(mesh_job['id']))
                continue
            jobs = self.incident_openqa_jobs(s)
            # take the project comment as marker for not posting jobs
            if not len(jobs) and comment_build != str(
                    mesh_job['openqa_build']):
                if self.dryrun:
                    self.logger.info('WOULD POST:{}'.format(
                        pformat(json.dumps(s, sort_keys=True))))
                else:
                    self.logger.info("Posted: {}".format(
                        pformat(json.dumps(s, sort_keys=True))))
                    self.openqa.openqa_request('POST',
                                               'isos',
                                               data=s,
                                               retries=1)
                    openqa_jobs += self.incident_openqa_jobs(s)
            else:
                self.logger.info("{} got {}".format(pformat(s), len(jobs)))
                openqa_jobs += jobs

        self.openqa_jobs[incident_project] = openqa_jobs

        if len(openqa_jobs) == 0:
            self.logger.debug("No openqa jobs defined")
            return
        # print openqa_jobs
        msg = self.summarize_openqa_jobs(openqa_jobs)
        state = 'seen'
        result = 'none'
        qa_status = self.calculate_qa_status(openqa_jobs)
        if qa_status == QA_PASSED:
            result = 'accepted'
            state = 'done'
        if qa_status == QA_FAILED:
            result = 'declined'
            state = 'done'
        self.comment_write(
            project=str(incident_project),
            state=state,
            result=result,
            message=msg,
            info_extra={'revision': str(mesh_job.get('openqa_build'))})
Example #17
class ReviewBot(object):
    """
    A generic obs request reviewer
    Inherit from this class and implement check functions for each action type:

    def check_action_<type>(self, req, action):
        return (None|True|False)
    """

    DEFAULT_REVIEW_MESSAGES = {'accepted': 'ok', 'declined': 'review failed'}
    REVIEW_CHOICES = ('normal', 'no', 'accept', 'accept-onpass',
                      'fallback-onfail', 'fallback-always')

    COMMENT_MARKER_REGEX = re.compile(
        r'<!-- (?P<bot>[^ ]+) state=(?P<state>[^ ]+)(?: result=(?P<result>[^ ]+))? -->'
    )
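    # matches markers like '<!-- ReviewBot state=done result=accepted -->'
    # that comment_write() embeds via CommentAPI.add_marker()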

    # map of default config entries
    config_defaults = {
        # list of tuples (prefix, apiurl, submitrequestprefix)
        # set this if the obs instance maps another instance into its
        # namespace
        'project_namespace_api_map': [
            ('openSUSE.org:', 'https://api.opensuse.org', 'obsrq'),
        ],
    }

    def __init__(self,
                 apiurl=None,
                 dryrun=False,
                 logger=None,
                 user=None,
                 group=None):
        self.apiurl = apiurl
        self.ibs = apiurl.startswith('https://api.suse.de')
        self.dryrun = dryrun
        self.logger = logger
        self.review_user = user
        self.review_group = group
        self.requests = []
        self.review_messages = ReviewBot.DEFAULT_REVIEW_MESSAGES
        self._review_mode = 'normal'
        self.fallback_user = None
        self.fallback_group = None
        self.comment_api = CommentAPI(self.apiurl)
        self.bot_name = self.__class__.__name__
        self.only_one_action = False
        self.request_default_return = None
        self.comment_handler = False
        self.override_allow = True
        self.override_group_key = '{}-override-group'.format(
            self.bot_name.lower())
        self.request_age_min_default = 0
        self.request_age_min_key = '{}-request-age-min'.format(
            self.bot_name.lower())
        self.lookup = PackageLookup(self.apiurl)

        self.load_config()

    def _load_config(self, handle=None):
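        # Overlay YAML-provided values on the class config_defaults and expose
        # the result as an immutable 'BotConfig' namedtuple keyed by entry name.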
        d = self.__class__.config_defaults
        y = yaml.safe_load(handle) if handle is not None else {}
        return namedtuple('BotConfig', sorted(
            d.keys()))(*[y.get(p, d[p]) for p in sorted(d.keys())])

    def load_config(self, filename=None):
        if filename:
            with open(filename, 'r') as fh:
                self.config = self._load_config(fh)
        else:
            self.config = self._load_config()

    def has_staging(self, project):
        try:
            url = osc.core.makeurl(self.apiurl,
                                   ('staging', project, 'staging_projects'))
            osc.core.http_GET(url)
            return True
        except HTTPError as e:
            if e.code != 404:
                self.logger.error('ERROR in URL %s [%s]' % (url, e))
                raise
        return False

    def staging_api(self, project):
        # Allow for the Staging subproject to be passed directly from config
        # which should be stripped before initializing StagingAPI. This allows
        # for NonFree subproject to utilize StagingAPI for main project.
        if project.endswith(':Staging'):
            project = project[:-8]

        if project not in self.staging_apis:
            Config.get(self.apiurl, project)
            self.staging_apis[project] = StagingAPI(self.apiurl, project)

        return self.staging_apis[project]

    @property
    def review_mode(self):
        return self._review_mode

    @review_mode.setter
    def review_mode(self, value):
        if value not in self.REVIEW_CHOICES:
            raise Exception("invalid review option: %s" % value)
        self._review_mode = value

    def set_request_ids(self, ids):
        for rqid in ids:
            u = osc.core.makeurl(self.apiurl, ['request', rqid],
                                 {'withfullhistory': '1'})
            r = osc.core.http_GET(u)
            root = ET.parse(r).getroot()
            req = osc.core.Request()
            req.read(root)
            self.requests.append(req)

    # function called before requests are reviewed
    def prepare_review(self):
        pass

    def check_requests(self):
        self.staging_apis = {}

        # give implementations a chance to do something before single requests
        self.prepare_review()
        return_value = 0

        for req in self.requests:
            self.logger.info("checking %s" % req.reqid)
            self.request = req
            with sentry_sdk.configure_scope() as scope:
                scope.set_extra('request.id', self.request.reqid)

            # XXX: this is a hack. Annotating the request with staging_project.
            # OBS itself should provide an API for that but that's currently not the case
            # https://github.com/openSUSE/openSUSE-release-tools/pull/2377
            if not hasattr(req, 'staging_project'):
                staging_project = None
                for r in req.reviews:
                    if r.state == 'new' and r.by_project and ":Staging:" in r.by_project:
                        staging_project = r.by_project
                        break
                setattr(req, 'staging_project', staging_project)

            try:
                good = self.check_one_request(req)
            except Exception as e:
                good = None

                import traceback
                traceback.print_exc()
                return_value = 1

                sentry_sdk.capture_exception(e)

            if self.review_mode == 'no':
                good = None
            elif self.review_mode == 'accept':
                good = True

            if good is None:
                self.logger.info("%s ignored" % req.reqid)
            elif good:
                self._set_review(req, 'accepted')
            elif self.review_mode != 'accept-onpass':
                self._set_review(req, 'declined')

        return return_value

    @memoize(session=True)
    def request_override_check_users(self, project):
        """Determine users allowed to override review in a comment command."""
        config = Config.get(self.apiurl, project)

        users = []
        group = config.get('staging-group')
        if group:
            users += group_members(self.apiurl, group)

        if self.override_group_key:
            override_group = config.get(self.override_group_key)
            if override_group:
                users += group_members(self.apiurl, override_group)

        return users

    def request_override_check(self, force=False):
        """Check for a comment command requesting review override."""
        if not force and not self.override_allow:
            return None

        for args, who in self.request_commands('override'):
            message = 'overridden by {}'.format(who)
            override = args[1] if len(args) >= 2 else 'accept'
            if override == 'accept':
                self.review_messages['accepted'] = message
                return True

            if override == 'decline':
                self.review_messages['declined'] = message
                return False

    def request_commands(self,
                         command,
                         who_allowed=None,
                         request=None,
                         action=None,
                         include_description=True):
        if not request:
            request = self.request
        if not action:
            action = self.action
        if not who_allowed:
            who_allowed = self.request_override_check_users(action.tgt_project)

        comments = self.comment_api.get_comments(request_id=request.reqid)
        if include_description:
            request_comment = self.comment_api.request_as_comment_dict(request)
            comments[request_comment['id']] = request_comment

        yield from self.comment_api.command_find(comments, self.review_user,
                                                 command, who_allowed)

    def _set_review(self, req, state):
        doit = self.can_accept_review(req.reqid)
        if doit is None:
            self.logger.info(
                "can't change state, %s does not have the reviewer" %
                (req.reqid))

        newstate = state

        by_user = self.fallback_user
        by_group = self.fallback_group

        msg = self.review_messages[
            state] if state in self.review_messages else state
        self.logger.info("%s %s: %s" % (req.reqid, state, msg))

        if state == 'declined':
            if self.review_mode == 'fallback-onfail':
                self.logger.info("%s needs fallback reviewer" % req.reqid)
                self.add_review(
                    req,
                    by_group=by_group,
                    by_user=by_user,
                    msg="Automated review failed. Needs fallback reviewer.")
                newstate = 'accepted'
        elif self.review_mode == 'fallback-always':
            self.add_review(req,
                            by_group=by_group,
                            by_user=by_user,
                            msg='Adding fallback reviewer')

        if doit is True:
            self.logger.debug("setting %s to %s" % (req.reqid, state))
            if not self.dryrun:
                try:
                    osc.core.change_review_state(apiurl=self.apiurl,
                                                 reqid=req.reqid,
                                                 newstate=newstate,
                                                 by_group=self.review_group,
                                                 by_user=self.review_user,
                                                 message=msg)
                except HTTPError as e:
                    if e.code != 403:
                        raise e
                    self.logger.info(
                        'unable to change review state (likely superseded or revoked)'
                    )
        else:
            self.logger.debug("%s review not changed" % (req.reqid))

    # allow_duplicate=True should only be used if it makes sense to force a
    # re-review in a scenario where the bot adding the review will rerun.
    # Normally a declined review is automatically reopened along with the
    # request, and any other bot reviews already added are not touched unless
    # the issuing bot is rerun, which does not fit the normal workflow.
    def add_review(self,
                   req,
                   by_group=None,
                   by_user=None,
                   by_project=None,
                   by_package=None,
                   msg=None,
                   allow_duplicate=False):
        query = {'cmd': 'addreview'}
        if by_group:
            query['by_group'] = by_group
        elif by_user:
            query['by_user'] = by_user
        elif by_project:
            query['by_project'] = by_project
            if by_package:
                query['by_package'] = by_package
        else:
            raise osc.oscerr.WrongArgs("missing by_*")

        for r in req.reviews:
            if (r.by_group == by_group and r.by_project == by_project
                    and r.by_package == by_package and r.by_user == by_user and
                    # Only duplicate when allow_duplicate and state != new.
                (not allow_duplicate or r.state == 'new')):
                del query['cmd']
                self.logger.debug(
                    'skipped adding duplicate review for {}'.format('/'.join(
                        query.values())))
                return

        u = osc.core.makeurl(self.apiurl, ['request', req.reqid], query)
        if self.dryrun:
            self.logger.info('POST %s' % u)
            return

        if self.multiple_actions:
            key = request_action_key(self.action)
            msg = yaml.dump({key: msg}, default_flow_style=False)

        try:
            r = osc.core.http_POST(u, data=msg)
        except HTTPError as e:
            if e.code != 403:
                raise e
            del query['cmd']
            self.logger.info('unable to add review {} with message: {}'.format(
                query, msg))
            return

        code = ET.parse(r).getroot().attrib['code']
        if code != 'ok':
            raise Exception('non-ok return code: {}'.format(code))

    def devel_project_review_add(self,
                                 request,
                                 project,
                                 package,
                                 message='adding devel project review'):
        devel_project, devel_package = devel_project_fallback(
            self.apiurl, project, package)
        if not devel_project:
            self.logger.warning('no devel project found for {}/{}'.format(
                project, package))
            return False

        self.add_review(request,
                        by_project=devel_project,
                        by_package=devel_package,
                        msg=message)

        return True

    def devel_project_review_ensure(self,
                                    request,
                                    project,
                                    package,
                                    message='submitter not devel maintainer'):
        if not self.devel_project_review_needed(request, project, package):
            self.logger.debug('devel project review not needed')
            return True

        return self.devel_project_review_add(request, project, package,
                                             message)

    def devel_project_review_needed(self, request, project, package):
        author = request.get_creator()
        maintainers = set(maintainers_get(self.apiurl, project, package))

        if author in maintainers:
            return False

        # Carried over from maintbot, but seems haphazard.
        for review in request.reviews:
            if review.by_user in maintainers:
                return False

        return True

    def check_one_request(self, req):
        """
        check all actions in one request.

        calls helper functions for each action type

        return None if nothing to do, True to accept, False to reject
        """

        if len(req.actions) > 1:
            if self.only_one_action:
                self.review_messages[
                    'declined'] = 'Only one action per request supported'
                return False

            # Will cause added reviews and overall review message to include
            # each actions message prefixed by an action key.
            self.multiple_actions = True
            review_messages_multi = {}
        else:
            self.multiple_actions = False

            # Copy original values to revert changes made to them.
            self.review_messages = self.DEFAULT_REVIEW_MESSAGES.copy()

        if self.comment_handler is not False:
            self.comment_handler_add()

        overall = True
        for a in req.actions:
            if self.multiple_actions:
                self.review_messages = self.DEFAULT_REVIEW_MESSAGES.copy()

            # Store in case sub-classes need direct access to original values.
            self.action = a
            key = request_action_key(a)
            with sentry_sdk.configure_scope() as scope:
                scope.set_extra('action.key', key)

            override = self.request_override_check()
            if override is not None:
                ret = override
            else:
                func = getattr(self, self.action_method(a))
                ret = func(req, a)

            # In the case of multiple actions take the "lowest" result where the
            # order from lowest to highest is: False, None, True.
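            # e.g. one accepted action (True) combined with one ignored
            # action (None) yields an overall result of None.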
            if overall is not False:
                if ((overall is True and ret is not True)
                        or (overall is None and ret is False)):
                    overall = ret

            if self.multiple_actions and ret is not None:
                message_key = self.review_message_key(ret)
                review_messages_multi[key] = self.review_messages[message_key]

        message_key = self.review_message_key(overall)
        if self.multiple_actions:
            message_combined = yaml.dump(review_messages_multi,
                                         default_flow_style=False)
            self.review_messages[message_key] = message_combined
        elif type(self.review_messages[message_key]) is dict:
            self.review_messages[message_key] = yaml.dump(
                self.review_messages[message_key], default_flow_style=False)

        return overall

    def action_method(self, action):
        method_prefix = 'check_action'
        method_type = action.type
        method_suffix = None

        if method_type == 'delete':
            method_suffix = 'project'
            if action.tgt_package is not None:
                method_suffix = 'package'
            elif action.tgt_repository is not None:
                method_suffix = 'repository'

        if method_suffix:
            method = '_'.join([method_prefix, method_type, method_suffix])
            if hasattr(self, method):
                return method

        method = '_'.join([method_prefix, method_type])
        if hasattr(self, method):
            return method

        method_type = '_default'
        return '_'.join([method_prefix, method_type])

    def review_message_key(self, result):
        return 'accepted' if result else 'declined'

    def check_action_maintenance_incident(self, req, a):
        if action_is_patchinfo(a):
            self.logger.debug('ignoring patchinfo action')
            return True

        # Duplicate src_package as tgt_package since prior to assignment to a
        # specific incident project there is no target package (odd API). After
        # assignment it is still assumed the target will match the source. Since
        # the ultimate goal is the tgt_releaseproject the incident is treated
        # similar to staging in that the intermediate result is not the final
        # and thus the true target project (ex. openSUSE:Maintenance) is not
        # used for check_source_submission().
        tgt_package = a.src_package
        if a.tgt_releaseproject is not None:
            suffix = '.' + a.tgt_releaseproject.replace(':', '_')
            if tgt_package.endswith(suffix):
                tgt_package = tgt_package[:-len(suffix)]

        # Note tgt_releaseproject (product) instead of tgt_project (maintenance).
        return self.check_source_submission(a.src_project, a.src_package,
                                            a.src_rev, a.tgt_releaseproject,
                                            tgt_package)

    def check_action_maintenance_release(self, req, a):
        pkgname = a.src_package
        if action_is_patchinfo(a):
            self.logger.debug('ignoring patchinfo action')
            return True

        linkpkg = self._get_linktarget_self(a.src_project, pkgname)
        if linkpkg is not None:
            pkgname = linkpkg
        # packages in maintenance have links to the target. Use that
        # to find the real package name
        (linkprj, linkpkg) = self._get_linktarget(a.src_project, pkgname)
        if linkpkg is None or linkprj is None or linkprj != a.tgt_project:
            self.logger.warning("%s/%s is not a link to %s" %
                                (a.src_project, pkgname, a.tgt_project))
            return self.check_source_submission(a.src_project, a.src_package,
                                                a.src_rev, a.tgt_project,
                                                a.tgt_package)
        else:
            pkgname = linkpkg
        return self.check_source_submission(a.src_project, a.src_package, None,
                                            a.tgt_project, pkgname)

    def check_action_submit(self, req, a):
        return self.check_source_submission(a.src_project, a.src_package,
                                            a.src_rev, a.tgt_project,
                                            a.tgt_package)

    def check_action__default(self, req, a):
        # Disable any comment handler to avoid making a comment even if
        # comment_write() is called by another bot wrapping __default().
        self.comment_handler_remove()

        message = 'unhandled request type {}'.format(a.type)
        self.logger.info(message)
        self.review_messages['accepted'] += ': ' + message
        return self.request_default_return

    def check_source_submission(self, src_project, src_package, src_rev,
                                target_project, target_package):
        """ default implemention does nothing """
        self.logger.info("%s/%s@%s -> %s/%s" %
                         (src_project, src_package, src_rev, target_project,
                          target_package))
        return None

    @staticmethod
    @memoize(session=True)
    def _get_sourceinfo(apiurl, project, package, rev=None):
        query = {'view': 'info'}
        if rev is not None:
            query['rev'] = rev
        url = osc.core.makeurl(apiurl, ('source', project, package),
                               query=query)
        try:
            return ET.parse(osc.core.http_GET(url)).getroot()
        except (HTTPError, URLError):
            return None

    def get_originproject(self, project, package, rev=None):
        root = ReviewBot._get_sourceinfo(self.apiurl, project, package, rev)
        if root is None:
            return None

        originproject = root.find('originproject')
        if originproject is not None:
            return originproject.text

        return None

    def get_sourceinfo(self, project, package, rev=None):
        root = ReviewBot._get_sourceinfo(self.apiurl, project, package, rev)
        if root is None:
            return None

        props = ('package', 'rev', 'vrev', 'srcmd5', 'lsrcmd5', 'verifymd5')
        return namedtuple('SourceInfo', props)(*[root.get(p) for p in props])

    # TODO: what if there is more than one _link?
    def _get_linktarget_self(self, src_project, src_package):
        """ if it's a link to a package in the same project return the name of the package"""
        prj, pkg = self._get_linktarget(src_project, src_package)
        if prj is None or prj == src_project:
            return pkg

    def _get_linktarget(self, src_project, src_package):

        query = {}
        url = osc.core.makeurl(self.apiurl,
                               ('source', src_project, src_package),
                               query=query)
        try:
            root = ET.parse(osc.core.http_GET(url)).getroot()
        except HTTPError:
            return (None, None)

        if root is not None:
            linkinfo = root.find("linkinfo")
            if linkinfo is not None:
                return (linkinfo.get('project'), linkinfo.get('package'))

        return (None, None)

    def _has_open_review_by(self, root, by_what, reviewer):
        states = set([
            review.get('state') for review in root.findall('review')
            if review.get(by_what) == reviewer
        ])
        if not states:
            return None
        elif 'new' in states:
            return True
        return False

    def can_accept_review(self, request_id):
        """return True if there is a new review for the specified reviewer"""
        url = osc.core.makeurl(self.apiurl, ('request', str(request_id)))
        try:
            root = ET.parse(osc.core.http_GET(url)).getroot()
            if self.review_user and self._has_open_review_by(
                    root, 'by_user', self.review_user):
                return True
            if self.review_group and self._has_open_review_by(
                    root, 'by_group', self.review_group):
                return True
        except HTTPError as e:
            print('ERROR in URL %s [%s]' % (url, e))
        return False

    def set_request_ids_search_review(self):
        review = None
        if self.review_user:
            review = "@by_user='******' and @state='new'" % self.review_user
        if self.review_group:
            review = osc.core.xpath_join(
                review, "@by_group='%s' and @state='new'" % self.review_group)
        url = osc.core.makeurl(
            self.apiurl, ('search', 'request'), {
                'match': "state/@name='review' and review[%s]" % review,
                'withfullhistory': 1
            })
        root = ET.parse(osc.core.http_GET(url)).getroot()

        self.requests = []

        for request in root.findall('request'):
            req = osc.core.Request()
            req.read(request)
            self.requests.append(req)

    # also used by openqabot
    def ids_project(self, project, typename):
        url = osc.core.makeurl(
            self.apiurl, ('search', 'request'), {
                'match':
                "(state/@name='review' or state/@name='new') and (action/target/@project='%s' and action/@type='%s')"
                % (project, typename),
                'withfullhistory':
                1
            })
        root = ET.parse(osc.core.http_GET(url)).getroot()

        ret = []

        for request in root.findall('request'):
            req = osc.core.Request()
            req.read(request)
            ret.append(req)
        return ret

    def set_request_ids_project(self, project, typename):
        self.requests = self.ids_project(project, typename)

    def comment_handler_add(self, level=logging.INFO):
        """Add handler to start recording log messages for comment."""
        self.comment_handler = CommentFromLogHandler(level)
        self.logger.addHandler(self.comment_handler)

    def comment_handler_remove(self):
        self.logger.removeHandler(self.comment_handler)

    def comment_handler_lines_deduplicate(self):
        self.comment_handler.lines = list(
            OrderedDict.fromkeys(self.comment_handler.lines))

    def comment_write(self,
                      state='done',
                      result=None,
                      project=None,
                      package=None,
                      request=None,
                      message=None,
                      identical=False,
                      only_replace=False,
                      info_extra=None,
                      info_extra_identical=True,
                      bot_name_suffix=None):
        """Write comment if not similar to previous comment and replace old one.

        The state, result, and info_extra (dict) are combined to create the info
        that is passed to CommentAPI methods for creating a marker and finding
        previous comments. self.bot_name, which defaults to the class name,
        will be used as the primary matching key. When info_extra_identical is
        set to False, info_extra will not be included when finding previous
        comments to compare the message against.

        A comment from the same bot will be replaced when a new comment is
        written. The only_replace flag will restrict to only writing a comment
        if a prior one is being replaced. This can be useful for writing a final
        comment that indicates a change from previous uncompleted state, but
        only makes sense to post if a prior comment was posted.

        The project, package, and request variables control where the comment is
        placed. If no value is given the default is the request being reviewed.

        If no message is provided the content will be extracted from
        self.comment_handler.lines, which is provided by CommentFromLogHandler.
        To use this, call comment_handler_add() at the point from which messages
        should start being collected. Alternatively, self.comment_handler may be
        set to True to automatically add a handler on each request.

        The previous comment's body line count is compared to decide whether it
        is too similar to bother posting another comment, which is useful for
        avoiding re-posts over irrelevant minor changes. To force an exact
        match, set the identical flag so that any non-identical comment body is
        replaced.
        """
        if project:
            kwargs = {'project_name': project}
            if package:
                kwargs['package_name'] = package
        else:
            if request is None:
                request = self.request
            kwargs = {'request_id': request.reqid}
        debug_key = '/'.join(kwargs.values())

        if message is None:
            if not len(self.comment_handler.lines):
                self.logger.debug(
                    'skipping empty comment for {}'.format(debug_key))
                return
            message = '\n\n'.join(self.comment_handler.lines)

        bot_name = self.bot_name
        if bot_name_suffix:
            bot_name = '::'.join([bot_name, bot_name_suffix])

        info = {'state': state, 'result': result}
        if info_extra and info_extra_identical:
            info.update(info_extra)

        comments = self.comment_api.get_comments(**kwargs)
        comment, _ = self.comment_api.comment_find(comments, bot_name, info)

        if info_extra and not info_extra_identical:
            # Add info_extra once comment has already been matched.
            info.update(info_extra)

        message = self.comment_api.add_marker(message, bot_name, info)
        message = self.comment_api.truncate(message.strip())

        if (comment is not None and
            ((
                identical and
                # Remove marker from comments since handled during comment_find().
                self.comment_api.remove_marker(comment['comment'])
                == self.comment_api.remove_marker(message)) or
             (not identical
              and comment['comment'].count('\n') == message.count('\n')))):
            # Assume same state/result and number of lines in message is duplicate.
            self.logger.debug(
                'previous comment too similar on {}'.format(debug_key))
            return

        if comment is None:
            self.logger.debug(
                'broadening search to include any state on {}'.format(
                    debug_key))
            comment, _ = self.comment_api.comment_find(comments, bot_name)
        if comment is not None:
            self.logger.debug(
                'removing previous comment on {}'.format(debug_key))
            if not self.dryrun:
                self.comment_api.delete(comment['id'])
        elif only_replace:
            self.logger.debug(
                'no previous comment to replace on {}'.format(debug_key))
            return

        self.logger.debug('adding comment to {}: {}'.format(
            debug_key, message))
        if not self.dryrun:
            self.comment_api.add_comment(comment=message, **kwargs)

        self.comment_handler_remove()

    def _check_matching_srcmd5(self, project, package, rev, history_limit=5):
        """check if factory sources contain the package and revision. check head and history"""
        self.logger.debug("checking %s in %s" % (package, project))
        try:
            osc.core.show_package_meta(self.apiurl, project, package)
        except (HTTPError, URLError):
            self.logger.debug("new package")
            return None

        si = self.get_sourceinfo(project, package)
        if rev == si.verifymd5:
            self.logger.debug("srcmd5 matches")
            return True

        if history_limit:
            self.logger.debug("%s not the latest version, checking history",
                              rev)
            u = osc.core.makeurl(self.apiurl,
                                 ['source', project, package, '_history'],
                                 {'limit': history_limit})
            try:
                r = osc.core.http_GET(u)
            except HTTPError as e:
                self.logger.debug("package has no history!?")
                return None

            root = ET.parse(r).getroot()
            # we need this complicated construct as obs doesn't honor
            # the 'limit' parameter used above for obs interconnect:
            # https://github.com/openSUSE/open-build-service/issues/2545
            for revision, i in zip(reversed(root.findall('revision')),
                                   count()):
                node = revision.find('srcmd5')
                if node is None:
                    continue
                self.logger.debug("checking %s" % node.text)
                if node.text == rev:
                    self.logger.debug("got it, rev %s" % revision.get('rev'))
                    return True
                if i == history_limit:
                    break

            self.logger.debug("srcmd5 not found in history either")

        return False

    def request_age_wait(self,
                         age_min=None,
                         request=None,
                         target_project=None):
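        # age_min may be an int (seconds), a config key (str), or None to use
        # the '<bot_name>-request-age-min' key from the target project config.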
        if not request:
            request = self.request

        if not target_project:
            target_project = self.action.tgt_project

        if age_min is None or isinstance(age_min, str):
            key = self.request_age_min_key if age_min is None else age_min
            age_min = int(
                Config.get(self.apiurl,
                           target_project).get(key,
                                               self.request_age_min_default))

        age = request_age(request).total_seconds()
        if age < age_min:
            self.logger.info(
                'skipping {} of age {:.2f}s since it is younger than {}s'.
                format(request.reqid, age, age_min))
            return True

        return False
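
# A minimal, hypothetical subclass sketch (MyBot, the 'forbidden' package name
# and the commented usage lines are illustrative, not part of the source):
# implement one check_action_<type> handler and drive it via check_requests(),
# as the ReviewBot docstring above describes.
class MyBot(ReviewBot):
    def check_action_submit(self, req, action):
        # decline submissions of one hypothetical package, accept the rest;
        # returning None instead would leave the review untouched
        if action.tgt_package == 'forbidden':
            self.review_messages['declined'] = 'forbidden package'
            return False
        return True

# bot = MyBot(apiurl='https://api.opensuse.org', dryrun=True,
#             logger=logging.getLogger('MyBot'), user='mybot')
# bot.set_request_ids_search_review()
# bot.check_requests()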
Example #18
class ABIChecker(ReviewBot.ReviewBot):
    """ check ABI of library packages
    """
    def __init__(self, *args, **kwargs):
        ReviewBot.ReviewBot.__init__(self, *args, **kwargs)

        self.no_review = False
        self.force = False

        self.ts = rpm.TransactionSet()
        self.ts.setVSFlags(rpm._RPMVSF_NOSIGNATURES)

        # reports of source submission
        self.reports = []
        # textual report summary for use in accept/decline message
        # or comments
        self.text_summary = ''

        self.session = DB.db_session()

        self.dblogger = LogToDB(self.session)

        self.logger.addFilter(self.dblogger)

        self.commentapi = CommentAPI(self.apiurl)

        self.current_request = None

    def check_source_submission(self, src_project, src_package, src_rev,
                                dst_project, dst_package):

        # happens for maintenance incidents
        if dst_project is None and src_package == 'patchinfo':
            return None

        if dst_project in PROJECT_BLACKLIST:
            self.logger.info(PROJECT_BLACKLIST[dst_project])
            #            self.text_summary += PROJECT_BLACKLIST[dst_project] + "\n"
            return True

        # default is to accept the review, just leave a note if
        # there were problems.
        ret = True

        if self.has_staging(dst_project):
            # if staged we don't look at the request source but what
            # is in staging
            if self.current_request.staging_project:
                src_project = self.current_request.staging_project
                src_package = dst_package
                src_rev = None
            else:
                self.logger.debug("request not staged yet")
                return None

        ReviewBot.ReviewBot.check_source_submission(self, src_project,
                                                    src_package, src_rev,
                                                    dst_project, dst_package)

        report = Report(src_project, src_package, src_rev, dst_project,
                        dst_package, [], None)

        dst_srcinfo = self.get_sourceinfo(dst_project, dst_package)
        self.logger.debug('dest sourceinfo %s', pformat(dst_srcinfo))
        if dst_srcinfo is None:
            msg = "%s/%s seems to be a new package, no need to review" % (
                dst_project, dst_package)
            self.logger.info(msg)
            self.reports.append(report)
            return True
        src_srcinfo = self.get_sourceinfo(src_project, src_package, src_rev)
        self.logger.debug('src sourceinfo %s', pformat(src_srcinfo))
        if src_srcinfo is None:
            msg = "%s/%s@%s does not exist!? can't check" % (
                src_project, src_package, src_rev)
            self.logger.error(msg)
            self.text_summary += msg + "\n"
            self.reports.append(report)
            return False

        if os.path.exists(UNPACKDIR):
            shutil.rmtree(UNPACKDIR)

        try:
            # compute list of common repos to find out what to compare
            myrepos = self.findrepos(src_project, src_srcinfo, dst_project,
                                     dst_srcinfo)
        except NoBuildSuccess as e:
            self.logger.info(e)
            self.text_summary += "**Error**: %s\n" % e
            self.reports.append(report)
            return False
        except NotReadyYet as e:
            self.logger.info(e)
            self.reports.append(report)
            return None
        except SourceBroken as e:
            self.logger.error(e)
            self.text_summary += "**Error**: %s\n" % e
            self.reports.append(report)
            return False

        if not myrepos:
            self.text_summary += "**Error**: %s does not build against %s, can't check library ABIs\n\n" % (
                src_project, dst_project)
            self.logger.info("no matching repos, can't compare")
            self.reports.append(report)
            return False

        # *** beware of nasty maintenance stuff ***
        # if the destination is a maintained project we need to
        # mangle our comparison target and the repo mapping
        try:
            originproject, originpackage, origin_srcinfo, new_repo_map = self._maintenance_hack(
                dst_project, dst_srcinfo, myrepos)
            if originproject is not None:
                dst_project = originproject
            if originpackage is not None:
                dst_package = originpackage
            if origin_srcinfo is not None:
                dst_srcinfo = origin_srcinfo
            if new_repo_map is not None:
                myrepos = new_repo_map
        except MaintenanceError as e:
            self.text_summary += "**Error**: %s\n\n" % e
            self.logger.error('%s', e)
            self.reports.append(report)
            return False
        except NoBuildSuccess as e:
            self.logger.info(e)
            self.text_summary += "**Error**: %s\n" % e
            self.reports.append(report)
            return False
        except NotReadyYet as e:
            self.logger.info(e)
            self.reports.append(report)
            return None
        except SourceBroken as e:
            self.logger.error(e)
            self.text_summary += "**Error**: %s\n" % e
            self.reports.append(report)
            return False

        notes = []
        libresults = []

        overall = None

        missing_debuginfo = []

        for mr in myrepos:
            try:
                dst_libs, dst_libdebug = self.extract(dst_project, dst_package,
                                                      dst_srcinfo, mr.dstrepo,
                                                      mr.arch)
                # nothing to fetch, so no libs
                if dst_libs is None:
                    continue
            except DistUrlMismatch as e:
                self.logger.error(
                    "%s/%s %s/%s: %s" %
                    (dst_project, dst_package, mr.dstrepo, mr.arch, e))
                if ret == True:  # need to check again
                    ret = None
                continue
            except MissingDebugInfo as e:
                missing_debuginfo.append(str(e))
                ret = False
                continue
            except FetchError as e:
                self.logger.error(e)
                if ret == True:  # need to check again
                    ret = None
                continue

            try:
                src_libs, src_libdebug = self.extract(src_project, src_package,
                                                      src_srcinfo, mr.srcrepo,
                                                      mr.arch)
                if src_libs is None:
                    if dst_libs:
                        self.text_summary += "*Warning*: the submission does not contain any libs anymore\n\n"
                    continue
            except DistUrlMismatch as e:
                self.logger.error(
                    "%s/%s %s/%s: %s" %
                    (src_project, src_package, mr.srcrepo, mr.arch, e))
                if ret == True:  # need to check again
                    ret = None
                continue
            except MissingDebugInfo as e:
                missing_debuginfo.append(str(e))
                ret = False
                continue
            except FetchError as e:
                self.logger.error(e)
                if ret == True:  # need to check again
                    ret = None
                continue

            # create reverse index for aliases in the source project
            src_aliases = dict()
            for lib in src_libs.keys():
                for a in src_libs[lib]:
                    src_aliases.setdefault(a, set()).add(lib)

            # for each library in the destination project check if the same lib
            # exists in the source project. If not check the aliases (symlinks)
            # to catch soname changes. Generate pairs of matching libraries.
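            # e.g. a soname bump from libfoo.so.1 to libfoo.so.2 is still
            # paired via their shared libfoo.so symlink alias (illustrative).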
            pairs = set()
            for lib in dst_libs.keys():
                if lib in src_libs:
                    pairs.add((lib, lib))
                else:
                    self.logger.debug(
                        "%s not found in submission, checking aliases", lib)
                    found = False
                    for a in dst_libs[lib]:
                        if a in src_aliases:
                            for other in src_aliases[a]:
                                pairs.add((lib, other))
                                found = True
                    if not found:
                        self.text_summary += "*Warning*: %s no longer packaged\n\n" % lib

            self.logger.debug("to diff: %s", pformat(pairs))

            # for each pair dump and compare the abi
            for old, new in pairs:
                # abi dump of old lib
                old_base = os.path.join(UNPACKDIR, dst_project, dst_package,
                                        mr.dstrepo, mr.arch)
                old_dump = os.path.join(CACHEDIR, 'old.dump')
                # abi dump of new lib
                new_base = os.path.join(UNPACKDIR, src_project, src_package,
                                        mr.srcrepo, mr.arch)
                new_dump = os.path.join(CACHEDIR, 'new.dump')

                def cleanup():
                    if os.path.exists(old_dump):
                        os.unlink(old_dump)
                    if os.path.exists(new_dump):
                        os.unlink(new_dump)

                cleanup()

                # we only need the regex match to pass a library name to the abi checker
                m = so_re.match(old)
                htmlreport = 'report-%s-%s-%s-%s-%s-%08x.html' % (
                    mr.srcrepo, os.path.basename(old), mr.dstrepo,
                    os.path.basename(new), mr.arch, int(time.time()))

                # run abichecker
                if m \
                    and self.run_abi_dumper(old_dump, old_base, old, dst_libdebug[old]) \
                    and self.run_abi_dumper(new_dump, new_base, new, src_libdebug[new]):
                    reportfn = os.path.join(CACHEDIR, htmlreport)
                    r = self.run_abi_checker(m.group(1), old_dump, new_dump,
                                             reportfn)
                    if r is not None:
                        self.logger.debug('report saved to %s, compatible: %d',
                                          reportfn, r)
                        libresults.append(
                            LibResult(mr.srcrepo,
                                      os.path.basename(old), mr.dstrepo,
                                      os.path.basename(new), mr.arch,
                                      htmlreport, r))
                        if overall is None:
                            overall = r
                        elif overall == True and r == False:
                            overall = r
                else:
                    self.logger.error('failed to compare %s <> %s' %
                                      (old, new))
                    self.text_summary += "**Error**: ABI check failed on %s vs %s\n\n" % (
                        old, new)
                    if ret == True:  # need to check again
                        ret = None

                cleanup()

        if missing_debuginfo:
            self.text_summary += 'debug information is missing for the following packages, can\'t check:\n<pre>'
            self.text_summary += ''.join(missing_debuginfo)
            self.text_summary += '</pre>\nplease enable debug info in your project config.\n'

        self.reports.append(report._replace(result=overall,
                                            reports=libresults))

        # upload reports

        if os.path.exists(UNPACKDIR):
            shutil.rmtree(UNPACKDIR)

        return ret

    def _maintenance_hack(self, dst_project, dst_srcinfo, myrepos):
        pkg = dst_srcinfo.package
        originproject = None
        originpackage = None

        # find the maintenance project
        url = osc.core.makeurl(
            self.apiurl, ('search', 'project', 'id'),
            "match=(maintenance/maintains/@project='%s'+and+attribute/@name='%s')"
            % (dst_project, osc.conf.config['maintenance_attribute']))
        root = ET.parse(osc.core.http_GET(url)).getroot()
        if root is not None:
            node = root.find('project')
            if node is not None:
                # check if target project is a project link where the
                # sources don't actually build (like openSUSE:...:Update). That
                # is the case if no update was released yet.
                # XXX: TODO: check whether the package builds here first
                originproject = self.get_originproject(dst_project, pkg)
                if originproject is not None:
                    self.logger.debug("origin project %s", originproject)
                    url = osc.core.makeurl(self.apiurl,
                                           ('build', dst_project, '_result'),
                                           {'package': pkg})
                    root = ET.parse(osc.core.http_GET(url)).getroot()
                    alldisabled = True
                    for node in root.findall('status'):
                        if node.get('code') != 'disabled':
                            alldisabled = False
                    if alldisabled:
                        self.logger.debug(
                            "all repos disabled, using originproject %s" %
                            originproject)
                    else:
                        originproject = None
                else:
                    mproject = node.attrib['name']
                    # packages are only a link to packagename.incidentnr
                    (linkprj, linkpkg) = self._get_linktarget(dst_project, pkg)
                    if linkpkg is not None and linkprj == dst_project:
                        self.logger.debug("%s/%s links to %s" %
                                          (dst_project, pkg, linkpkg))
                        regex = re.compile(r'.*\.(\d+)$')
                        m = regex.match(linkpkg)
                        if m is None:
                            raise MaintenanceError(
                                "%s/%s -> %s/%s is not a proper maintenance link (must match /%s/)"
                                % (dst_project, pkg, linkprj, linkpkg,
                                   regex.pattern))
                        incident = m.group(1)
                        self.logger.debug("is maintenance incident %s" %
                                          incident)

                        originproject = "%s:%s" % (mproject, incident)
                        originpackage = pkg + '.' + dst_project.replace(
                            ':', '_')

                        origin_srcinfo = self.get_sourceinfo(
                            originproject, originpackage)
                        if origin_srcinfo is None:
                            raise MaintenanceError(
                                "%s/%s invalid" %
                                (originproject, originpackage))

                        # find the map of maintenance incident repos to destination repos
                        originrepos = self.findrepos(originproject,
                                                     origin_srcinfo,
                                                     dst_project, dst_srcinfo)
                        mapped = dict()
                        for mr in originrepos:
                            mapped[(mr.dstrepo, mr.arch)] = mr

                        self.logger.debug("mapping: %s", pformat(mapped))

                        # map the repos of the original request to the maintenance incident repos
                        matchrepos = set()
                        for mr in myrepos:
                            if not (mr.dstrepo, mr.arch) in mapped:
                                # sometimes a previously released maintenance
                                # update didn't cover all architectures. We can
                                # only ignore that then.
                                self.logger.warning(
                                    "couldn't find repo %s/%s in %s/%s" %
                                    (mr.dstrepo, mr.arch, originproject,
                                     originpackage))
                                continue
                            matchrepos.add(
                                MR(mr.srcrepo,
                                   mapped[(mr.dstrepo, mr.arch)].srcrepo,
                                   mr.arch))

                        myrepos = matchrepos
                        dst_srcinfo = origin_srcinfo
                        self.logger.debug("new repo map: %s", pformat(myrepos))

        return (originproject, originpackage, dst_srcinfo, myrepos)

    def find_abichecker_comment(self, req):
        """Return previous comments (should be one)."""
        comments = self.commentapi.get_comments(request_id=req.reqid)
        for c in comments.values():
            m = comment_marker_re.match(c['comment'])
            if m:
                return c['id'], m.group('state'), m.group('result')
        return None, None, None

    def check_one_request(self, req):

        self.review_messages = ReviewBot.ReviewBot.DEFAULT_REVIEW_MESSAGES

        if self.no_review and not self.force and self.check_request_already_done(
                req.reqid):
            self.logger.info("skip request %s which is already done",
                             req.reqid)
            # TODO: check if the request was seen before and we
            # didn't reach a final state for too long
            return None

        commentid, state, result = self.find_abichecker_comment(req)
        ## using comments instead of db would be an option for bots
        ## that use no db
        #        if self.no_review:
        #            if state == 'done':
        #                self.logger.debug("request %s already done, result: %s"%(req.reqid, result))
        #                return

        self.dblogger.request_id = req.reqid

        self.current_request = req

        self.reports = []
        self.text_summary = ''
        try:
            ret = ReviewBot.ReviewBot.check_one_request(self, req)
        except Exception as e:
            import traceback
            self.logger.error("unhandled exception in ABI checker")
            self.logger.error(traceback.format_exc())
            ret = None

        result = None
        if ret is not None:
            state = 'done'
            result = 'accepted' if ret else 'declined'
        else:
            # we probably don't want abichecker to spam here
            # FIXME don't delete comment in this case
            #if state is None and not self.text_summary:
            #    self.text_summary = 'abichecker will take a look later'
            state = 'seen'

        self.save_reports_to_db(req, state, result)
        if ret is not None and self.text_summary == '':
            # if for some reason save_reports_to_db didn't produce a
            # summary we add one
            self.text_summary = "ABI checker result: [%s](%s/request/%s)" % (
                result, WEB_URL, req.reqid)

        if commentid and not self.dryrun:
            self.commentapi.delete(commentid)

        self.post_comment(req, state, result)

        self.review_messages = {
            'accepted': self.text_summary,
            'declined': self.text_summary
        }

        if self.no_review:
            ret = None

        self.dblogger.request_id = None

        self.current_request = None

        return ret

    def check_request_already_done(self, reqid):
        try:
            request = self.session.query(
                DB.Request).filter(DB.Request.id == reqid).one()
            if request.state == 'done':
                return True
        except sqlalchemy.orm.exc.NoResultFound as e:
            pass

        return False

    def save_reports_to_db(self, req, state, result):
        try:
            request = self.session.query(
                DB.Request).filter(DB.Request.id == req.reqid).one()
            for i in self.session.query(DB.ABICheck).filter(
                    DB.ABICheck.request_id == request.id).all():
                # yeah, we could be smarter here and update existing reports instead
                self.session.delete(i)
            self.session.flush()
            request.state = state
            request.result = result
        except sqlalchemy.orm.exc.NoResultFound as e:
            request = DB.Request(
                id=req.reqid,
                state=state,
                result=result,
            )
            self.session.add(request)
        self.session.commit()
        for r in self.reports:
            abicheck = DB.ABICheck(request=request,
                                   src_project=r.src_project,
                                   src_package=r.src_package,
                                   src_rev=r.src_rev,
                                   dst_project=r.dst_project,
                                   dst_package=r.dst_package,
                                   result=r.result)
            self.session.add(abicheck)
            self.session.commit()
            if r.result is None:
                continue
            elif r.result:
                self.text_summary += "Good news from ABI check, "
                self.text_summary += "%s seems to be ABI [compatible](%s/request/%s):\n\n" % (
                    r.dst_package, WEB_URL, req.reqid)
            else:
                self.text_summary += "Warning: bad news from ABI check, "
                self.text_summary += "%s may be ABI [**INCOMPATIBLE**](%s/request/%s):\n\n" % (
                    r.dst_package, WEB_URL, req.reqid)
            for lr in r.reports:
                libreport = DB.LibReport(
                    abicheck=abicheck,
                    src_repo=lr.src_repo,
                    src_lib=lr.src_lib,
                    dst_repo=lr.dst_repo,
                    dst_lib=lr.dst_lib,
                    arch=lr.arch,
                    htmlreport=lr.htmlreport,
                    result=lr.result,
                )
                self.session.add(libreport)
                self.session.commit()
                self.text_summary += "* %s (%s): [%s](%s/report/%d)\n" % (
                    lr.dst_lib, lr.arch, "compatible" if lr.result else
                    "***INCOMPATIBLE***", WEB_URL, libreport.id)

        self.reports = []

    def post_comment(self, req, state, result):
        if not self.text_summary:
            return

        msg = "<!-- abichecker state=%s%s -->\n" % (state, ' result=%s' %
                                                    result if result else '')
        msg += self.text_summary

        self.logger.info("add comment: %s" % msg)
        if not self.dryrun:
            #self.commentapi.delete_from_where_user(self.review_user, request_id = req.reqid)
            self.commentapi.add_comment(request_id=req.reqid, comment=msg)

    def run_abi_checker(self, libname, old, new, output):
        cmd = [
            'abi-compliance-checker', '-lib', libname, '-old', old, '-new',
            new, '-report-path', output
        ]
        self.logger.debug(cmd)
        r = subprocess.Popen(cmd, close_fds=True, cwd=CACHEDIR).wait()
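        # Exit status convention relied on here: 0 and 1 mean the comparison
        # completed (0 = compatible, 1 = incompatible); anything else is
        # treated as a failure of the tool itself.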
        if r not in (0, 1):
            self.logger.error('abi-compliance-checker failed')
            # XXX: record error
            return None
        return r == 0

    def run_abi_dumper(self, output, base, filename, debuglib):
        cmd = [
            'abi-dumper', '-o', output, '-lver',
            os.path.basename(filename), '/'.join([base, filename])
        ]
        cmd.append('/'.join([base, debuglib]))
        self.logger.debug(cmd)
        r = subprocess.Popen(cmd, close_fds=True, cwd=CACHEDIR).wait()
        if r != 0:
            self.logger.error("failed to dump %s!" % filename)
            # XXX: record error
            return False
        return True

    def extract(self, project, package, srcinfo, repo, arch):
        # fetch cpio headers
        # check file lists for library packages
        fetchlist, liblist, debuglist = self.compute_fetchlist(
            project, package, srcinfo, repo, arch)

        if not fetchlist:
            msg = "no libraries found in %s/%s %s/%s" % (project, package,
                                                         repo, arch)
            self.logger.info(msg)
            return None, None

        # mtimes in cpio are not the original ones, so we need to fetch
        # that separately :-(
        mtimes = self._getmtimes(project, package, repo, arch)

        self.logger.debug("fetchlist %s", pformat(fetchlist))
        self.logger.debug("liblist %s", pformat(liblist))
        self.logger.debug("debuglist %s", pformat(debuglist))

        debugfiles = debuglist.values()

        # fetch binary rpms
        downloaded = self.download_files(project, package, repo, arch,
                                         fetchlist, mtimes)

        # extract binary rpms
        tmpfile = os.path.join(CACHEDIR, "cpio")
        for fn in fetchlist:
            self.logger.debug("extract %s" % fn)
            with open(tmpfile, 'wb') as tmpfd:
                if fn not in downloaded:
                    raise FetchError("%s was not downloaded!" % fn)
                self.logger.debug(downloaded[fn])
                r = subprocess.call(['rpm2cpio', downloaded[fn]],
                                    stdout=tmpfd,
                                    close_fds=True)
                if r != 0:
                    raise FetchError("failed to extract %s!" % fn)
                tmpfd.close()
                os.unlink(downloaded[fn])
                cpio = CpioRead(tmpfile)
                cpio.read()
                for ch in cpio:
                    fn = ch.filename.decode('utf-8')
                    if fn.startswith('./'):  # rpm payload is relative
                        fn = fn[1:]
                    self.logger.debug("cpio fn %s", fn)
                    if fn not in liblist and fn not in debugfiles:
                        continue
                    dst = os.path.join(UNPACKDIR, project, package, repo, arch)
                    dst += fn
                    if not os.path.exists(os.path.dirname(dst)):
                        os.makedirs(os.path.dirname(dst))
                    self.logger.debug("dst %s", dst)
                    # the filehandle in the cpio archive is private so
                    # open it again
                    with open(tmpfile, 'rb') as cpiofh:
                        cpiofh.seek(ch.dataoff, os.SEEK_SET)
                        with open(dst, 'wb') as fh:
                            while True:
                                buf = cpiofh.read(4096)
                                if not buf:
                                    break
                                fh.write(buf)
        os.unlink(tmpfile)

        return liblist, debuglist

    def download_files(self, project, package, repo, arch, filenames, mtimes):
        downloaded = dict()
        for fn in filenames:
            if fn not in mtimes:
                raise FetchError(
                    "missing mtime information for %s, can't check" % fn)
            repodir = os.path.join(DOWNLOADS, package, project, repo)
            if not os.path.exists(repodir):
                os.makedirs(repodir)
            t = os.path.join(repodir, fn)
            self._get_binary_file(project, repo, arch, package, fn, t,
                                  mtimes[fn])
            downloaded[fn] = t
        return downloaded

    def _get_binary_file(self, project, repository, arch, package, filename,
                         target, mtime):
        """Get a binary file from OBS."""
        # Used to cache, but dropped as part of python3 migration.
        osc.core.get_binary_file(self.apiurl,
                                 project,
                                 repository,
                                 arch,
                                 filename,
                                 package=package,
                                 target_filename=target)

    def readRpmHeaderFD(self, fd):
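        # self.ts is expected to be an rpm.TransactionSet set up elsewhere in
        # the bot; hdrFromFdno() reads a package header from an open file.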
        h = None
        try:
            h = self.ts.hdrFromFdno(fd)
        except rpm.error as e:
            if str(e) in ("public key not available",
                          "public key not trusted",
                          "error reading package header"):
                print(str(e))
            h = None
        return h

    def _fetchcpioheaders(self, project, package, repo, arch):
        u = osc.core.makeurl(self.apiurl,
                             ['build', project, repo, arch, package],
                             ['view=cpioheaders'])
        try:
            r = osc.core.http_GET(u)
        except HTTPError as e:
            raise FetchError('failed to fetch header information: %s' % e)
        tmpfile = NamedTemporaryFile(prefix="cpio-", delete=False)
        for chunk in r:
            tmpfile.write(chunk)
        tmpfile.close()
        cpio = CpioRead(tmpfile.name)
        cpio.read()
        rpm_re = re.compile(r'(.+\.rpm)-[0-9A-Fa-f]{32}$')
        for ch in cpio:
            # ignore the error listing; note ch.filename is bytes here
            if ch.filename == b'.errors':
                continue
            # the filehandle in the cpio archive is private so
            # open it again
            with open(tmpfile.name, 'rb') as fh:
                fh.seek(ch.dataoff, os.SEEK_SET)
                h = self.readRpmHeaderFD(fh)
                if h is None:
                    raise FetchError("failed to read rpm header for %s" %
                                     ch.filename)
                m = rpm_re.match(ch.filename.decode('utf-8'))
                if m:
                    yield m.group(1), h
        os.unlink(tmpfile.name)

    def _getmtimes(self, prj, pkg, repo, arch):
        """ returns a dict of filename: mtime """
        url = osc.core.makeurl(self.apiurl, ('build', prj, repo, arch, pkg))
        try:
            root = ET.parse(osc.core.http_GET(url)).getroot()
        except HTTPError:
            return None

        return {node.attrib['filename']: node.attrib['mtime']
                for node in root.findall('binary')}

    # modified from repochecker
    def _last_build_success(self, src_project, tgt_project, src_package, rev):
        """Return the last build success XML document from OBS."""
        try:
            query = {
                'lastsuccess': 1,
                'package': src_package,
                'pathproject': tgt_project,
                'srcmd5': rev
            }
            url = osc.core.makeurl(self.apiurl,
                                   ('build', src_project, '_result'), query)
            return ET.parse(osc.core.http_GET(url)).getroot()
        except HTTPError as e:
            if e.code != 404:
                self.logger.error('ERROR in URL %s [%s]' % (url, e))
                raise
        return None

    def get_buildsuccess_repos(self, src_project, tgt_project, src_package,
                               rev):
        root = self._last_build_success(src_project, tgt_project, src_package,
                                        rev)
        if root is None:
            return None

        # build list of repos as set of (name, arch) tuples
        repos = set()
        for repo in root.findall('repository'):
            name = repo.attrib['name']
            for node in repo.findall('arch'):
                repos.add((name, node.attrib['arch']))

        self.logger.debug("success repos: %s", pformat(repos))

        return repos

    def get_dstrepos(self, project):
        url = osc.core.makeurl(self.apiurl, ('source', project, '_meta'))
        try:
            root = ET.parse(osc.core.http_GET(url)).getroot()
        except HTTPError:
            return None

        repos = set()
        for repo in root.findall('repository'):
            name = repo.attrib['name']
            if project in REPO_WHITELIST and name not in REPO_WHITELIST[
                    project]:
                continue

            for node in repo.findall('arch'):
                arch = node.text

                if project in ARCH_WHITELIST and arch not in ARCH_WHITELIST[
                        project]:
                    continue

                if project in ARCH_BLACKLIST and arch in ARCH_BLACKLIST[
                        project]:
                    continue

                repos.add((name, arch))

        return repos

    def ensure_settled(self, src_project, src_srcinfo, matchrepos):
        """ make sure current build state is final so we're not
        tricked with half finished results"""
        rmap = dict()
        results = osc.core.get_package_results(
            self.apiurl,
            src_project,
            src_srcinfo.package,
            repository=[mr.srcrepo for mr in matchrepos],
            arch=[mr.arch for mr in matchrepos])
        for result in results:
            for res, _ in osc.core.result_xml_to_dicts(result):
                if 'package' not in res or res['package'] != src_srcinfo.package:
                    continue
                rmap[(res['repository'], res['arch'])] = res

        for mr in matchrepos:
            if (mr.srcrepo, mr.arch) not in rmap:
                self.logger.warning("%s/%s had no build success" %
                                    (mr.srcrepo, mr.arch))
                raise NotReadyYet(src_project, src_srcinfo.package,
                                  "no result")
            if rmap[(mr.srcrepo, mr.arch)]['dirty']:
                self.logger.warning("%s/%s dirty" % (mr.srcrepo, mr.arch))
                raise NotReadyYet(src_project, src_srcinfo.package, "dirty")
            code = rmap[(mr.srcrepo, mr.arch)]['code']
            if code == 'broken':
                raise SourceBroken(src_project, src_srcinfo.package)
            if code not in ('succeeded', 'locked', 'excluded'):
                self.logger.warning("%s/%s not succeeded (%s)" %
                                    (mr.srcrepo, mr.arch, code))
                raise NotReadyYet(src_project, src_srcinfo.package, code)

    def findrepos(self, src_project, src_srcinfo, dst_project, dst_srcinfo):

        # get target repos that had a successful build
        dstrepos = self.get_dstrepos(dst_project)
        if dstrepos is None:
            return None

        url = osc.core.makeurl(self.apiurl, ('source', src_project, '_meta'))
        try:
            root = ET.parse(osc.core.http_GET(url)).getroot()
        except HTTPError:
            return None

        # set of source repo name, target repo name, arch
        matchrepos = set()
        # XXX: another staging hack
        if self.current_request.staging_project:
            for node in root.findall("repository[@name='standard']/arch"):
                arch = node.text
                self.logger.debug('arch %s', arch)
                matchrepos.add(MR('standard', 'standard', arch))
        else:
            for repo in root.findall('repository'):
                name = repo.attrib['name']
                path = repo.findall('path')
                if len(path) != 1:
                    self.logger.error("repo %s does not have exactly one path" % name)
                    continue
                prj = path[0].attrib['project']
                if prj == 'openSUSE:Tumbleweed':
                    prj = 'openSUSE:Factory'  # XXX: hack
                if prj != dst_project:
                    continue
                for node in repo.findall('arch'):
                    arch = node.text
                    dstname = path[0].attrib['repository']
                    if prj == 'openSUSE:Factory' and dstname == 'snapshot':
                        dstname = 'standard'  # XXX: hack
                    if (dstname, arch) in dstrepos:
                        matchrepos.add(MR(name, dstname, arch))

        if not matchrepos:
            return None
        else:
            self.logger.debug('matched repos %s', pformat(matchrepos))

        # make sure it's not dirty
        self.ensure_settled(src_project, src_srcinfo, matchrepos)

        # now check if all matched repos built successfully
        srcrepos = self.get_buildsuccess_repos(src_project, dst_project,
                                               src_srcinfo.package,
                                               src_srcinfo.verifymd5)
        if srcrepos is None:
            raise NotReadyYet(src_project, src_srcinfo.package,
                              "no build success")
        if not srcrepos:
            raise NoBuildSuccess(src_project, src_srcinfo.package,
                                 src_srcinfo.verifymd5)
        for mr in matchrepos:
            # use mr.arch here; the bare 'arch' was a leftover from the loops above
            if (mr.srcrepo, mr.arch) not in srcrepos:
                self.logger.error("%s/%s had no build success" %
                                  (mr.srcrepo, mr.arch))
                raise NoBuildSuccess(src_project, src_srcinfo.package,
                                     src_srcinfo.verifymd5)

        return matchrepos

    # common with repochecker
    def _md5_disturl(self, disturl):
        """Get the md5 from the DISTURL from a RPM file."""
        return os.path.basename(disturl).split('-')[0]

    def disturl_matches_md5(self, disturl, md5):
        return self._md5_disturl(disturl) == md5

    # This is a bit of magic: OBS lets us take the disturl md5 from a built
    # package and query the source info for that revision, which yields the
    # verifymd5 belonging to it.
    def disturl_matches(self, disturl, prj, srcinfo):
        md5 = self._md5_disturl(disturl)
        info = self.get_sourceinfo(prj, srcinfo.package, rev=md5)
        self.logger.debug(pformat(srcinfo))
        self.logger.debug(pformat(info))
        return info.verifymd5 == srcinfo.verifymd5

    def compute_fetchlist(self, prj, pkg, srcinfo, repo, arch):
        """ scan binary rpms of the specified repo for libraries.
        Returns a set of packages to fetch and the libraries found
        """
        self.logger.debug('scanning %s/%s %s/%s' % (prj, pkg, repo, arch))

        headers = self._fetchcpioheaders(prj, pkg, repo, arch)
        missing_debuginfo = set()
        lib_packages = dict()  # pkgname -> set(lib file names)
        pkgs = dict()  # pkgname -> cpiohdr, rpmhdr
        lib_aliases = dict()
        for rpmfn, h in headers:
            # skip src rpm
            if h['sourcepackage']:
                continue
            pkgname = h['name'].decode('utf-8')
            if pkgname.endswith('-32bit') or pkgname.endswith('-64bit'):
                # -32bit and -64bit packages are just repackaged, so
                # we skip them and only check the original one.
                continue
            self.logger.debug("inspecting %s", pkgname)
            if not self.disturl_matches(h['disturl'].decode('utf-8'), prj,
                                        srcinfo):
                raise DistUrlMismatch(h['disturl'].decode('utf-8'), srcinfo)
            pkgs[pkgname] = (rpmfn, h)
            if debugpkg_re.match(pkgname):
                continue
            for fn, mode, lnk in zip(h['filenames'], h['filemodes'],
                                     h['filelinktos']):
                fn = fn.decode('utf-8')
                lnk = lnk.decode('utf-8')
                if so_re.match(fn):
                    if S_ISREG(mode):
                        self.logger.debug('found lib: %s' % fn)
                        lib_packages.setdefault(pkgname, set()).add(fn)
                    elif S_ISLNK(mode) and lnk is not None:
                        alias = os.path.basename(fn)
                        libname = os.path.basename(lnk)
                        self.logger.debug('found alias: %s -> %s' %
                                          (alias, libname))
                        lib_aliases.setdefault(libname, set()).add(alias)

        fetchlist = set()
        liblist = dict()
        debuglist = dict()
        # check whether debug info exists for each lib
        for pkgname in sorted(lib_packages.keys()):
            dpkgname = pkgname + '-debuginfo'
            if dpkgname not in pkgs:
                missing_debuginfo.add((prj, pkg, repo, arch, pkgname))
                continue

            # check file list of debuginfo package
            rpmfn, h = pkgs[dpkgname]
            files = set([f.decode('utf-8') for f in h['filenames']])
            ok = True
            for lib in lib_packages[pkgname]:
                libdebug = '/usr/lib/debug%s.debug' % lib
                if libdebug not in files:
                    # some new format that includes version, release and arch in debuginfo?
                    # FIXME: version and release are actually the
                    # one from the main package, sub packages may
                    # differ. BROKEN RIGHT NOW
                    # XXX: would have to actually read debuglink
                    # info to get that right so just guessing
                    arch = h['arch'].decode('utf-8')
                    if arch == 'i586':
                        arch = 'i386'
                    libdebug = '/usr/lib/debug%s-%s-%s.%s.debug' % (
                        lib, h['version'].decode('utf-8'),
                        h['release'].decode('utf-8'), arch)
                    if libdebug not in files:
                        missing_debuginfo.add(
                            (prj, pkg, repo, arch, pkgname, lib))
                        ok = False

                if ok:
                    fetchlist.add(pkgs[pkgname][0])
                    fetchlist.add(rpmfn)
                    liblist.setdefault(lib, set())
                    debuglist.setdefault(lib, libdebug)
                    libname = os.path.basename(lib)
                    if libname in lib_aliases:
                        liblist[lib] |= lib_aliases[libname]

        if missing_debuginfo:
            self.logger.error('missing debuginfo: %s' %
                              pformat(missing_debuginfo))
            raise MissingDebugInfo(missing_debuginfo)

        return fetchlist, liblist, debuglist
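
# find_abichecker_comment() used above is not part of this snippet. A minimal
# sketch of how the "<!-- abichecker state=... result=... -->" marker written
# by post_comment() could be parsed back out of a comment; the regex and
# helper name below are illustrative assumptions, not the bot's actual code.
import re

ABICHECKER_MARKER_RE = re.compile(
    r'<!-- abichecker state=(?P<state>\S+)(?: result=(?P<result>\S+))? -->')

def parse_abichecker_marker(comment_text):
    """Return (state, result) from a bot comment, or (None, None)."""
    m = ABICHECKER_MARKER_RE.match(comment_text)
    if m:
        return m.group('state'), m.group('result')
    return None, None
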
class StagingReport(object):
    def __init__(self, api):
        self.api = api
        self.comment = CommentAPI(api.apiurl)

    def _package_url(self, package):
        link = '/package/live_build_log/%s/%s/%s/%s'
        link = link % (package['project'],
                       package['package'],
                       package['repository'],
                       package['arch'])
        text = '[%s](%s)' % (package['arch'], link)
        return text

    def old_enough(self, _date):
        time_delta = datetime.utcnow() - _date
        safe_margin = timedelta(hours=MARGIN_HOURS)
        return safe_margin <= time_delta

    def update_status_comment(self, project, report, force=False, only_replace=False):
        report = self.comment.add_marker(report, MARKER)
        comments = self.comment.get_comments(project_name=project)
        comment, _ = self.comment.comment_find(comments, MARKER)
        if comment:
            write_comment = (report != comment['comment'] and self.old_enough(comment['when']))
        else:
            write_comment = not only_replace

        if write_comment or force:
            if osc.conf.config['debug']:
                print('Updating comment')
            if comment:
                self.comment.delete(comment['id'])
            self.comment.add_comment(project_name=project, comment=report)

    def _report_broken_packages(self, info):
        broken_package_status = info['broken_packages']

        # Group packages by name
        groups = defaultdict(list)
        for package in broken_package_status:
            groups[package['package']].append(package)

        failing_lines = [
            '* Build failed %s (%s)' % (key, ', '.join(self._package_url(p) for p in value))
            for key, value in groups.items()
        ]

        report = '\n'.join(failing_lines[:MAX_LINES])
        if len(failing_lines) > MAX_LINES:
            report += '\n* and more (%s) ...' % (len(failing_lines) - MAX_LINES)
        return report

    def report_checks(self, info):
        links_state = {}
        for check in info['checks']:
            links_state.setdefault(check['state'], [])
            links_state[check['state']].append('[{}]({})'.format(check['name'], check['url']))

        lines = []
        failure = False
        for state, links in links_state.items():
            if len(links) > MAX_LINES:
                extra = len(links) - MAX_LINES
                links = links[:MAX_LINES]
                links.append('and {} more...'.format(extra))

            lines.append('- {}'.format(state))
            if state != 'success':
                lines.extend(['  - {}'.format(link) for link in links])
                failure = True
            else:
                lines[-1] += ': {}'.format(', '.join(links))

        return '\n'.join(lines).strip(), failure

    def report(self, project, aggregate=True, force=False):
        info = self.api.project_status(project, aggregate)

        # Do not attempt to process projects without staging info, or projects
        # in a pending state that will change before settling. This avoids
        # intermediate notifications that may end up being spammy. For
        # long-lived stagings, checks may be re-triggered multiple times and
        # thus re-enter a pending state (not seen on the first run) that is
        # not useful to report.
        if not info or not self.api.project_status_final(info):
            return

        report_broken_packages = self._report_broken_packages(info)
        report_checks, check_failure = self.report_checks(info)

        if report_broken_packages or check_failure:
            if report_broken_packages:
                report_broken_packages = 'Broken:\n\n' + report_broken_packages
            if report_checks:
                report_checks = 'Checks:\n\n' + report_checks
            report = '\n\n'.join((report_broken_packages, report_checks))
            report = report.strip()
            only_replace = False
        else:
            report = 'Congratulations! All fine now.'
            only_replace = True

        self.update_status_comment(project, report, force=force, only_replace=only_replace)

        if osc.conf.config['debug']:
            print(project)
            print('-' * len(project))
            print(report)
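
# A minimal usage sketch for the class above, assuming the osclib staging
# machinery from openSUSE/osc-plugin-factory; the apiurl and project names
# are placeholders, not part of the original example:
#
#     from osclib.stagingapi import StagingAPI
#
#     api = StagingAPI('https://api.opensuse.org', 'openSUSE:Factory')
#     StagingReport(api).report('openSUSE:Factory:Staging:A')
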
Example #20
class OpenQABot(ReviewBot.ReviewBot):
    """ check ABI of library packages
    """
    def __init__(self, *args, **kwargs):
        super(OpenQABot, self).__init__(*args, **kwargs)
        self.tgt_repo = {}
        self.project_settings = {}
        self.api_map = {}

        self.force = False
        self.openqa = None
        self.commentapi = CommentAPI(self.apiurl)
        self.update_test_builds = {}
        self.pending_target_repos = set()
        self.openqa_jobs = {}

    def gather_test_builds(self):
        for prj, u in self.tgt_repo[self.openqa.baseurl].items():
            buildnr = 0
            cjob = 0
            for j in self.jobs_for_target(u):
                # avoid going backwards in job ID
                if cjob > int(j['id']):
                    continue
                buildnr = j['settings']['BUILD']
                cjob = int(j['id'])
            self.update_test_builds[prj] = buildnr
            jobs = self.jobs_for_target(u, build=buildnr)
            self.openqa_jobs[prj] = jobs
            if self.calculate_qa_status(jobs) == QA_INPROGRESS:
                self.pending_target_repos.add(prj)

    # reimplementation from the base class
    def check_requests(self):

        if self.ibs:
            self.check_suse_incidents()

        # first calculate the latest build number for current jobs
        self.gather_test_builds()

        started = []
        # then check progress on running incidents
        for req in self.requests:
            jobs = self.request_get_openqa_jobs(req,
                                                incident=True,
                                                test_repo=True)
            ret = self.calculate_qa_status(jobs)
            if ret != QA_UNKNOWN:
                started.append(req)

        all_requests = self.requests
        self.requests = started
        self.logger.debug("check started requests")
        super(OpenQABot, self).check_requests()

        self.requests = all_requests

        skipped_one = False
        # now make sure the jobs are for current repo
        for prj, u in self.tgt_repo[self.openqa.baseurl].items():
            if prj in self.pending_target_repos:
                skipped_one = True
                continue
            self.trigger_build_for_target(prj, u)

        # do not schedule new incidents unless we finished
        # last wave
        if skipped_one:
            return
        self.logger.debug("Check all requests")
        super(OpenQABot, self).check_requests()

    # check a set of repos for their primary checksums
    @staticmethod
    def calculate_repo_hash(repos):
        import hashlib  # the py2-only 'md5' module this code used is gone in python3
        m = hashlib.md5()
        # if you want to force a new hash, change this seed value
        m.update(b'b')
        for url in repos:
            url += '/repodata/repomd.xml'
            root = ET.parse(osc.core.http_GET(url)).getroot()
            cs = root.find(
                './/{http://linux.duke.edu/metadata/repo}data[@type="primary"]/{http://linux.duke.edu/metadata/repo}checksum'
            )
            m.update(cs.text.encode('utf-8'))
        return m.hexdigest()

    def is_incident_in_testing(self, incident):
        # hard coded for now as we only run this code for SUSE Maintenance workflow
        project = 'SUSE:Maintenance:{}'.format(incident)

        xpath = "(state/@name='review') and (action/source/@project='{}' and action/@type='maintenance_release')".format(
            project)
        res = osc.core.search(self.apiurl, request=xpath)['request']
        # return the one and only (or None)
        return res.find('request')

    def calculate_incidents(self, incidents):
        """
        get incident numbers from SUSE:Maintenance:Test project
        returns dict with openQA var name : string with numbers
        """
        self.logger.debug("calculate_incidents: {}".format(pformat(incidents)))
        l_incidents = []
        for kind, prj in incidents.items():
            packages = osc.core.meta_get_packagelist(self.apiurl, prj)
            incident_ids = []  # avoid rebinding the 'incidents' argument being iterated
            # filter out incidents in staging
            for incident in packages:
                # remove patchinfo. prefix
                incident = incident.replace('_', '.').split('.')[1]
                req = self.is_incident_in_testing(incident)
                # without release request it's in staging
                if not req:
                    continue

                # skip kgraft patches from aggregation
                req_ = osc.core.Request()
                req_.read(req)
                src_prjs = {a.src_project for a in req_.actions}
                if SUSEUpdate.kgraft_target(self.apiurl, src_prjs.pop()):
                    self.logger.debug(
                        "calculate_incidents: Incident is kgraft - {} ".format(
                            incident))
                    continue

                incident_ids.append(incident)

            l_incidents.append((kind + '_TEST_ISSUES', ','.join(incident_ids)))
        self.logger.debug("Calculated incidents: {}".format(
            pformat(l_incidents)))
        return l_incidents

    def jobs_for_target(self, data, build=None):
        settings = data['settings'][0]
        values = {
            'distri': settings['DISTRI'],
            'version': settings['VERSION'],
            'arch': settings['ARCH'],
            'flavor': settings['FLAVOR'],
            'scope': 'relevant',
            'latest': '1',
        }
        if build:
            values['build'] = build
        else:
            values['test'] = data['test']
        self.logger.debug("Get jobs: {}".format(pformat(values)))
        return self.openqa.openqa_request('GET', 'jobs', values)['jobs']

    # We don't know the current BUILD and querying all jobs is too expensive,
    # so we check one known TEST first; if that job doesn't carry the proper
    # repo hash we trigger a new build, and from then on we know the BUILD.
    def trigger_build_for_target(self, prj, data):
        today = date.today().strftime("%Y%m%d")

        try:
            repohash = self.calculate_repo_hash(data['repos'])
        except HTTPError as e:
            self.logger.debug(
                "REPOHASH not calculated, got response {}".format(e))
            return

        buildnr = None
        jobs = self.jobs_for_target(data)
        for job in jobs:
            if job['settings'].get('REPOHASH', '') == repohash:
                # take the last in the row
                buildnr = job['settings']['BUILD']
        self.update_test_builds[prj] = buildnr
        # ignore old build numbers, we want a fresh run every day
        # to find regressions in the tests and to get data about
        # randomly failing tests
        if buildnr and buildnr.startswith(today):
            return

        buildnr = 0

        # not found, then check for the next free build nr
        for job in jobs:
            build = job['settings']['BUILD']
            if build and build.startswith(today):
                try:
                    nr = int(build.split('-')[1])
                    if nr > buildnr:
                        buildnr = nr
                except ValueError:
                    continue

        buildnr = "{!s}-{:d}".format(today, buildnr + 1)

        for s in data['settings']:
            # now schedule it for real
            if 'incidents' in data:
                for x, y in self.calculate_incidents(data['incidents']):
                    s[x] = y
            s['BUILD'] = buildnr
            s['REPOHASH'] = repohash
            self.logger.debug("Prepared: {}".format(pformat(s)))
            if not self.dryrun:
                try:
                    self.logger.info("Openqa isos POST {}".format(pformat(s)))
                    self.openqa.openqa_request('POST',
                                               'isos',
                                               data=s,
                                               retries=1)
                except Exception as e:
                    self.logger.error(e)
        self.update_test_builds[prj] = buildnr

    def request_get_openqa_jobs(self, req, incident=True, test_repo=False):
        ret = None
        types = {a.type for a in req.actions}
        if 'maintenance_release' in types:
            src_prjs = {a.src_project for a in req.actions}
            if len(src_prjs) != 1:
                raise Exception(
                    "can't handle maintenance_release from different incidents"
                )
            build = src_prjs.pop()
            tgt_prjs = {a.tgt_project for a in req.actions}
            ret = []
            if incident:
                ret += self.openqa_jobs[build]
            for prj in sorted(tgt_prjs):
                repo_settings = self.tgt_repo.get(self.openqa.baseurl, {})
                if test_repo and prj in repo_settings:
                    repo_jobs = self.openqa_jobs[prj]
                    ret += repo_jobs

        return ret

    def calculate_qa_status(self, jobs=None):
        if not jobs:
            return QA_UNKNOWN

        j = {}
        has_failed = False
        in_progress = False

        for job in jobs:
            if job['clone_id']:
                continue
            name = job['name']

            if name in j and int(job['id']) < int(j[name]['id']):
                continue
            j[name] = job

            if job['state'] not in ('cancelled', 'done'):
                in_progress = True
            elif job['result'] not in ('passed', 'softfailed'):
                has_failed = True

        if not j:
            return QA_UNKNOWN
        if in_progress:
            return QA_INPROGRESS
        if has_failed:
            return QA_FAILED

        return QA_PASSED

    def add_comment(self, msg, state, request_id=None, result=None):
        if not self.do_comments:
            return

        comment = "<!-- openqa state={!s}{!s} -->\n".format(
            state, ' result={!s}'.format(result) if result else '')
        comment += "\n" + msg

        info = self.find_obs_request_comment(request_id=request_id)
        comment_id = info.get('id', None)

        if state == info.get('state', 'missing'):
            lines_before = len(info['comment'].split('\n'))
            lines_after = len(comment.split('\n'))
            if lines_before == lines_after:
                self.logger.info(
                    "not worth the update, previous comment %s is state %s",
                    comment_id, info['state'])
                return

        self.logger.info("adding comment to %s, state %s result %s",
                         request_id, state, result)
        self.logger.info("message: %s", msg)
        if not self.dryrun:
            if comment_id:
                self.commentapi.delete(comment_id)
            self.commentapi.add_comment(request_id=request_id,
                                        comment=str(comment))

    # escape markdown
    @staticmethod
    def emd(text):
        return text.replace('_', r'\_')

    @staticmethod
    def get_step_url(testurl, modulename):
        failurl = testurl + '/modules/{!s}/fails'.format(modulename)
        fails = requests.get(failurl).json()
        failed_step = fails.get('first_failed_step', 1)
        return "[{!s}]({!s}#step/{!s}/{:d})".format(OpenQABot.emd(modulename),
                                                    testurl, modulename,
                                                    failed_step)

    @staticmethod
    def job_test_name(job):
        return "{!s}@{!s}".format(OpenQABot.emd(job['settings']['TEST']),
                                  OpenQABot.emd(job['settings']['MACHINE']))

    def summarize_one_openqa_job(self, job):
        testurl = osc.core.makeurl(self.openqa.baseurl,
                                   ['tests', str(job['id'])])
        if job['result'] not in ('passed', 'failed', 'softfailed'):
            rstring = job['result']
            if rstring == 'none':
                return None
            return '\n- [{!s}]({!s}) is {!s}'.format(self.job_test_name(job),
                                                     testurl, rstring)

        modstrings = []
        for module in job['modules']:
            if module['result'] != 'failed':
                continue
            modstrings.append(self.get_step_url(testurl, module['name']))

        if modstrings:
            return '\n- [{!s}]({!s}) failed in {!s}'.format(
                self.job_test_name(job), testurl, ','.join(modstrings))
        elif job['result'] == 'failed':  # rare case: fail without module fails
            return '\n- [{!s}]({!s}) failed'.format(self.job_test_name(job),
                                                    testurl)
        return ''

    def summarize_openqa_jobs(self, jobs):
        groups = {}
        for job in jobs:
            gl = "{!s}@{!s}".format(self.emd(job['group']),
                                    self.emd(job['settings']['FLAVOR']))
            if gl not in groups:
                groupurl = osc.core.makeurl(
                    self.openqa.baseurl, ['tests', 'overview'], {
                        'version': job['settings']['VERSION'],
                        'groupid': job['group_id'],
                        'flavor': job['settings']['FLAVOR'],
                        'distri': job['settings']['DISTRI'],
                        'build': job['settings']['BUILD'],
                    })
                groups[gl] = {
                    'title': "__Group [{!s}]({!s})__\n".format(gl, groupurl),
                    'passed': 0,
                    'unfinished': 0,
                    'failed': []
                }

            job_summary = self.summarize_one_openqa_job(job)
            if job_summary is None:
                groups[gl]['unfinished'] = groups[gl]['unfinished'] + 1
                continue
            # None vs ''
            if not len(job_summary):
                groups[gl]['passed'] = groups[gl]['passed'] + 1
                continue
            # if there is something to report, hold the request
            # TODO: what is this ?
            # qa_state = QA_FAILED
            # gmsg = groups[gl]

            groups[gl]['failed'].append(job_summary)

        msg = ''
        for group in sorted(groups.keys()):
            msg += "\n\n" + groups[group]['title']
            infos = []
            if groups[group]['passed']:
                infos.append("{:d} tests passed".format(
                    groups[group]['passed']))
            if len(groups[group]['failed']):
                infos.append("{:d} tests failed".format(
                    len(groups[group]['failed'])))
            if groups[group]['unfinished']:
                infos.append("{:d} unfinished tests".format(
                    groups[group]['unfinished']))
            msg += "(" + ', '.join(infos) + ")\n"
            for fail in groups[group]['failed']:
                msg += fail
        return msg.rstrip('\n')

    def check_one_request(self, req):
        ret = None

        try:
            jobs = self.request_get_openqa_jobs(req)
            qa_state = self.calculate_qa_status(jobs)
            self.logger.debug("request %s state %s", req.reqid, qa_state)
            msg = None
            if self.force or qa_state == QA_UNKNOWN:
                ret = super(OpenQABot, self).check_one_request(req)
                jobs = self.request_get_openqa_jobs(req)

                if self.force:
                    # make sure to delete previous comments if we're forcing
                    info = self.find_obs_request_comment(request_id=req.reqid)
                    if 'id' in info:
                        self.logger.debug("deleting old comment %s",
                                          info['id'])
                        if not self.dryrun:
                            self.commentapi.delete(info['id'])

                if jobs:
                    # no notification until the result is done
                    osc.core.change_review_state(
                        self.apiurl,
                        req.reqid,
                        newstate='new',
                        by_group=self.review_group,
                        by_user=self.review_user,
                        message='now testing in openQA')
                else:
                    msg = "no openQA tests defined"
                    self.add_comment(msg,
                                     'done',
                                     request_id=req.reqid,
                                     result='accepted')
                    ret = True
            elif qa_state == QA_FAILED or qa_state == QA_PASSED:
                # don't take test repo results into the calculation of total
                # this is for humans to decide which incident broke the test repo
                jobs += self.request_get_openqa_jobs(req,
                                                     incident=False,
                                                     test_repo=True)
                if self.calculate_qa_status(jobs) == QA_INPROGRESS:
                    self.logger.info(
                        "incident tests for request %s are done, but need to wait for test repo",
                        req.reqid)
                    return
                if qa_state == QA_PASSED:
                    msg = "openQA tests passed\n"
                    result = 'accepted'
                    ret = True
                else:
                    msg = "openQA tests problematic\n"
                    result = 'declined'
                    ret = False

                msg += self.summarize_openqa_jobs(jobs)
                self.add_comment(msg,
                                 'done',
                                 result=result,
                                 request_id=req.reqid)
            elif qa_state == QA_INPROGRESS:
                self.logger.info("request %s still in progress", req.reqid)
            else:
                raise Exception("unknown QA state %d", qa_state)

        except Exception:
            import traceback
            self.logger.error("unhandled exception in openQA Bot")
            self.logger.error(traceback.format_exc())
            ret = None

        return ret

    def find_obs_request_comment(self, request_id=None, project_name=None):
        """Return previous comments (should be one)."""
        if self.do_comments:
            comments = self.commentapi.get_comments(request_id=request_id,
                                                    project_name=project_name)
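            # comment_marker_re is defined at module level (not shown in this
            # snippet); it matches the "<!-- openqa state=... result=...
            # revision=... -->" marker emitted when commenting.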
            for c in comments.values():
                m = comment_marker_re.match(c['comment'])
                if m:
                    return {
                        'id': c['id'],
                        'state': m.group('state'),
                        'result': m.group('result'),
                        'comment': c['comment'],
                        'revision': m.group('revision')
                    }
        return {}

    def check_product(self, job, product_prefix):
        pmap = self.api_map[product_prefix]
        posts = []
        for arch in pmap['archs']:
            need = False
            settings = {'VERSION': pmap['version'], 'ARCH': arch}
            settings['DISTRI'] = pmap.get('distri', 'sle')
            issues = pmap.get('issues', {})
            issues['OS_TEST_ISSUES'] = issues.get('OS_TEST_ISSUES',
                                                  product_prefix)
            required_issue = pmap.get('required_issue', False)
            for key, prefix in issues.items():
                self.logger.debug("{} {}".format(key, prefix))
                if prefix + arch in job['channels']:
                    settings[key] = str(job['id'])
                    need = True
            if required_issue:
                if required_issue not in settings:
                    need = False

            if need:
                update = self.project_settings[product_prefix + arch]
                update.apiurl = self.apiurl
                update.logger = self.logger
                for j in update.settings(
                        update.maintenance_project + ':' + str(job['id']),
                        product_prefix + arch, []):
                    if not job.get('openqa_build'):
                        job['openqa_build'] = update.get_max_revision(job)
                    if not job.get('openqa_build'):
                        return []
                    j['BUILD'] += '.' + str(job['openqa_build'])
                    j.update(settings)
                    # kGraft jobs can have different version
                    if 'real_version' in j:
                        j['VERSION'] = j['real_version']
                        del j['real_version']
                    posts.append(j)
        self.logger.debug("Pmap: {} Posts: {}".format(pmap, posts))
        return posts

    def incident_openqa_jobs(self, s):
        return self.openqa.openqa_request(
            'GET', 'jobs', {
                'distri': s['DISTRI'],
                'version': s['VERSION'],
                'arch': s['ARCH'],
                'flavor': s['FLAVOR'],
                'build': s['BUILD'],
                'scope': 'relevant',
                'latest': '1'
            })['jobs']

    def check_suse_incidents(self):
        for inc in requests.get(
                'https://maintenance.suse.de/api/incident/active/').json():
            self.logger.info("Incident number: {}".format(inc))

            job = requests.get('https://maintenance.suse.de/api/incident/' +
                               inc).json()

            if job['meta']['state'] in ['final', 'gone']:
                continue
            # required in job: project, id, channels
            self.test_job(job['base'])

    def test_job(self, job):
        self.logger.debug("Called test_job with: {}".format(job))
        incident_project = str(job['project'])
        try:
            comment_info = self.find_obs_request_comment(
                project_name=incident_project)
        except HTTPError as e:
            self.logger.debug("Couldn't loaadd comments - {}".format(e))
            return
        comment_id = comment_info.get('id', None)
        comment_build = str(comment_info.get('revision', ''))

        openqa_posts = []
        for prod in self.api_map.keys():
            self.logger.debug("{} -- product in apimap".format(prod))
            openqa_posts += self.check_product(job, prod)
        openqa_jobs = []
        for s in openqa_posts:
            jobs = self.incident_openqa_jobs(s)
            # take the project comment as marker for not posting jobs
            if not jobs and comment_build != str(job['openqa_build']):
                if self.dryrun:
                    self.logger.info('WOULD POST:{}'.format(
                        pformat(json.dumps(s, sort_keys=True))))
                else:
                    self.logger.info("Posted: {}".format(
                        pformat(json.dumps(s, sort_keys=True))))
                    self.openqa.openqa_request('POST',
                                               'isos',
                                               data=s,
                                               retries=1)
                    openqa_jobs += self.incident_openqa_jobs(s)
            else:
                self.logger.info("{} got {}".format(pformat(s), len(jobs)))
                openqa_jobs += jobs

        self.openqa_jobs[incident_project] = openqa_jobs

        if not openqa_jobs:
            self.logger.debug("No openqa jobs defined")
            return
        # print openqa_jobs
        msg = self.summarize_openqa_jobs(openqa_jobs)
        state = 'seen'
        result = 'none'
        qa_status = self.calculate_qa_status(openqa_jobs)
        if qa_status == QA_PASSED:
            result = 'accepted'
            state = 'done'
        if qa_status == QA_FAILED:
            result = 'declined'
            state = 'done'
        comment = "<!-- openqa state={!s} result={!s} revision={!s} -->\n".format(
            state, result, job.get('openqa_build'))
        comment += msg

        if comment_id and state != 'done':
            self.logger.info("%s is already commented, wait until done",
                             incident_project)
            return
        if comment_info.get('comment',
                            '').rstrip('\n') == comment.rstrip('\n'):
            self.logger.info("%s comment did not change", incident_project)
            return

        self.logger.info("adding comment to %s, state %s", incident_project,
                         state)
        if not self.dryrun:
            if comment_id:
                self.logger.debug("delete comment: {}".format(comment_id))
                self.commentapi.delete(comment_id)
            self.commentapi.add_comment(project_name=str(incident_project),
                                        comment=str(comment))
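
# Hedged sketch of how self.openqa is typically constructed for a bot like
# the one above, using the python-openqa_client library; the server URL is
# a placeholder:
#
#     from openqa_client.client import OpenQA_Client
#
#     openqa = OpenQA_Client(server='https://openqa.opensuse.org')
#     jobs = openqa.openqa_request(
#         'GET', 'jobs', {'scope': 'relevant', 'latest': '1'})['jobs']
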
Example #21
class TestCommentOBS(OBSLocal.TestCase):
    def setUp(self):
        super(TestCommentOBS, self).setUp()
        self.wf = OBSLocal.FactoryWorkflow()
        self.wf.create_user('factory-auto')
        self.wf.create_user('repo-checker')
        self.wf.create_user('staging-bot')
        self.wf.create_group('factory-staging', ['staging-bot'])
        self.wf.create_project(PROJECT,
                               maintainer={'groups': ['factory-staging']})
        self.api = CommentAPI(self.apiurl)
        # Ensure different test runs operate in unique namespace.
        self.bot = '::'.join([type(self).__name__, str(random.getrandbits(8))])

    def tearDown(self):
        self.osc_user('Admin')
        del self.wf

    def test_basic(self):
        self.osc_user('staging-bot')

        self.assertFalse(self.comments_filtered(self.bot)[0])

        self.assertTrue(
            self.api.add_comment(project_name=PROJECT,
                                 comment=self.api.add_marker(
                                     COMMENT, self.bot)))
        comment, _ = self.comments_filtered(self.bot)
        self.assertTrue(comment)

        self.assertTrue(self.api.delete(comment['id']))
        self.assertFalse(self.comments_filtered(self.bot)[0])

    def test_delete_nested(self):
        self.osc_user('staging-bot')
        comment_marked = self.api.add_marker(COMMENT, self.bot)

        # Allow for existing comments by basing assertion on delta from initial count.
        comment_count = len(self.api.get_comments(project_name=PROJECT))
        self.assertFalse(self.comments_filtered(self.bot)[0])

        self.assertTrue(
            self.api.add_comment(project_name=PROJECT, comment=comment_marked))
        comment, _ = self.comments_filtered(self.bot)
        self.assertTrue(comment)

        for i in range(0, 3):
            self.assertTrue(
                self.api.add_comment(project_name=PROJECT,
                                     comment=comment_marked,
                                     parent_id=comment['id']))

        comments = self.api.get_comments(project_name=PROJECT)
        parented_count = 0
        for comment in comments.values():
            if comment['parent']:
                parented_count += 1

        self.assertEqual(parented_count, 3)
        self.assertEqual(len(comments), comment_count + 4)

        self.api.delete_from(project_name=PROJECT)
        self.assertFalse(len(self.api.get_comments(project_name=PROJECT)))

    def test_delete_batch(self):
        users = ['factory-auto', 'repo-checker', 'staging-bot']
        for user in users:
            self.osc_user(user)
            print('logged in as', user)
            bot = '::'.join([self.bot, user])
            comment = self.api.add_marker(COMMENT, bot)

            self.assertFalse(self.comments_filtered(bot)[0])
            self.assertTrue(
                self.api.add_comment(project_name=PROJECT, comment=comment))
            self.assertTrue(self.comments_filtered(bot)[0])

        # Allow for existing comments by basing assertion on delta from initial count.
        comment_count = len(self.api.get_comments(project_name=PROJECT))
        self.assertGreaterEqual(comment_count, len(users))

        self.api.delete_from_where_user(users[0], project_name=PROJECT)
        self.assertEqual(
            len(self.api.get_comments(project_name=PROJECT)), comment_count - 1)

        self.api.delete_from(project_name=PROJECT)
        self.assertFalse(len(self.api.get_comments(project_name=PROJECT)))

    def comments_filtered(self, bot):
        comments = self.api.get_comments(project_name=PROJECT)
        return self.api.comment_find(comments, bot)
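
# Hedged sketch of the marker round-trip these tests rely on: add_marker()
# embeds a hidden "<!-- bot -->" style marker into the comment text and
# comment_find() locates it again. COMMENT and PROJECT are the test module's
# constants; the apiurl is a placeholder:
#
#     api = CommentAPI('https://api.opensuse.org')
#     marked = api.add_marker(COMMENT, 'my-bot')
#     api.add_comment(project_name=PROJECT, comment=marked)
#     comments = api.get_comments(project_name=PROJECT)
#     comment, info = api.comment_find(comments, 'my-bot')
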
Example #22
    def prepare_review(self):
        # Reset for request batch.
        self.requests_map = {}
        self.groups = {}
        self.groups_build = {}

        # Manipulated in ensure_group().
        self.group = None
        self.mirrored = set()

        # Stores parsed install_check() results grouped by package.
        self.package_results = {}

        # Look for requests of interest and group by staging.
        skip_build = set()
        for request in self.requests:
            # Only interesting if request is staged.
            group = request_staged(request)
            if not group:
                self.logger.debug('{}: not staged'.format(request.reqid))
                continue

            if self.limit_group and group != self.limit_group:
                continue

            # Only interested if group has completed building.
            api = self.staging_api(request.actions[0].tgt_project)
            status = api.project_status(group, True)
            # Corrupted requests may reference non-existent projects and will
            # thus return a None status which should be considered not ready.
            if not status or str(
                    status['overall_state']) not in ('testing', 'review',
                                                     'acceptable'):
                # Not in a "ready" state.
                openQA_only = False  # Not relevant so set to False.
                if status and str(status['overall_state']) == 'failed':
                    # Exception to the rule is openQA only in failed state.
                    openQA_only = True
                    for project in api.project_status_walk(status):
                        if len(project['broken_packages']):
                            # Broken packages so not just openQA.
                            openQA_only = False
                            break

                if not self.force and not openQA_only:
                    self.logger.debug('{}: {} not ready'.format(
                        request.reqid, group))
                    continue

            # Only interested if request is in consistent state.
            selected = api.project_status_requests('selected')
            if request.reqid not in selected:
                self.logger.debug('{}: inconsistent state'.format(
                    request.reqid))

            if group not in self.groups_build:
                # Generate build hash based on hashes from relevant projects.
                builds = []
                for staging in api.staging_walk(group):
                    builds.append(ET.fromstringlist(show_results_meta(
                        self.apiurl, staging, multibuild=True,
                        repository=['standard'])).get('state'))
                builds.append(ET.fromstringlist(show_results_meta(
                    self.apiurl, api.project, multibuild=True,
                    repository=['standard'])).get('state'))

                # Include meta revision for config changes (like whitelist).
                builds.append(str(api.get_prj_meta_revision(group)))
                self.groups_build[group] = hashlib.sha1(
                    ''.join(builds).encode('utf-8')).hexdigest()[:7]

                # Determine if build has changed since last comment.
                comment_api = CommentAPI(api.apiurl)
                comments = comment_api.get_comments(project_name=group)
                _, info = comment_api.comment_find(comments, self.bot_name)
                if info and self.groups_build[group] == info.get('build'):
                    skip_build.add(group)

            if not self.force and group in skip_build:
                self.logger.debug('{}: {} build unchanged'.format(
                    request.reqid, group))
                continue

            self.requests_map[int(request.reqid)] = group

            requests = self.groups.get(group, [])
            requests.append(request)
            self.groups[group] = requests

            self.logger.debug('{}: {} ready'.format(request.reqid, group))

        # Filter out undesirable requests and ensure requests are ordered
        # together with group for efficiency.
        count_before = len(self.requests)
        self.requests = []
        for group, requests in sorted(self.groups.items()):
            self.requests.extend(requests)

        self.logger.debug('requests: {} skipped, {} queued'.format(
            count_before - len(self.requests), len(self.requests)))
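
To make the short build fingerprint above concrete, here is a hedged standalone sketch (inputs are made up; only hashlib is required):

import hashlib

def build_fingerprint(state_hashes):
    # Same construction as groups_build above: sha1 over the concatenated
    # repository state strings, truncated to seven hex digits.
    return hashlib.sha1(''.join(state_hashes).encode('utf-8')).hexdigest()[:7]

print(build_fingerprint(['c2a7cd3e', '9f1b2a44', '42']))  # a stable 7-char fingerprint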
Example #23
class StagingReport(object):
    def __init__(self, api):
        self.api = api
        self.comment = CommentAPI(api.apiurl)

    def _package_url(self, package):
        link = '/package/live_build_log/%s/%s/%s/%s'
        link = link % (package.get('project'), package.get('package'),
                       package.get('repository'), package.get('arch'))
        text = '[%s](%s)' % (package.get('arch'), link)
        return text

    def old_enough(self, _date):
        time_delta = datetime.utcnow() - _date
        safe_margin = timedelta(hours=MARGIN_HOURS)
        return safe_margin <= time_delta

    def update_status_comment(self,
                              project,
                              report,
                              force=False,
                              only_replace=False):
        report = self.comment.add_marker(report, MARKER)
        comments = self.comment.get_comments(project_name=project)
        comment, _ = self.comment.comment_find(comments, MARKER)
        if comment:
            write_comment = (report != comment['comment']
                             and self.old_enough(comment['when']))
        else:
            write_comment = not only_replace

        if write_comment or force:
            if osc.conf.config['debug']:
                print('Updating comment')
            if comment:
                self.comment.delete(comment['id'])
            self.comment.add_comment(project_name=project, comment=report)

    def _report_broken_packages(self, info):
        # Group packages by name
        groups = defaultdict(list)
        for package in info.findall('broken_packages/package'):
            groups[package.get('package')].append(package)

        failing_lines = [
            '* Build failed %s (%s)' %
            (key, ', '.join(self._package_url(p) for p in value))
            for key, value in groups.items()
        ]

        report = '\n'.join(failing_lines[:MAX_LINES])
        if len(failing_lines) > MAX_LINES:
            report += '\n* and more (%s) ...' % (len(failing_lines) - MAX_LINES)
        return report

    def report_checks(self, info):
        links_state = {}
        for check in info.findall('checks/check'):
            state = check.find('state').text
            links_state.setdefault(state, [])
            links_state[state].append('[{}]({})'.format(
                check.get('name'),
                check.find('url').text))

        lines = []
        failure = False
        for state, links in links_state.items():
            if len(links) > MAX_LINES:
                extra = len(links) - MAX_LINES
                links = links[:MAX_LINES]
                links.append('and {} more...'.format(extra))

            lines.append('- {}'.format(state))
            if state != 'success':
                lines.extend(['  - {}'.format(link) for link in links])
                failure = True
            else:
                lines[-1] += ': {}'.format(', '.join(links))

        return '\n'.join(lines).strip(), failure
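
As a hedged illustration of the grouping above, this standalone snippet mimics the `<checks>` XML shape that report_checks consumes (element names inferred from the code, values made up):

import xml.etree.ElementTree as ET

info = ET.fromstring(
    '<status><checks>'
    '<check name="openqa"><state>failure</state><url>https://example.org/t/1</url></check>'
    '<check name="rpmlint"><state>success</state><url>https://example.org/t/2</url></check>'
    '</checks></status>')

links_state = {}
for check in info.findall('checks/check'):
    state = check.find('state').text
    links_state.setdefault(state, []).append(
        '[{}]({})'.format(check.get('name'), check.find('url').text))
print(links_state)  # {'failure': ['[openqa](https://example.org/t/1)'], 'success': [...]}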

    def report(self, project, force=False):
        info = self.api.project_status(project)

        # Do not attempt to process projects without staging info, or projects
        # in a pending state that will change before settling. This avoids
        # intermediate notifications that may end up being spammy and for
        # long-lived stagings where checks may be re-triggered multiple times
        # and thus enter pending state (not seen on first run) which is not
        # useful to report.
        if info is None or not self.api.project_status_final(info):
            return

        report_broken_packages = self._report_broken_packages(info)
        report_checks, check_failure = self.report_checks(info)

        if report_broken_packages or check_failure:
            if report_broken_packages:
                report_broken_packages = 'Broken:\n\n' + report_broken_packages
            if report_checks:
                report_checks = 'Checks:\n\n' + report_checks
            report = '\n\n'.join((report_broken_packages, report_checks))
            report = report.strip()
            only_replace = False
        else:
            report = 'Congratulations! All fine now.'
            only_replace = True

        report = self.cc_list(project, info) + report
        self.update_status_comment(project,
                                   report,
                                   force=force,
                                   only_replace=only_replace)

        if osc.conf.config['debug']:
            print(project)
            print('-' * len(project))
            print(report)

    def cc_list(self, project, info):
        if not self.api.is_adi_project(project):
            return ""
        ccs = set()
        for req in info.findall('staged_requests/request'):
            ccs.add("@" + req.get('creator'))
        str = "Submitters: " + " ".join(sorted(list(ccs))) + "\n\n"
        return str
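
A hedged usage sketch for the class above; the StagingAPI construction is an assumption about the surrounding osclib code base, not part of the original example.

from osclib.stagingapi import StagingAPI  # assumed import path

api = StagingAPI(apiurl, 'openSUSE:Factory')  # assumed constructor signature
StagingReport(api).report('openSUSE:Factory:Staging:A', force=False)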
Example #24
class InstallChecker(object):
    def __init__(self, api, config):
        self.api = api
        self.config = conf.config[api.project]
        self.logger = logging.getLogger('InstallChecker')
        self.commentapi = CommentAPI(api.apiurl)

        self.arch_whitelist = self.config.get('repo_checker-arch-whitelist')
        if self.arch_whitelist:
            self.arch_whitelist = set(self.arch_whitelist.split(' '))

        self.ring_whitelist = set(self.config.get('repo_checker-binary-whitelist-ring', '').split(' '))

        self.cycle_packages = self.config.get('repo_checker-allowed-in-cycles')
        self.calculate_allowed_cycles()

        self.existing_problems = self.binary_list_existing_problem(api.project, api.cmain_repo)

    def check_required_by(self, fileinfo, provides, requiredby, built_binaries, comments):
        if requiredby.get('name') in built_binaries:
            return True
        # extract >= and the like
        provide = provides.get('dep')
        provide = provide.split(' ')[0]
        comments.append('{} provides {} required by {}'.format(fileinfo.find('name').text, provide, requiredby.get('name')))
        url = self.api.makeurl(['build', self.api.project, self.api.cmain_repo, 'x86_64', '_repository',
                                requiredby.get('name') + '.rpm'],
                               {'view': 'fileinfo_ext'})
        reverse_fileinfo = ET.parse(osc.core.http_GET(url)).getroot()
        for require in reverse_fileinfo.findall('requires_ext'):
            # extract >= and the like here too
            dep = require.get('dep').split(' ')[0]
            if dep != provide:
                continue
            for provided_by in require.findall('providedby'):
                if provided_by.get('name') in built_binaries:
                    continue
                comments.append('  also provided by {} -> ignoring'.format(provided_by.get('name')))
                return True
        comments.append('Error: missing alternative provides for {}'.format(provide))
        return False

    def check_delete_request(self, req, to_ignore, comments):
        package = req['package']
        if package in to_ignore:
            self.logger.info('Delete request for package {} ignored'.format(package))
            return True

        built_binaries = set([])
        file_infos = []
        for fileinfo in fileinfo_ext_all(self.api.apiurl, self.api.project, self.api.cmain_repo, 'x86_64', package):
            built_binaries.add(fileinfo.find('name').text)
            file_infos.append(fileinfo)

        result = True
        for fileinfo in file_infos:
            for provides in fileinfo.findall('provides_ext'):
                for requiredby in provides.findall('requiredby[@name]'):
                    result = result and self.check_required_by(fileinfo, provides, requiredby, built_binaries, comments)

        what_depends_on = depends_on(self.api.apiurl, self.api.project, self.api.cmain_repo, [package], True)

        # filter out dependency on package itself (happens with eg
        # java bootstrapping itself with previous build)
        if package in what_depends_on:
            what_depends_on.remove(package)

        if len(what_depends_on):
            comments.append('{} is still a build requirement of:\n\n- {}'.format(
                package, '\n- '.join(sorted(what_depends_on))))
            return False

        return result

    def packages_to_ignore(self, project):
        comments = self.commentapi.get_comments(project_name=project)
        ignore_re = re.compile(r'^installcheck: ignore (?P<args>.*)$', re.MULTILINE)

        # the last wins, for now we don't care who said it
        args = []
        for comment in comments.values():
            match = ignore_re.search(comment['comment'].replace('\r', ''))
            if not match:
                continue
            args = match.group('args').strip()
            # allow space and comma to separate
            args = args.replace(',', ' ').split(' ')
        return args
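
The directive format parsed above, shown on an inline comment body (a hedged, self-contained sketch; note that doubled separators survive as empty strings, exactly as in the method):

import re

ignore_re = re.compile(r'^installcheck: ignore (?P<args>.*)$', re.MULTILINE)
body = 'looks broken, skipping\r\ninstallcheck: ignore pkg-a, pkg-b pkg-c\r\n'
match = ignore_re.search(body.replace('\r', ''))
args = match.group('args').strip().replace(',', ' ').split(' ')
print(args)  # ['pkg-a', '', 'pkg-b', 'pkg-c']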

    def staging(self, project, force=False):
        api = self.api

        repository = self.api.cmain_repo

        # fetch the build ids at the beginning - mirroring takes a while
        buildids = {}
        try:
            architectures = self.target_archs(project, repository)
        except HTTPError as e:
            if e.code == 404:
                # adi projects disappear all the time, so don't worry
                return False
            raise e

        all_done = True
        for arch in architectures:
            pra = '{}/{}/{}'.format(project, repository, arch)
            buildid = self.buildid(project, repository, arch)
            if not buildid:
                self.logger.error('No build ID in {}'.format(pra))
                return False
            buildids[arch] = buildid
            url = self.report_url(project, repository, arch, buildid)
            try:
                root = ET.parse(osc.core.http_GET(url)).getroot()
                check = root.find('check[@name="installcheck"]/state')
                if check is not None and check.text != 'pending':
                    self.logger.info('{} already "{}", ignoring'.format(pra, check.text))
                else:
                    all_done = False
            except HTTPError:
                self.logger.info('{} has no status report'.format(pra))
                all_done = False

        if all_done and not force:
            return True

        repository_pairs = repository_path_expand(api.apiurl, project, repository)
        staging_pair = [project, repository]

        result = True

        status = api.project_status(project)
        if not status:
            self.logger.error('no project status for {}'.format(project))
            return False

        result_comment = []

        to_ignore = self.packages_to_ignore(project)
        meta = api.load_prj_pseudometa(status['description'])
        for req in meta['requests']:
            if req['type'] == 'delete':
                result = result and self.check_delete_request(req, to_ignore, result_comment)

        for arch in architectures:
            # hit the first repository in the target project (if existent)
            target_pair = None
            directories = []
            for pair_project, pair_repository in repository_pairs:
                # ignore repositories only inherited for config
                if repository_arch_state(self.api.apiurl, pair_project, pair_repository, arch):
                    if not target_pair and pair_project == api.project:
                        target_pair = [pair_project, pair_repository]

                    directories.append(self.mirror(pair_project, pair_repository, arch))

            if not api.is_adi_project(project):
                # For "leaky" ring packages in letter stagings, where the
                # repository setup does not include the target project, that are
                # not intended to to have all run-time dependencies satisfied.
                whitelist = self.ring_whitelist
            else:
                whitelist = self.existing_problems

            whitelist |= set(to_ignore)

            check = self.cycle_check(project, repository, arch)
            if not check.success:
                self.logger.warn('Cycle check failed')
                result_comment.append(check.comment)
                result = False

            check = self.install_check(target_pair, arch, directories, None, whitelist)
            if not check.success:
                self.logger.warn('Install check failed')
                result_comment.append(check.comment)
                result = False

        if result:
            self.report_state('success', self.gocd_url(), project, repository, buildids)
        else:
            result_comment.insert(0, 'Generated from {}\n'.format(self.gocd_url()))
            self.report_state('failure', self.upload_failure(project, result_comment), project, repository, buildids)
            self.logger.warn('Not accepting {}'.format(project))
            return False

        return result

    def upload_failure(self, project, comment):
        print(project, '\n'.join(comment))
        url = self.api.makeurl(['source', 'home:repo-checker', 'reports', project])
        osc.core.http_PUT(url, data='\n'.join(comment))

        url = self.api.apiurl.replace('api.', 'build.')
        return '{}/package/view_file/home:repo-checker/reports/{}'.format(url, project)

    def report_state(self, state, report_url, project, repository, buildids):
        architectures = self.target_archs(project, repository)
        for arch in architectures:
            self.report_pipeline(state, report_url, project, repository, arch, buildids[arch], arch == architectures[-1])

    def gocd_url(self):
        if not os.environ.get('GO_SERVER_URL'):
            # placeholder :)
            return 'http://stephan.kulow.org/'
        report_url = os.environ.get('GO_SERVER_URL').replace(':8154', '')
        return report_url + '/tab/build/detail/{}/{}/{}/{}/{}#tab-console'.format(
            os.environ.get('GO_PIPELINE_NAME'),
            os.environ.get('GO_PIPELINE_COUNTER'),
            os.environ.get('GO_STAGE_NAME'),
            os.environ.get('GO_STAGE_COUNTER'),
            os.environ.get('GO_JOB_NAME'))

    def buildid(self, project, repository, architecture):
        url = self.api.makeurl(['build', project, repository, architecture], {'view': 'status'})
        root = ET.parse(osc.core.http_GET(url)).getroot()
        buildid = root.find('buildid')
        if buildid is None:
            return False
        return buildid.text

    def report_url(self, project, repository, architecture, buildid):
        return self.api.makeurl(['status_reports', 'built', project,
                                repository, architecture, 'reports', buildid])

    def report_pipeline(self, state, report_url, project, repository, architecture, buildid, is_last):
        url = self.report_url(project, repository, architecture, buildid)
        name = 'installcheck'
        # This is a little ugly, but we don't need two failures, so report
        # success for the other archs just to mark them as visited (pending is
        # posted for all archs).
        if not is_last and state == 'failure':
            state = 'success'

        xml = self.check_xml(report_url, state, name)
        try:
            osc.core.http_POST(url, data=xml)
        except HTTPError:
            print('failed to post status to ' + url)
            sys.exit(1)

    def check_xml(self, url, state, name):
        check = ET.Element('check')
        if url:
            se = ET.SubElement(check, 'url')
            se.text = url
        se = ET.SubElement(check, 'state')
        se.text = state
        se = ET.SubElement(check, 'name')
        se.text = name
        return ET.tostring(check)
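
For reference, a standalone sketch of the payload check_xml produces (made-up URL; element order follows the construction above):

import xml.etree.ElementTree as ET

check = ET.Element('check')
ET.SubElement(check, 'url').text = 'https://gocd.example/detail#tab-console'
ET.SubElement(check, 'state').text = 'success'
ET.SubElement(check, 'name').text = 'installcheck'
print(ET.tostring(check))
# b'<check><url>https://gocd.example/detail#tab-console</url><state>success</state><name>installcheck</name></check>'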

    def target_archs(self, project, repository):
        archs = target_archs(self.api.apiurl, project, repository)

        # Check for arch whitelist and use intersection.
        if self.arch_whitelist:
            archs = list(self.arch_whitelist.intersection(set(archs)))

        # Trick to prioritize x86_64.
        return sorted(archs, reverse=True)
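
The "trick" works because plain reverse string sorting happens to put x86_64 first; a one-line check with made-up input:

print(sorted(['i586', 'ppc64le', 'aarch64', 'x86_64'], reverse=True))
# ['x86_64', 'ppc64le', 'i586', 'aarch64']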

    @memoize(ttl=60, session=True, add_invalidate=True)
    def mirror(self, project, repository, arch):
        """Call bs_mirrorfull script to mirror packages."""
        directory = os.path.join(CACHEDIR, project, repository, arch)
        if not os.path.exists(directory):
            os.makedirs(directory)

        script = os.path.join(SCRIPT_PATH, 'bs_mirrorfull')
        path = '/'.join((project, repository, arch))
        url = '{}/public/build/{}'.format(self.api.apiurl, path)
        parts = ['LC_ALL=C', 'perl', script, '--nodebug', url, directory]
        parts = [pipes.quote(part) for part in parts]

        self.logger.info('mirroring {}'.format(path))
        if os.system(' '.join(parts)):
            raise Exception('failed to mirror {}'.format(path))

        return directory

    @memoize(session=True)
    def binary_list_existing_problem(self, project, repository):
        """Determine which binaries are mentioned in repo_checker output."""
        binaries = set()

        filename = self.project_pseudometa_file_name(project, repository)
        content = project_pseudometa_file_load(self.api.apiurl, project, filename)
        if not content:
            self.logger.warn('no project_only run from which to extract existing problems')
            return binaries

        sections = self.install_check_parse(content)
        for section in sections:
            for binary in section.binaries:
                match = re.match(BINARY_REGEX, binary)
                if match:
                    binaries.add(match.group('name'))

        return binaries

    def install_check(self, target_project_pair, arch, directories,
                      ignore=None, whitelist=[], parse=False, no_filter=False):
        self.logger.info('install check: start (ignore:{}, whitelist:{}, parse:{}, no_filter:{})'.format(
            bool(ignore), len(whitelist), parse, no_filter))

        with tempfile.NamedTemporaryFile() as ignore_file:
            # Print ignored rpms on separate lines in ignore file.
            if ignore:
                for item in ignore:
                    ignore_file.write(item + '\n')
                ignore_file.flush()

            # Invoke repo_checker.pl to perform an install check.
            script = os.path.join(SCRIPT_PATH, 'repo_checker.pl')
            parts = ['LC_ALL=C', 'perl', script, arch, ','.join(directories),
                     '-f', ignore_file.name, '-w', ','.join(whitelist)]
            if no_filter:
                parts.append('--no-filter')

            parts = [pipes.quote(part) for part in parts]
            p = subprocess.Popen(' '.join(parts), shell=True,
                                 stdout=subprocess.PIPE,
                                 stderr=subprocess.PIPE, close_fds=True)
            stdout, stderr = p.communicate()

        if p.returncode:
            self.logger.info('install check: failed')
            if p.returncode == 126:
                self.logger.warn('mirror cache reset due to corruption')
                self._invalidate_all()
            elif parse:
                # Parse output for later consumption for posting comments.
                sections = self.install_check_parse(stdout)
                self.install_check_sections_group(
                    target_project_pair[0], target_project_pair[1], arch, sections)

            # Format output as markdown comment.
            parts = []

            stdout = stdout.strip()
            if stdout:
                parts.append(stdout + '\n')
            stderr = stderr.strip()
            if stderr:
                parts.append(stderr + '\n')

            header = '### [install check & file conflicts for {}]'.format(arch)
            return CheckResult(False, header + '\n\n' + ('\n' + ('-' * 80) + '\n\n').join(parts))

        self.logger.info('install check: passed')
        return CheckResult(True, None)

    def install_check_sections_group(self, project, repository, arch, sections):
        _, binary_map = package_binary_list(self.api.apiurl, project, repository, arch)

        for section in sections:
            # If switching to creating bugs, it likely makes sense to join
            # packages to form the grouping key and create shared bugs for
            # conflicts.
            # Added check for b in binary_map after encountering:
            # https://lists.opensuse.org/opensuse-buildservice/2017-08/msg00035.html
            # Under normal circumstances this should never occur.
            packages = set([binary_map[b] for b in section.binaries if b in binary_map])
            for package in packages:
                self.package_results.setdefault(package, [])
                self.package_results[package].append(section)

    def install_check_parse(self, output):
        section = None
        text = None

        # Loop over lines and parse into chunks assigned to binaries.
        for line in output.splitlines(True):
            if line.startswith(' '):
                if section:
                    text += line
            else:
                if section:
                    yield InstallSection(section, text)

                match = re.match(INSTALL_REGEX, line)
                if match:
                    # Remove empty groups since regex matches different patterns.
                    binaries = [b for b in match.groups() if b is not None]
                    section = binaries
                    text = line
                else:
                    section = None

        if section:
            yield InstallSection(section, text)
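
A hedged, self-contained run of the parser logic above on sample repo_checker.pl output; INSTALL_REGEX is defined outside this excerpt, so the pattern below is only an assumption about its shape:

import re

INSTALL_REGEX = r"^(?:can't install (.*?)|found conflict of (.*?) with (.*?)):$"  # assumed
output = ("can't install foo-1.0-1.x86_64:\n"
          "  nothing provides libbar.so.1 needed by foo\n")
for line in output.splitlines(True):
    match = re.match(INSTALL_REGEX, line)
    if match:
        print([b for b in match.groups() if b is not None])  # ['foo-1.0-1.x86_64']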

    def calculate_allowed_cycles(self):
        self.allowed_cycles = []
        if self.cycle_packages:
            for comma_list in self.cycle_packages.split(';'):
                self.allowed_cycles.append(comma_list.split(','))
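
The config value parsed above is a semicolon-separated list of comma-separated package groups; a minimal sketch with a made-up value:

cycle_packages = 'gcc,gcc8;python,python-base'  # made-up example value
allowed_cycles = [group.split(',') for group in cycle_packages.split(';')]
print(allowed_cycles)  # [['gcc', 'gcc8'], ['python', 'python-base']]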

    def cycle_check(self, project, repository, arch):
        self.logger.info('cycle check: start %s/%s/%s' % (project, repository, arch))
        comment = []

        depinfo = builddepinfo(self.api.apiurl, project, repository, arch, order=False)
        for cycle in depinfo.findall('cycle'):
            for package in cycle.findall('package'):
                package = package.text
                allowed = False
                for acycle in self.allowed_cycles:
                    if package in acycle:
                        allowed = True
                        break
                if not allowed:
                    cycled = [p.text for p in cycle.findall('package')]
                    comment.append('Package {} appears in cycle {}'.format(package, '/'.join(cycled)))

        if len(comment):
            # New cycles, post comment.
            self.logger.info('cycle check: failed')
            return CheckResult(False, '\n'.join(comment) + '\n')

        self.logger.info('cycle check: passed')
        return CheckResult(True, None)

    def project_pseudometa_file_name(self, project, repository):
        filename = 'repo_checker'

        main_repo = Config.get(self.api.apiurl, project).get('main-repo')
        if not main_repo:
            filename += '.' + repository

        return filename
Example #25
class ABIChecker(ReviewBot.ReviewBot):
    """ check ABI of library packages
    """

    def __init__(self, *args, **kwargs):
        ReviewBot.ReviewBot.__init__(self, *args, **kwargs)

        self.no_review = False
        self.force = False

        self.ts = rpm.TransactionSet()
        self.ts.setVSFlags(rpm._RPMVSF_NOSIGNATURES)

        # reports of source submission
        self.reports = []
        # textual report summary for use in accept/decline message
        # or comments
        self.text_summary = ''

        self.session = DB.db_session()

        self.dblogger = LogToDB(self.session)

        self.logger.addFilter(self.dblogger)

        self.commentapi = CommentAPI(self.apiurl)

    def check_source_submission(self, src_project, src_package, src_rev, dst_project, dst_package):

        # happens for maintenance incidents
        if dst_project is None and src_package == 'patchinfo':
            return None

        if dst_project in PROJECT_BLACKLIST:
            self.logger.info(PROJECT_BLACKLIST[dst_project])
#            self.text_summary += PROJECT_BLACKLIST[dst_project] + "\n"
            return True

        # default is to accept the review, just leave a note if
        # there were problems.
        ret = True

        ReviewBot.ReviewBot.check_source_submission(self, src_project, src_package, src_rev, dst_project, dst_package)

        report = Report(src_project, src_package, src_rev, dst_project, dst_package, [], None)

        dst_srcinfo = self.get_sourceinfo(dst_project, dst_package)
        self.logger.debug('dest sourceinfo %s', pformat(dst_srcinfo))
        if dst_srcinfo is None:
            msg = "%s/%s seems to be a new package, no need to review"%(dst_project, dst_package)
            self.logger.info(msg)
            self.text_summary += msg + "\n"
            self.reports.append(report)
            return True
        src_srcinfo = self.get_sourceinfo(src_project, src_package, src_rev)
        self.logger.debug('src sourceinfo %s', pformat(src_srcinfo))
        if src_srcinfo is None:
            msg = "%s/%s@%s does not exist!? can't check"%(src_project, src_package, src_rev)
            self.logger.error(msg)
            self.text_summary += msg + "\n"
            self.reports.append(report)
            return False

        if os.path.exists(UNPACKDIR):
            shutil.rmtree(UNPACKDIR)

        try:
            # compute list of common repos to find out what to compare
            myrepos = self.findrepos(src_project, src_srcinfo, dst_project, dst_srcinfo)
        except NoBuildSuccess as e:
            self.logger.info(e)
            self.text_summary += "**Error**: %s\n"%e
            self.reports.append(report)
            return False
        except NotReadyYet as e:
            self.logger.info(e)
            self.reports.append(report)
            return None
        except SourceBroken as e:
            self.logger.error(e)
            self.text_summary += "**Error**: %s\n"%e
            self.reports.append(report)
            return False

        if not myrepos:
            self.text_summary += "**Error**: %s does not build against %s, can't check library ABIs\n\n"%(src_project, dst_project)
            self.logger.info("no matching repos, can't compare")
            self.reports.append(report)
            return False

        # *** beware of nasty maintenance stuff ***
        # if the destination is a maintained project we need to
        # mangle our comparison target and the repo mapping
        try:
            originproject, originpackage, origin_srcinfo, new_repo_map = self._maintenance_hack(dst_project, dst_srcinfo, myrepos)
            if originproject is not None:
                dst_project = originproject
            if originpackage is not None:
                dst_package = originpackage
            if origin_srcinfo is not None:
                dst_srcinfo = origin_srcinfo
            if new_repo_map is not None:
                myrepos = new_repo_map
        except MaintenanceError as e:
            self.text_summary += "**Error**: %s\n\n"%e
            self.logger.error('%s', e)
            self.reports.append(report)
            return False
        except NoBuildSuccess as e:
            self.logger.info(e)
            self.text_summary += "**Error**: %s\n"%e
            self.reports.append(report)
            return False
        except NotReadyYet as e:
            self.logger.info(e)
            self.reports.append(report)
            return None
        except SourceBroken as e:
            self.logger.error(e)
            self.text_summary += "**Error**: %s\n"%e
            self.reports.append(report)
            return False

        notes = []
        libresults = []

        overall = None

        missing_debuginfo = []

        for mr in myrepos:
            try:
                dst_libs = self.extract(dst_project, dst_package, dst_srcinfo, mr.dstrepo, mr.arch)
                # nothing to fetch, so no libs
                if dst_libs is None:
                    continue
            except DistUrlMismatch as e:
                self.logger.error("%s/%s %s/%s: %s"%(dst_project, dst_package, mr.dstrepo, mr.arch, e))
                if ret == True: # need to check again
                    ret = None
                continue
            except MissingDebugInfo as e:
                missing_debuginfo.append(str(e))
                ret = False
                continue
            except FetchError as e:
                self.logger.error(e)
                if ret == True: # need to check again
                    ret = None
                continue

            try:
                src_libs = self.extract(src_project, src_package, src_srcinfo, mr.srcrepo, mr.arch)
                if src_libs is None:
                    if dst_libs:
                        self.text_summary += "*Warning*: the submission does not contain any libs anymore\n\n"
                    continue
            except DistUrlMismatch as e:
                self.logger.error("%s/%s %s/%s: %s"%(src_project, src_package, mr.srcrepo, mr.arch, e))
                if ret == True: # need to check again
                    ret = None
                continue
            except MissingDebugInfo as e:
                missing_debuginfo.append(str(e))
                ret = False
                continue
            except FetchError as e:
                self.logger.error(e)
                if ret == True: # need to check again
                    ret = None
                continue

            # create reverse index for aliases in the source project
            src_aliases = dict()
            for lib in src_libs.keys():
                for a in src_libs[lib]:
                    src_aliases.setdefault(a, set()).add(lib)

            # for each library in the destination project check if the same lib
            # exists in the source project. If not check the aliases (symlinks)
            # to catch soname changes. Generate pairs of matching libraries.
            pairs = set()
            for lib in dst_libs.keys():
                if lib in src_libs:
                    pairs.add((lib, lib))
                else:
                    self.logger.debug("%s not found in submission, checking aliases", lib)
                    found = False
                    for a in dst_libs[lib]:
                        if a in src_aliases:
                            for l in src_aliases[a]:
                                pairs.add((lib, l))
                                found = True
                    if not found:
                        self.text_summary += "*Warning*: %s no longer packaged\n\n"%lib

            self.logger.debug("to diff: %s", pformat(pairs))

            # for each pair dump and compare the abi
            for old, new in pairs:
                # abi dump of old lib (old version lives in the destination project)
                old_base = os.path.join(UNPACKDIR, dst_project, dst_package, mr.dstrepo, mr.arch)
                old_dump = os.path.join(CACHEDIR, 'old.dump')
                # abi dump of new lib (new version comes from the source project)
                new_base = os.path.join(UNPACKDIR, src_project, src_package, mr.srcrepo, mr.arch)
                new_dump = os.path.join(CACHEDIR, 'new.dump')

                def cleanup():
                    if os.path.exists(old_dump):
                        os.unlink(old_dump)
                    if os.path.exists(new_dump):
                        os.unlink(new_dump)

                cleanup()

                # we just need that to pass a name to abi checker
                m = so_re.match(old)
                htmlreport = 'report-%s-%s-%s-%s-%s-%08x.html'%(mr.srcrepo, os.path.basename(old), mr.dstrepo, os.path.basename(new), mr.arch, int(time.time()))

                # run abichecker
                if m \
                    and self.run_abi_dumper(old_dump, old_base, old) \
                    and self.run_abi_dumper(new_dump, new_base, new):
                        reportfn = os.path.join(CACHEDIR, htmlreport)
                        r = self.run_abi_checker(m.group(1), old_dump, new_dump, reportfn)
                        if r is not None:
                            self.logger.debug('report saved to %s, compatible: %d', reportfn, r)
                            libresults.append(LibResult(mr.srcrepo, os.path.basename(old), mr.dstrepo, os.path.basename(new), mr.arch, htmlreport, r))
                            if overall is None:
                                overall = r
                            elif overall and not r:
                                overall = r
                else:
                    self.logger.error('failed to compare %s <> %s'%(old,new))
                    self.text_summary += "**Error**: ABI check failed on %s vs %s\n\n"%(old, new)
                    if ret == True: # need to check again
                        ret = None

                cleanup()

        if missing_debuginfo:
            self.text_summary += 'debug information is missing for the following packages, can\'t check:\n<pre>'
            self.text_summary += ''.join(missing_debuginfo)
            self.text_summary += '</pre>\nplease enable debug info in your project config.\n'

        self.reports.append(report._replace(result = overall, reports = libresults))

        # upload reports

        if os.path.exists(UNPACKDIR):
            shutil.rmtree(UNPACKDIR)

        return ret

    def _maintenance_hack(self, dst_project, dst_srcinfo, myrepos):
        pkg = dst_srcinfo.package
        originproject = None
        originpackage = None

        # find the maintenance project
        url = osc.core.makeurl(self.apiurl, ('search', 'project', 'id'),
            "match=(maintenance/maintains/@project='%s'+and+attribute/@name='%s')"%(dst_project, osc.conf.config['maintenance_attribute']))
        root = ET.parse(osc.core.http_GET(url)).getroot()
        if root is not None:
            node = root.find('project')
            if node is not None:
                # check if target project is a project link where the
                # sources don't actually build (like openSUSE:...:Update). That
                # is the case if no update was released yet.
                # XXX: TODO: do check for whether the package builds here first
                originproject = self.get_originproject(dst_project, pkg)
                if originproject is not None:
                    self.logger.debug("origin project %s", originproject)
                    url = osc.core.makeurl(self.apiurl, ('build', dst_project, '_result'), { 'package': pkg })
                    root = ET.parse(osc.core.http_GET(url)).getroot()
                    alldisabled = True
                    for node in root.findall('status'):
                        if node.get('code') != 'disabled':
                            alldisabled = False
                    if alldisabled:
                        self.logger.debug("all repos disabled, using originproject %s"%originproject)
                    else:
                        originproject = None
                else:
                    mproject = node.attrib['name']
                    # packages are only a link to packagename.incidentnr
                    (linkprj, linkpkg) = self._get_linktarget(dst_project, pkg)
                    if linkpkg is not None and linkprj == dst_project:
                        self.logger.debug("%s/%s links to %s"%(dst_project, pkg, linkpkg))
                        regex = re.compile(r'.*\.(\d+)$')
                        m = regex.match(linkpkg)
                        if m is None:
                            raise MaintenanceError("%s/%s -> %s/%s is not a proper maintenance link (must match /%s/)"%(dst_project, pkg, linkprj, linkpkg, regex.pattern))
                        incident = m.group(1)
                        self.logger.debug("is maintenance incident %s"%incident)

                        originproject = "%s:%s"%(mproject, incident)
                        originpackage = pkg+'.'+dst_project.replace(':', '_')

                        origin_srcinfo = self.get_sourceinfo(originproject, originpackage)
                        if origin_srcinfo is None:
                            raise MaintenanceError("%s/%s invalid"%(originproject, originpackage))

                        # find the map of maintenance incident repos to destination repos
                        originrepos = self.findrepos(originproject, origin_srcinfo, dst_project, dst_srcinfo)
                        mapped = dict()
                        for mr in originrepos:
                            mapped[(mr.dstrepo, mr.arch)] = mr

                        self.logger.debug("mapping: %s", pformat(mapped))

                        # map the repos of the original request to the maintenance incident repos
                        matchrepos = set()
                        for mr in myrepos:
                            if (mr.dstrepo, mr.arch) not in mapped:
                                # sometimes a previously released maintenance
                                # update didn't cover all architectures. We can
                                # only ignore that then.
                                self.logger.warn("couldn't find repo %s/%s in %s/%s"%(mr.dstrepo, mr.arch, originproject, originpackage))
                                continue
                            matchrepos.add(MR(mr.srcrepo, mapped[(mr.dstrepo, mr.arch)].srcrepo, mr.arch))

                        myrepos = matchrepos
                        dst_srcinfo = origin_srcinfo
                        self.logger.debug("new repo map: %s", pformat(myrepos))

        return (originproject, originpackage, dst_srcinfo, myrepos)


    def find_abichecker_comment(self, req):
        """Return previous comments (should be one)."""
        comments = self.commentapi.get_comments(request_id=req.reqid)
        for c in comments.values():
            m = comment_marker_re.match(c['comment'])
            if m:
                return c['id'], m.group('state'), m.group('result')
        return None, None, None
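
comment_marker_re is defined outside this excerpt; based on the marker written by post_comment() further down, a hedged reconstruction and round-trip looks like this:

import re

comment_marker_re = re.compile(  # assumed shape, mirroring post_comment() below
    r'<!-- abichecker state=(?P<state>\S+)(?: result=(?P<result>\S+))? -->')
body = '<!-- abichecker state=done result=accepted -->\nABI checker result: ...'
m = comment_marker_re.match(body)
print(m.group('state'), m.group('result'))  # done accepted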

    def check_one_request(self, req):

        self.review_messages = ReviewBot.ReviewBot.DEFAULT_REVIEW_MESSAGES

        if self.no_review and not self.force and self.check_request_already_done(req.reqid):
            self.logger.info("skip request %s which is already done", req.reqid)
            # TODO: check if the request was seen before and we
            # didn't reach a final state for too long
            return None

        commentid, state, result = self.find_abichecker_comment(req)
## using comments instead of db would be an option for bots
## that use no db
#        if self.no_review:
#            if state == 'done':
#                self.logger.debug("request %s already done, result: %s"%(req.reqid, result))
#                return

        self.dblogger.request_id = req.reqid

        self.reports = []
        self.text_summary = ''
        try:
            ret = ReviewBot.ReviewBot.check_one_request(self, req)
        except Exception:
            import traceback
            self.logger.error("unhandled exception in ABI checker")
            self.logger.error(traceback.format_exc())
            ret = None

        result = None
        if ret is not None:
            state = 'done'
            result = 'accepted' if ret else 'declined'
        else:
            # we probably don't want abichecker to spam here
            # FIXME don't delete comment in this case
            #if state is None and not self.text_summary:
            #    self.text_summary = 'abichecker will take a look later'
            state = 'seen'

        self.save_reports_to_db(req, state, result)
        if ret is not None and self.text_summary == '':
            # if for some reason save_reports_to_db didn't produce a
            # summary we add one
            self.text_summary = "ABI checker result: [%s](%s/request/%s)"%(result, WEB_URL, req.reqid)

        if commentid and not self.dryrun:
            self.commentapi.delete(commentid)

        self.post_comment(req, state, result)

        self.review_messages = { 'accepted': self.text_summary, 'declined': self.text_summary }

        if self.no_review:
            ret = None

        self.dblogger.request_id = None

        return ret

    def check_request_already_done(self, reqid):
        try:
            request = self.session.query(DB.Request).filter(DB.Request.id == reqid).one()
            if request.state == 'done':
                return True
        except sqlalchemy.orm.exc.NoResultFound as e:
            pass

        return False

    def save_reports_to_db(self, req, state, result):
        try:
            request = self.session.query(DB.Request).filter(DB.Request.id == req.reqid).one()
            for i in self.session.query(DB.ABICheck).filter(DB.ABICheck.request_id == request.id).all():
                # yeah, we could be smarter here and update existing reports instead
                self.session.delete(i)
            self.session.flush()
            request.state = state
            request.result = result
        except sqlalchemy.orm.exc.NoResultFound as e:
            request = DB.Request(id = req.reqid,
                    state = state,
                    result = result,
                    )
            self.session.add(request)
        self.session.commit()
        for r in self.reports:
            abicheck = DB.ABICheck(
                    request = request,
                    src_project = r.src_project,
                    src_package = r.src_package,
                    src_rev = r.src_rev,
                    dst_project = r.dst_project,
                    dst_package = r.dst_package,
                    result = r.result
                    )
            self.session.add(abicheck)
            self.session.commit()
            if r.result is None:
                continue
            elif r.result:
                self.text_summary += "Good news from ABI check, "
                self.text_summary += "%s seems to be ABI [compatible](%s/request/%s):\n\n"%(r.dst_package, WEB_URL, req.reqid)
            else:
                self.text_summary += "Warning: bad news from ABI check, "
                self.text_summary += "%s may be ABI [**INCOMPATIBLE**](%s/request/%s):\n\n"%(r.dst_package, WEB_URL, req.reqid)
            for lr in r.reports:
                libreport = DB.LibReport(
                        abicheck = abicheck,
                        src_repo = lr.src_repo,
                        src_lib = lr.src_lib,
                        dst_repo = lr.dst_repo,
                        dst_lib = lr.dst_lib,
                        arch = lr.arch,
                        htmlreport = lr.htmlreport,
                        result = lr.result,
                        )
                self.session.add(libreport)
                self.session.commit()
                self.text_summary += "* %s (%s): [%s](%s/report/%d)\n"%(lr.dst_lib, lr.arch,
                    "compatible" if lr.result else "***INCOMPATIBLE***",
                    WEB_URL, libreport.id)

        self.reports = []

    def post_comment(self, req, state, result):
        if not self.text_summary:
            return

        msg = "<!-- abichecker state=%s%s -->\n"%(state, ' result=%s'%result if result else '')
        msg += self.text_summary

        self.logger.info("add comment: %s"%msg)
        if not self.dryrun:
            #self.commentapi.delete_from_where_user(self.review_user, request_id = req.reqid)
            self.commentapi.add_comment(request_id = req.reqid, comment = msg)

    def run_abi_checker(self, libname, old, new, output):
        cmd = ['abi-compliance-checker',
                '-lib', libname,
                '-old', old,
                '-new', new,
                '-report-path', output
                ]
        self.logger.debug(cmd)
        r = subprocess.Popen(cmd, close_fds=True, cwd=CACHEDIR).wait()
        if r not in (0, 1):
            self.logger.error('abi-compliance-checker failed')
            # XXX: record error
            return None
        return r == 0

    def run_abi_dumper(self, output, base, filename):
        cmd = ['abi-dumper',
                '-o', output,
                '-lver', os.path.basename(filename),
                '/'.join([base, filename])]
        debuglib = '%s/usr/lib/debug/%s.debug'%(base, filename)
        if os.path.exists(debuglib):
            cmd.append(debuglib)
        self.logger.debug(cmd)
        r = subprocess.Popen(cmd, close_fds=True, cwd=CACHEDIR).wait()
        if r != 0:
            self.logger.error("failed to dump %s!"%filename)
            # XXX: record error
            return False
        return True

    def extract(self, project, package, srcinfo, repo, arch):
        # fetch cpio headers
        # check file lists for library packages
        fetchlist, liblist = self.compute_fetchlist(project, package, srcinfo, repo, arch)

        if not fetchlist:
            msg = "no libraries found in %s/%s %s/%s"%(project, package, repo, arch)
            self.logger.info(msg)
            return None

        # mtimes in cpio are not the original ones, so we need to fetch
        # that separately :-(
        mtimes = self._getmtimes(project, package, repo, arch)

        self.logger.debug("fetchlist %s", pformat(fetchlist))
        self.logger.debug("liblist %s", pformat(liblist))

        debugfiles = set(['/usr/lib/debug%s.debug'%f for f in liblist])

        # fetch binary rpms
        downloaded = self.download_files(project, package, repo, arch, fetchlist, mtimes)

        # extract binary rpms
        tmpfile = os.path.join(CACHEDIR, "cpio")
        for fn in fetchlist:
            self.logger.debug("extract %s"%fn)
            with open(tmpfile, 'wb') as tmpfd:
                if fn not in downloaded:
                    raise FetchError("%s was not downloaded!"%fn)
                self.logger.debug(downloaded[fn])
                r = subprocess.call(['rpm2cpio', downloaded[fn]], stdout=tmpfd, close_fds=True)
                if r != 0:
                    raise FetchError("failed to extract %s!"%fn)
                tmpfd.close()
                os.unlink(downloaded[fn])
                cpio = CpioRead(tmpfile)
                cpio.read()
                for ch in cpio:
                    fn = ch.filename
                    if fn.startswith('./'): # rpm payload is relative
                        fn = fn[1:]
                    self.logger.debug("cpio fn %s", fn)
                    if fn not in liblist and fn not in debugfiles:
                        continue
                    dst = os.path.join(UNPACKDIR, project, package, repo, arch)
                    dst += fn
                    if not os.path.exists(os.path.dirname(dst)):
                        os.makedirs(os.path.dirname(dst))
                    self.logger.debug("dst %s", dst)
                    # the filehandle in the cpio archive is private so
                    # open it again
                    with open(tmpfile, 'rb') as cpiofh:
                        cpiofh.seek(ch.dataoff, os.SEEK_SET)
                        with open(dst, 'wb') as fh:
                            while True:
                                buf = cpiofh.read(4096)
                                if not buf:  # b'' at EOF
                                    break
                                fh.write(buf)
        os.unlink(tmpfile)

        return liblist

    def download_files(self, project, package, repo, arch, filenames, mtimes):
        downloaded = dict()
        for fn in filenames:
            if fn not in mtimes:
                raise FetchError("missing mtime information for %s, can't check" % fn)
            repodir = os.path.join(DOWNLOADS, package, project, repo)
            if not os.path.exists(repodir):
                os.makedirs(repodir)
            t = os.path.join(repodir, fn)
            self._get_binary_file(project, repo, arch, package, fn, t, mtimes[fn])
            downloaded[fn] = t
        return downloaded

    def _get_binary_file(self, project, repository, arch, package, filename, target, mtime):
        """Get a binary file from OBS."""
        # Used to cache, but dropped as part of python3 migration.
        osc.core.get_binary_file(self.apiurl, project, repository, arch,
                                 filename, package=package,
                                 target_filename=target)

    def readRpmHeaderFD(self, fd):
        h = None
        try:
            h = self.ts.hdrFromFdno(fd)
        except rpm.error as e:
            if str(e) == "public key not available":
                print str(e)
            if str(e) == "public key not trusted":
                print str(e)
            if str(e) == "error reading package header":
                print str(e)
            h = None
        return h

    def _fetchcpioheaders(self, project, package, repo, arch):
        u = osc.core.makeurl(self.apiurl, [ 'build', project, repo, arch, package ],
            [ 'view=cpioheaders' ])
        try:
            r = osc.core.http_GET(u)
        except HTTPError as e:
            raise FetchError('failed to fetch header information: %s'%e)
        tmpfile = NamedTemporaryFile(prefix="cpio-", delete=False)
        for chunk in r:
            tmpfile.write(chunk)
        tmpfile.close()
        cpio = CpioRead(tmpfile.name)
        cpio.read()
        rpm_re = re.compile(r'(.+\.rpm)-[0-9A-Fa-f]{32}$')
        for ch in cpio:
            # ignore errors
            if ch.filename == '.errors':
                continue
            # the filehandle in the cpio archive is private so
            # open it again
            with open(tmpfile.name, 'rb') as fh:
                fh.seek(ch.dataoff, os.SEEK_SET)
                h = self.readRpmHeaderFD(fh)
                if h is None:
                    raise FetchError("failed to read rpm header for %s"%ch.filename)
                m = rpm_re.match(ch.filename)
                if m:
                    yield m.group(1), h
        os.unlink(tmpfile.name)

    def _getmtimes(self, prj, pkg, repo, arch):
        """ returns a dict of filename: mtime """
        url = osc.core.makeurl(self.apiurl, ('build', prj, repo, arch, pkg))
        try:
            root = ET.parse(osc.core.http_GET(url)).getroot()
        except HTTPError:
            return None

        return {node.attrib['filename']: node.attrib['mtime'] for node in root.findall('binary')}

    # modified from repochecker
    def _last_build_success(self, src_project, tgt_project, src_package, rev):
        """Return the last build success XML document from OBS."""
        try:
            query = { 'lastsuccess' : 1,
                    'package' : src_package,
                    'pathproject' : tgt_project,
                    'srcmd5' : rev }
            url = osc.core.makeurl(self.apiurl, ('build', src_project, '_result'), query)
            return ET.parse(osc.core.http_GET(url)).getroot()
        except HTTPError as e:
            if e.code != 404:
                self.logger.error('ERROR in URL %s [%s]' % (url, e))
                raise
        return None

    def get_buildsuccess_repos(self, src_project, tgt_project, src_package, rev):
        root = self._last_build_success(src_project, tgt_project, src_package, rev)
        if root is None:
            return None

        # build list of repos as set of (name, arch) tuples
        repos = set()
        for repo in root.findall('repository'):
            name = repo.attrib['name']
            for node in repo.findall('arch'):
                repos.add((name, node.attrib['arch']))

        self.logger.debug("success repos: %s", pformat(repos))

        return repos

    def get_dstrepos(self, project):
        url = osc.core.makeurl(self.apiurl, ('source', project, '_meta'))
        try:
            root = ET.parse(osc.core.http_GET(url)).getroot()
        except HTTPError:
            return None

        repos = set()
        for repo in root.findall('repository'):
            name = repo.attrib['name']
            if project in REPO_WHITELIST and name not in REPO_WHITELIST[project]:
                continue

            for node in repo.findall('arch'):
                arch = node.text

                if project in ARCH_WHITELIST and arch not in ARCH_WHITELIST[project]:
                    continue

                repos.add((name, arch))

        return repos

    def ensure_settled(self, src_project, src_srcinfo, matchrepos):
        """ make sure current build state is final so we're not
        tricked with half finished results"""
        rmap = dict()
        results = osc.core.get_package_results(self.apiurl,
                src_project, src_srcinfo.package,
                repository = [ mr.srcrepo for mr in matchrepos],
                arch = [ mr.arch for mr in matchrepos])
        for result in results:
            for res, _ in osc.core.result_xml_to_dicts(result):
                if 'package' not in res or res['package'] != src_srcinfo.package:
                    continue
                rmap[(res['repository'], res['arch'])] = res

        for mr in matchrepos:
            if (mr.srcrepo, mr.arch) not in rmap:
                self.logger.warn("%s/%s had no build success"%(mr.srcrepo, mr.arch))
                raise NotReadyYet(src_project, src_srcinfo.package, "no result")
            if rmap[(mr.srcrepo, mr.arch)]['dirty']:
                self.logger.warn("%s/%s dirty"%(mr.srcrepo, mr.arch))
                raise NotReadyYet(src_project, src_srcinfo.package, "dirty")
            code = rmap[(mr.srcrepo, mr.arch)]['code']
            if code == 'broken':
                raise SourceBroken(src_project, src_srcinfo.package)
            if code not in ('succeeded', 'locked', 'excluded'):
                self.logger.warning("%s/%s not succeeded (%s)"%(mr.srcrepo, mr.arch, code))
                raise NotReadyYet(src_project, src_srcinfo.package, code)

    def findrepos(self, src_project, src_srcinfo, dst_project, dst_srcinfo):

        # get target repos that had a successful build
        dstrepos = self.get_dstrepos(dst_project)
        if dstrepos is None:
            return None

        url = osc.core.makeurl(self.apiurl, ('source', src_project, '_meta'))
        try:
            root = ET.parse(osc.core.http_GET(url)).getroot()
        except HTTPError:
            return None

        # set of source repo name, target repo name, arch
        matchrepos = set()
        for repo in root.findall('repository'):
            name = repo.attrib['name']
            path = repo.findall('path')
            if len(path) != 1:
                self.logger.error("repo %s does not have exactly one path"%name)
                continue
            prj = path[0].attrib['project']
            if prj == 'openSUSE:Tumbleweed':
                prj = 'openSUSE:Factory' # XXX: hack
            if prj != dst_project:
                continue
            for node in repo.findall('arch'):
                arch = node.text
                dstname = path[0].attrib['repository']
                if (dstname, arch) in dstrepos:
                    matchrepos.add(MR(name, dstname, arch))

        if not matchrepos:
            return None
        else:
            self.logger.debug('matched repos %s', pformat(matchrepos))

        # make sure it's not dirty
        self.ensure_settled(src_project, src_srcinfo, matchrepos)

        # now check if all matched repos built successfully
        srcrepos = self.get_buildsuccess_repos(src_project, dst_project, src_srcinfo.package, src_srcinfo.verifymd5)
        if srcrepos is None:
            raise NotReadyYet(src_project, src_srcinfo.package, "no build success")
        if not srcrepos:
            raise NoBuildSuccess(src_project, src_srcinfo.package, src_srcinfo.verifymd5)
        for mr in matchrepos:
            if (mr.srcrepo, mr.arch) not in srcrepos:
                self.logger.error("%s/%s had no build success"%(mr.srcrepo, mr.arch))
                raise NoBuildSuccess(src_project, src_srcinfo.package, src_srcinfo.verifymd5)

        return matchrepos

    # common with repochecker
    def _md5_disturl(self, disturl):
        """Get the md5 from the DISTURL from a RPM file."""
        return os.path.basename(disturl).split('-')[0]

    def disturl_matches_md5(self, disturl, md5):
        return self._md5_disturl(disturl) == md5

    # this is a bit magic: OBS allows querying the source info with the md5
    # taken from a package's DISTURL. That yields the verifymd5 that belongs
    # to that source revision.
    def disturl_matches(self, disturl, prj, srcinfo):
        md5 = self._md5_disturl(disturl)
        info = self.get_sourceinfo(prj, srcinfo.package, rev = md5)
        self.logger.debug(pformat(srcinfo))
        self.logger.debug(pformat(info))
        return info.verifymd5 == srcinfo.verifymd5
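
    # Illustration with a hypothetical DISTURL (not from a real build):
    #   obs://build.opensuse.org/openSUSE:Factory/standard/0123abcd-mypkg
    # _md5_disturl() extracts the leading '0123abcd' commit md5, which
    # disturl_matches() then resolves to a verifymd5 via get_sourceinfo().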

    def compute_fetchlist(self, prj, pkg, srcinfo, repo, arch):
        """ scan binary rpms of the specified repo for libraries.
        Returns a set of packages to fetch and the libraries found
        """
        self.logger.debug('scanning %s/%s %s/%s'%(prj, pkg, repo, arch))

        headers = self._fetchcpioheaders(prj, pkg, repo, arch)
        missing_debuginfo = set()
        lib_packages = dict() # pkgname -> set(lib file names)
        pkgs = dict() # pkgname -> cpiohdr, rpmhdr
        lib_aliases = dict()
        for rpmfn, h in headers:
            # skip src rpm
            if h['sourcepackage']:
                continue
            pkgname = h['name']
            if pkgname.endswith('-32bit') or pkgname.endswith('-64bit'):
                # -32bit and -64bit packages are just repackaged, so
                # we skip them and only check the original one.
                continue
            self.logger.debug(pkgname)
            if not self.disturl_matches(h['disturl'], prj, srcinfo):
                raise DistUrlMismatch(h['disturl'], srcinfo)
            pkgs[pkgname] = (rpmfn, h)
            if debugpkg_re.match(pkgname):
                continue
            for fn, mode, lnk in zip(h['filenames'], h['filemodes'], h['filelinktos']):
                if so_re.match(fn):
                    if S_ISREG(mode):
                        self.logger.debug('found lib: %s'%fn)
                        lib_packages.setdefault(pkgname, set()).add(fn)
                    elif S_ISLNK(mode) and lnk is not None:
                        alias = os.path.basename(fn)
                        libname = os.path.basename(lnk)
                        self.logger.debug('found alias: %s -> %s'%(alias, libname))
                        lib_aliases.setdefault(libname, set()).add(alias)

        fetchlist = set()
        liblist = dict()
        # check whether debug info exists for each lib
        for pkgname in sorted(lib_packages.keys()):
            dpkgname = pkgname+'-debuginfo'
            if dpkgname not in pkgs:
                missing_debuginfo.add((prj, pkg, repo, arch, pkgname))
                continue

            # check file list of debuginfo package
            rpmfn, h = pkgs[dpkgname]
            files = set(h['filenames'])
            ok = True
            for lib in lib_packages[pkgname]:
                fn = '/usr/lib/debug%s.debug'%lib
                if fn not in files:
                    missing_debuginfo.add((prj, pkg, repo, arch, pkgname, lib))
                    ok = False
                if ok:
                    fetchlist.add(pkgs[pkgname][0])
                    fetchlist.add(rpmfn)
                    liblist.setdefault(lib, set())
                    libname = os.path.basename(lib)
                    if libname in lib_aliases:
                        liblist[lib] |= lib_aliases[libname]

        if missing_debuginfo:
            self.logger.error('missing debuginfo: %s'%pformat(missing_debuginfo))
            raise MissingDebugInfo(missing_debuginfo)

        return fetchlist, liblist
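
# For orientation, a minimal sketch of how compute_fetchlist() might be
# driven; "checker" is assumed to be an instance of the class above and the
# project/package/repo values are placeholders, not taken from a real run.
def fetch_example(checker):
    prj, pkg, repo, arch = 'openSUSE:Factory', 'zlib', 'standard', 'x86_64'
    srcinfo = checker.get_sourceinfo(prj, pkg)
    try:
        fetchlist, liblist = checker.compute_fetchlist(prj, pkg, srcinfo, repo, arch)
    except MissingDebugInfo as e:
        checker.logger.error('debuginfo incomplete: %s' % e)
        return None
    # fetchlist: rpm file names to fetch; liblist: lib path -> alias names
    return fetchlist, liblist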
Example #26
class OpenQABot(ReviewBot.ReviewBot):

    """ check ABI of library packages
    """

    def __init__(self, *args, **kwargs):
        super(OpenQABot, self).__init__(*args, **kwargs)
        self.tgt_repo = {}
        self.project_settings = {}
        self.api_map = {}
        self.bot_name = 'openqa'
        self.force = False
        self.openqa = None
        self.commentapi = CommentAPI(self.apiurl)

    def gather_test_builds(self):
        for prj, u in self.tgt_repo[self.openqa.baseurl].items():
            buildnr = 0
            cjob = 0
            for j in self.jobs_for_target(u):
                # avoid going backwards in job ID
                if cjob > int(j['id']):
                    continue
                buildnr = j['settings']['BUILD']
                cjob = int(j['id'])
            self.update_test_builds[prj] = buildnr
            jobs = self.jobs_for_target(u, build=buildnr)
            self.openqa_jobs[prj] = jobs
            if self.calculate_qa_status(jobs) == QA_INPROGRESS:
                self.pending_target_repos.add(prj)

    # reimplementation from the base class
    def check_requests(self):

        # to be filled by repos of active
        self.incident_repos = dict()
        self.update_test_builds = {}
        self.pending_target_repos = set()
        self.openqa_jobs = {}

        if self.ibs:
            self.check_suse_incidents()
        else:
            self.check_opensuse_incidents()

        # first calculate the latest build number for current jobs
        self.gather_test_builds()

        super(OpenQABot, self).check_requests()

        # now make sure the jobs are for current repo
        for prj, u in self.tgt_repo[self.openqa.baseurl].items():
            if prj in self.pending_target_repos:
                self.logger.debug("Do not trigger for " + prj)
                continue
            self.trigger_build_for_target(prj, u)

    # check a set of repos for their primary checksums
    @staticmethod
    def calculate_repo_hash(repos, incidents):
        m = md5.new()
        # bump this value if you want to force a fresh hash
        m.update('b')
        for url in repos:
            url += '/repodata/repomd.xml'
            # a failing fetch propagates as HTTPError to the caller
            root = ET.parse(osc.core.http_GET(url)).getroot()
            cs = root.find(
                './/{http://linux.duke.edu/metadata/repo}data[@type="primary"]/{http://linux.duke.edu/metadata/repo}checksum')
            m.update(cs.text)
        # now add the open incidents
        m.update(json.dumps(incidents, sort_keys=True))
        digest = m.hexdigest()
        open_incidents = sorted(incidents.keys())
        if open_incidents:
            digest += ':' + ','.join(open_incidents)
        return digest
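
    # The returned value has the shape (hypothetical numbers):
    #   'd41d8cd98f00b204e9800998ecf8427e:12345,12346'
    # i.e. an md5 over all repomd checksums plus the sorted open incident
    # ids, so any repository or incident change yields a different REPOHASH.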

    def is_incident_in_testing(self, incident):
        # hard coded for now as we only run this code for SUSE Maintenance workflow
        project = 'SUSE:Maintenance:{}'.format(incident)

        xpath = "(state/@name='review') and (action/source/@project='{}' and action/@type='maintenance_release')".format(project)
        res = osc.core.search(self.apiurl, request=xpath)['request']
        # return the one and only (or None)
        return res.find('request')

    def calculate_incidents(self, incidents):
        """
        get incident numbers from SUSE:Maintenance:Test project
        returns dict with openQA var name : string with numbers
        """
        self.logger.debug("calculate_incidents: {}".format(pformat(incidents)))
        l_incidents = []
        for kind, prj in incidents.items():
            packages = osc.core.meta_get_packagelist(self.apiurl, prj)
            # use a distinct name to avoid shadowing the "incidents" argument
            incident_ids = []
            # filter out incidents in staging
            for incident in packages:
                # remove patchinfo. prefix
                incident = incident.replace('_', '.').split('.')[1]
                req = self.is_incident_in_testing(incident)
                # without release request it's in staging
                if not req:
                    continue

                # skip kgraft patches from aggregation
                req_ = osc.core.Request()
                req_.read(req)
                src_prjs = {a.src_project for a in req_.actions}
                if SUSEUpdate.kgraft_target(self.apiurl, src_prjs.pop()):
                    self.logger.debug(
                        "calculate_incidents: Incident is kgraft - {} ".format(incident))
                    continue

                incident_ids.append(incident)

            l_incidents.append((kind + '_TEST_ISSUES', ','.join(incident_ids)))
        self.logger.debug("Calculate incidents:{}".format(pformat(l_incidents)))
        return l_incidents
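
    # Example return value (hypothetical incident numbers; one tuple per
    # configured kind):
    #   [('OS_TEST_ISSUES', '12345,12346')]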

    def jobs_for_target(self, data, build=None):
        settings = data['settings']
        values = {
            'distri': settings['DISTRI'],
            'version': settings['VERSION'],
            'arch': settings['ARCH'],
            'flavor': settings['FLAVOR'],
            'scope': 'relevant',
            'latest': '1',
        }
        if build:
            values['build'] = build
        else:
            values['test'] = data['test']
        self.logger.debug("Get jobs: {}".format(pformat(values)))
        return self.openqa.openqa_request('GET', 'jobs', values)['jobs']

    # we don't know the current BUILD and querying all jobs is too expensive
    # so we need to check for one known TEST first
    # if that job doesn't contain the proper hash, we trigger a new one
    # and then we know the build
    def trigger_build_for_target(self, prj, data):
        today = date.today().strftime("%Y%m%d")

        try:
            repohash = self.calculate_repo_hash(data['repos'], self.incident_repos.get(prj, {}))
        except HTTPError as e:
            self.logger.debug("REPOHASH not calculated with response {}".format(e))
            return

        buildnr = None
        jobs = self.jobs_for_target(data)
        for job in jobs:
            if job['settings'].get('REPOHASH', '') == repohash:
                # take the last in the row
                buildnr = job['settings']['BUILD']
        self.update_test_builds[prj] = buildnr
        # ignore old build numbers, we want a fresh run every day
        # to find regressions in the tests and to get data about
        # randomly failing tests
        if buildnr and buildnr.startswith(today):
            return

        buildnr = 0

        # not found, then check for the next free build nr
        for job in jobs:
            build = job['settings']['BUILD']
            if build and build.startswith(today):
                try:
                    nr = int(build.split('-')[1])
                    if nr > buildnr:
                        buildnr = nr
                except ValueError:
                    continue

        buildnr = "{!s}-{:d}".format(today, buildnr + 1)

        s = data['settings']
        # now schedule it for real
        if 'incidents' in data:
            for x, y in self.calculate_incidents(data['incidents']):
                s[x] = y
        s['BUILD'] = buildnr
        s['REPOHASH'] = repohash
        self.logger.debug("Prepared: {}".format(pformat(s)))
        if not self.dryrun:
            try:
                self.logger.info("Openqa isos POST {}".format(pformat(s)))
                self.openqa.openqa_request('POST', 'isos', data=s, retries=1)
            except Exception as e:
                self.logger.error(e)
        self.update_test_builds[prj] = buildnr
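
    # BUILD numbers produced here look like '20240101-1', '20240101-2', ...:
    # at most one fresh run per day and REPOHASH, with the numeric suffix
    # bumped when a changed REPOHASH forces another run on the same day.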

    def request_get_openqa_status(self, req):
        types = {a.type for a in req.actions}
        if 'maintenance_release' not in types:
            return [], QA_UNKNOWN

        src_prjs = {a.src_project for a in req.actions}
        if len(src_prjs) != 1:
            raise Exception("can't handle maintenance_release from different incidents")
        build = src_prjs.pop()
        incident_id = build.split(':')[-1]
        tgt_prjs = {a.tgt_project for a in req.actions}
        jobs = self.openqa_jobs.get(build, [])
        qa_status = self.calculate_qa_status(jobs)
        if qa_status in (QA_UNKNOWN, QA_INPROGRESS):
            return jobs, qa_status

        # check if the repo jobs include the incident
        repo_jobs = []
        for prj in sorted(tgt_prjs):
            repo_settings = self.tgt_repo.get(self.openqa.baseurl, {})
            if prj in repo_settings:
                repo_jobs += self.openqa_jobs[prj]
        for job in repo_jobs:
            foundissue = False
            for key, value in job['settings'].items():
                if key.endswith('_TEST_ISSUES'):
                    if incident_id in value.split(','):
                        foundissue = True
            if not foundissue:
                self.logger.info("Repo job {} not for {} - ignoring".format(job['id'], incident_id))
                return jobs, QA_INPROGRESS
            # print(foundissue, incident_id, json.dumps(job['settings'], indent=4))

        jobs += repo_jobs
        return jobs, self.calculate_qa_status(jobs)

    def calculate_qa_status(self, jobs=None):
        if not jobs:
            return QA_UNKNOWN

        j = {}
        has_failed = False
        in_progress = False

        for job in jobs:
            if job['clone_id']:
                continue
            name = job['name']

            if name in j and int(job['id']) < int(j[name]['id']):
                continue
            j[name] = job

            if job['state'] not in ('cancelled', 'done'):
                in_progress = True
            else:
                if job['result'] not in ('passed', 'softfailed'):
                    has_failed = True

        if not j:
            return QA_UNKNOWN
        if in_progress:
            return QA_INPROGRESS
        if has_failed:
            return QA_FAILED

        return QA_PASSED

    # escape markdown
    @staticmethod
    def emd(text):
        return text.replace('_', r'\_')

    @staticmethod
    def get_step_url(testurl, modulename):
        failurl = testurl + '/modules/{!s}/fails'.format(quote_plus(modulename))
        fails = requests.get(failurl).json()
        failed_step = fails.get('first_failed_step', 1)
        return "[{!s}]({!s}#step/{!s}/{:d})".format(OpenQABot.emd(modulename), testurl, modulename, failed_step)

    @staticmethod
    def job_test_name(job):
        return "{!s}@{!s}".format(OpenQABot.emd(job['settings']['TEST']), OpenQABot.emd(job['settings']['MACHINE']))

    def summarize_one_openqa_job(self, job):
        testurl = osc.core.makeurl(self.openqa.baseurl, ['tests', str(job['id'])])
        if job['result'] not in ('passed', 'failed', 'softfailed'):
            rstring = job['result']
            if rstring == 'none':
                return None
            return '\n- [{!s}]({!s}) is {!s}'.format(self.job_test_name(job), testurl, rstring)

        modstrings = []
        for module in job['modules']:
            if module['result'] != 'failed':
                continue
            modstrings.append(self.get_step_url(testurl, module['name']))

        if modstrings:
            return '\n- [{!s}]({!s}) failed in {!s}'.format(self.job_test_name(job), testurl, ','.join(modstrings))
        elif job['result'] == 'failed':  # rare case: fail without module fails
            return '\n- [{!s}]({!s}) failed'.format(self.job_test_name(job), testurl)
        return ''

    def summarize_openqa_jobs(self, jobs):
        groups = {}
        for job in jobs:
            gl = "{!s}@{!s}".format(self.emd(job['group']), self.emd(job['settings']['FLAVOR']))
            if gl not in groups:
                groupurl = osc.core.makeurl(self.openqa.baseurl, ['tests', 'overview'],
                                            {'version': job['settings']['VERSION'],
                                             'groupid': job['group_id'],
                                             'flavor': job['settings']['FLAVOR'],
                                             'distri': job['settings']['DISTRI'],
                                             'build': job['settings']['BUILD'],
                                             })
                groups[gl] = {'title': "__Group [{!s}]({!s})__\n".format(gl, groupurl),
                              'passed': 0, 'unfinished': 0, 'failed': []}

            job_summary = self.summarize_one_openqa_job(job)
            if job_summary is None:
                groups[gl]['unfinished'] += 1
                continue
            # None vs ''
            if not len(job_summary):
                groups[gl]['passed'] += 1
                continue
            # if there is something to report, hold the request
            # TODO: what is this ?
            # qa_state = QA_FAILED
            # gmsg = groups[gl]

            groups[gl]['failed'].append(job_summary)

        msg = ''
        for group in sorted(groups.keys()):
            msg += "\n\n" + groups[group]['title']
            infos = []
            if groups[group]['passed']:
                infos.append("{:d} tests passed".format(groups[group]['passed']))
            if len(groups[group]['failed']):
                infos.append("{:d} tests failed".format(len(groups[group]['failed'])))
            if groups[group]['unfinished']:
                infos.append("{:d} unfinished tests".format(groups[group]['unfinished']))
            msg += "(" + ', '.join(infos) + ")\n"
            for fail in groups[group]['failed']:
                msg += fail
        return msg.rstrip('\n')
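
    # The summary is a small markdown fragment along these lines
    # (hypothetical job data):
    #   __Group [Maintenance@DVD](https://openqa.example/tests/overview?...)__
    #   (2 tests passed, 1 tests failed)
    #   - [zypper_up@64bit](https://openqa.example/tests/123) failed in boot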

    def check_one_request(self, req):
        try:
            jobs, qa_state = self.request_get_openqa_status(req)
            self.logger.debug("request %s state %s", req.reqid, qa_state)
            msg = None
            if qa_state == QA_UNKNOWN:
                incident_id = req.to_xml().findall('./action/source')[0].get('project').split(":")[-1]
                if not jobs and incident_id not in self.wait_for_build:
                    msg = "no openQA tests defined"
                    self.comment_write(state='done', message=msg, request=req, result='accepted')
                    return True
                else:
                    self.logger.debug("request {} waits for build".format(req.reqid))
            elif qa_state == QA_FAILED or qa_state == QA_PASSED:
                if qa_state == QA_PASSED:
                    msg = "openQA tests passed\n"
                    result = 'accepted'
                    ret = True
                else:
                    msg = "openQA tests problematic\n"
                    result = 'declined'
                    ret = False

                msg += self.summarize_openqa_jobs(jobs)
                self.comment_write(state='done', message=msg, result=result, request=req)
                return ret
            elif qa_state == QA_INPROGRESS:
                self.logger.info("request %s still in progress", req.reqid)
            else:
                raise Exception("unknown QA state %d", qa_state)

        except Exception:
            import traceback
            self.logger.error("unhandled exception in openQA Bot")
            self.logger.error(traceback.format_exc())
            return None

        return

    def find_obs_request_comment(self, request_id=None, project_name=None):
        """Return previous comments (should be one)."""
        comments = self.commentapi.get_comments(request_id=request_id, project_name=project_name)
        comment, info = self.commentapi.comment_find(comments, self.bot_name)
        if comment:
            # we only care about two fields
            return {'id': comment['id'], 'revision': info['revision']}

        return {}

    def check_product_arch(self, job, product_prefix, pmap, arch):
        need = False
        settings = {'VERSION': pmap['version']}
        settings['ARCH'] = arch if arch else 'x86_64'
        settings['DISTRI'] = pmap.get('distri', 'sle')
        issues = pmap.get('issues', {})
        issues['OS_TEST_ISSUES'] = issues.get('OS_TEST_ISSUES', product_prefix)
        required_issue = pmap.get('required_issue', False)
        for key, prefix in issues.items():
            # self.logger.debug("KP {} {}".format(key, prefix) + str(job))
            channel = prefix
            if arch:
                channel += arch
            if channel in job['channels']:
                settings[key] = str(job['id'])
                need = True
        if required_issue:
            if required_issue not in settings:
                need = False

        if not need:
            return []

        product_key = product_prefix
        if arch:
            product_key += arch
        update = self.project_settings[product_key]
        update.apiurl = self.apiurl
        update.logger = self.logger
        posts = []
        for j in update.settings(
                update.maintenance_project + ':' + str(job['id']),
                product_key):
            if not job.get('openqa_build'):
                job['openqa_build'] = update.get_max_revision(job)
            if not job.get('openqa_build'):
                self.wait_for_build.add(str(job['id']))
                return []
            self.incident_repos.setdefault(product_key, dict())[
                str(job['id'])] = job.get('openqa_build')
            j['BUILD'] += '.' + str(job['openqa_build'])
            j.update(settings)
            # kGraft jobs can have different version
            if 'real_version' in j:
                j['VERSION'] = j['real_version']
                del j['real_version']
            posts.append(j)
        return posts

    def check_product(self, job, product_prefix):
        pmap = self.api_map[product_prefix]
        posts = []
        if 'archs' in pmap:
            for arch in pmap['archs']:
                posts += self.check_product_arch(job, product_prefix, pmap, arch)
        else:
            posts += self.check_product_arch(job, product_prefix, pmap, None)

        self.logger.debug("Pmap: {} Posts: {}".format(pmap, posts))
        return posts

    def incident_openqa_jobs(self, s):
        return self.openqa.openqa_request(
            'GET', 'jobs',
            {
                'distri': s['DISTRI'],
                'version': s['VERSION'],
                'arch': s['ARCH'],
                'flavor': s['FLAVOR'],
                'build': s['BUILD'],
                'scope': 'relevant',
                'latest': '1'
            })['jobs']

    # for SUSE we use mesh, for openSUSE we limit the jobs to open release requests
    def check_opensuse_incidents(self):
        requests = dict()  # collecting unique requests
        self.wait_for_build = set()
        for prj in self.tgt_repo[self.openqa.baseurl].keys():
            for r in self.ids_project(prj, 'maintenance_release'):
                requests[r.reqid] = r

        # to be stored in settings
        issues = dict()
        for req in sorted(requests.keys()):
            req = requests[req]
            types = set([a.type for a in req.actions])
            if 'maintenance_release' not in types:
                continue

            src_prjs = set([a.src_project for a in req.actions])
            if len(src_prjs) != 1:
                raise Exception("can't handle maintenance_release from different incidents")
            build = src_prjs.pop()
            incident_id = build.split(':')[-1]
            tgt_prjs = set()
            for a in req.actions:
                prj = a.tgt_project
                # ignore e.g. Backports
                if prj not in self.project_settings:
                    continue

                issues.setdefault(prj, set()).add(incident_id)
                tgt_prjs.add(prj)

            self.test_job({'project': build, 'id': incident_id, 'channels': list(tgt_prjs)})

        for prj in self.tgt_repo[self.openqa.baseurl].keys():
            s = self.tgt_repo[self.openqa.baseurl][prj]['settings']
            s['OS_TEST_ISSUES'] = ','.join(sorted(issues.get(prj, set())))

    def check_suse_incidents(self):
        self.wait_for_build = set()
        for inc in requests.get('https://maintenance.suse.de/api/incident/active/').json():
            self.logger.info("Incident number: {}".format(inc))

            mesh_job = requests.get('https://maintenance.suse.de/api/incident/' + inc).json()

            if mesh_job['meta']['state'] in ['final', 'gone']:
                continue
            # required in mesh_job: project, id, channels
            self.test_job(mesh_job['base'])

    def test_job(self, mesh_job):
        self.logger.debug("Called test_job with: {}".format(mesh_job))
        incident_project = str(mesh_job['project'])
        try:
            comment_info = self.find_obs_request_comment(project_name=incident_project)
        except HTTPError as e:
            self.logger.debug("Couldn't load comments - {}".format(e))
            return
        comment_build = str(comment_info.get('revision', ''))

        openqa_posts = []
        for prod in self.api_map.keys():
            self.logger.debug("{} -- product in apimap".format(prod))
            openqa_posts += self.check_product(mesh_job, prod)
        openqa_jobs = []
        for s in openqa_posts:
            if 'skip_job' in s:
                self.wait_for_build.add(str(mesh_job['id']))
                continue
            jobs = self.incident_openqa_jobs(s)
            # take the project comment as marker for not posting jobs
            if not len(jobs) and comment_build != str(mesh_job['openqa_build']):
                if self.dryrun:
                    self.logger.info('WOULD POST:{}'.format(pformat(json.dumps(s, sort_keys=True))))
                else:
                    self.logger.info("Posted: {}".format(pformat(json.dumps(s, sort_keys=True))))
                    self.openqa.openqa_request('POST', 'isos', data=s, retries=1)
                    openqa_jobs += self.incident_openqa_jobs(s)
            else:
                self.logger.info("{} got {}".format(pformat(s), len(jobs)))
                openqa_jobs += jobs

        self.openqa_jobs[incident_project] = openqa_jobs

        if len(openqa_jobs) == 0:
            self.logger.debug("No openqa jobs defined")
            return
        # print openqa_jobs
        msg = self.summarize_openqa_jobs(openqa_jobs)
        state = 'seen'
        result = 'none'
        qa_status = self.calculate_qa_status(openqa_jobs)
        if qa_status == QA_PASSED:
            result = 'accepted'
            state = 'done'
        if qa_status == QA_FAILED:
            result = 'declined'
            state = 'done'
        self.comment_write(project=str(incident_project), state=state,
                           result=result, message=msg,
                           info_extra={'revision': str(mesh_job.get('openqa_build'))})
Example #27
class ReviewBot(object):
    """
    A generic obs request reviewer
    Inherit from this class and implement check functions for each action type:

    def check_action_<type>(self, req, action):
        return (None|True|False)
    """

    DEFAULT_REVIEW_MESSAGES = { 'accepted' : 'ok', 'declined': 'review failed' }
    REVIEW_CHOICES = ('normal', 'no', 'accept', 'accept-onpass', 'fallback-onfail', 'fallback-always')

    COMMENT_MARKER_REGEX = re.compile(r'<!-- (?P<bot>[^ ]+) state=(?P<state>[^ ]+)(?: result=(?P<result>[^ ]+))? -->')
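    # e.g. a marker this regex matches, as embedded in a bot comment:
    #   <!-- ReviewBot state=done result=accepted -->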

    # map of default config entries
    config_defaults = {
            # list of tuples (prefix, apiurl, submitrequestprefix)
            # set this if the obs instance maps another instance into its
            # namespace
            'project_namespace_api_map' : [
                ('openSUSE.org:', 'https://api.opensuse.org', 'obsrq'),
                ],
            }

    def __init__(self, apiurl = None, dryrun = False, logger = None, user = None, group = None):
        self.apiurl = apiurl
        self.ibs = apiurl.startswith('https://api.suse.de')
        self.dryrun = dryrun
        self.logger = logger
        self.review_user = user
        self.review_group = group
        self.requests = []
        self.review_messages = ReviewBot.DEFAULT_REVIEW_MESSAGES
        self._review_mode = 'normal'
        self.fallback_user = None
        self.fallback_group = None
        self.comment_api = CommentAPI(self.apiurl)
        self.bot_name = self.__class__.__name__
        self.only_one_action = False
        self.request_default_return = None
        self.comment_handler = False
        self.override_allow = True
        self.override_group_key = '{}-override-group'.format(self.bot_name.lower())

        self.load_config()

    def _load_config(self, handle = None):
        d = self.__class__.config_defaults
        y = yaml.safe_load(handle) if handle is not None else {}
        return namedtuple('BotConfig', sorted(d.keys()))(*[ y.get(p, d[p]) for p in sorted(d.keys()) ])

    def load_config(self, filename = None):
        if filename:
            with open(filename, 'r') as fh:
                self.config = self._load_config(fh)
        else:
            self.config = self._load_config()

    def staging_api(self, project):
        if project not in self.staging_apis:
            config = Config(project)
            self.staging_apis[project] = StagingAPI(self.apiurl, project)

            config.apply_remote(self.staging_apis[project])
            self.staging_config[project] = conf.config[project].copy()

        return self.staging_apis[project]

    @property
    def review_mode(self):
        return self._review_mode

    @review_mode.setter
    def review_mode(self, value):
        if value not in self.REVIEW_CHOICES:
            raise Exception("invalid review option: %s"%value)
        self._review_mode = value

    def set_request_ids(self, ids):
        for rqid in ids:
            u = osc.core.makeurl(self.apiurl, [ 'request', rqid ], { 'withfullhistory' : '1' })
            r = osc.core.http_GET(u)
            root = ET.parse(r).getroot()
            req = osc.core.Request()
            req.read(root)
            self.requests.append(req)

    # function called before requests are reviewed
    def prepare_review(self):
        pass

    def check_requests(self):
        self.staging_apis = {}
        self.staging_config = {}

        # give implementations a chance to do something before single requests
        self.prepare_review()
        for req in self.requests:
            self.logger.info("checking %s"%req.reqid)
            self.request = req

            override = self.request_override_check(req)
            if override is not None:
                good = override
            else:
                good = self.check_one_request(req)

            if self.review_mode == 'no':
                good = None
            elif self.review_mode == 'accept':
                good = True

            if good is None:
                self.logger.info("%s ignored"%req.reqid)
            elif good:
                self._set_review(req, 'accepted')
            elif self.review_mode != 'accept-onpass':
                self._set_review(req, 'declined')

    @memoize(session=True)
    def request_override_check_users(self, project):
        """Determine users allowed to override review in a comment command."""
        self.staging_api(project)
        config = self.staging_config[project]

        users = []
        group = config.get('staging-group')
        if group:
            users += group_members(self.apiurl, group)

        if self.override_group_key:
            override_group = config.get(self.override_group_key)
            if override_group:
                users += group_members(self.apiurl, override_group)

        return users

    def request_override_check(self, request):
        """Check for a comment command requesting review override."""
        if not self.override_allow:
            return None

        comments = self.comment_api.get_comments(request_id=request.reqid)
        users = self.request_override_check_users(request.actions[0].tgt_project)
        for args, who in self.comment_api.command_find(
            comments, self.review_user, 'override', users):
            message = 'overridden by {}'.format(who)
            override = args[1] or None
            if override == 'accept':
                self.review_messages['accepted'] = message
                return True

            if override == 'decline':
                self.review_messages['declined'] = message
                return False
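
    # The override is requested via a comment command on the request; with
    # command_find() above, the expected shape is roughly (assumed syntax):
    #   @<review_user> override accept
    # and only authors from the staging/override groups are honoured.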

    def _set_review(self, req, state):
        doit = self.can_accept_review(req.reqid)
        if doit is None:
            self.logger.info("can't change state, %s does not have the reviewer"%(req.reqid))

        newstate = state

        by_user = self.fallback_user
        by_group = self.fallback_group

        msg = self.review_messages[state] if state in self.review_messages else state
        self.logger.info("%s %s: %s"%(req.reqid, state, msg))

        if state == 'declined':
            if self.review_mode == 'fallback-onfail':
                self.logger.info("%s needs fallback reviewer"%req.reqid)
                self.add_review(req, by_group=by_group, by_user=by_user, msg="Automated review failed. Needs fallback reviewer.")
                newstate = 'accepted'
        elif self.review_mode == 'fallback-always':
            self.add_review(req, by_group=by_group, by_user=by_user, msg='Adding fallback reviewer')

        if doit == True:
            self.logger.debug("setting %s to %s"%(req.reqid, state))
            if not self.dryrun:
                osc.core.change_review_state(apiurl = self.apiurl,
                        reqid = req.reqid, newstate = newstate,
                        by_group=self.review_group,
                        by_user=self.review_user, message=msg)
        else:
            self.logger.debug("%s review not changed"%(req.reqid))

    # allow_duplicate=True should only be used if it makes sense to force a
    # re-review in a scenario where the bot adding the review will rerun.
    # Normally a declined review will automatically be reopened along with the
    # request, and any other bot reviews already added will not be touched
    # unless the issuing bot is rerun, which does not fit the normal workflow.
    def add_review(self, req, by_group=None, by_user=None, by_project=None, by_package=None,
                   msg=None, allow_duplicate=False):
        query = {
            'cmd': 'addreview'
        }
        if by_group:
            query['by_group'] = by_group
        elif by_user:
            query['by_user'] = by_user
        elif by_project:
            query['by_project'] = by_project
            if by_package:
                query['by_package'] = by_package
        else:
            raise osc.oscerr.WrongArgs("missing by_*")

        for r in req.reviews:
            if (r.by_group == by_group and
                r.by_project == by_project and
                r.by_package == by_package and
                r.by_user == by_user and
                # Only duplicate when allow_duplicate and state != new.
                (not allow_duplicate or r.state == 'new')):
                del query['cmd']
                self.logger.debug('skipped adding duplicate review for {}'.format(
                    '/'.join(query.values())))
                return

        u = osc.core.makeurl(self.apiurl, ['request', req.reqid], query)
        if self.dryrun:
            self.logger.info('POST %s' % u)
            return

        r = osc.core.http_POST(u, data=msg)
        code = ET.parse(r).getroot().attrib['code']
        if code != 'ok':
            raise Exception('non-ok return code: {}'.format(code))

    def check_one_request(self, req):
        """
        check all actions in one request.

        calls helper functions for each action type

        return None if nothing to do, True to accept, False to reject
        """

        # Copy original values to revert changes made to them.
        self.review_messages = self.DEFAULT_REVIEW_MESSAGES.copy()

        if self.only_one_action and len(req.actions) != 1:
            self.review_messages['declined'] = 'Only one action per request'
            return False

        if self.comment_handler is not False:
            self.comment_handler_add()

        overall = None
        for a in req.actions:
            fn = 'check_action_%s'%a.type
            if not hasattr(self, fn):
                fn = 'check_action__default'
            func = getattr(self, fn)
            ret = func(req, a)
            if ret is False or (overall is None and ret is not None):
                overall = ret
        return overall

    @staticmethod
    def _is_patchinfo(pkgname):
        return pkgname == 'patchinfo' or pkgname.startswith('patchinfo.')

    def check_action_maintenance_incident(self, req, a):
        dst_package = a.src_package
        # Ignoring patchinfo package for checking
        if self._is_patchinfo(a.src_package):
            self.logger.info("package is patchinfo, ignoring")
            return None
        # dirty obs crap
        if a.tgt_releaseproject is not None:
            ugly_suffix = '.'+a.tgt_releaseproject.replace(':', '_')
            if dst_package.endswith(ugly_suffix):
                dst_package = dst_package[:-len(ugly_suffix)]
        return self.check_source_submission(a.src_project, a.src_package, a.src_rev, a.tgt_releaseproject, dst_package)

    def check_action_maintenance_release(self, req, a):
        pkgname = a.src_package
        if self._is_patchinfo(pkgname):
            return None
        linkpkg = self._get_linktarget_self(a.src_project, pkgname)
        if linkpkg is not None:
            pkgname = linkpkg
        # packages in maintenance have links to the target. Use that
        # to find the real package name
        (linkprj, linkpkg) = self._get_linktarget(a.src_project, pkgname)
        if linkpkg is None or linkprj is None or linkprj != a.tgt_project:
            self.logger.error("%s/%s is not a link to %s"%(a.src_project, pkgname, a.tgt_project))
            return False
        else:
            pkgname = linkpkg
        return self.check_source_submission(a.src_project, a.src_package, None, a.tgt_project, pkgname)

    def check_action_submit(self, req, a):
        return self.check_source_submission(a.src_project, a.src_package, a.src_rev, a.tgt_project, a.tgt_package)

    def check_action__default(self, req, a):
        # Disable any comment handler to avoid making a comment even if
        # comment_write() is called by another bot wrapping __default().
        self.comment_handler_remove()

        message = 'unhandled request type {}'.format(a.type)
        self.logger.error(message)
        self.review_messages['accepted'] += ': ' + message
        return self.request_default_return

    def check_source_submission(self, src_project, src_package, src_rev, target_project, target_package):
        """ default implemention does nothing """
        self.logger.info("%s/%s@%s -> %s/%s"%(src_project, src_package, src_rev, target_project, target_package))
        return None

    @staticmethod
    @memoize(session=True)
    def _get_sourceinfo(apiurl, project, package, rev=None):
        query = { 'view': 'info' }
        if rev is not None:
            query['rev'] = rev
        url = osc.core.makeurl(apiurl, ('source', project, package), query=query)
        try:
            return ET.parse(osc.core.http_GET(url)).getroot()
        except (urllib2.HTTPError, urllib2.URLError):
            return None

    def get_originproject(self, project, package, rev=None):
        root = ReviewBot._get_sourceinfo(self.apiurl, project, package, rev)
        if root is None:
            return None

        originproject = root.find('originproject')
        if originproject is not None:
            return originproject.text

        return None

    def get_sourceinfo(self, project, package, rev=None):
        root = ReviewBot._get_sourceinfo(self.apiurl, project, package, rev)
        if root is None:
            return None

        props = ('package', 'rev', 'vrev', 'srcmd5', 'lsrcmd5', 'verifymd5')
        return namedtuple('SourceInfo', props)(*[ root.get(p) for p in props ])

    # TODO: what if there is more than one _link?
    def _get_linktarget_self(self, src_project, src_package):
        """ if it's a link to a package in the same project return the name of the package"""
        prj, pkg = self._get_linktarget(src_project, src_package)
        if prj is None or prj == src_project:
            return pkg

    def _get_linktarget(self, src_project, src_package):

        query = {}
        url = osc.core.makeurl(self.apiurl, ('source', src_project, src_package), query=query)
        try:
            root = ET.parse(osc.core.http_GET(url)).getroot()
        except urllib2.HTTPError:
            return (None, None)

        if root is not None:
            linkinfo = root.find("linkinfo")
            if linkinfo is not None:
                return (linkinfo.get('project'), linkinfo.get('package'))

        return (None, None)

    def _has_open_review_by(self, root, by_what, reviewer):
        states = set([review.get('state') for review in root.findall('review') if review.get(by_what) == reviewer])
        if not states:
            return None
        elif 'new' in states:
            return True
        return False

    def can_accept_review(self, request_id):
        """return True if there is a new review for the specified reviewer"""
        states = set()
        url = osc.core.makeurl(self.apiurl, ('request', str(request_id)))
        try:
            root = ET.parse(osc.core.http_GET(url)).getroot()
            if self.review_user and self._has_open_review_by(root, 'by_user', self.review_user):
                return True
            if self.review_group and self._has_open_review_by(root, 'by_group', self.review_group):
                return True
        except urllib2.HTTPError as e:
            print('ERROR in URL %s [%s]' % (url, e))
        return False

    def set_request_ids_search_review(self):
        review = None
        if self.review_user:
            review = "@by_user='******' and @state='new'" % self.review_user
        if self.review_group:
            review = osc.core.xpath_join(review, "@by_group='%s' and @state='new'" % self.review_group)
        url = osc.core.makeurl(self.apiurl, ('search', 'request'), { 'match': "state/@name='review' and review[%s]" % review, 'withfullhistory': 1 } )
        root = ET.parse(osc.core.http_GET(url)).getroot()

        self.requests = []

        for request in root.findall('request'):
            req = osc.core.Request()
            req.read(request)
            self.requests.append(req)

    # also used by openqabot
    def ids_project(self, project, typename):
        url = osc.core.makeurl(self.apiurl, ('search', 'request'),
                               { 'match': "(state/@name='review' or state/@name='new') and (action/target/@project='%s' and action/@type='%s')" % (project, typename),
                                 'withfullhistory': 1 })
        root = ET.parse(osc.core.http_GET(url)).getroot()

        ret = []

        for request in root.findall('request'):
            req = osc.core.Request()
            req.read(request)
            ret.append(req)
        return ret

    def set_request_ids_project(self, project, typename):
        self.requests = self.ids_project(project, typename)

    def comment_handler_add(self, level=logging.INFO):
        """Add handler to start recording log messages for comment."""
        self.comment_handler = CommentFromLogHandler(level)
        self.logger.addHandler(self.comment_handler)

    def comment_handler_remove(self):
        self.logger.removeHandler(self.comment_handler)

    def comment_handler_lines_deduplicate(self):
        self.comment_handler.lines = list(OrderedDict.fromkeys(self.comment_handler.lines))

    def comment_write(self, state='done', result=None, project=None, package=None,
                      request=None, message=None, identical=False, only_replace=False,
                      info_extra=None, info_extra_identical=True, bot_name_suffix=None):
        """Write comment if not similar to previous comment and replace old one.

        The state, result, and info_extra (dict) are combined to create the info
        that is passed to CommentAPI methods for creating a marker and finding
        previous comments. self.bot_name, which defaults to class, will be used
        as the primary matching key. When info_extra_identical is set to False
        info_extra will not be included when finding previous comments to
        compare message against.

        A comment from the same bot will be replaced when a new comment is
        written. The only_replace flag will restrict to only writing a comment
        if a prior one is being replaced. This can be useful for writing a final
        comment that indicates a change from previous uncompleted state, but
        only makes sense to post if a prior comment was posted.

        The project, package, and request variables control where the comment is
        placed. If no value is given the default is the request being reviewed.

        If no message is provided the content will be extracted from
        self.comment_handler.line which is provided by CommentFromLogHandler. To
        use this call comment_handler_add() at the point which messages should
        start being collected. Alternatively the self.comment_handler setting
        may be set to True to automatically set one on each request.

        The previous comment body line count is compared to see if too similar
        to bother posting another comment which is useful for avoiding
        re-posting comments that contain irrelevant minor changes. To force an
        exact match use the identical flag to replace any non-identical
        comment body.
        """
        if project:
            kwargs = {'project_name': project}
            if package:
                kwargs['package_name'] = package
        else:
            if request is None:
                request = self.request
            kwargs = {'request_id': request.reqid}
        debug_key = '/'.join(kwargs.values())

        if message is None:
            if not len(self.comment_handler.lines):
                self.logger.debug('skipping empty comment for {}'.format(debug_key))
                return
            message = '\n\n'.join(self.comment_handler.lines)

        bot_name = self.bot_name
        if bot_name_suffix:
            bot_name = '::'.join([bot_name, bot_name_suffix])

        info = {'state': state, 'result': result}
        if info_extra and info_extra_identical:
            info.update(info_extra)

        comments = self.comment_api.get_comments(**kwargs)
        comment, _ = self.comment_api.comment_find(comments, bot_name, info)

        if info_extra and not info_extra_identical:
            # Add info_extra once comment has already been matched.
            info.update(info_extra)

        message = self.comment_api.add_marker(message, bot_name, info)
        message = self.comment_api.truncate(message.strip())

        if (comment is not None and
            ((identical and
              # Remove marker from comments since handled during comment_find().
              self.comment_api.remove_marker(comment['comment']) ==
              self.comment_api.remove_marker(message)) or
             (not identical and comment['comment'].count('\n') == message.count('\n')))
        ):
            # Assume same state/result and number of lines in message is duplicate.
            self.logger.debug('previous comment too similar on {}'.format(debug_key))
            return

        if comment is None:
            self.logger.debug('broadening search to include any state on {}'.format(debug_key))
            comment, _ = self.comment_api.comment_find(comments, bot_name)
        if comment is not None:
            self.logger.debug('removing previous comment on {}'.format(debug_key))
            if not self.dryrun:
                self.comment_api.delete(comment['id'])
        elif only_replace:
            self.logger.debug('no previous comment to replace on {}'.format(debug_key))
            return

        self.logger.debug('adding comment to {}: {}'.format(debug_key, message))
        if not self.dryrun:
            self.comment_api.add_comment(comment=message, **kwargs)

        self.comment_handler_remove()
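
# A minimal sketch of a derived bot; HelloBot and its behaviour are
# illustrative only, not part of the examples above:
class HelloBot(ReviewBot):
    def check_source_submission(self, src_project, src_package, src_rev,
                                target_project, target_package):
        self.logger.info("%s/%s looks good" % (src_project, src_package))
        return True  # accept the review

# typical driver, with dryrun=True so no review state is changed:
#   bot = HelloBot(apiurl='https://api.opensuse.org', dryrun=True, logger=logger)
#   bot.set_request_ids_search_review()
#   bot.check_requests()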
Example #28
class TestAccept(unittest.TestCase):

    def setup_wf(self, description=''):
        wf = OBSLocal.FactoryWorkflow()
        wf.setup_rings()

        self.c_api = CommentAPI(wf.api.apiurl)

        staging_b = wf.create_staging('B', freeze=True)
        self.prj = staging_b.name

        self.winerq = wf.create_submit_request('devel:wine', 'wine', text='Hallo World', description=description)
        self.assertEqual(True, SelectCommand(wf.api, self.prj).perform(['wine']))
        self.comments = self.c_api.get_comments(project_name=self.prj)
        wf.create_attribute_type('OSRT', 'ProductVersion', 1)
        return wf

    def test_accept_comments(self):
        wf = self.setup_wf()

        self.assertEqual(True, AcceptCommand(wf.api).accept_all(['B']))

        # Comments are cleared up
        accepted_comments = self.c_api.get_comments(project_name=self.prj)
        self.assertEqual(len(accepted_comments), 0)

    def test_accept_bugowners(self):
        wf = self.setup_wf(description="bugowner: group:factory-staging")

        self.assertEqual(True, AcceptCommand(wf.api).accept_all(['B']))
        # we expect that the requests increase by 1 - to avoid a full search
        request = get_request(wf.apiurl, str(int(self.winerq.reqid) + 1))
        # it's in review because this is a staging workflow
        self.assertEqual(request.state.name, 'review')
        exp = '<action type="set_bugowner">\n  <target project="openSUSE:Factory" ' + \
            'package="wine" />\n  <group name="factory-staging" />\n</action>'
        self.assertEqual(request.actions[0].to_str(), exp)

    def test_accept_final_comment(self):
        wf = self.setup_wf()

        # snipe out cleanup to see the comments before the final countdown
        wf.api.staging_deactivate = MagicMock(return_value=True)

        self.assertEqual(True, AcceptCommand(wf.api).accept_all(['B']))

        comments = self.c_api.get_comments(project_name=self.prj)
        self.assertGreater(len(comments), len(self.comments))

        # check which id was added
        new_id = (set(comments.keys()) - set(self.comments.keys())).pop()
        comment = comments[new_id]['comment']
        ncomment = 'Project "{}" accepted. '.format(self.prj)
        ncomment += "The following packages have been submitted to openSUSE:Factory: wine."
        self.assertEqual(ncomment, comment)

    def test_accept_new_multibuild_package(self):
        wf = self.setup_wf()

        staging = wf.create_staging('A', freeze=True)

        project = wf.create_project('devel:gcc')
        package = OBSLocal.Package(name='gcc9', project=project)
        package.create_commit(filename='gcc9.spec')
        package.create_commit(filename='gcc9-tests.spec')
        package.create_commit('<multibuild><flavor>gcc9-tests.spec</flavor></multibuild>', filename='_multibuild')
        wf.submit_package(package)

        SelectCommand(wf.api, staging.name).perform(['gcc9'])
        ac = AcceptCommand(wf.api)
        self.assertEqual(True, ac.accept_all(['A'], True))

        # no stale links
        self.assertEqual([], package_list(wf.apiurl, staging.name))
        self.assertEqual(['gcc9', 'wine'], package_list(wf.apiurl, wf.project))

    def test_accept_new_multispec_package(self):
        wf = self.setup_wf()

        staging = wf.create_staging('A', freeze=True)

        project = wf.create_project('devel:gcc')
        package = OBSLocal.Package(name='gcc9', project=project)
        package.create_commit(filename='gcc9.spec')
        package.create_commit(filename='gcc9-tests.spec')
        wf.submit_package(package)

        SelectCommand(wf.api, staging.name).perform(['gcc9'])
        ac = AcceptCommand(wf.api)
        self.assertEqual(True, ac.accept_all(['A'], True))

        # no stale links
        self.assertEqual([], package_list(wf.apiurl, staging.name))
        self.assertEqual(['gcc9', 'gcc9-tests', 'wine'], package_list(wf.apiurl, wf.project))

    def test_accept_switch_to_multibuild_package(self):
        wf = self.setup_wf()

        staging = wf.create_staging('A', freeze=True)

        tpackage = wf.create_package('target', 'gcc9')
        tpackage.create_commit(filename='gcc9.spec')
        tpackage.create_commit(filename='gcc9-tests.spec')
        lpackage = wf.create_package('target', 'gcc9-tests')
        lpackage.create_commit('<link package="gcc9" cicount="copy" />', filename='_link')

        project = wf.create_project('devel:gcc')
        package = OBSLocal.Package(name='gcc9', project=project)
        package.create_commit(filename='gcc9.spec')
        package.create_commit(filename='gcc9-tests.spec')
        package.create_commit('<multibuild><flavor>gcc9-tests.spec</flavor></multibuild>', filename='_multibuild')

        wf.submit_package(package)

        SelectCommand(wf.api, staging.name).perform(['gcc9'])
        ac = AcceptCommand(wf.api)
        self.assertEqual(True, ac.accept_all(['A'], True))

        # no stale links
        self.assertEqual([], package_list(wf.apiurl, staging.name))
        self.assertEqual(['gcc9', 'wine'], package_list(wf.apiurl, wf.project))
Example #29
class ReviewBot(object):
    """
    A generic obs request reviewer
    Inherit from this class and implement check functions for each action type:

    def check_action_<type>(self, req, action):
        return (None|True|False)
    """

    DEFAULT_REVIEW_MESSAGES = { 'accepted' : 'ok', 'declined': 'review failed' }
    REVIEW_CHOICES = ('normal', 'no', 'accept', 'accept-onpass', 'fallback-onfail', 'fallback-always')

    COMMENT_MARKER_REGEX = re.compile(r'<!-- (?P<bot>[^ ]+) state=(?P<state>[^ ]+)(?: result=(?P<result>[^ ]+))? -->')
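    # Matches markers such as '<!-- SubmitOnlyBot state=done result=passed -->'
    # (bot name and state required, result optional).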

    # map of default config entries
    config_defaults = {
            # list of tuples (prefix, apiurl, submitrequestprefix)
            # set this if the obs instance maps another instance into its
            # namespace
            'project_namespace_api_map' : [
                ('openSUSE.org:', 'https://api.opensuse.org', 'obsrq'),
                ],
            }
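    # A config file passed to load_config() is YAML overriding these keys,
    # e.g. (hypothetical):
    #   project_namespace_api_map:
    #     - ['openSUSE.org:', 'https://api.opensuse.org', 'obsrq']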

    def __init__(self, apiurl = None, dryrun = False, logger = None, user = None, group = None):
        self.apiurl = apiurl
        self.ibs = apiurl.startswith('https://api.suse.de')
        self.dryrun = dryrun
        self.logger = logger
        self.review_user = user
        self.review_group = group
        self.requests = []
        self.review_messages = ReviewBot.DEFAULT_REVIEW_MESSAGES
        self._review_mode = 'normal'
        self.fallback_user = None
        self.fallback_group = None
        self.comment_api = CommentAPI(self.apiurl)
        self.bot_name = self.__class__.__name__
        self.only_one_action = False
        self.request_default_return = None
        self.comment_handler = False
        self.override_allow = True
        self.override_group_key = '{}-override-group'.format(self.bot_name.lower())
        self.lookup = PackageLookup(self.apiurl)

        self.load_config()

    def _load_config(self, handle = None):
        d = self.__class__.config_defaults
        y = yaml.safe_load(handle) if handle is not None else {}
        return namedtuple('BotConfig', sorted(d.keys()))(*[ y.get(p, d[p]) for p in sorted(d.keys()) ])

    def load_config(self, filename = None):
        if filename:
            with open(filename, 'r') as fh:
                self.config = self._load_config(fh)
        else:
            self.config = self._load_config()

    def staging_api(self, project):
        # Allow for the Staging subproject to be passed directly from config
        # which should be stripped before initializing StagingAPI. This allows
        # for NonFree subproject to utilize StagingAPI for main project.
        if project.endswith(':Staging'):
            project = project[:-8]

        if project not in self.staging_apis:
            Config.get(self.apiurl, project)
            self.staging_apis[project] = StagingAPI(self.apiurl, project)

        return self.staging_apis[project]

    @property
    def review_mode(self):
        return self._review_mode

    @review_mode.setter
    def review_mode(self, value):
        if value not in self.REVIEW_CHOICES:
            raise Exception("invalid review option: %s"%value)
        self._review_mode = value

    def set_request_ids(self, ids):
        for rqid in ids:
            u = osc.core.makeurl(self.apiurl, [ 'request', rqid ], { 'withfullhistory' : '1' })
            r = osc.core.http_GET(u)
            root = ET.parse(r).getroot()
            req = osc.core.Request()
            req.read(root)
            self.requests.append(req)

    # function called before requests are reviewed
    def prepare_review(self):
        pass

    def check_requests(self):
        self.staging_apis = {}

        # give implementations a chance to do something before single requests
        self.prepare_review()
        for req in self.requests:
            self.logger.info("checking %s"%req.reqid)
            self.request = req

            override = self.request_override_check(req)
            if override is not None:
                good = override
            else:
                try:
                    good = self.check_one_request(req)
                except Exception:
                    good = None

                    import traceback
                    traceback.print_exc()

            if self.review_mode == 'no':
                good = None
            elif self.review_mode == 'accept':
                good = True

            if good is None:
                self.logger.info("%s ignored"%req.reqid)
            elif good:
                self._set_review(req, 'accepted')
            elif self.review_mode != 'accept-onpass':
                self._set_review(req, 'declined')

    @memoize(session=True)
    def request_override_check_users(self, project):
        """Determine users allowed to override review in a comment command."""
        config = Config.get(self.apiurl, project)

        users = []
        group = config.get('staging-group')
        if group:
            users += group_members(self.apiurl, group)

        if self.override_group_key:
            override_group = config.get(self.override_group_key)
            if override_group:
                users += group_members(self.apiurl, override_group)

        return users

    def request_override_check(self, request, force=False):
        """Check for a comment command requesting review override."""
        if not force and not self.override_allow:
            return None

        comments = self.comment_api.get_comments(request_id=request.reqid)
        users = self.request_override_check_users(request.actions[0].tgt_project)
        for args, who in self.comment_api.command_find(
            comments, self.review_user, 'override', users):
            message = 'overridden by {}'.format(who)
            override = args[1] if len(args) >= 2 else 'accept'
            if override == 'accept':
                self.review_messages['accepted'] = message
                return True

            if override == 'decline':
                self.review_messages['declined'] = message
                return False

    def _set_review(self, req, state):
        doit = self.can_accept_review(req.reqid)
        if doit is None:
            self.logger.info("can't change state, %s does not have the reviewer"%(req.reqid))

        newstate = state

        by_user = self.fallback_user
        by_group = self.fallback_group

        msg = self.review_messages[state] if state in self.review_messages else state
        self.logger.info("%s %s: %s"%(req.reqid, state, msg))

        if state == 'declined':
            if self.review_mode == 'fallback-onfail':
                self.logger.info("%s needs fallback reviewer"%req.reqid)
                self.add_review(req, by_group=by_group, by_user=by_user, msg="Automated review failed. Needs fallback reviewer.")
                newstate = 'accepted'
        elif self.review_mode == 'fallback-always':
            self.add_review(req, by_group=by_group, by_user=by_user, msg='Adding fallback reviewer')

        if doit == True:
            self.logger.debug("setting %s to %s"%(req.reqid, state))
            if not self.dryrun:
                osc.core.change_review_state(apiurl = self.apiurl,
                        reqid = req.reqid, newstate = newstate,
                        by_group=self.review_group,
                        by_user=self.review_user, message=msg)
        else:
            self.logger.debug("%s review not changed"%(req.reqid))

    # allow_duplicate=True should only be used if it makes sense to force a
    # re-review in a scenario where the bot adding the review will rerun.
    # Normally a declined review will automatically be reopened along with the
    # request, and any other bot reviews already added will not be touched unless
    # the issuing bot is rerun, which does not fit the normal workflow.
    def add_review(self, req, by_group=None, by_user=None, by_project=None, by_package=None,
                   msg=None, allow_duplicate=False):
        query = {
            'cmd': 'addreview'
        }
        if by_group:
            query['by_group'] = by_group
        elif by_user:
            query['by_user'] = by_user
        elif by_project:
            query['by_project'] = by_project
            if by_package:
                query['by_package'] = by_package
        else:
            raise osc.oscerr.WrongArgs("missing by_*")

        for r in req.reviews:
            if (r.by_group == by_group and
                r.by_project == by_project and
                r.by_package == by_package and
                r.by_user == by_user and
                # Only duplicate when allow_duplicate and state != new.
                (not allow_duplicate or r.state == 'new')):
                del query['cmd']
                self.logger.debug('skipped adding duplicate review for {}'.format(
                    '/'.join(query.values())))
                return

        u = osc.core.makeurl(self.apiurl, ['request', req.reqid], query)
        if self.dryrun:
            self.logger.info('POST %s' % u)
            return

        r = osc.core.http_POST(u, data=msg)
        code = ET.parse(r).getroot().attrib['code']
        if code != 'ok':
            raise Exception('non-ok return code: {}'.format(code))

    def devel_project_review_add(self, request, project, package, message='adding devel project review'):
        devel_project, devel_package = devel_project_fallback(self.apiurl, project, package)
        if not devel_project:
            self.logger.warning('no devel project found for {}/{}'.format(project, package))
            return False

        try:
            self.add_review(request, by_project=devel_project, by_package=devel_package, msg=message)
        except HTTPError as e:
            # could happen when the bot is not actually a reviewer and has no permissions
            if e.code != 403:
                raise e
            self.logger.error('failed to add devel project review for {}/{}'.format(devel_project, devel_package))
            return False

        return True

    def devel_project_review_ensure(self, request, project, package, message='submitter not devel maintainer'):
        if not self.devel_project_review_needed(request, project, package):
            self.logger.debug('devel project review not needed')
            return True

        return self.devel_project_review_add(request, project, package, message)

    def devel_project_review_needed(self, request, project, package):
        author = request.get_creator()
        maintainers = set(maintainers_get(self.apiurl, project, package))

        if author in maintainers:
            return False

        # Carried over from maintbot, but seems haphazard.
        for review in request.reviews:
            if review.by_user in maintainers:
                return False

        return True

    def check_one_request(self, req):
        """
        check all actions in one request.

        calls helper functions for each action type

        return None if nothing to do, True to accept, False to reject
        """

        # Copy original values to revert changes made to them.
        self.review_messages = self.DEFAULT_REVIEW_MESSAGES.copy()

        if self.only_one_action and len(req.actions) != 1:
            self.review_messages['declined'] = 'Only one action per request supported'
            return False

        if self.comment_handler is not False:
            self.comment_handler_add()

        overall = True
        for a in req.actions:
            # Store in case subclasses need direct access to original values.
            self.action = a

            func = getattr(self, self.action_method(a))
            ret = func(req, a)

            # In the case of multiple actions take the "lowest" result where the
            # order from lowest to highest is: False, None, True.
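            # e.g. (illustrative) action results (True, None) combine to None
            # and (None, False) combine to False.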
            if overall is not False:
                if ((overall is True and ret is not True) or
                    (overall is None and ret is False)):
                    overall = ret

        return overall

    def action_method(self, action):
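        # Resolution order (illustrative): a 'delete' action targeting a package
        # resolves to check_action_delete_package, falling back to
        # check_action_delete and finally check_action__default.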
        method_prefix = 'check_action'
        method_type = action.type
        method_suffix = None

        if method_type == 'delete':
            method_suffix = 'project'
            if action.tgt_package is not None:
                method_suffix = 'package'
            elif action.tgt_repository is not None:
                method_suffix = 'repository'

        if method_suffix:
            method = '_'.join([method_prefix, method_type, method_suffix])
            if hasattr(self, method):
                return method

        method = '_'.join([method_prefix, method_type])
        if hasattr(self, method):
            return method

        method_type = '_default'
        return '_'.join([method_prefix, method_type])

    @staticmethod
    def _is_patchinfo(pkgname):
        return pkgname == 'patchinfo' or pkgname.startswith('patchinfo.')

    def check_action_maintenance_incident(self, req, a):
        if self._is_patchinfo(a.src_package):
            self.logger.debug('ignoring patchinfo action')
            return True

        # Duplicate src_package as tgt_package since prior to assignment to a
        # specific incident project there is no target package (odd API). After
        # assignment it is still assumed the target will match the source. Since
        # the ultimate goal is the tgt_releaseproject the incident is treated
        # similar to staging in that the intermediate result is not the final
        # and thus the true target project (ex. openSUSE:Maintenance) is not
        # used for check_source_submission().
        tgt_package = a.src_package
        if a.tgt_releaseproject is not None:
            suffix = '.' + a.tgt_releaseproject.replace(':', '_')
            if tgt_package.endswith(suffix):
                tgt_package = tgt_package[:-len(suffix)]

        # Note tgt_releaseproject (product) instead of tgt_project (maintenance).
        return self.check_source_submission(a.src_project, a.src_package, a.src_rev,
                                            a.tgt_releaseproject, tgt_package)

    def check_action_maintenance_release(self, req, a):
        pkgname = a.src_package
        if self._is_patchinfo(pkgname):
            self.logger.debug('ignoring patchinfo action')
            return True

        linkpkg = self._get_linktarget_self(a.src_project, pkgname)
        if linkpkg is not None:
            pkgname = linkpkg
        # packages in maintenance have links to the target. Use that
        # to find the real package name
        (linkprj, linkpkg) = self._get_linktarget(a.src_project, pkgname)
        if linkpkg is None or linkprj is None or linkprj != a.tgt_project:
            self.logger.warning("%s/%s is not a link to %s"%(a.src_project, pkgname, a.tgt_project))
            return self.check_source_submission(a.src_project, a.src_package, a.src_rev, a.tgt_project, a.tgt_package)
        else:
            pkgname = linkpkg
        return self.check_source_submission(a.src_project, a.src_package, None, a.tgt_project, pkgname)

    def check_action_submit(self, req, a):
        return self.check_source_submission(a.src_project, a.src_package, a.src_rev, a.tgt_project, a.tgt_package)

    def check_action__default(self, req, a):
        # Disable any comment handler to avoid making a comment even if
        # comment_write() is called by another bot wrapping __default().
        self.comment_handler_remove()

        message = 'unhandled request type {}'.format(a.type)
        self.logger.error(message)
        self.review_messages['accepted'] += ': ' + message
        return self.request_default_return

    def check_source_submission(self, src_project, src_package, src_rev, target_project, target_package):
        """ default implemention does nothing """
        self.logger.info("%s/%s@%s -> %s/%s"%(src_project, src_package, src_rev, target_project, target_package))
        return None

    @staticmethod
    @memoize(session=True)
    def _get_sourceinfo(apiurl, project, package, rev=None):
        query = { 'view': 'info' }
        if rev is not None:
            query['rev'] = rev
        url = osc.core.makeurl(apiurl, ('source', project, package), query=query)
        try:
            return ET.parse(osc.core.http_GET(url)).getroot()
        except (HTTPError, URLError):
            return None

    def get_originproject(self, project, package, rev=None):
        root = ReviewBot._get_sourceinfo(self.apiurl, project, package, rev)
        if root is None:
            return None

        originproject = root.find('originproject')
        if originproject is not None:
            return originproject.text

        return None

    def get_sourceinfo(self, project, package, rev=None):
        root = ReviewBot._get_sourceinfo(self.apiurl, project, package, rev)
        if root is None:
            return None

        props = ('package', 'rev', 'vrev', 'srcmd5', 'lsrcmd5', 'verifymd5')
        return namedtuple('SourceInfo', props)(*[ root.get(p) for p in props ])

    # TODO: what if there is more than one _link?
    def _get_linktarget_self(self, src_project, src_package):
        """ if it's a link to a package in the same project return the name of the package"""
        prj, pkg = self._get_linktarget(src_project, src_package)
        if prj is None or prj == src_project:
            return pkg

    def _get_linktarget(self, src_project, src_package):

        query = {}
        url = osc.core.makeurl(self.apiurl, ('source', src_project, src_package), query=query)
        try:
            root = ET.parse(osc.core.http_GET(url)).getroot()
        except HTTPError:
            return (None, None)

        if root is not None:
            linkinfo = root.find("linkinfo")
            if linkinfo is not None:
                return (linkinfo.get('project'), linkinfo.get('package'))

        return (None, None)

    def _has_open_review_by(self, root, by_what, reviewer):
        states = set([review.get('state') for review in root.findall('review') if review.get(by_what) == reviewer])
        if not states:
            return None
        elif 'new' in states:
            return True
        return False

    def can_accept_review(self, request_id):
        """return True if there is a new review for the specified reviewer"""
        url = osc.core.makeurl(self.apiurl, ('request', str(request_id)))
        try:
            root = ET.parse(osc.core.http_GET(url)).getroot()
            if self.review_user and self._has_open_review_by(root, 'by_user', self.review_user):
                return True
            if self.review_group and self._has_open_review_by(root, 'by_group', self.review_group):
                return True
        except HTTPError as e:
            print('ERROR in URL %s [%s]' % (url, e))
        return False

    def set_request_ids_search_review(self):
        review = None
        if self.review_user:
            review = "@by_user='******' and @state='new'" % self.review_user
        if self.review_group:
            review = osc.core.xpath_join(review, "@by_group='%s' and @state='new'" % self.review_group)
        url = osc.core.makeurl(self.apiurl, ('search', 'request'), { 'match': "state/@name='review' and review[%s]" % review, 'withfullhistory': 1 } )
        root = ET.parse(osc.core.http_GET(url)).getroot()

        self.requests = []

        for request in root.findall('request'):
            req = osc.core.Request()
            req.read(request)
            self.requests.append(req)

    # also used by openqabot
    def ids_project(self, project, typename):
        url = osc.core.makeurl(self.apiurl, ('search', 'request'),
                               { 'match': "(state/@name='review' or state/@name='new') and (action/target/@project='%s' and action/@type='%s')" % (project, typename),
                                 'withfullhistory': 1 })
        root = ET.parse(osc.core.http_GET(url)).getroot()

        ret = []

        for request in root.findall('request'):
            req = osc.core.Request()
            req.read(request)
            ret.append(req)
        return ret

    def set_request_ids_project(self, project, typename):
        self.requests = self.ids_project(project, typename)

    def comment_handler_add(self, level=logging.INFO):
        """Add handler to start recording log messages for comment."""
        self.comment_handler = CommentFromLogHandler(level)
        self.logger.addHandler(self.comment_handler)

    def comment_handler_remove(self):
        self.logger.removeHandler(self.comment_handler)

    def comment_handler_lines_deduplicate(self):
        self.comment_handler.lines = list(OrderedDict.fromkeys(self.comment_handler.lines))

    def comment_write(self, state='done', result=None, project=None, package=None,
                      request=None, message=None, identical=False, only_replace=False,
                      info_extra=None, info_extra_identical=True, bot_name_suffix=None):
        """Write comment if not similar to previous comment and replace old one.

        The state, result, and info_extra (dict) are combined to create the info
        that is passed to CommentAPI methods for creating a marker and finding
        previous comments. self.bot_name, which defaults to class, will be used
        as the primary matching key. When info_extra_identical is set to False
        info_extra will not be included when finding previous comments to
        compare message against.

        A comment from the same bot will be replaced when a new comment is
        written. The only_replace flag will restrict to only writing a comment
        if a prior one is being replaced. This can be useful for writing a final
        comment that indicates a change from previous uncompleted state, but
        only makes sense to post if a prior comment was posted.

        The project, package, and request variables control where the comment is
        placed. If no value is given the default is the request being reviewed.

        If no message is provided the content will be extracted from
        self.comment_handler.lines, which is provided by CommentFromLogHandler. To
        use this call comment_handler_add() at the point which messages should
        start being collected. Alternatively the self.comment_handler setting
        may be set to True to automatically set one on each request.

        The previous comment body line count is compared to decide if the new
        message is too similar to be worth posting again, which is useful for
        avoiding re-posting comments that contain only minor irrelevant
        changes. To force an
        exact match use the identical flag to replace any non-identical
        comment body.
        """
        if project:
            kwargs = {'project_name': project}
            if package:
                kwargs['package_name'] = package
        else:
            if request is None:
                request = self.request
            kwargs = {'request_id': request.reqid}
        debug_key = '/'.join(kwargs.values())

        if message is None:
            if not len(self.comment_handler.lines):
                self.logger.debug('skipping empty comment for {}'.format(debug_key))
                return
            message = '\n\n'.join(self.comment_handler.lines)

        bot_name = self.bot_name
        if bot_name_suffix:
            bot_name = '::'.join([bot_name, bot_name_suffix])

        info = {'state': state, 'result': result}
        if info_extra and info_extra_identical:
            info.update(info_extra)

        comments = self.comment_api.get_comments(**kwargs)
        comment, _ = self.comment_api.comment_find(comments, bot_name, info)

        if info_extra and not info_extra_identical:
            # Add info_extra once comment has already been matched.
            info.update(info_extra)

        message = self.comment_api.add_marker(message, bot_name, info)
        message = self.comment_api.truncate(message.strip())

        if (comment is not None and
            ((identical and
              # Remove marker from comments since handled during comment_find().
              self.comment_api.remove_marker(comment['comment']) ==
              self.comment_api.remove_marker(message)) or
             (not identical and comment['comment'].count('\n') == message.count('\n')))
        ):
            # Assume same state/result and number of lines in message is duplicate.
            self.logger.debug('previous comment too similar on {}'.format(debug_key))
            return

        if comment is None:
            self.logger.debug('broadening search to include any state on {}'.format(debug_key))
            comment, _ = self.comment_api.comment_find(comments, bot_name)
        if comment is not None:
            self.logger.debug('removing previous comment on {}'.format(debug_key))
            if not self.dryrun:
                self.comment_api.delete(comment['id'])
        elif only_replace:
            self.logger.debug('no previous comment to replace on {}'.format(debug_key))
            return

        self.logger.debug('adding comment to {}: {}'.format(debug_key, message))
        if not self.dryrun:
            self.comment_api.add_comment(comment=message, **kwargs)

        self.comment_handler_remove()

    def _check_matching_srcmd5(self, project, package, rev, history_limit = 5):
        """check if factory sources contain the package and revision. check head and history"""
        self.logger.debug("checking %s in %s"%(package, project))
        try:
            osc.core.show_package_meta(self.apiurl, project, package)
        except (HTTPError, URLError):
            self.logger.debug("new package")
            return None

        si = self.get_sourceinfo(project, package)
        if rev == si.verifymd5:
            self.logger.debug("srcmd5 matches")
            return True

        if history_limit:
            self.logger.debug("%s not the latest version, checking history", rev)
            u = osc.core.makeurl(self.apiurl, [ 'source', project, package, '_history' ], { 'limit': history_limit })
            try:
                r = osc.core.http_GET(u)
            except HTTPError as e:
                self.logger.debug("package has no history!?")
                return None

            root = ET.parse(r).getroot()
            # we need this complicated construct as obs doesn't honor
            # the 'limit' parameter used above for obs interconnect:
            # https://github.com/openSUSE/open-build-service/issues/2545
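            # (count() here is itertools.count(); pairing each revision with an
            # index lets the loop stop at history_limit even when the server
            # returns more entries.)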
            for revision, i in zip(reversed(root.findall('revision')), count()):
                node = revision.find('srcmd5')
                if node is None:
                    continue
                self.logger.debug("checking %s"%node.text)
                if node.text == rev:
                    self.logger.debug("got it, rev %s"%revision.get('rev'))
                    return True
                if i == history_limit:
                    break

            self.logger.debug("srcmd5 not found in history either")

        return False
Example #30
class TestReviewBotComment(OBSLocal.TestCase):
    def setUp(self):
        super(TestReviewBotComment, self).setUp()
        self.api = CommentAPI(self.apiurl)
        self.wf = OBSLocal.FactoryWorkflow()
        self.wf.create_user('factory-auto')
        self.project = self.wf.create_project(PROJECT)

        # Ensure different test runs operate in unique namespace.
        self.bot = '::'.join([type(self).__name__, str(random.getrandbits(8))])
        self.review_bot = ReviewBot(self.apiurl,
                                    logger=logging.getLogger(self.bot))
        self.review_bot.bot_name = self.bot

        self.osc_user('factory-auto')

    def tearDown(self):
        self.api.delete_from(project_name=PROJECT)
        self.assertFalse(len(self.api.get_comments(project_name=PROJECT)))
        self.osc_user('Admin')
        del self.wf

    def test_basic_logger(self):
        self.assertFalse(self.comments_filtered(self.bot)[0])

        # Initial comment.
        self.review_bot.comment_handler_add()
        self.review_bot.logger.info('something interesting')
        self.review_bot.comment_write(project=PROJECT)
        comment, _ = self.comments_filtered(self.bot)
        self.assertTrue(comment['comment'].endswith('something interesting'))

        # Second comment with extra line.
        self.review_bot.comment_handler_add()
        self.review_bot.logger.info('something interesting')
        self.review_bot.logger.info('something extra')
        self.review_bot.comment_write(project=PROJECT)
        comment, _ = self.comments_filtered(self.bot)
        self.assertTrue(comment['comment'].endswith('something extra'))

    def test_workflow(self):
        comment_count = len(self.api.get_comments(project_name=PROJECT))
        self.assertFalse(self.comments_filtered(self.bot)[0])

        # Initial comment.
        info = {'state': 'seen', 'result': 'failed'}
        info_extra = {'build': '1'}
        info_merged = info.copy()
        info_merged.update(info_extra)
        self.review_bot.comment_write(state='seen',
                                      result='failed',
                                      identical=True,
                                      info_extra=info_extra,
                                      info_extra_identical=False,
                                      project=PROJECT,
                                      message=COMMENT)
        comment, info_parsed = self.comments_filtered(self.bot)
        self.assertTrue(comment['comment'].endswith(COMMENT))
        self.assertEqual(info_parsed, info_merged)

        # Only build change (expect no change).
        info_extra = {'build': '2'}
        self.review_bot.comment_write(state='seen',
                                      result='failed',
                                      identical=True,
                                      info_extra=info_extra,
                                      info_extra_identical=False,
                                      project=PROJECT,
                                      message=COMMENT)
        comment, info_parsed = self.comments_filtered(self.bot)
        self.assertTrue(comment['comment'].endswith(COMMENT))
        self.assertEqual(info_parsed, info_merged)

        # Build and comment (expect comment replacement).
        info_extra = {'build': '3'}
        info_merged.update(info_extra)
        self.review_bot.comment_write(state='seen',
                                      result='failed',
                                      identical=True,
                                      info_extra=info_extra,
                                      info_extra_identical=False,
                                      project=PROJECT,
                                      message=COMMENT + '3')
        comment, info_parsed = self.comments_filtered(self.bot)
        self.assertTrue(comment['comment'].endswith(COMMENT + '3'))
        self.assertEqual(info_parsed, info_merged)

        # Final build (expect comment replacement).
        info_extra = {'build': '4'}
        info_merged.update(info_extra)
        self.review_bot.comment_write(state='seen',
                                      result='failed',
                                      identical=True,
                                      info_extra=info_extra,
                                      info_extra_identical=True,
                                      project=PROJECT,
                                      message=COMMENT + '4')
        comment, info_parsed = self.comments_filtered(self.bot)
        self.assertTrue(comment['comment'].endswith(COMMENT + '4'))
        self.assertEqual(info_parsed, info_merged)

        # Final build (expect comment replacement).
        info = {'state': 'done', 'result': 'passed'}
        info_extra = {'build': '5'}
        info_merged = info.copy()
        info_merged.update(info_extra)
        self.review_bot.comment_write(state='done',
                                      result='passed',
                                      identical=True,
                                      info_extra=info_extra,
                                      info_extra_identical=True,
                                      only_replace=True,
                                      project=PROJECT,
                                      message=COMMENT + '5')
        comment, info_parsed = self.comments_filtered(self.bot)
        self.assertTrue(comment['comment'].endswith(COMMENT + '5'))
        self.assertEqual(info_parsed, info_merged)

        # Should never be more than one new comment.
        self.assertEqual(len(self.api.get_comments(project_name=PROJECT)),
                         comment_count + 1)

    def test_only_replace_none(self):
        self.review_bot.comment_write(only_replace=True,
                                      project=PROJECT,
                                      message=COMMENT)
        self.assertFalse(self.comments_filtered(self.bot)[0])

    def test_dryrun(self):
        # dryrun = True, no comment.
        self.review_bot.dryrun = True
        self.review_bot.comment_write(project=PROJECT, message=COMMENT)
        self.assertFalse(self.comments_filtered(self.bot)[0])

        # dryrun = False, a comment.
        self.review_bot.dryrun = False
        self.review_bot.comment_write(project=PROJECT, message=COMMENT)
        self.assertTrue(self.comments_filtered(self.bot)[0])

        # dryrun = True, no replacement.
        self.review_bot.dryrun = True
        self.review_bot.comment_write(state='changed',
                                      project=PROJECT,
                                      message=COMMENT)
        _, info = self.comments_filtered(self.bot)
        self.assertEqual(info['state'], 'done')

        # dryrun = False, replacement.
        self.review_bot.dryrun = False
        self.review_bot.comment_write(state='changed',
                                      project=PROJECT,
                                      message=COMMENT)
        _, info = self.comments_filtered(self.bot)
        self.assertEqual(info['state'], 'changed')

    def test_bot_name_suffix(self):
        suffix1 = 'suffix1'
        bot_suffixed1 = '::'.join([self.bot, suffix1])

        suffix2 = 'suffix2'
        bot_suffixed2 = '::'.join([self.bot, suffix2])

        self.review_bot.comment_write(bot_name_suffix=suffix1,
                                      project=PROJECT,
                                      message=COMMENT)
        self.assertFalse(self.comments_filtered(self.bot)[0])
        self.assertTrue(self.comments_filtered(bot_suffixed1)[0])
        self.assertFalse(self.comments_filtered(bot_suffixed2)[0])

        self.review_bot.comment_write(bot_name_suffix=suffix2,
                                      project=PROJECT,
                                      message=COMMENT)
        self.assertFalse(self.comments_filtered(self.bot)[0])
        self.assertTrue(self.comments_filtered(bot_suffixed1)[0])
        self.assertTrue(self.comments_filtered(bot_suffixed2)[0])

        self.review_bot.comment_write(bot_name_suffix=suffix1,
                                      project=PROJECT,
                                      message=COMMENT + '\nnew')
        comment, _ = self.comments_filtered(bot_suffixed1)
        self.assertTrue(comment['comment'].endswith(COMMENT + '\nnew'))

        comment, _ = self.comments_filtered(bot_suffixed2)
        self.assertTrue(comment['comment'].endswith(COMMENT))

    def comments_filtered(self, bot):
        comments = self.api.get_comments(project_name=PROJECT)
        return self.api.comment_find(comments, bot)
Example #31
class PkgListGen(ToolBase.ToolBase):

    def __init__(self):
        ToolBase.ToolBase.__init__(self)
        self.logger = logging.getLogger(__name__)
        self.comment = CommentAPI(self.apiurl)
        self.reset()

    def reset(self):
        # package -> supportstatus
        self.packages = dict()
        self.groups = dict()
        self._supportstatus = None
        self.input_dir = '.'
        self.output_dir = '.'
        self.lockjobs = dict()
        self.ignore_broken = False
        self.unwanted = set()
        self.output = None
        self.locales = set()
        self.filtered_architectures = None
        self.dry_run = False
        self.all_architectures = None

    def filter_architectures(self, architectures):
        self.filtered_architectures = sorted(list(set(architectures) & set(self.all_architectures)))

    def _load_supportstatus(self):
        # XXX
        fn = os.path.join(self.input_dir, 'supportstatus.txt')
        self._supportstatus = dict()
        if os.path.exists(fn):
            with open(fn, 'r') as fh:
                for line in fh:
                    # pkg, status
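                    # e.g. (hypothetical) a line 'somepackage l3' maps the
                    # package name to its support status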
                    fields = line.rstrip().split(' ')
                    if len(fields) > 1:
                        self._supportstatus[fields[0]] = fields[1]

    def supportstatus(self, package):
        if self._supportstatus is None:
            self._load_supportstatus()

        return self._supportstatus.get(package)

    def _load_group_file(self, fn):
        output = None
        unwanted = None
        with open(fn, 'r') as fh:
            self.logger.debug('reading %s', fn)
            for groupname, group in yaml.safe_load(fh).items():
                if groupname == 'OUTPUT':
                    output = group
                    continue
                if groupname == 'UNWANTED':
                    unwanted = set(group)
                    continue
                g = Group(groupname, self)
                g.parse_yml(group)
        return output, unwanted

    def group_input_files(self):
        return glob.glob(os.path.join(self.input_dir, 'group*.yml'))

    def load_all_groups(self):
        for fn in self.group_input_files():
            o, u = self._load_group_file(fn)
            if o:
                if self.output is not None:
                    raise Exception('OUTPUT defined multiple times')
                self.output = o
            if u:
                self.unwanted |= u

    # required to generate release spec files (only)
    def write_group_stubs(self):
        archs = ['*'] + self.all_architectures
        for name in self.groups:
            group = self.groups[name]
            group.solved_packages = dict()
            fn = '{}.group'.format(group.name)
            with open(os.path.join(self.output_dir, fn), 'w') as fh:
                for arch in archs:
                    x = group.toxml(arch, group.ignore_broken, None)
                    x = ET.tostring(x, pretty_print=True, encoding='unicode')
                    fh.write(x)

    def write_all_groups(self):
        self._check_supplements()
        summary = dict()
        archs = ['*'] + self.all_architectures
        for name in self.groups:
            group = self.groups[name]
            if not group.solved:
                continue
            summary[name] = group.summary()
            fn = '{}.group'.format(group.name)
            with open(os.path.join(self.output_dir, fn), 'w') as fh:
                comment = group.comment
                for arch in archs:
                    x = group.toxml(arch, group.ignore_broken, comment)
                    # only comment first time
                    comment = None
                    x = ET.tostring(x, pretty_print=True, encoding='unicode')
                    x = re.sub(r'\s*<!-- reason:', ' <!-- reason:', x)
                    fh.write(x)
        return summary

    def solve_module(self, groupname, includes, excludes, use_recommends):
        g = self.groups[groupname]
        importants = set()
        for i in includes:
            name = i
            if isinstance(i, dict):
                name = list(i)[0]
                if i[name] != 'support':
                    importants.add(name)
            else:
                importants.add(name)
            g.inherit(self.groups[name])
        g.solve(use_recommends)
        for e in excludes:
            g.ignore(self.groups[e])
        for i in importants:
            group = self.groups[i]
            for arch in group.packages:
                if arch not in g.solved_packages:
                    continue
                for package in group.packages[arch]:
                    if package[0] in g.solved_packages[arch]:
                        continue
                    if package[0] not in g.solved_packages['*']:
                        self.logger.error(f'Missing {package[0]} in {groupname} for {arch}')

    def expand_repos(self, project, repo='standard'):
        return repository_path_expand(self.apiurl, project, repo)

    def _check_supplements(self):
        tocheck = set()
        tocheck_locales = set()
        for arch in self.filtered_architectures:
            pool = self.prepare_pool(arch, True)
            sel = pool.Selection()
            for s in pool.solvables_iter():
                sel.add_raw(solv.Job.SOLVER_SOLVABLE, s.id)

            for s in sel.solvables():
                for dep in s.lookup_deparray(solv.SOLVABLE_SUPPLEMENTS):
                    for d in dep.str().split(' '):
                        if d.startswith('namespace:modalias') or d.startswith('namespace:filesystem'):
                            tocheck.add(s.name)

            for locale in self.locales:
                locale_id = pool.str2id('locale({})'.format(locale))
                for s in pool.whatprovides(locale_id):
                    tocheck_locales.add(s.name)

        all_grouped = set()
        for g in self.groups.values():
            if g.solved:
                for arch in g.solved_packages.keys():
                    if g.solved_packages[arch]:
                        all_grouped.update(g.solved_packages[arch])

        for p in tocheck - all_grouped:
            self.logger.warning('package %s has supplements but is not grouped', p)

        for p in tocheck_locales - all_grouped:
            self.logger.warning('package %s provides supported locale but is not grouped', p)

    def prepare_pool(self, arch, ignore_conflicts):
        pool = solv.Pool()
        # the i586 DVD is really an i686 one
        if arch == 'i586':
            pool.setarch('i686')
        else:
            pool.setarch(arch)

        self.lockjobs[arch] = []
        solvables = set()

        for project, reponame in self.repos:
            repo = pool.add_repo(project)
            # check back the repo state to avoid surprises
            state = repository_arch_state(self.apiurl, project, reponame, arch)
            if state is None:
                continue
            s = f'repo-{project}-{reponame}-{arch}-{state}.solv'
            if not repo.add_solv(s):
                raise MismatchedRepoException('failed to add repo {}/{}/{}'.format(project, reponame, arch))
            for solvable in repo.solvables_iter():
                if ignore_conflicts:
                    solvable.unset(solv.SOLVABLE_CONFLICTS)
                    solvable.unset(solv.SOLVABLE_OBSOLETES)
                # only take the first solvable in the repo chain
                if not self.use_newest_version and solvable.name in solvables:
                    self.lockjobs[arch].append(pool.Job(solv.Job.SOLVER_SOLVABLE | solv.Job.SOLVER_LOCK, solvable.id))
                solvables.add(solvable.name)

        pool.addfileprovides()
        pool.createwhatprovides()

        for locale in self.locales:
            pool.set_namespaceproviders(solv.NAMESPACE_LANGUAGE, pool.Dep(locale), True)

        return pool

    # parse file and merge all groups
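    # The file is expected to map group names to lists of package-name
    # patterns, e.g. (hypothetical):
    #   legacy:
    #     - python2-.*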
    def _parse_unneeded(self, filename):
        filename = os.path.join(self.input_dir, filename)
        if not os.path.isfile(filename):
            return set()
        self.logger.debug('reading %s', filename)
        result = set()
        with open(filename, 'r') as fh:
            for group in yaml.safe_load(fh).values():
                result.update(group)
        return result

    # the unsorted group is special and will contain all the rest for
    # the FTP tree. We filter it with unneeded, though, to create an
    # unsorted.yml file for release manager review
    def _collect_unsorted_packages(self, modules, unsorted):
        unneeded_regexps = [re.compile(r'\A' + r + r'\Z')
                            for r in self._parse_unneeded('unneeded.yml')]

        packages = dict()
        if unsorted:
            unsorted.solved_packages = dict()
            unsorted.solved_packages['*'] = dict()

        for arch in self.filtered_architectures:
            pool = self.prepare_pool(arch, False)
            pool.Selection()
            archpacks = [s.name for s in pool.solvables_iter()]

            # copy
            filtered = list(archpacks)
            for r in unneeded_regexps:
                filtered = [p for p in filtered if not r.match(p)]

            # convert to set
            filtered = set(filtered) - self.unwanted
            for g in modules:
                if unsorted and g == unsorted:
                    continue
                for a in ('*', arch):
                    filtered -= set(g.solved_packages[a])
            for package in filtered:
                packages.setdefault(package, []).append(arch)

            if unsorted:
                archpacks = set(archpacks)
                unsorted.solved_packages[arch] = dict()
                for g in modules:
                    archpacks -= set(g.solved_packages[arch])
                    archpacks -= set(g.solved_packages['*'])
                for p in archpacks:
                    unsorted.solved_packages[arch][p] = None

        if unsorted:
            common = None
            for arch in self.filtered_architectures:
                if common is None:
                    common = set(unsorted.solved_packages[arch])
                    continue
                common &= set(unsorted.solved_packages[arch])
            for p in common:
                unsorted.solved_packages['*'][p] = None
                for arch in self.filtered_architectures:
                    del unsorted.solved_packages[arch][p]

        with open(os.path.join(self.output_dir, 'unsorted.yml'), 'w') as fh:
            fh.write('unsorted:\n')
            for p in sorted(packages):
                fh.write('  - ')
                fh.write(p)
                if len(packages[p]) != len(self.filtered_architectures):
                    fh.write(': [')
                    fh.write(','.join(sorted(packages[p])))
                    fh.write(']')
                    reason = self._find_reason(p, modules)
                    if reason:
                        fh.write(' # ' + reason)
                fh.write('\n')

    # give a hint if the package is related to a group
    def _find_reason(self, package, modules):
        # go through the modules multiple times to find the "best"
        for g in modules:
            if package in g.recommends:
                return 'recommended by ' + g.recommends[package]
        for g in modules:
            if package in g.suggested:
                return 'suggested by ' + g.suggested[package]
        for g in modules:
            if package in g.develpkgs:
                return 'devel package of ' + g.develpkgs[package]
        return None

    def update_one_repo(self, project, repo, arch, solv_file, solv_file_hash):
        # Either hash changed or new, so remove any old hash files.
        file_utils.unlink_list(None, glob.glob(solv_file + '::*'))

        d = os.path.join(CACHEDIR, project, repo, arch)
        if not os.path.exists(d):
            os.makedirs(d)

        self.logger.debug('updating %s', d)

        # only there to parse the repos
        bs_mirrorfull = os.path.join(SCRIPT_PATH, '..', 'bs_mirrorfull')

        args = [bs_mirrorfull]
        args.append('--nodebug')
        args.append('{}/public/build/{}/{}/{}'.format(self.apiurl, project, repo, arch))
        args.append(d)
        with subprocess.Popen(args, stdout=subprocess.PIPE) as p:
            for line in p.stdout:
                self.logger.info(line.decode('utf-8').rstrip())
            if p.wait() != 0:
                raise Exception("Mirroring repository failed")

        files = [os.path.join(d, f)
                 for f in os.listdir(d) if f.endswith('.rpm')]
        suffix = f'.{os.getpid()}.tmp'
        with open(solv_file + suffix, 'w') as fh:
            p = subprocess.Popen(
                ['rpms2solv', '-m', '-', '-0'], stdin=subprocess.PIPE, stdout=fh)
            p.communicate(bytes('\0'.join(files), 'utf-8'))
        if p.wait() != 0:
            raise Exception("rpms2solv failed")
        os.rename(solv_file + suffix, solv_file)

        # Create hash file now that solv creation is complete.
        open(solv_file_hash, 'a').close()

    def update_repos(self, architectures):
        for project, repo in self.repos:
            for arch in architectures:
                # Fetch state before mirroring in case it changes during download.
                state = repository_arch_state(self.apiurl, project, repo, arch)
                if state is None:
                    # Repo might not have this architecture
                    continue

                repo_solv_name = 'repo-{}-{}-{}.solv'.format(project, repo, arch)
                # Would be preferable to include hash in name, but cumbersome to handle without
                # reworking a fair bit since the state needs to be tracked.
                solv_file = os.path.join(CACHEDIR, repo_solv_name)
                solv_file_hash = '{}::{}'.format(solv_file, state)
                if os.path.exists(solv_file) and os.path.exists(solv_file_hash):
                    # Solv file exists and hash unchanged, skip updating solv.
                    self.logger.debug('skipping solv generation for {} due to matching state {}'.format(
                        '/'.join([project, repo, arch]), state))
                else:
                    self.update_one_repo(project, repo, arch, solv_file, solv_file_hash)
                shutil.copy(solv_file, f'./repo-{project}-{repo}-{arch}-{state}.solv')

    def create_weakremovers(self, target, target_config, directory, output):
        drops = dict()
        dropped_repos = dict()

        with open(os.path.join(directory, 'config.yml')) as fh:
            root = yaml.safe_load(fh)
        for item in root:
            key = list(item)[0]
            # cast 15.1 to string :)
            key = str(key)

            oldrepos = set()
            for suffix in ['xz', 'zst']:
                oldrepos |= set(glob.glob(os.path.join(directory, f"{key}_*.packages.{suffix}")))
                oldrepos |= set(glob.glob(os.path.join(directory, f"{key}.packages.{suffix}")))
            for oldrepo in sorted(oldrepos):
                pool = solv.Pool()
                pool.setarch()

                # we need some progress in the debug output - or gocd gets nervous
                self.logger.debug('checking {}'.format(oldrepo))
                oldsysrepo = file_utils.add_susetags(pool, oldrepo)

                for arch in self.all_architectures:
                    for project, repo in self.repos:
                        # check back the repo state to avoid surprises
                        state = repository_arch_state(self.apiurl, project, repo, arch)
                        if state is None:
                            self.logger.debug(f'Skipping {project}/{repo}/{arch}')
                            continue
                        fn = f'repo-{project}-{repo}-{arch}-{state}.solv'
                        r = pool.add_repo('/'.join([project, repo]))
                        if not r.add_solv(fn):
                            raise MismatchedRepoException('failed to add repo {}/{}/{}.'.format(project, repo, arch))

                pool.createwhatprovides()

                accepted_archs = set(self.all_architectures)
                accepted_archs.add('noarch')

                for s in oldsysrepo.solvables_iter():
                    oldarch = s.arch
                    if oldarch == 'i686':
                        oldarch = 'i586'

                    if oldarch not in accepted_archs:
                        continue

                    haveit = False
                    for s2 in pool.whatprovides(s.nameid):
                        if s2.repo == oldsysrepo or s.nameid != s2.nameid:
                            continue
                        newarch = s2.arch
                        if newarch == 'i686':
                            newarch = 'i586'
                        if oldarch != newarch and newarch != 'noarch' and oldarch != 'noarch':
                            continue
                        haveit = True
                        break
                    if haveit:
                        continue

                    # check for already obsoleted packages
                    nevr = pool.rel2id(s.nameid, s.evrid, solv.REL_EQ)
                    for s2 in pool.whatmatchesdep(solv.SOLVABLE_OBSOLETES, nevr):
                        if s2.repo == oldsysrepo:
                            continue
                        haveit = True
                        break
                    if haveit:
                        continue
                    if s.name not in drops:
                        drops[s.name] = {'repo': key, 'archs': set()}
                    if oldarch == 'noarch':
                        drops[s.name]['archs'] |= set(self.all_architectures)
                    else:
                        drops[s.name]['archs'].add(oldarch)
                    dropped_repos[key] = 1

                del pool

        for repo in sorted(dropped_repos):
            repo_output = False
            exclusives = dict()
            for name in sorted(drops):
                if drops[name]['repo'] != repo:
                    continue
                if drops[name]['archs'] == set(self.all_architectures):
                    if not repo_output:
                        print('#', repo, file=output)
                        repo_output = True
                    print('Provides: weakremover({})'.format(name), file=output)
                else:
                    jarch = ' '.join(sorted(drops[name]['archs']))
                    exclusives.setdefault(jarch, []).append(name)

            for arch in sorted(exclusives):
                if not repo_output:
                    print('#', repo, file=output)
                    repo_output = True
                print('%ifarch {}'.format(arch), file=output)
                for name in sorted(exclusives[arch]):
                    print('Provides: weakremover({})'.format(name), file=output)
                print('%endif', file=output)
        output.flush()

    def read_summary_file(self, file):
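        # Each line is expected to look like '<package>:<group>', e.g.
        # (hypothetical) 'somepackage:some_group'; a package may appear in
        # several groups.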
        ret = dict()
        with open(file, 'r') as f:
            for line in f:
                pkg, group = line.strip().split(':')
                ret.setdefault(pkg, [])
                ret[pkg].append(group)
        return ret

    def calculate_package_diff(self, old_file, new_file):
        old_file = self.read_summary_file(old_file)
        new_file = self.read_summary_file(new_file)

        # remove common part
        keys = list(old_file.keys())
        for key in keys:
            if new_file.get(key, []) == old_file[key]:
                del new_file[key]
                del old_file[key]

        if not old_file and not new_file:
            return None

        removed = dict()
        for pkg in old_file:
            old_groups = old_file[pkg]
            if new_file.get(pkg):
                continue
            removekey = ','.join(old_groups)
            removed.setdefault(removekey, [])
            removed[removekey].append(pkg)

        report = ''
        for rm in sorted(removed.keys()):
            report += f"**Remove from {rm}**\n\n```\n"
            paragraph = ', '.join(removed[rm])
            report += "\n".join(textwrap.wrap(paragraph, width=90, break_long_words=False, break_on_hyphens=False))
            report += "\n```\n\n"

        moved = dict()
        for pkg in old_file:
            old_groups = old_file[pkg]
            new_groups = new_file.get(pkg)
            if not new_groups:
                continue
            movekey = ','.join(old_groups) + ' to ' + ','.join(new_groups)
            moved.setdefault(movekey, [])
            moved[movekey].append(pkg)

        for move in sorted(moved.keys()):
            report += f"**Move from {move}**\n\n```\n"
            paragraph = ', '.join(moved[move])
            report += "\n".join(textwrap.wrap(paragraph, width=90, break_long_words=False, break_on_hyphens=False))
            report += "\n```\n\n"

        added = dict()
        for pkg in new_file:
            if pkg in old_file:
                continue
            addkey = ','.join(new_file[pkg])
            added.setdefault(addkey, [])
            added[addkey].append(pkg)

        for group in sorted(added):
            report += f"**Add to {group}**\n\n```\n"
            paragraph = ', '.join(added[group])
            report += "\n".join(textwrap.wrap(paragraph, width=90, break_long_words=False, break_on_hyphens=False))
            report += "\n```\n\n"

        return report.strip()
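
    # Illustrative output (hypothetical data): each changed group key yields a
    # Markdown section such as
    #
    #   **Remove from editors**
    #
    #   ```
    #   joe, nano
    #   ```
    #
    # with the package list wrapped at 90 columns.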

    def handle_package_diff(self, project, old_file, new_file):
        comments = self.comment.get_comments(project_name=project)
        comment, _ = self.comment.comment_find(comments, MARKER)

        report = self.calculcate_package_diff(old_file, new_file)
        if not report:
            if comment:
                self.comment.delete(comment['id'])
            return 0
        report = self.comment.add_marker(report, MARKER)

        if comment:
            write_comment = report != comment['comment']
        else:
            write_comment = True
        if write_comment:
            if comment:
                self.comment.delete(comment['id'])
            self.comment.add_comment(project_name=project, comment=report)
        else:
            for c in comments.values():
                if c['parent'] == comment['id']:
                    print(c)

        return 1

    def solve_project(self, ignore_unresolvable=False, ignore_recommended=False, locale=None, locales_from=None):
        self.load_all_groups()
        if not self.output:
            self.logger.error('OUTPUT not defined')
            return

        if ignore_unresolvable:
            self.ignore_broken = True
        global_use_recommends = not ignore_recommended
        if locale:
            self.locales |= set(locale.split(' '))
        if locales_from:
            with open(os.path.join(self.input_dir, locales_from), 'r') as fh:
                root = ET.parse(fh).getroot()
                self.locales |= set([lang.text for lang in root.findall('.//linguas/language')])

        modules = []
        # the YAML parser makes an array out of everything, so
        # we loop over a bit more than we actually support
        for group in self.output:
            groupname = list(group)[0]
            settings = group[groupname]
            if not settings:  # e.g. unsorted
                settings = {}
            includes = settings.get('includes', [])
            excludes = settings.get('excludes', [])
            use_recommends = settings.get('recommends', global_use_recommends)
            self.solve_module(groupname, includes, excludes, use_recommends)
            g = self.groups[groupname]
            # the default is a little double-negated, but Factory has ignore_broken
            # enabled by default and we only disable it for single groups (for now)
            g.ignore_broken = not settings.get('require_all', not self.ignore_broken)
            g.conflicts = settings.get('conflicts', [])
            g.default_support_status = settings.get('default-support', 'unsupported')
            modules.append(g)

        # not defined for openSUSE
        overlap = self.groups.get('overlap')
        for module in modules:
            module.check_dups(modules, overlap)
            module.collect_devel_packages()
            module.filter_already_selected(modules)

        if overlap:
            ignores = [x.name for x in overlap.ignored]
            self.solve_module(overlap.name, [], ignores, use_recommends=False)
            overlapped = set(overlap.solved_packages['*'])
            for arch in self.filtered_architectures:
                overlapped |= set(overlap.solved_packages[arch])
            for module in modules:
                if module.name == 'overlap' or module in overlap.ignored:
                    continue
                for arch in ['*'] + self.filtered_architectures:
                    for p in overlapped:
                        module.solved_packages[arch].pop(p, None)

        self._collect_unsorted_packages(modules, self.groups.get('unsorted'))
        return self.write_all_groups()
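
    # Illustrative OUTPUT entry (hypothetical data): each group in the groups
    # YAML may carry the optional settings consumed by the loop above, e.g.
    #
    #   - some_group:
    #       includes: [base]
    #       excludes: [kernel]
    #       recommends: false
    #       require_all: true
    #       conflicts: [other_group]
    #       default-support: unsupported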

    def strip_medium_from_staging(self, path):
        # staging projects don't need source and debug medium - and the glibc source
        # rpm conflicts between standard and bootstrap_copy repository causing the
        # product builder to fail
        medium = re.compile('name="(DEBUG|SOURCE)MEDIUM"')
        for name in glob.glob(os.path.join(path, '*.kiwi')):
            lines = open(name).readlines()
            lines = [x for x in lines if not medium.search(x)]
            open(name, 'w').writelines(lines)

    def build_stub(self, destination, extension):
        with open(os.path.join(destination, '.'.join(['stub', extension])), 'w+') as f:
            f.write('# prevent building single {} files twice\n'.format(extension))
            f.write('Name: stub\n')
            f.write('Version: 0.0\n')

    def commit_package(self, path):
        if self.dry_run:
            package = Package(path)
            for i in package.get_diff():
                logging.info(''.join(i))
        else:
            # No proper API function to perform the same operation.
            logging.debug(subprocess.check_output(
                ' '.join(['cd', path, '&&', 'osc', 'addremove']), shell=True, encoding='utf-8'))
            package = Package(path)
            package.commit(msg='Automatic update', skip_local_service_run=True)

    def replace_product_version(self, product_file, product_version):
        product_version = '<version>{}</version>'.format(product_version)
        lines = open(product_file).readlines()
        new_lines = []
        for line in lines:
            new_lines.append(line.replace('<version></version>', product_version))
        open(product_file, 'w').write(''.join(new_lines))
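
    # Illustrative example: with product_version '20230102' the line
    #   <version></version>
    # becomes
    #   <version>20230102</version>
    # while all other lines are written back unchanged.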

    def update_and_solve_target(self, api, target_project, target_config, main_repo,
                                project, scope, force, no_checkout,
                                only_release_packages, stop_after_solve):
        self.all_architectures = target_config.get('pkglistgen-archs').split(' ')
        self.use_newest_version = str2bool(target_config.get('pkglistgen-use-newest-version', 'False'))
        self.repos = self.expand_repos(project, main_repo)
        logging.debug('[{}] {}/{}: update and solve'.format(scope, project, main_repo))

        group = target_config.get('pkglistgen-group', '000package-groups')
        product = target_config.get('pkglistgen-product', '000product')
        release = target_config.get('pkglistgen-release', '000release-packages')
        oldrepos = target_config.get('pkglistgen-repos', '000update-repos')

        url = api.makeurl(['source', project])
        packages = ET.parse(http_GET(url)).getroot()
        if packages.find('entry[@name="{}"]'.format(product)) is None:
            if not self.dry_run:
                undelete_package(api.apiurl, project, product, 'revive')
            # TODO disable build.
            logging.info('{} undeleted, skip dvd until next cycle'.format(product))
            return
        elif not force:
            root = ET.fromstringlist(show_results_meta(api.apiurl, project, product,
                                                       repository=[main_repo], multibuild=True))
            if len(root.xpath('result[@state="building"]')) or len(root.xpath('result[@state="dirty"]')):
                logging.info('{}/{} build in progress'.format(project, product))
                return

        drop_list = api.item_exists(project, oldrepos)
        checkout_list = [group, product, release]
        if drop_list and not only_release_packages:
            checkout_list.append(oldrepos)

        if packages.find('entry[@name="{}"]'.format(release)) is None:
            if not self.dry_run:
                undelete_package(api.apiurl, project, release, 'revive')
            logging.info('{} undeleted, skip dvd until next cycle'.format(release))
            return

        # Cache dir specific to hostname and project.
        host = urlparse(api.apiurl).hostname
        cache_dir = CacheManager.directory('pkglistgen', host, project)

        if not no_checkout:
            if os.path.exists(cache_dir):
                shutil.rmtree(cache_dir)
            os.makedirs(cache_dir)

        group_dir = os.path.join(cache_dir, group)
        product_dir = os.path.join(cache_dir, product)
        release_dir = os.path.join(cache_dir, release)
        oldrepos_dir = os.path.join(cache_dir, oldrepos)

        self.input_dir = group_dir
        self.output_dir = product_dir

        for package in checkout_list:
            if no_checkout:
                logging.debug('Skipping checkout of {}/{}'.format(project, package))
                continue
            checkout_package(api.apiurl, project, package, expand_link=True,
                             prj_dir=cache_dir, outdir=os.path.join(cache_dir, package))

        # print('RET', self.handle_package_diff(project, f"{group_dir}/summary-staging.txt", f"{product_dir}/summary-staging.txt"))

        file_utils.unlink_all_except(release_dir, ['weakremovers.inc'])
        if not only_release_packages:
            file_utils.unlink_all_except(product_dir)
        ignore_list = ['supportstatus.txt', 'summary-staging.txt', 'package-groups.changes']
        ignore_list += self.group_input_files()
        file_utils.copy_directory_contents(group_dir, product_dir, ignore_list)
        file_utils.change_extension(product_dir, '.spec.in', '.spec')
        file_utils.change_extension(product_dir, '.product.in', '.product')

        logging.debug('-> do_update')
        # make sure we only calculate existing architectures
        self.filter_architectures(target_archs(api.apiurl, project, main_repo))
        self.update_repos(self.filtered_architectures)

        if only_release_packages:
            self.load_all_groups()
            self.write_group_stubs()
        else:
            summary = self.solve_project(ignore_unresolvable=str2bool(target_config.get('pkglistgen-ignore-unresolvable')),
                                         ignore_recommended=str2bool(
                                             target_config.get('pkglistgen-ignore-recommended')),
                                         locale=target_config.get('pkglistgen-locale'),
                                         locales_from=target_config.get('pkglistgen-locales-from'))

        if stop_after_solve:
            return

        if drop_list and not only_release_packages:
            weakremovers_file = os.path.join(release_dir, 'weakremovers.inc')
            try:
                self.create_weakremovers(project, target_config, oldrepos_dir, output=open(weakremovers_file, 'w'))
            except MismatchedRepoException:
                logging.error("Failed to create weakremovers.inc due to mismatch in repos - project most likey started building again.")
                return

        delete_products = target_config.get('pkglistgen-delete-products', '').split(' ')
        file_utils.unlink_list(product_dir, delete_products)

        logging.debug('-> product service')
        product_version = attribute_value_load(api.apiurl, project, 'ProductVersion')
        if not product_version:
            # for stagings the product version doesn't matter (I hope)
            product_version = '1'
        for product_file in glob.glob(os.path.join(product_dir, '*.product')):
            self.replace_product_version(product_file, product_version)
            logging.debug(subprocess.check_output(
                [PRODUCT_SERVICE, product_file, product_dir, project], encoding='utf-8'))

        for delete_kiwi in target_config.get('pkglistgen-delete-kiwis-{}'.format(scope), '').split(' '):
            delete_kiwis = glob.glob(os.path.join(product_dir, delete_kiwi))
            file_utils.unlink_list(product_dir, delete_kiwis)
        if scope == 'staging':
            self.strip_medium_from_staging(product_dir)

        spec_files = glob.glob(os.path.join(product_dir, '*.spec'))
        file_utils.move_list(spec_files, release_dir)
        inc_files = glob.glob(os.path.join(group_dir, '*.inc'))
        # filter out the special weakremovers.inc file, it is handled separately below
        inc_files = filter(lambda file: not file.endswith('weakremovers.inc'), inc_files)
        file_utils.move_list(inc_files, release_dir)

        # do not overwrite weakremovers.inc if it exists
        # we will commit there afterwards if needed
        if os.path.exists(os.path.join(group_dir, 'weakremovers.inc')) and \
           not os.path.exists(os.path.join(release_dir, 'weakremovers.inc')):
            file_utils.move_list([os.path.join(group_dir, 'weakremovers.inc')], release_dir)

        file_utils.multibuild_from_glob(release_dir, '*.spec')
        self.build_stub(release_dir, 'spec')
        self.commit_package(release_dir)

        if only_release_packages:
            return

        file_utils.multibuild_from_glob(product_dir, '*.kiwi')
        self.build_stub(product_dir, 'kiwi')

        reference_summary = os.path.join(group_dir, f'summary-{scope}.txt')
        if os.path.isfile(reference_summary):
            summary_file = os.path.join(product_dir, f'summary-{scope}.txt')
            output = []
            for group in summary:
                for package in sorted(summary[group]):
                    output.append(f'{package}:{group}')

            with open(summary_file, 'w') as f:
                for line in sorted(output):
                    f.write(line + '\n')

        self.commit_package(product_dir)

        if os.path.isfile(reference_summary):
            return self.handle_package_diff(project, reference_summary, summary_file)
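
# --- Usage sketch (illustrative, not part of the original source) ---
# Assuming a configured osc setup and a staging API object `api`; the owning
# class name `PkgListGen` and the project names are assumptions, since only
# the methods are shown above.
#
#   tool = PkgListGen()
#   tool.dry_run = True
#   tool.update_and_solve_target(api, 'openSUSE:Factory', target_config,
#                                'standard', 'openSUSE:Factory:Staging:A',
#                                'staging', force=False, no_checkout=False,
#                                only_release_packages=False,
#                                stop_after_solve=True)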
Example #32
0
class TestReviewBotComment(OBSLocal.OBSLocalTestCase):
    def setUp(self):
        super(TestReviewBotComment, self).setUp()
        self.api = CommentAPI(self.apiurl)

        # Ensure different test runs operate in a unique namespace.
        self.bot = '::'.join([type(self).__name__, str(random.getrandbits(8))])
        self.review_bot = ReviewBot(self.apiurl, logger=logging.getLogger(self.bot))
        self.review_bot.bot_name = self.bot

        self.osc_user('factory-auto')

    def tearDown(self):
        self.api.delete_from(project_name=PROJECT)
        self.assertFalse(len(self.api.get_comments(project_name=PROJECT)))

    def test_basic_logger(self):
        comment_count = len(self.api.get_comments(project_name=PROJECT))
        self.assertFalse(self.comments_filtered(self.bot)[0])

        # Initial comment.
        self.review_bot.comment_handler_add()
        self.review_bot.logger.info('something interesting')
        self.review_bot.comment_write(project=PROJECT)
        comment, _ = self.comments_filtered(self.bot)
        self.assertTrue(comment['comment'].endswith('something interesting'))

        # Second comment with extra line.
        self.review_bot.comment_handler_add()
        self.review_bot.logger.info('something interesting')
        self.review_bot.logger.info('something extra')
        self.review_bot.comment_write(project=PROJECT)
        comment, _ = self.comments_filtered(self.bot)
        self.assertTrue(comment['comment'].endswith('something extra'))

    def test_workflow(self):
        comment_count = len(self.api.get_comments(project_name=PROJECT))
        self.assertFalse(self.comments_filtered(self.bot)[0])

        # Initial comment.
        info = {'state': 'seen', 'result': 'failed'}
        info_extra = {'build': '1'}
        info_merged = info.copy()
        info_merged.update(info_extra)
        self.review_bot.comment_write(state='seen', result='failed', identical=True,
                                      info_extra=info_extra, info_extra_identical=False,
                                      project=PROJECT, message=COMMENT)
        comment, info_parsed = self.comments_filtered(self.bot)
        self.assertTrue(comment['comment'].endswith(COMMENT))
        self.assertEqual(info_parsed, info_merged)

        # Only build change (expect no change).
        info_extra = {'build': '2'}
        self.review_bot.comment_write(state='seen', result='failed', identical=True,
                                      info_extra=info_extra, info_extra_identical=False,
                                      project=PROJECT, message=COMMENT)
        comment, info_parsed = self.comments_filtered(self.bot)
        self.assertTrue(comment['comment'].endswith(COMMENT))
        self.assertEqual(info_parsed, info_merged)

        # Build and comment (expect comment replacement).
        info_extra = {'build': '3'}
        info_merged.update(info_extra)
        self.review_bot.comment_write(state='seen', result='failed', identical=True,
                                      info_extra=info_extra, info_extra_identical=False,
                                      project=PROJECT, message=COMMENT + '3')
        comment, info_parsed = self.comments_filtered(self.bot)
        self.assertTrue(comment['comment'].endswith(COMMENT + '3'))
        self.assertEqual(info_parsed, info_merged)

        # Final build (expect comment replacement).
        info_extra = {'build': '4'}
        info_merged.update(info_extra)
        self.review_bot.comment_write(state='seen', result='failed', identical=True,
                                      info_extra=info_extra, info_extra_identical=True,
                                      project=PROJECT, message=COMMENT + '4')
        comment, info_parsed = self.comments_filtered(self.bot)
        self.assertTrue(comment['comment'].endswith(COMMENT + '4'))
        self.assertEqual(info_parsed, info_merged)

        # Final build (expect comment replacement).
        info = {'state': 'done', 'result': 'passed'}
        info_extra = {'build': '5'}
        info_merged = info.copy()
        info_merged.update(info_extra)
        self.review_bot.comment_write(state='done', result='passed', identical=True,
                                      info_extra=info_extra, info_extra_identical=True,
                                      only_replace=True,
                                      project=PROJECT, message=COMMENT + '5')
        comment, info_parsed = self.comments_filtered(self.bot)
        self.assertTrue(comment['comment'].endswith(COMMENT + '5'))
        self.assertEqual(info_parsed, info_merged)

        # Should never be more than one new comment.
        self.assertEqual(len(self.api.get_comments(project_name=PROJECT)), comment_count + 1)

    def test_only_replace_none(self):
        self.review_bot.comment_write(only_replace=True,
                                      project=PROJECT, message=COMMENT)
        self.assertFalse(self.comments_filtered(self.bot)[0])

    def test_dryrun(self):
        # dryrun = True, no comment.
        self.review_bot.dryrun = True
        self.review_bot.comment_write(project=PROJECT, message=COMMENT)
        self.assertFalse(self.comments_filtered(self.bot)[0])

        # dryrun = False, a comment.
        self.review_bot.dryrun = False
        self.review_bot.comment_write(project=PROJECT, message=COMMENT)
        self.assertTrue(self.comments_filtered(self.bot)[0])

        # dryrun = True, no replacement.
        self.review_bot.dryrun = True
        self.review_bot.comment_write(state='changed', project=PROJECT, message=COMMENT)
        _, info = self.comments_filtered(self.bot)
        self.assertEqual(info['state'], 'done')

        # dryrun = False, replacement.
        self.review_bot.dryrun = False
        self.review_bot.comment_write(state='changed', project=PROJECT, message=COMMENT)
        _, info = self.comments_filtered(self.bot)
        self.assertEqual(info['state'], 'changed')

    def test_bot_name_suffix(self):
        suffix1 = 'suffix1'
        bot_suffixed1 = '::'.join([self.bot, suffix1])

        suffix2 = 'suffix2'
        bot_suffixed2 = '::'.join([self.bot, suffix2])

        self.review_bot.comment_write(bot_name_suffix=suffix1, project=PROJECT, message=COMMENT)
        self.assertFalse(self.comments_filtered(self.bot)[0])
        self.assertTrue(self.comments_filtered(bot_suffixed1)[0])
        self.assertFalse(self.comments_filtered(bot_suffixed2)[0])

        self.review_bot.comment_write(bot_name_suffix=suffix2, project=PROJECT, message=COMMENT)
        self.assertFalse(self.comments_filtered(self.bot)[0])
        self.assertTrue(self.comments_filtered(bot_suffixed1)[0])
        self.assertTrue(self.comments_filtered(bot_suffixed2)[0])

        self.review_bot.comment_write(bot_name_suffix=suffix1, project=PROJECT, message=COMMENT + '\nnew')
        comment, _ = self.comments_filtered(bot_suffixed1)
        self.assertTrue(comment['comment'].endswith(COMMENT + '\nnew'))

        comment, _ = self.comments_filtered(bot_suffixed2)
        self.assertTrue(comment['comment'].endswith(COMMENT))

    def comments_filtered(self, bot):
        comments = self.api.get_comments(project_name=PROJECT)
        return self.api.comment_find(comments, bot)
class PkglistComments(object):
    """Handling staging comments of diffs"""

    def __init__(self, apiurl):
        self.apiurl = apiurl
        self.comment = CommentAPI(apiurl)

    def read_summary_file(self, file):
        ret = dict()
        with open(file, 'r') as f:
            for line in f:
                pkg, group = line.strip().split(':')
                ret.setdefault(pkg, [])
                ret[pkg].append(group)
        return ret

    def write_summary_file(self, file, content):
        output = []
        for pkg in sorted(content):
            for group in sorted(content[pkg]):
                output.append(f"{pkg}:{group}")

        with open(file, 'w') as f:
            for line in sorted(output):
                f.write(line + '\n')

    def calculcate_package_diff(self, old_file, new_file):
        old_file = self.read_summary_file(old_file)
        new_file = self.read_summary_file(new_file)

        # remove common part
        keys = list(old_file.keys())
        for key in keys:
            if new_file.get(key, []) == old_file[key]:
                del new_file[key]
                del old_file[key]

        if not old_file and not new_file:
            return None

        removed = dict()
        for pkg in old_file:
            old_groups = old_file[pkg]
            if new_file.get(pkg):
                continue
            removekey = ','.join(old_groups)
            removed.setdefault(removekey, [])
            removed[removekey].append(pkg)

        report = ''
        for rm in sorted(removed.keys()):
            report += f"**Remove from {rm}**\n\n```\n"
            paragraph = ', '.join(removed[rm])
            report += "\n".join(textwrap.wrap(paragraph, width=90, break_long_words=False, break_on_hyphens=False))
            report += "\n```\n\n"

        moved = dict()
        for pkg in old_file:
            old_groups = old_file[pkg]
            new_groups = new_file.get(pkg)
            if not new_groups:
                continue
            movekey = ','.join(old_groups) + ' to ' + ','.join(new_groups)
            moved.setdefault(movekey, [])
            moved[movekey].append(pkg)

        for move in sorted(moved.keys()):
            report += f"**Move from {move}**\n\n```\n"
            paragraph = ', '.join(moved[move])
            report += "\n".join(textwrap.wrap(paragraph, width=90, break_long_words=False, break_on_hyphens=False))
            report += "\n```\n\n"

        added = dict()
        for pkg in new_file:
            if pkg in old_file:
                continue
            addkey = ','.join(new_file[pkg])
            added.setdefault(addkey, [])
            added[addkey].append(pkg)

        for group in sorted(added):
            report += f"**Add to {group}**\n\n```\n"
            paragraph = ', '.join(added[group])
            report += "\n".join(textwrap.wrap(paragraph, width=90, break_long_words=False, break_on_hyphens=False))
            report += "\n```\n\n"

        return report.strip()

    def handle_package_diff(self, project, old_file, new_file):
        comments = self.comment.get_comments(project_name=project)
        comment, _ = self.comment.comment_find(comments, MARKER)

        report = self.calculcate_package_diff(old_file, new_file)
        if not report:
            if comment:
                self.comment.delete(comment['id'])
            return 0
        report = self.comment.add_marker(report, MARKER)

        if comment:
            write_comment = report != comment['comment']
        else:
            write_comment = True
        if write_comment:
            if comment:
                self.comment.delete(comment['id'])
            self.comment.add_comment(project_name=project, comment=report)
        else:
            for c in comments.values():
                if c['parent'] == comment['id']:
                    ct = c['comment']
                    if ct.startswith('ignore ') or ct == 'ignore':
                        print(c)
                        return 0
                    if ct.startswith('approve ') or ct == 'approve':
                        print(c)
                        return 0

        return 1
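
    # Illustrative note (not from the original source): handle_package_diff()
    # returns 0 when there is nothing (left) to act on - no diff, or a child
    # comment saying "ignore"/"approve" - and 1 while a diff comment is still
    # pending, so the value can double as an exit code.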

    def is_approved(self, comment, comments):
        if not comment:
            return None

        for c in comments.values():
            if c['parent'] == comment['id']:
                ct = c['comment']
                if ct.startswith('approve ') or ct == 'approve':
                    return c['who']
        return None

    def parse_title(self, line):
        m = re.match(r'\*\*Add to (.*)\*\*', line)
        if m:
            return {'cmd': 'add', 'to': m.group(1).split(','), 'pkgs': []}
        m = re.match(r'\*\*Move from (.*) to (.*)\*\*', line)
        if m:
            return {'cmd': 'move', 'from': m.group(1).split(','), 'to': m.group(2).split(','), 'pkgs': []}
        m = re.match(r'\*\*Remove from (.*)\*\*', line)
        if m:
            return {'cmd': 'remove', 'from': m.group(1).split(','), 'pkgs': []}
        return None

    def parse_sections(self, comment):
        current_section = None
        sections = []
        in_quote = False
        for line in comment.split('\n'):
            if line.startswith('**'):
                if current_section:
                    sections.append(current_section)
                current_section = self.parse_title(line)
                continue
            if line.startswith("```"):
                in_quote = not in_quote
                continue
            if in_quote:
                for pkg in line.split(','):
                    pkg = pkg.strip()
                    if pkg:
                        current_section['pkgs'].append(pkg)
        if current_section:
            sections.append(current_section)
        return sections
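
    # Illustrative example (hypothetical data): a comment body such as
    #
    #   **Move from editors to devel_basis**
    #
    #   ```
    #   cscope, ctags
    #   ```
    #
    # is parsed into
    #   [{'cmd': 'move', 'from': ['editors'], 'to': ['devel_basis'],
    #     'pkgs': ['cscope', 'ctags']}]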

    def apply_move(self, content, section):
        for pkg in section['pkgs']:
            pkg_content = content[pkg]
            for group in section['from']:
                try:
                    pkg_content.remove(group)
                except ValueError:
                    logging.error(f"Can't remove {pkg} from {group}, not there. Mismatch.")
                    sys.exit(1)
            for group in section['to']:
                pkg_content.append(group)
            content[pkg] = pkg_content

    def apply_add(self, content, section):
        for pkg in section['pkgs']:
            content.setdefault(pkg, [])
            content[pkg] += section['to']

    def apply_remove(self, content, section):
        for pkg in section['pkgs']:
            pkg_content = content[pkg]
            for group in section['from']:
                try:
                    pkg_content.remove(group)
                except ValueError:
                    logging.error(f"Can't remove {pkg} from {group}, not there. Mismatch.")
                    sys.exit(1)
            content[pkg] = pkg_content

    def apply_commands(self, filename, sections):
        content = self.read_summary_file(filename)
        for section in sections:
            if section['cmd'] == 'move':
                self.apply_move(content, section)
            elif section['cmd'] == 'add':
                self.apply_add(content, section)
            elif section['cmd'] == 'remove':
                self.apply_remove(content, section)
        self.write_summary_file(filename, content)

    def format_pkgs(self, pkgs):
        text = ', '.join(pkgs)
        return "  " + "\n  ".join(textwrap.wrap(text, width=68, break_long_words=False, break_on_hyphens=False)) + "\n\n"

    def format_move(self, section):
        gfrom = ','.join(section['from'])
        gto = ','.join(section['to'])
        text = f"  * Move from {gfrom} to {gto}:\n"
        return text + self.format_pkgs(section['pkgs'])

    def format_add(self, section):
        gto = ','.join(section['to'])
        text = f"  * Add to {gto}:\n"
        return text + self.format_pkgs(section['pkgs'])

    def format_remove(self, section):
        gfrom = ','.join(section['from'])
        text = f"  * Remove from {gfrom}:\n"
        return text + self.format_pkgs(section['pkgs'])

    def apply_changes(self, filename, sections, approver):
        text = "-------------------------------------------------------------------\n"
        now = datetime.datetime.utcnow()
        date = now.strftime("%a %b %d %H:%M:%S UTC %Y")
        url = makeurl(self.apiurl, ['person', approver])
        root = ET.parse(http_GET(url))
        realname = root.find('realname').text
        email = root.find('email').text
        text += f"{date} - {realname} <{email}>\n\n- Approved changes to summary-staging.txt\n"
        for section in sections:
            if section['cmd'] == 'move':
                text += self.format_move(section)
            elif section['cmd'] == 'add':
                text += self.format_add(section)
            elif section['cmd'] == 'remove':
                text += self.format_remove(section)
        with open(filename + '.new', 'w') as writer:
            writer.write(text)
            with open(filename, 'r') as reader:
                for line in reader:
                    writer.write(line)
        os.rename(filename + '.new', filename)
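
    # Illustrative result (hypothetical data): apply_changes() prepends a
    # changelog entry such as
    #
    #   -------------------------------------------------------------------
    #   Mon Jan 02 10:20:30 UTC 2023 - Jane Doe <jane@example.com>
    #
    #   - Approved changes to summary-staging.txt
    #     * Move from editors to devel_basis:
    #       cscope, ctags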

    def check_staging_accept(self, project, target):
        comments = self.comment.get_comments(project_name=project)
        comment, _ = self.comment.comment_find(comments, MARKER)
        approver = self.is_approved(comment, comments)
        if not approver:
            return
        sections = self.parse_sections(comment['comment'])
        with tempfile.TemporaryDirectory() as tmpdirname:
            checkout_package(self.apiurl, target, '000package-groups', expand_link=True, outdir=tmpdirname)
            self.apply_commands(tmpdirname + '/summary-staging.txt', sections)
            self.apply_changes(tmpdirname + '/package-groups.changes', sections, approver)
            package = Package(tmpdirname)
            package.commit(msg='Approved packagelist changes', skip_local_service_run=True)
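
# --- Approval workflow sketch (illustrative, not part of the original source) ---
# 1. The bot posts a marked diff comment (see handle_package_diff) on the
#    staging project.
# 2. A reviewer replies "approve" (or "approve <reason>") to that comment.
# 3. check_staging_accept() finds the approval, re-parses the diff sections
#    from the bot comment, applies them to summary-staging.txt in a fresh
#    checkout of 000package-groups, records the approver in
#    package-groups.changes and commits the result.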
Example #34
0
class TestAccept(unittest.TestCase):

    def setup_wf(self):
        wf = OBSLocal.StagingWorkflow()
        wf.setup_rings()

        self.c_api = CommentAPI(wf.api.apiurl)

        staging_b = wf.create_staging('B', freeze=True)
        self.prj = staging_b.name

        self.winerq = wf.create_submit_request('devel:wine', 'wine', text='Hallo World')
        self.assertEqual(True, SelectCommand(wf.api, self.prj).perform(['wine']))
        self.comments = self.c_api.get_comments(project_name=self.prj)
        return wf

    def test_accept_comments(self):
        wf = self.setup_wf()

        self.assertEqual(True, AcceptCommand(wf.api).accept_all(['B']))

        # Comments are cleared up
        accepted_comments = self.c_api.get_comments(project_name=self.prj)
        self.assertEqual(len(accepted_comments), 0)

    def test_accept_final_comment(self):
        wf = self.setup_wf()

        # stub out the cleanup so we can inspect the comments before they are removed
        wf.api.staging_deactivate = MagicMock(return_value=True)

        self.assertEqual(True, AcceptCommand(wf.api).accept_all(['B']))

        comments = self.c_api.get_comments(project_name=self.prj)
        self.assertGreater(len(comments), len(self.comments))

        # check which id was added
        new_id = (set(comments.keys()) - set(self.comments.keys())).pop()
        comment = comments[new_id]['comment']
        self.assertEqual('Project "{}" accepted. The following packages have been submitted to openSUSE:Factory: wine.'.format(self.prj), comment)

    def test_accept_new_multibuild_package(self):
        wf = self.setup_wf()

        staging = wf.create_staging('A', freeze=True)

        project = wf.create_project('devel:gcc')
        package = OBSLocal.Package(name='gcc9', project=project)
        package.create_commit(filename='gcc9.spec')
        package.create_commit(filename='gcc9-tests.spec')
        package.create_commit('<multibuild><flavor>gcc9-tests.spec</flavor></multibuild>', filename='_multibuild')
        wf.submit_package(package)

        ret = SelectCommand(wf.api, staging.name).perform(['gcc9'])
        ac = AcceptCommand(wf.api)
        self.assertEqual(True, ac.accept_all(['A'], True))

        # no stale links
        self.assertEqual([], package_list(wf.apiurl, staging.name))
        self.assertEqual(['gcc9', 'wine'], package_list(wf.apiurl, wf.project))

    def test_accept_new_multispec_package(self):
        wf = self.setup_wf()

        staging = wf.create_staging('A', freeze=True)

        project = wf.create_project('devel:gcc')
        package = OBSLocal.Package(name='gcc9', project=project)
        package.create_commit(filename='gcc9.spec')
        package.create_commit(filename='gcc9-tests.spec')
        wf.submit_package(package)

        ret = SelectCommand(wf.api, staging.name).perform(['gcc9'])
        ac = AcceptCommand(wf.api)
        self.assertEqual(True, ac.accept_all(['A'], True))

        # no stale links
        self.assertEqual([], package_list(wf.apiurl, staging.name))
        self.assertEqual(['gcc9', 'gcc9-tests', 'wine'], package_list(wf.apiurl, wf.project))

    def test_accept_switch_to_multibuild_package(self):
        wf = self.setup_wf()

        staging = wf.create_staging('A', freeze=True)

        tpackage = wf.create_package('target', 'gcc9')
        tpackage.create_commit(filename='gcc9.spec')
        tpackage.create_commit(filename='gcc9-tests.spec')
        lpackage = wf.create_package('target', 'gcc9-tests')
        lpackage.create_commit('<link package="gcc9" cicount="copy" />', filename='_link')

        project = wf.create_project('devel:gcc')
        package = OBSLocal.Package(name='gcc9', project=project)
        package.create_commit(filename='gcc9.spec')
        package.create_commit(filename='gcc9-tests.spec')
        package.create_commit('<multibuild><flavor>gcc9-tests.spec</flavor></multibuild>', filename='_multibuild')

        wf.submit_package(package)

        ret = SelectCommand(wf.api, staging.name).perform(['gcc9'])
        ac = AcceptCommand(wf.api)
        self.assertEqual(True, ac.accept_all(['A'], True))

        # no stale links
        self.assertEqual([], package_list(wf.apiurl, staging.name))
        self.assertEqual(['gcc9', 'wine'], package_list(wf.apiurl, wf.project))
Example #35
0
class OpenQAReport(object):
    def __init__(self, api):
        self.api = api
        self.comment = CommentAPI(api.apiurl)

    def _package_url(self, package):
        link = 'https://build.opensuse.org/package/live_build_log/%s/%s/%s/%s'
        link = link % (package['project'],
                       package['package'],
                       package['repository'],
                       package['arch'])
        text = '[%s](%s)' % (package['arch'], link)
        return text

    def _openQA_url(self, job):
        test_name = job['name'].split('-')[-1]
        link = '%s/tests/%s' % (self.api.copenqa, job['id'])
        text = '[%s](%s)' % (test_name, link)
        return text

    def _openQA_module_url(self, job, module):
        link = '%s/tests/%s/modules/%s/steps/1' % (
            self.api.copenqa, job['id'], module['name']
        )
        text = '[%s](%s)' % (module['name'], link)
        return text

    def old_enough(self, _date):
        time_delta = datetime.utcnow() - _date
        safe_margin = timedelta(hours=MARGIN_HOURS)
        return safe_margin <= time_delta

    def get_info(self, project):
        _prefix = '{}:'.format(self.api.cstaging)
        if project.startswith(_prefix):
            project = project.replace(_prefix, '')

        query = {'format': 'json'}
        url = self.api.makeurl(('project', 'staging_projects',
                                self.api.project, project), query=query)
        info = json.load(self.api.retried_GET(url))
        return info

    def get_broken_package_status(self, info):
        status = info['broken_packages']
        subproject = info['subproject']
        if subproject:
            status.extend(subproject['broken_packages'])
        return status

    def get_openQA_status(self, info):
        status = info['openqa_jobs']
        subproject = info['subproject']
        if subproject:
            status.extend(subproject['openqa_jobs'])
        return status

    def is_there_openqa_comment(self, project):
        """Return True if there is a previous comment."""
        signature = '<!-- openQA status -->'
        comments = self.comment.get_comments(project_name=project)
        comment = [c for c in comments.values() if signature in c['comment']]
        return len(comment) > 0

    def update_status_comment(self, project, report, force=False):
        signature = '<!-- openQA status -->'
        report = '%s\n%s' % (signature, str(report))

        write_comment = False

        comments = self.comment.get_comments(project_name=project)
        comment = [c for c in comments.values() if signature in c['comment']]
        if comment and len(comment) > 1:
            print('ERROR. There is more than one openQA status comment in %s' % project)
            # for c in comment:
            #     self.comment.delete(c['id'])
            # write_comment = True
        elif comment and comment[0]['comment'] != report and self.old_enough(comment[0]['when']):
            self.comment.delete(comment[0]['id'])
            write_comment = True
        elif not comment:
            write_comment = True

        if write_comment or force:
            if osc.conf.config['debug']:
                print('Updating comment')
            self.comment.add_comment(project_name=project, comment=report)

    def _report_broken_packages(self, info):
        broken_package_status = self.get_broken_package_status(info)

        # Group packages by name
        groups = defaultdict(list)
        for package in broken_package_status:
            groups[package['package']].append(package)

        failing_lines = [
            '* Build failed %s (%s)' % (key, ', '.join(self._package_url(p) for p in value))
            for key, value in groups.items()
        ]

        report = '\n'.join(failing_lines[:MAX_LINES])
        if len(failing_lines) > MAX_LINES:
            report += '* and more (%s) ...' % (len(failing_lines) - MAX_LINES)
        return report

    def _report_openQA(self, info):
        failing_lines, green_lines = [], []

        openQA_status = self.get_openQA_status(info)
        for job in openQA_status:
            test_name = job['name'].split('-')[-1]
            fails = [
                '  * %s (%s)' % (test_name, self._openQA_module_url(job, module))
                for module in job['modules'] if module['result'] == 'failed'
            ]

            if fails:
                failing_lines.extend(fails)
            else:
                green_lines.append(self._openQA_url(job))

        failing_report, green_report = '', ''
        if failing_lines:
            failing_report = '* Failing tests:\n' + '\n'.join(failing_lines[:MAX_LINES])
            if len(failing_lines) > MAX_LINES:
                failing_report += '\n  * and more (%s) ...' % (len(failing_lines) - MAX_LINES)
        if green_lines:
            green_report = '* Succeeding tests:' + ', '.join(green_lines[:MAX_LINES])
            if len(green_lines) > MAX_LINES:
                green_report += ', and more (%s) ...' % (len(green_lines) - MAX_LINES)

        return '\n'.join((failing_report, green_report)).strip(), bool(failing_lines)

    def report(self, project):
        info = self.get_info(project)

        # Some staging projects, e.g. openSUSE:Factory:Staging:Gcc49,
        # do not have any info.
        if not info:
            return

        if info['overall_state'] == 'empty':
            return

        # The 'unacceptable' status means that the project will be
        # replaced soon. Better not to disturb it with noise.
        if info['overall_state'] == 'unacceptable':
            return

        report_broken_packages = self._report_broken_packages(info)
        report_openQA, some_openqa_fail = self._report_openQA(info)

        if report_broken_packages or some_openqa_fail:
            if report_broken_packages:
                report_broken_packages = 'Broken:\n\n' + report_broken_packages
            if report_openQA:
                report_openQA = 'openQA:\n\n' + report_openQA
            report = '\n\n'.join((report_broken_packages, report_openQA))
            report = report.strip()
            if report:
                if osc.conf.config['debug']:
                    print(project)
                    print('-' * len(project))
                    print(report)
                self.update_status_comment(project, report)
            elif not info['overall_state'] == 'acceptable' and self.is_there_openqa_comment(project):
                report = 'Congratulations! All fine now.'
                if osc.conf.config['debug']:
                    print(project)
                    print('-' * len(project))
                    print(report)
                self.update_status_comment(project, report, force=True)
Example #36
0
class OpenQAReport(object):
    def __init__(self, api):
        self.api = api
        self.comment = CommentAPI(api.apiurl)

    def _package_url(self, package):
        link = "https://build.opensuse.org/package/live_build_log/%s/%s/%s/%s"
        link = link % (package["project"], package["package"], package["repository"], package["arch"])
        text = "[%s](%s)" % (package["arch"], link)
        return text

    def _openQA_url(self, job):
        test_name = job["name"].split("-")[-1]
        link = "%s/tests/%s" % (self.api.copenqa, job["id"])
        text = "[%s](%s)" % (test_name, link)
        return text

    def _openQA_module_url(self, job, module):
        link = "%s/tests/%s/modules/%s/steps/1" % (self.api.copenqa, job["id"], module["name"])
        text = "[%s](%s)" % (module["name"], link)
        return text

    def old_enough(self, _date):
        time_delta = datetime.utcnow() - _date
        safe_margin = timedelta(hours=MARGIN_HOURS)
        return safe_margin <= time_delta

    def get_info(self, project):
        _prefix = "{}:".format(self.api.cstaging)
        if project.startswith(_prefix):
            project = project.replace(_prefix, "")

        query = {"format": "json"}
        url = self.api.makeurl(("project", "staging_projects", self.api.project, project), query=query)
        info = json.load(self.api.retried_GET(url))
        return info

    def get_broken_package_status(self, info):
        status = info["broken_packages"]
        subproject = info["subproject"]
        if subproject:
            status.extend(subproject["broken_packages"])
        return status

    def get_openQA_status(self, info):
        status = info["openqa_jobs"]
        subproject = info["subproject"]
        if subproject:
            status.extend(subproject["openqa_jobs"])
        return status

    def is_there_openqa_comment(self, project):
        """Return True if there is a previous comment."""
        signature = "<!-- openQA status -->"
        comments = self.comment.get_comments(project_name=project)
        comment = [c for c in comments.values() if signature in c["comment"]]
        return len(comment) > 0

    def update_status_comment(self, project, report, force=False):
        signature = "<!-- openQA status -->"
        report = "%s\n%s" % (signature, str(report))

        write_comment = False

        comments = self.comment.get_comments(project_name=project)
        comment = [c for c in comments.values() if signature in c["comment"]]
        if comment and len(comment) > 1:
            print "ERROR. There are more than one openQA status comment in %s" % project
            # for c in comment:
            #     self.comment.delete(c['id'])
            # write_comment = True
        elif comment and comment[0]["comment"] != report and self.old_enough(comment[0]["when"]):
            self.comment.delete(comment[0]["id"])
            write_comment = True
        elif not comment:
            write_comment = True

        if write_comment or force:
            if osc.conf.config["debug"]:
                print "Updating comment"
            self.comment.add_comment(project_name=project, comment=report)

    def _report_broken_packages(self, info):
        broken_package_status = self.get_broken_package_status(info)

        # Group packages by name
        groups = defaultdict(list)
        for package in broken_package_status:
            groups[package["package"]].append(package)

        failing_lines = [
            "* Build failed %s (%s)" % (key, ", ".join(self._package_url(p) for p in value))
            for key, value in groups.items()
        ]

        report = "\n".join(failing_lines[:MAX_LINES])
        if len(failing_lines) > MAX_LINES:
            report += "* and more (%s) ..." % (len(failing_lines) - MAX_LINES)
        return report

    def _report_openQA(self, info):
        failing_lines, green_lines = [], []

        openQA_status = self.get_openQA_status(info)
        for job in openQA_status:
            test_name = job["name"].split("-")[-1]
            fails = [
                "  * %s (%s)" % (test_name, self._openQA_module_url(job, module))
                for module in job["modules"]
                if module["result"] == "failed"
            ]

            if fails:
                failing_lines.extend(fails)
            else:
                green_lines.append(self._openQA_url(job))

        failing_report, green_report = "", ""
        if failing_lines:
            failing_report = "* Failing openQA tests:\n" + "\n".join(failing_lines[:MAX_LINES])
            if len(failing_lines) > MAX_LINES:
                failing_report += "\n  * and more (%s) ..." % (len(failing_lines) - MAX_LINES)
        if green_lines:
            green_report = "* Succeeding tests:" + ", ".join(green_lines[:MAX_LINES])
            if len(green_lines) > MAX_LINES:
                green_report += ", and more (%s) ..." % (len(green_lines) - MAX_LINES)

        return "\n".join((failing_report, green_report)), bool(failing_lines)

    def report(self, project):
        info = self.get_info(project)

        # Some staging projects, e.g. openSUSE:Factory:Staging:Gcc49,
        # do not have any info.
        if not info:
            return

        if info["overall_state"] == "empty":
            return

        # The 'unacceptable' status means that the project will be
        # replaced soon. Better not to disturb it with noise.
        if info["overall_state"] == "unacceptable":
            return

        report_broken_packages = self._report_broken_packages(info)
        report_openQA, some_openqa_fail = self._report_openQA(info)

        if report_broken_packages or some_openqa_fail:
            report = "\n\n".join((report_broken_packages, report_openQA))
            report = report.strip()
            if report:
                if osc.conf.config["debug"]:
                    print(project)
                    print("-" * len(project))
                    print(report)
                self.update_status_comment(project, report)
            elif not info["overall_state"] == "acceptable" and self.is_there_openqa_comment(project):
                report = "Congratulations! All fine now."
                if osc.conf.config["debug"]:
                    print(project)
                    print("-" * len(project))
                    print(report)
                self.update_status_comment(project, report, force=True)
class InstallChecker(object):
    def __init__(self, api, config):
        self.api = api
        self.config = conf.config[api.project]
        self.logger = logging.getLogger('InstallChecker')
        self.commentapi = CommentAPI(api.apiurl)

        self.arch_whitelist = self.config.get('repo_checker-arch-whitelist')
        if self.arch_whitelist:
            self.arch_whitelist = set(self.arch_whitelist.split(' '))

        self.ring_whitelist = set(self.config.get('repo_checker-binary-whitelist-ring', '').split(' '))

        self.cycle_packages = self.config.get('repo_checker-allowed-in-cycles')
        self.calculate_allowed_cycles()

        self.existing_problems = self.binary_list_existing_problem(api.project, api.cmain_repo)
        self.ignore_duplicated = set(self.config.get('installcheck-ignore-duplicated-binaries', '').split(' '))

    def check_required_by(self, fileinfo, provides, requiredby, built_binaries, comments):
        if requiredby.get('name') in built_binaries:
            return True
        # extract >= and the like
        provide = provides.get('dep')
        provide = provide.split(' ')[0]
        comments.append('{} provides {} required by {}'.format(fileinfo.find('name').text, provide, requiredby.get('name')))
        url = self.api.makeurl(['build', self.api.project, self.api.cmain_repo, 'x86_64',
                                '_repository', requiredby.get('name') + '.rpm'],
                               {'view': 'fileinfo_ext'})
        reverse_fileinfo = ET.parse(osc.core.http_GET(url)).getroot()
        for require in reverse_fileinfo.findall('requires_ext'):
            # extract >= and the like here too
            dep = require.get('dep').split(' ')[0]
            if dep != provide:
                continue
            for provided_by in require.findall('providedby'):
                if provided_by.get('name') in built_binaries:
                    continue
                comments.append('  also provided by {} -> ignoring'.format(provided_by.get('name')))
                return True
        comments.append('Error: missing alternative provides for {}'.format(provide))
        return False

    def check_delete_request(self, req, to_ignore, comments):
        package = req['package']
        if package in to_ignore:
            self.logger.info('Delete request for package {} ignored'.format(package))
            return True

        built_binaries = set([])
        file_infos = []
        for fileinfo in fileinfo_ext_all(self.api.apiurl, self.api.project, self.api.cmain_repo, 'x86_64', package):
            built_binaries.add(fileinfo.find('name').text)
            file_infos.append(fileinfo)

        result = True
        for fileinfo in file_infos:
            for provides in fileinfo.findall('provides_ext'):
                for requiredby in provides.findall('requiredby[@name]'):
                    result = result and self.check_required_by(fileinfo, provides, requiredby, built_binaries, comments)

        what_depends_on = depends_on(self.api.apiurl, self.api.project, self.api.cmain_repo, [package], True)

        # filter out dependency on the package itself (happens with e.g.
        # java bootstrapping itself with the previous build)
        if package in what_depends_on:
            what_depends_on.remove(package)

        if len(what_depends_on):
            comments.append('{} is still a build requirement of:\n\n- {}'.format(
                package, '\n- '.join(sorted(what_depends_on))))
            return False

        return result

    def packages_to_ignore(self, project):
        comments = self.commentapi.get_comments(project_name=project)
        ignore_re = re.compile(r'^installcheck: ignore (?P<args>.*)$', re.MULTILINE)

        # the last wins, for now we don't care who said it
        args = []
        for comment in comments.values():
            match = ignore_re.search(comment['comment'].replace('\r', ''))
            if not match:
                continue
            args = match.group('args').strip()
            # allow space and comma as separators
            args = args.replace(',', ' ').split(' ')
        return args
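
    # Illustrative example: a project comment containing the line
    #   installcheck: ignore libfoo1 libfoo-devel
    # whitelists both binaries for the install check; commas work as
    # separators too, and the last matching comment wins.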

    def staging(self, project, force=False):
        api = self.api

        repository = self.api.cmain_repo

        # fetch the build ids at the beginning - mirroring takes a while
        buildids = {}
        try:
            architectures = self.target_archs(project, repository)
        except HTTPError as e:
            if e.code == 404:
                # adi projects disappear all the time, so don't worry
                return False
            raise e

        all_done = True
        for arch in architectures:
            pra = '{}/{}/{}'.format(project, repository, arch)
            buildid = self.buildid(project, repository, arch)
            if not buildid:
                self.logger.error('No build ID in {}'.format(pra))
                return False
            buildids[arch] = buildid
            url = self.report_url(project, repository, arch, buildid)
            try:
                root = ET.parse(osc.core.http_GET(url)).getroot()
                check = root.find('check[@name="installcheck"]/state')
                if check is not None and check.text != 'pending':
                    self.logger.info('{} already "{}", ignoring'.format(pra, check.text))
                else:
                    all_done = False
            except HTTPError:
                self.logger.info('{} has no status report'.format(pra))
                all_done = False

        if all_done and not force:
            return True

        repository_pairs = repository_path_expand(api.apiurl, project, repository)
        staging_pair = [project, repository]

        result = True

        status = api.project_status(project)
        if not status:
            self.logger.error('no project status for {}'.format(project))
            return False

        result_comment = []

        to_ignore = self.packages_to_ignore(project)
        meta = api.load_prj_pseudometa(status['description'])
        for req in meta['requests']:
            if req['type'] == 'delete':
                result = result and self.check_delete_request(req, to_ignore, result_comment)

        for arch in architectures:
            # hit the first repository in the target project (if it exists)
            target_pair = None
            directories = []
            for pair_project, pair_repository in repository_pairs:
                # ignore repositories only inherited for config
                if repository_arch_state(self.api.apiurl, pair_project, pair_repository, arch):
                    if not target_pair and pair_project == api.project:
                        target_pair = [pair_project, pair_repository]

                    directories.append(self.mirror(pair_project, pair_repository, arch))

            if not api.is_adi_project(project):
                # For "leaky" ring packages in letter stagings, where the
                # repository setup does not include the target project, that are
                # not intended to to have all run-time dependencies satisfied.
                whitelist = self.ring_whitelist
            else:
                whitelist = self.existing_problems

            whitelist |= set(to_ignore)

            check = self.cycle_check(project, repository, arch)
            if not check.success:
                self.logger.warning('Cycle check failed')
                result_comment.append(check.comment)
                result = False

            check = self.install_check(target_pair, arch, directories, None, whitelist)
            if not check.success:
                self.logger.warning('Install check failed')
                result_comment.append(check.comment)
                result = False

        duplicates = duplicated_binaries_in_repo(self.api.apiurl, project, repository)
        # remove whitelisted duplicates
        for arch in list(duplicates):
            for binary in self.ignore_duplicated:
                duplicates[arch].pop(binary, None)
            if not len(duplicates[arch]):
                del duplicates[arch]
        if len(duplicates):
            self.logger.warning('Found duplicated binaries')
            result_comment.append(yaml.dump(duplicates, default_flow_style=False))
            result = False

        if result:
            self.report_state('success', self.gocd_url(), project, repository, buildids)
        else:
            result_comment.insert(0, 'Generated from {}\n'.format(self.gocd_url()))
            self.report_state('failure', self.upload_failure(project, result_comment), project, repository, buildids)
            self.logger.warning('Not accepting {}'.format(project))
            return False

        return result

    def upload_failure(self, project, comment):
        print(project, '\n'.join(comment))
        url = self.api.makeurl(['source', 'home:repo-checker', 'reports', project])
        osc.core.http_PUT(url, data='\n'.join(comment))

        url = self.api.apiurl.replace('api.', 'build.')
        return '{}/package/view_file/home:repo-checker/reports/{}'.format(url, project)

    def report_state(self, state, report_url, project, repository, buildids):
        architectures = self.target_archs(project, repository)
        for arch in architectures:
            self.report_pipeline(state, report_url, project, repository, arch, buildids[arch], arch == architectures[-1])

    def gocd_url(self):
        if not os.environ.get('GO_SERVER_URL'):
            # placeholder :)
            return 'http://stephan.kulow.org/'
        report_url = os.environ.get('GO_SERVER_URL').replace(':8154', '')
        return report_url + '/tab/build/detail/{}/{}/{}/{}/{}#tab-console'.format(os.environ.get('GO_PIPELINE_NAME'),
                            os.environ.get('GO_PIPELINE_COUNTER'),
                            os.environ.get('GO_STAGE_NAME'),
                            os.environ.get('GO_STAGE_COUNTER'),
                            os.environ.get('GO_JOB_NAME'))

    def buildid(self, project, repository, architecture):
        url = self.api.makeurl(['build', project, repository, architecture], {'view': 'status'})
        root = ET.parse(osc.core.http_GET(url)).getroot()
        buildid = root.find('buildid')
        if buildid is None:
            return False
        return buildid.text

    def report_url(self, project, repository, architecture, buildid):
        return self.api.makeurl(['status_reports', 'built', project,
                                repository, architecture, 'reports', buildid])

    def report_pipeline(self, state, report_url, project, repository, architecture, buildid, is_last):
        url = self.report_url(project, repository, architecture, buildid)
        name = 'installcheck'
        # This is a little ugly, but we don't need two failure reports: for
        # every arch except the last, downgrade failure to success just to
        # mark the arch as visited; pending is posted for all archs.
        if not is_last:
            if state == 'failure':
                state = 'success'

        xml = self.check_xml(report_url, state, name)
        try:
            osc.core.http_POST(url, data=xml)
        except HTTPError:
            print('failed to post status to ' + url)
            sys.exit(1)

    def check_xml(self, url, state, name):
        check = ET.Element('check')
        if url:
            se = ET.SubElement(check, 'url')
            se.text = url
        se = ET.SubElement(check, 'state')
        se.text = state
        se = ET.SubElement(check, 'name')
        se.text = name
        return ET.tostring(check)

    def target_archs(self, project, repository):
        archs = target_archs(self.api.apiurl, project, repository)

        # Check for arch whitelist and use intersection.
        if self.arch_whitelist:
            archs = list(self.arch_whitelist.intersection(set(archs)))

        # Trick to prioritize x86_64.
        return sorted(archs, reverse=True)
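
    # The reverse sort above is what prioritizes x86_64: it compares greater
    # than the other arch names, e.g.
    #   sorted(['aarch64', 'i586', 'ppc64le', 'x86_64'], reverse=True)
    #   -> ['x86_64', 'ppc64le', 'i586', 'aarch64']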

    @memoize(ttl=60, session=True, add_invalidate=True)
    def mirror(self, project, repository, arch):
        """Call bs_mirrorfull script to mirror packages."""
        directory = os.path.join(CACHEDIR, project, repository, arch)
        if not os.path.exists(directory):
            os.makedirs(directory)

        script = os.path.join(SCRIPT_PATH, 'bs_mirrorfull')
        path = '/'.join((project, repository, arch))
        url = '{}/public/build/{}'.format(self.api.apiurl, path)
        parts = ['LC_ALL=C', 'perl', script, '--nodebug', url, directory]
        parts = [pipes.quote(part) for part in parts]

        self.logger.info('mirroring {}'.format(path))
        if os.system(' '.join(parts)):
            raise Exception('failed to mirror {}'.format(path))

        return directory

    @memoize(session=True)
    def binary_list_existing_problem(self, project, repository):
        """Determine which binaries are mentioned in repo_checker output."""
        binaries = set()

        filename = self.project_pseudometa_file_name(project, repository)
        content = project_pseudometa_file_load(self.api.apiurl, project, filename)
        if not content:
            self.logger.warning('no project_only run from which to extract existing problems')
            return binaries

        sections = self.install_check_parse(content)
        for section in sections:
            for binary in section.binaries:
                match = re.match(BINARY_REGEX, binary)
                if match:
                    binaries.add(match.group('name'))

        return binaries

    def install_check(self, target_project_pair, arch, directories,
                      ignore=None, whitelist=[], parse=False, no_filter=False):
        self.logger.info('install check: start (ignore:{}, whitelist:{}, parse:{}, no_filter:{})'.format(
            bool(ignore), len(whitelist), parse, no_filter))

        # open in text mode so plain strings can be written below
        with tempfile.NamedTemporaryFile(mode='w+') as ignore_file:
            # Print ignored rpms on separate lines in ignore file.
            if ignore:
                for item in ignore:
                    ignore_file.write(item + '\n')
                ignore_file.flush()

            # Invoke repo_checker.pl to perform an install check.
            script = os.path.join(SCRIPT_PATH, 'repo_checker.pl')
            parts = ['LC_ALL=C', 'perl', script, arch, ','.join(directories),
                     '-f', ignore_file.name, '-w', ','.join(whitelist)]
            if no_filter:
                parts.append('--no-filter')

            parts = [pipes.quote(part) for part in parts]
            p = subprocess.Popen(' '.join(parts), shell=True,
                                 stdout=subprocess.PIPE,
                                 stderr=subprocess.PIPE, close_fds=True)
            stdout, stderr = p.communicate()

        if p.returncode:
            self.logger.info('install check: failed')
            if p.returncode == 126:
                self.logger.warning('mirror cache reset due to corruption')
                self._invalidate_all()
            elif parse:
                # Parse output for later consumption for posting comments.
                sections = self.install_check_parse(stdout)
                self.install_check_sections_group(
                    target_project_pair[0], target_project_pair[1], arch, sections)

            # Format output as markdown comment.
            parts = []

            stdout = stdout.decode('utf-8').strip()
            if stdout:
                parts.append(stdout + '\n')
            stderr = stderr.decode('utf-8').strip()
            if stderr:
                parts.append(stderr + '\n')

            header = '### [install check & file conflicts for {}]'.format(arch)
            return CheckResult(False, header + '\n\n' + ('\n' + ('-' * 80) + '\n\n').join(parts))

        self.logger.info('install check: passed')
        return CheckResult(True, None)

    def install_check_sections_group(self, project, repository, arch, sections):
        _, binary_map = package_binary_list(self.api.apiurl, project, repository, arch)

        for section in sections:
            # If we switch to creating bugs, it likely makes sense to join the
            # packages into a grouping key and create shared bugs for conflicts.
            # Added check for b in binary_map after encountering:
            # https://lists.opensuse.org/opensuse-buildservice/2017-08/msg00035.html
            # Under normal circumstances this should never occur.
            packages = set([binary_map[b] for b in section.binaries if b in binary_map])
            for package in packages:
                self.package_results.setdefault(package, [])
                self.package_results[package].append(section)

    def install_check_parse(self, output):
        section = None
        text = None

        # Loop over lines and parse into chunks assigned to binaries.
        for line in output.splitlines(True):
            if line.startswith(' '):
                if section:
                    text += line
            else:
                if section:
                    yield InstallSection(section, text)

                match = re.match(INSTALL_REGEX, line)
                if match:
                    # Remove empty groups since regex matches different patterns.
                    binaries = [b for b in match.groups() if b is not None]
                    section = binaries
                    text = line
                else:
                    section = None

        if section:
            yield InstallSection(section, text)
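
    # Example of the chunking above, with illustrative repo_checker output
    # (the actual INSTALL_REGEX header patterns are not shown in this excerpt):
    #
    #   can't install foo-1.0-1.1:     <- matches INSTALL_REGEX, opens a section
    #     needed by package bar        <- indented continuation, appended to text
    #   can't install qux-2.0-3.4:     <- flushes the previous InstallSection
    #
    # Each yielded InstallSection pairs the matched binaries with the raw text.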

    def calculate_allowed_cycles(self):
        self.allowed_cycles = []
        if self.cycle_packages:
            for comma_list in self.cycle_packages.split(';'):
                self.allowed_cycles.append(comma_list.split(','))

    def cycle_check(self, project, repository, arch):
        self.logger.info('cycle check: start %s/%s/%s' % (project, repository, arch))
        comment = []

        depinfo = builddepinfo(self.api.apiurl, project, repository, arch, order = False)
        for cycle in depinfo.findall('cycle'):
            for package in cycle.findall('package'):
                package = package.text
                allowed = False
                for acycle in self.allowed_cycles:
                    if package in acycle:
                        allowed = True
                        break
                if not allowed:
                    cycled = [p.text for p in cycle.findall('package')]
                    comment.append('Package {} appears in cycle {}'.format(package, '/'.join(cycled)))

        if len(comment):
            # New cycles, post comment.
            self.logger.info('cycle check: failed')
            return CheckResult(False, '\n'.join(comment) + '\n')

        self.logger.info('cycle check: passed')
        return CheckResult(True, None)

    def project_pseudometa_file_name(self, project, repository):
        filename = 'repo_checker'

        main_repo = Config.get(self.api.apiurl, project).get('main-repo')
        if not main_repo:
            filename += '.' + repository

        return filename
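
# A minimal, standalone sketch of the <check/> element that check_xml() above
# builds for the OBS status_reports API; the values are placeholders and the
# snippet runs independently of the bot.
import xml.etree.ElementTree as ET

def sample_check_xml(url, state, name):
    check = ET.Element('check')
    if url:
        ET.SubElement(check, 'url').text = url
    ET.SubElement(check, 'state').text = state
    ET.SubElement(check, 'name').text = name
    return ET.tostring(check)

if __name__ == '__main__':
    # b'<check><url>https://example.test/report</url><state>success</state><name>installcheck</name></check>'
    print(sample_check_xml('https://example.test/report', 'success', 'installcheck'))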


class TestCommentOBS(OBSLocalTestCase):
    def setUp(self):
        super(TestCommentOBS, self).setUp()
        self.api = CommentAPI(self.apiurl)
        # Ensure different test runs operate in unique namespace.
        self.bot = '::'.join([type(self).__name__, str(random.getrandbits(8))])

    def test_basic(self):
        self.osc_user('staging-bot')

        self.assertFalse(self.comments_filtered(self.bot)[0])

        self.assertTrue(self.api.add_comment(
            project_name=PROJECT, comment=self.api.add_marker(COMMENT, self.bot)))
        comment, _ = self.comments_filtered(self.bot)
        self.assertTrue(comment)

        self.assertTrue(self.api.delete(comment['id']))
        self.assertFalse(self.comments_filtered(self.bot)[0])

    def test_delete_nested(self):
        self.osc_user('staging-bot')
        comment_marked = self.api.add_marker(COMMENT, self.bot)

        # Allow for existing comments by basing assertion on delta from initial count.
        comment_count = len(self.api.get_comments(project_name=PROJECT))
        self.assertFalse(self.comments_filtered(self.bot)[0])

        self.assertTrue(self.api.add_comment(project_name=PROJECT, comment=comment_marked))
        comment, _ = self.comments_filtered(self.bot)
        self.assertTrue(comment)

        for i in range(0, 3):
            self.assertTrue(self.api.add_comment(
                project_name=PROJECT, comment=comment_marked, parent_id=comment['id']))

        comments = self.api.get_comments(project_name=PROJECT)
        parented_count = 0
        for comment in comments.values():
            if comment['parent']:
                parented_count += 1

        self.assertEqual(parented_count, 3)
        self.assertEqual(len(comments), comment_count + 4)

        self.api.delete_from(project_name=PROJECT)
        self.assertFalse(len(self.api.get_comments(project_name=PROJECT)))

    def test_delete_batch(self):
        users = ['factory-auto', 'repo-checker', 'staging-bot']
        for user in users:
            self.osc_user(user)
            bot = '::'.join([self.bot, user])
            comment = self.api.add_marker(COMMENT, bot)

            self.assertFalse(self.comments_filtered(bot)[0])
            self.assertTrue(self.api.add_comment(project_name=PROJECT, comment=comment))
            self.assertTrue(self.comments_filtered(bot)[0])

        # Allow for existing comments by basing assertion on delta from initial count.
        comment_count = len(self.api.get_comments(project_name=PROJECT))
        self.assertTrue(comment_count >= len(users))

        self.api.delete_from_where_user(users[0], project_name=PROJECT)
        self.assertEqual(len(self.api.get_comments(project_name=PROJECT)), comment_count - 1)

        self.api.delete_from(project_name=PROJECT)
        self.assertFalse(len(self.api.get_comments(project_name=PROJECT)))

    def comments_filtered(self, bot):
        comments = self.api.get_comments(project_name=PROJECT)
        return self.api.comment_find(comments, bot)
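
# Rough sketch of the bot-marker convention the tests above rely on: a hidden
# tag embedded in the comment body lets each bot find its own comments later.
# The marker format below is an assumption for illustration, not necessarily
# what CommentAPI actually emits.
import re

def add_marker(comment, bot):
    return '<!-- {} -->\n\n{}'.format(bot, comment)

def comment_find(comments, bot):
    marker = re.compile(r'<!-- {} -->'.format(re.escape(bot)))
    for comment in comments.values():
        if marker.match(comment['comment']):
            return comment, None
    return None, None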


class OpenQABot(ReviewBot.ReviewBot):
    """ check openQA test results for requests and incidents
    """
    def __init__(self, *args, **kwargs):
        ReviewBot.ReviewBot.__init__(self, *args, **kwargs)

        self.force = False
        self.openqa = None
        self.commentapi = CommentAPI(self.apiurl)
        self.update_test_builds = dict()
        self.openqa_jobs = dict()

    def gather_test_builds(self):
        for prj, u in TARGET_REPO_SETTINGS[self.openqa.baseurl].items():
            buildnr = 0
            cjob = 0
            for j in self.jobs_for_target(u):
                # avoid going backwards in job ID
                if cjob > int(j['id']):
                    continue
                buildnr = j['settings']['BUILD']
                cjob = int(j['id'])
            self.update_test_builds[prj] = buildnr
            jobs = self.jobs_for_target(u, build=buildnr)
            self.openqa_jobs[prj] = jobs
            if self.calculate_qa_status(jobs) == QA_INPROGRESS:
                self.pending_target_repos.add(prj)

    # reimplementation from the base class
    def check_requests(self):

        if self.apiurl.endswith('.suse.de'):
            self.check_suse_incidents()

        # first calculate the latest build number for current jobs
        self.pending_target_repos = set()
        self.gather_test_builds()

        started = []
        # then check progress on running incidents
        for req in self.requests:
            jobs = self.request_get_openqa_jobs(req,
                                                incident=True,
                                                test_repo=True)
            ret = self.calculate_qa_status(jobs)
            if ret != QA_UNKNOWN:
                started.append(req)

        all_requests = self.requests
        self.requests = started
        ReviewBot.ReviewBot.check_requests(self)

        self.requests = all_requests

        skipped_one = False
        # now make sure the jobs are for current repo
        for prj, u in TARGET_REPO_SETTINGS[self.openqa.baseurl].items():
            if prj in self.pending_target_repos:
                skipped_one = True
                continue
            self.trigger_build_for_target(prj, u)

        # do not schedule new incidents unless we finished
        # last wave
        if skipped_one:
            return

        ReviewBot.ReviewBot.check_requests(self)

    def check_action_maintenance_release(self, req, a):
        # we only look at the binaries of the patchinfo
        if a.src_package != 'patchinfo':
            return None

        if a.tgt_project not in PROJECT_OPENQA_SETTINGS:
            self.logger.warning("not handling %s" % a.tgt_project)
            return None

        # TODO - this needs to be moved
        return None
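        # NOTE: everything below this point is unreachable until the early
        # return above is removed (see the TODO).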

        packages = []
        # patchinfo collects the binaries and is built for an unpredictable
        # architecture, so we need to iterate over all of them
        url = osc.core.makeurl(
            self.apiurl,
            ('build', a.src_project, a.tgt_project.replace(':', '_')))
        root = ET.parse(osc.core.http_GET(url)).getroot()
        for arch in [n.attrib['name'] for n in root.findall('entry')]:
            query = {'nosource': 1}
            url = osc.core.makeurl(
                self.apiurl,
                ('build', a.src_project, a.tgt_project.replace(
                    ':', '_'), arch, a.src_package),
                query=query)

            root = ET.parse(osc.core.http_GET(url)).getroot()

            for binary in root.findall('binary'):
                m = pkgname_re.match(binary.attrib['filename'])
                if m:
                    # can't use arch here as the patchinfo mixes all
                    # archs
                    packages.append(
                        Package(m.group('name'), m.group('version'),
                                m.group('release')))

        if not packages:
            raise Exception("no packages found")

        update.calculate_lastest_good_updates(self.openqa, settings)

        return None

    # check a set of repos for their primary checksums
    @staticmethod
    def calculate_repo_hash(repos):
        m = hashlib.md5()
        # if you want to force a new hash (and thus new builds), change this seed
        m.update(b'b')
        for url in repos:
            url += '/repodata/repomd.xml'
            root = ET.parse(osc.core.http_GET(url)).getroot()
            cs = root.find(
                './/{http://linux.duke.edu/metadata/repo}data[@type="primary"]/{http://linux.duke.edu/metadata/repo}checksum'
            )
            m.update(cs.text.encode('utf-8'))
        return m.hexdigest()

    def is_incident_in_testing(self, incident):
        # hard coded for now as we only run this code for SUSE Maintenance workflow
        project = 'SUSE:Maintenance:%s' % incident

        xpath = "(state/@name='review') and (action/source/@project='%s' and action/@type='maintenance_release')" % (
            project)
        res = osc.core.search(self.apiurl, request=xpath)['request']
        # return the one and only (or None)
        return res.find('request')

    def calculate_incidents(self, incidents):
        """
        get incident numbers from SUSE:Maintenance:Test project
        returns dict with openQA var name : string with numbers
        """
        l_incidents = []
        for kind, prj in incidents.items():
            packages = osc.core.meta_get_packagelist(self.apiurl, prj)
            incident_ids = []
            # filter out incidents in staging
            for incident in packages:
                # remove patchinfo. prefix
                incident = incident.replace('_', '.').split('.')[1]
                req = self.is_incident_in_testing(incident)
                # without release request it's in staging
                if req is None:
                    continue

                # skip kgraft patches from aggregation
                req_ = osc.core.Request()
                req_.read(req)
                src_prjs = set([a.src_project for a in req_.actions])
                if SUSEUpdate.kgraft_target(self.apiurl, src_prjs.pop()):
                    continue

                incident_ids.append(incident)

            l_incidents.append((kind + '_TEST_ISSUES', ','.join(incident_ids)))

        return l_incidents
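
    # Shape of the result above (values illustrative), e.g.
    #   [('OS_TEST_ISSUES', '5219,5312'), ('SDK_TEST_ISSUES', '5219')]
    # -- one entry per kind in the `incidents` mapping.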

    def jobs_for_target(self, data, build=None):
        s = data['settings'][0]
        values = {
            'distri': s['DISTRI'],
            'version': s['VERSION'],
            'arch': s['ARCH'],
            'flavor': s['FLAVOR'],
            'scope': 'relevant',
            'latest': '1',
        }
        if build:
            values['build'] = build
        else:
            values['test'] = data['test']
        return self.openqa.openqa_request('GET', 'jobs', values)['jobs']

    # we don't know the current BUILD and querying all jobs is too expensive
    # so we need to check for one known TEST first
    # if that job doesn't contain the proper hash, we trigger a new one
    # and then we know the build
    def trigger_build_for_target(self, prj, data):
        today = date.today().strftime("%Y%m%d")
        repohash = self.calculate_repo_hash(data['repos'])
        buildnr = None
        j = self.jobs_for_target(data)
        for job in j:
            if job['settings'].get('REPOHASH', '') == repohash:
                # take the last matching job in the list
                buildnr = job['settings']['BUILD']
        self.update_test_builds[prj] = buildnr
        # ignore old build numbers, we want a fresh run every day
        # to find regressions in the tests and to get data about
        # randomly failing tests
        if buildnr and buildnr.startswith(today):
            return

        buildnr = 0

        # nothing for today yet; find the next free build number
        for job in j:
            build = job['settings']['BUILD']
            if build and build.startswith(today):
                try:
                    nr = int(build.split('-')[1])
                    if nr > buildnr:
                        buildnr = nr
                except (IndexError, ValueError):
                    continue

        buildnr = "%s-%d" % (today, buildnr + 1)

        for s in data['settings']:
            # now schedule it for real
            if 'incidents' in data:
                for x, y in self.calculate_incidents(data['incidents']):
                    s[x] = y
            s['BUILD'] = buildnr
            s['REPOHASH'] = repohash
            self.logger.debug(pformat(s))
            if not self.dryrun:
                try:
                    self.openqa.openqa_request('POST',
                                               'isos',
                                               data=s,
                                               retries=1)
                except Exception as e:
                    self.logger.debug(e)
        self.update_test_builds[prj] = buildnr

    def check_source_submission(self, src_project, src_package, src_rev,
                                dst_project, dst_package):
        ReviewBot.ReviewBot.check_source_submission(self, src_project,
                                                    src_package, src_rev,
                                                    dst_project, dst_package)

    def request_get_openqa_jobs(self, req, incident=True, test_repo=False):
        ret = None
        types = set([a.type for a in req.actions])
        if 'maintenance_release' in types:
            src_prjs = set([a.src_project for a in req.actions])
            if len(src_prjs) != 1:
                raise Exception(
                    "can't handle maintenance_release from different incidents"
                )
            build = src_prjs.pop()
            tgt_prjs = set([a.tgt_project for a in req.actions])
            ret = []
            if incident:
                ret += self.openqa_jobs[build]
            for prj in sorted(tgt_prjs):
                repo_settings = TARGET_REPO_SETTINGS.get(
                    self.openqa.baseurl, {})
                if test_repo and prj in repo_settings:
                    repo_jobs = self.openqa_jobs[prj]
                    ret += repo_jobs

        return ret

    def calculate_qa_status(self, jobs=None):
        if not jobs:
            return QA_UNKNOWN

        j = dict()
        has_failed = False
        in_progress = False
        for job in jobs:
            if job['clone_id']:
                continue
            name = job['name']
            if name in j and int(job['id']) < int(j[name]['id']):
                continue
            j[name] = job
            #self.logger.debug('job %s in openQA: %s %s %s %s', job['id'], job['settings']['VERSION'], job['settings']['TEST'], job['state'], job['result'])
            if job['state'] not in ('cancelled', 'done'):
                in_progress = True
            else:
                if job['result'] != 'passed' and job['result'] != 'softfailed':
                    has_failed = True

        if not j:
            return QA_UNKNOWN
        if in_progress:
            return QA_INPROGRESS
        if has_failed:
            return QA_FAILED

        return QA_PASSED
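
    # Precedence of the states above: any unfinished job makes the whole set
    # QA_INPROGRESS; otherwise a single job that neither passed nor softfailed
    # makes it QA_FAILED; only an all-green set counts as QA_PASSED.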

    def add_comment(self, msg, state, request_id=None, result=None):
        if not self.do_comments:
            return

        comment = "<!-- openqa state=%s%s -->\n" % (state, ' result=%s' %
                                                    result if result else '')
        comment += "\n" + msg

        info = self.find_obs_request_comment(request_id=request_id)
        comment_id = info.get('id', None)

        if state == info.get('state', 'missing'):
            lines_before = len(info['comment'].split('\n'))
            lines_after = len(comment.split('\n'))
            if lines_before == lines_after:
                self.logger.debug(
                    "not worth the update, previous comment %s is state %s",
                    comment_id, info['state'])
                return

        self.logger.debug("adding comment to %s, state %s result %s",
                          request_id, state, result)
        self.logger.debug("message: %s", msg)
        if not self.dryrun:
            if comment_id is not None:
                self.commentapi.delete(comment_id)
            self.commentapi.add_comment(request_id=request_id,
                                        comment=str(comment))

    # escape markdown
    @staticmethod
    def emd(text):
        return text.replace('_', r'\_')

    def get_step_url(self, testurl, modulename):
        failurl = testurl + '/modules/%s/fails' % modulename
        fails = requests.get(failurl).json()
        failed_step = fails.get('first_failed_step', 1)
        return "[%s](%s#step/%s/%d)" % (self.emd(modulename), testurl,
                                        modulename, failed_step)

    def job_test_name(self, job):
        return "%s@%s" % (self.emd(
            job['settings']['TEST']), self.emd(job['settings']['MACHINE']))

    def summarize_one_openqa_job(self, job):
        testurl = osc.core.makeurl(self.openqa.baseurl,
                                   ['tests', str(job['id'])])
        if job['result'] not in ('passed', 'failed', 'softfailed'):
            rstring = job['result']
            if rstring == 'none':
                return None
            return '\n- [%s](%s) is %s' % (self.job_test_name(job), testurl,
                                           rstring)

        modstrings = []
        for module in job['modules']:
            if module['result'] != 'failed':
                continue
            modstrings.append(self.get_step_url(testurl, module['name']))

        if len(modstrings):
            return '\n- [%s](%s) failed in %s' % (
                self.job_test_name(job), testurl, ','.join(modstrings))
        elif job['result'] == 'failed':  # rare case: fail without module fails
            return '\n- [%s](%s) failed' % (self.job_test_name(job), testurl)
        return ''

    def summarize_openqa_jobs(self, jobs):
        groups = dict()
        for job in jobs:
            gl = "%s@%s" % (self.emd(
                job['group']), self.emd(job['settings']['FLAVOR']))
            if gl not in groups:
                groupurl = osc.core.makeurl(
                    self.openqa.baseurl, ['tests', 'overview'], {
                        'version': job['settings']['VERSION'],
                        'groupid': job['group_id'],
                        'flavor': job['settings']['FLAVOR'],
                        'distri': job['settings']['DISTRI'],
                        'build': job['settings']['BUILD'],
                    })
                groups[gl] = {
                    'title': "__Group [%s](%s)__\n" % (gl, groupurl),
                    'passed': 0,
                    'unfinished': 0,
                    'failed': []
                }

            job_summary = self.summarize_one_openqa_job(job)
            if job_summary is None:
                groups[gl]['unfinished'] = groups[gl]['unfinished'] + 1
                continue
            # None vs ''
            if not len(job_summary):
                groups[gl]['passed'] = groups[gl]['passed'] + 1
                continue
            # if there is something to report, hold the request
            groups[gl]['failed'].append(job_summary)

        msg = ''
        for group in sorted(groups.keys()):
            msg += "\n\n" + groups[group]['title']
            infos = []
            if groups[group]['passed']:
                infos.append("%d tests passed" % groups[group]['passed'])
            if len(groups[group]['failed']):
                infos.append("%d tests failed" % len(groups[group]['failed']))
            if groups[group]['unfinished']:
                infos.append("%d unfinished tests" %
                             groups[group]['unfinished'])
            msg += "(" + ', '.join(infos) + ")\n"
            for fail in groups[group]['failed']:
                msg += fail

        return msg
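
    # The rendered summary looks roughly like this (contents illustrative):
    #
    #   __Group [Maintenance@DVD](https://openqa.example/tests/overview?...)__
    #   (12 tests passed, 1 tests failed, 2 unfinished tests)
    #   - [qam-minimal@64bit](https://openqa.example/tests/1) failed in [zypper_up](...#step/zypper_up/3)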

    def check_one_request(self, req):
        ret = None

        try:
            jobs = self.request_get_openqa_jobs(req)
            qa_state = self.calculate_qa_status(jobs)
            self.logger.debug("request %s state %s", req.reqid, qa_state)
            msg = None
            if self.force or qa_state == QA_UNKNOWN:
                ret = ReviewBot.ReviewBot.check_one_request(self, req)
                jobs = self.request_get_openqa_jobs(req)

                if self.force:
                    # make sure to delete previous comments if we're forcing
                    info = self.find_obs_request_comment(request_id=req.reqid)
                    if 'id' in info:
                        self.logger.debug("deleting old comment %s",
                                          info['id'])
                        if not self.dryrun:
                            self.commentapi.delete(info['id'])

                if not jobs:
                    msg = "no openQA tests defined"
                    self.add_comment(msg,
                                     'done',
                                     request_id=req.reqid,
                                     result='accepted')
                    ret = True
                else:
                    # no notification until the result is done
                    osc.core.change_review_state(
                        self.apiurl,
                        req.reqid,
                        newstate='new',
                        by_group=self.review_group,
                        by_user=self.review_user,
                        message='now testing in openQA')
            elif qa_state == QA_FAILED or qa_state == QA_PASSED:
                # don't count test repo results in the total; they are for
                # humans to decide which incident broke the test repo
                jobs += self.request_get_openqa_jobs(req,
                                                     incident=False,
                                                     test_repo=True)
                if self.calculate_qa_status(jobs) == QA_INPROGRESS:
                    self.logger.debug(
                        "incident tests for request %s are done, but need to wait for test repo",
                        req.reqid)
                    return
                if qa_state == QA_PASSED:
                    msg = "openQA tests passed\n"
                    result = 'accepted'
                    ret = True
                else:
                    msg = "openQA tests problematic\n"
                    result = 'declined'
                    ret = False

                msg += self.summarize_openqa_jobs(jobs)
                self.add_comment(msg,
                                 'done',
                                 result=result,
                                 request_id=req.reqid)
            elif qa_state == QA_INPROGRESS:
                self.logger.debug("request %s still in progress", req.reqid)
            else:
                raise Exception("unknown QA state %d" % qa_state)

        except Exception:
            import traceback
            self.logger.error("unhandled exception in openQA Bot")
            self.logger.error(traceback.format_exc())
            ret = None

        return ret

    def find_obs_request_comment(self, request_id=None, project_name=None):
        """Return previous comments (should be one)."""
        if self.do_comments:
            comments = self.commentapi.get_comments(request_id=request_id,
                                                    project_name=project_name)
            for c in comments.values():
                m = comment_marker_re.match(c['comment'])
                if m:
                    return {
                        'id': c['id'],
                        'state': m.group('state'),
                        'result': m.group('result'),
                        'comment': c['comment'],
                        'revision': m.group('revision')
                    }
        return {}
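
    # The marker matched here is the HTML comment written by add_comment() and
    # test_job(), e.g. '<!-- openqa state=done result=accepted revision=17 -->'.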

    def check_product(self, job, product_prefix):
        pmap = API_MAP[product_prefix]
        posts = []
        for arch in pmap['archs']:
            need = False
            settings = {
                'VERSION': pmap['version'],
                'ARCH': arch,
                'DISTRI': 'sle'
            }
            issues = dict(pmap.get('issues', {}))  # copy; avoid mutating API_MAP
            issues['OS_TEST_ISSUES'] = product_prefix
            for key, prefix in issues.items():
                if prefix + arch in job['channels']:
                    settings[key] = str(job['id'])
                    need = True
            if need:
                u = PROJECT_OPENQA_SETTINGS[product_prefix + arch]
                u.apiurl = self.apiurl
                for s in u.settings(
                        u.maintenance_project() + ':' + str(job['id']),
                        product_prefix + arch, []):
                    if job.get('openqa_build') is None:
                        job['openqa_build'] = u.get_max_revision(job)
                    if job.get('openqa_build') is None:
                        return []
                    s['BUILD'] += '.' + str(job['openqa_build'])
                    s.update(settings)
                    posts.append(s)
        return posts

    def incident_openqa_jobs(self, s):
        return self.openqa.openqa_request(
            'GET', 'jobs', {
                'distri': s['DISTRI'],
                'version': s['VERSION'],
                'arch': s['ARCH'],
                'flavor': s['FLAVOR'],
                'build': s['BUILD'],
                'scope': 'relevant',
                'latest': '1'
            })['jobs']

    def check_suse_incidents(self):
        for inc in requests.get(
                'https://maintenance.suse.de/api/incident/active/').json():
            # if not inc in ['5219']: continue
            # if not inc.startswith('52'): continue
            print(inc)
            # continue
            job = requests.get('https://maintenance.suse.de/api/incident/' +
                               inc).json()
            if job['meta']['state'] in ['final', 'gone']:
                continue
            # required in job: project, id, channels
            self.test_job(job['base'])

    def test_job(self, job):
        incident_project = str(job['project'])
        comment_info = self.find_obs_request_comment(
            project_name=incident_project)
        comment_id = comment_info.get('id', None)
        comment_build = str(comment_info.get('revision', ''))

        openqa_posts = []
        for prod in API_MAP.keys():
            openqa_posts += self.check_product(job, prod)
        openqa_jobs = []
        for s in openqa_posts:
            jobs = self.incident_openqa_jobs(s)
            # use the project comment as a marker to avoid re-posting jobs
            if not len(jobs) and comment_build != str(job['openqa_build']):
                if self.dryrun:
                    print('WOULD POST', json.dumps(s, sort_keys=True))
                else:
                    ret = self.openqa.openqa_request('POST',
                                                     'isos',
                                                     data=s,
                                                     retries=1)
                    openqa_jobs += self.incident_openqa_jobs(s)
            else:
                print(s, 'got', len(jobs))
                openqa_jobs += jobs
        self.openqa_jobs[incident_project] = openqa_jobs
        if len(openqa_jobs) == 0:
            self.logger.debug("No openqa jobs defined")
            return
        # print openqa_jobs
        msg = self.summarize_openqa_jobs(openqa_jobs)
        state = 'seen'
        result = 'none'
        qa_status = self.calculate_qa_status(openqa_jobs)
        if qa_status == QA_PASSED:
            result = 'accepted'
            state = 'done'
        if qa_status == QA_FAILED:
            result = 'declined'
            state = 'done'
        comment = "<!-- openqa state=%s result=%s revision=%s -->\n" % (
            state, result, job.get('openqa_build'))
        comment += "\nCC @coolo\n" + msg

        if comment_id and state != 'done':
            self.logger.debug("%s is already commented, wait until done",
                              incident_project)
            return
        if comment_info.get('comment', '') == comment:
            self.logger.debug("%s comment did not change", incident_project)
            return

        self.logger.debug("adding comment to %s, state %s", incident_project,
                          state)
        #self.logger.debug("message: %s", msg)
        if not self.dryrun:
            if comment_id is not None:
                self.commentapi.delete(comment_id)
            self.commentapi.add_comment(project_name=str(incident_project),
                                        comment=str(comment))
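
# Standalone Python 3 sketch of the repomd-based hash computed by
# calculate_repo_hash() above; the repository URL list is a placeholder.
import hashlib
import urllib.request
import xml.etree.ElementTree as ET

REPO_NS = '{http://linux.duke.edu/metadata/repo}'

def repo_hash(repo_urls):
    m = hashlib.md5()
    m.update(b'b')  # bump this seed to force a different hash (and new builds)
    for url in repo_urls:
        with urllib.request.urlopen(url + '/repodata/repomd.xml') as f:
            root = ET.parse(f).getroot()
        checksum = root.find('.//{0}data[@type="primary"]/{0}checksum'.format(REPO_NS))
        m.update(checksum.text.encode('utf-8'))
    return m.hexdigest()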
예제 #40
0
class Leaper(ReviewBot.ReviewBot):

    def __init__(self, *args, **kwargs):
        ReviewBot.ReviewBot.__init__(self, *args, **kwargs)

        self.do_comments = True
        self.commentapi = CommentAPI(self.apiurl)

        self.maintbot = MaintenanceChecker(*args, **kwargs)
        # for FactorySourceChecker
        self.factory = FactorySourceChecker(*args, **kwargs)

        self.needs_reviewteam = False
        self.pending_factory_submission = False
        self.source_in_factory = None
        self.needs_release_manager = False
        self.release_manager_group = 'leap-reviewers'
        self.must_approve_version_updates = False
        self.must_approve_maintenance_updates = False

        self.comment_marker_re = re.compile(r'<!-- leaper state=(?P<state>done|seen)(?: result=(?P<result>accepted|declined))? -->')

        self.comment_log = None
        self.commentlogger = LogToString(self, 'comment_log')
        self.logger.addFilter(self.commentlogger)

    def prepare_review(self):

        # update lookup information on every run
        self.factory.parse_lookup('openSUSE:Leap:42.2')
        self.factory.parse_lookup('openSUSE:Leap:42.2:NonFree')
        self.lookup_422 = self.factory.lookup.copy()
        self.factory.lookup = {}
        self.factory.parse_lookup('openSUSE:Leap:42.1:Update')
        self.lookup_421 = self.factory.lookup.copy()
        self.factory.lookup = {}

    def check_source_submission(self, src_project, src_package, src_rev, target_project, target_package):
        self.logger.info("%s/%s@%s -> %s/%s"%(src_project, src_package, src_rev, target_project, target_package))
        src_srcinfo = self.get_sourceinfo(src_project, src_package, src_rev)
        package = target_package

        if src_srcinfo is None:
            # source package does not exist?
            # handle here to avoid crashing on the next line
            self.logger.warning("Could not get source info for %s/%s@%s" % (src_project, src_package, src_rev))
            return False

        origin = None
        if package in self.lookup_422:
            origin = self.lookup_422[package]

        is_fine_if_factory = False
        not_in_factory_okish = False
        if origin:
            self.logger.info("expected origin is '%s'", origin)
            if origin.startswith('Devel;'):
                (dummy, origin, dummy) = origin.split(';')
                if origin != src_project:
                    self.logger.debug("not submitted from devel project")
                    return False
                is_fine_if_factory = True
                not_in_factory_okish = True
                if self.must_approve_version_updates:
                    self.needs_release_manager = True
                # fall through to check history and requests
            elif origin.startswith('openSUSE:Factory'):
                if self.must_approve_version_updates:
                    self.needs_release_manager = True
                if origin == src_project:
                    self.source_in_factory = True
                    return True
                is_fine_if_factory = True
                # fall through to check history and requests
            elif origin == 'FORK':
                is_fine_if_factory = True
                not_in_factory_okish = True
                self.needs_release_manager = True
                # fall through to check history and requests
            elif origin.startswith('openSUSE:Leap:42.1'):
                if self.must_approve_maintenance_updates:
                    self.needs_release_manager = True
                # submitted from :Update
                if src_project.startswith(origin):
                    self.logger.debug("submission from 42.1 ok")
                    return True
                # submitted from elsewhere but is in :Update
                else:
                    good = self.factory._check_project('openSUSE:Leap:42.1:Update', target_package, src_srcinfo.verifymd5)
                    if good:
                        self.logger.info("submission found in 42.1")
                        return good
                    # check release requests too
                    good = self.factory._check_requests('openSUSE:Leap:42.1:Update', target_package, src_srcinfo.verifymd5)
                    if good or good is None:
                        self.logger.debug("found request")
                        return good
                # let's see where it came from before
                if package in self.lookup_421:
                    oldorigin = self.lookup_421[package]
                    self.logger.debug("oldorigin {}".format(oldorigin))
                    # Factory. So it's ok to keep upgrading it to Factory
                    # TODO: whitelist packages where this is ok and block others?
                    if oldorigin.startswith('openSUSE:Factory'):
                        self.logger.info("Package was from Factory in 42.1")
                        # check if an attempt to switch to SLE package is made
                        good = self.factory._check_project('SUSE:SLE-12-SP2:GA', target_package, src_srcinfo.verifymd5)
                        if good:
                            self.logger.info("request sources come from SLE")
                            self.needs_release_manager = True
                            return good
                # the release manager needs to review attempts to upgrade to Factory
                is_fine_if_factory = True
                self.needs_release_manager = True

            elif origin.startswith('SUSE:SLE-12'):
                if self.must_approve_maintenance_updates:
                    self.needs_release_manager = True
                # submitted from :Update
                if origin == src_project:
                    self.logger.debug("submission origin ok")
                    return True
                elif origin.endswith(':GA') \
                    and src_project == origin[:-2]+'Update':
                    self.logger.debug("sle update submission")
                    return True
                # submitted from higher SP
                if origin.startswith('SUSE:SLE-12:'):
                    if src_project.startswith('SUSE:SLE-12-SP1:') \
                            or src_project.startswith('SUSE:SLE-12-SP2:'):
                        self.logger.info("submission from service pack ok")
                        return True
                elif origin.startswith('SUSE:SLE-12-SP1:'):
                    if src_project.startswith('SUSE:SLE-12-SP2:'):
                        self.logger.info("submission from service pack ok")
                        return True

                self.needs_release_manager = True
                good = self._check_project_and_request('openSUSE:Leap:42.2:SLE-workarounds', target_package, src_srcinfo)
                if good or good is None:
                    self.logger.info("found sources in SLE-workarounds")
                    return good
                # the release manager needs to review attempts to upgrade to Factory
                is_fine_if_factory = True
            else:
                self.logger.error("unhandled origin %s", origin)
                return False
        else: # no origin
            # submission from SLE is ok
            if src_project.startswith('SUSE:SLE-12'):
                return True

            is_fine_if_factory = True
            self.needs_release_manager = True

        # we came here because none of the above checks found it good, so
        # let's see if the package is in Factory at least
        is_in_factory = self._check_factory(target_package, src_srcinfo)
        if is_in_factory:
            self.source_in_factory = True
            self.needs_reviewteam = False
        elif is_in_factory is None:
            self.pending_factory_submission = True
            self.needs_reviewteam = False
        else:
            if src_project.startswith('SUSE:SLE-12') \
                or src_project.startswith('openSUSE:Leap:42.'):
                self.needs_reviewteam = False
            else:
                self.needs_reviewteam = True
            self.source_in_factory = False

        if is_fine_if_factory:
            if self.source_in_factory:
                return True
            elif self.pending_factory_submission:
                return None
            elif not_in_factory_okish:
                self.needs_reviewteam = True
                return True

        return False

    def _check_factory(self, target_package, src_srcinfo):
        good = self.factory._check_project('openSUSE:Factory', target_package, src_srcinfo.verifymd5)
        if good:
            return good
        good = self.factory._check_requests('openSUSE:Factory', target_package, src_srcinfo.verifymd5)
        if good or good is None:
            self.logger.debug("found request to Factory")
            return good
        good = self.factory._check_project('openSUSE:Factory:NonFree', target_package, src_srcinfo.verifymd5)
        if good:
            return good
        good = self.factory._check_requests('openSUSE:Factory:NonFree', target_package, src_srcinfo.verifymd5)
        if good or good is None:
            self.logger.debug("found request to Factory:NonFree")
            return good
        return False
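
    # Tri-state convention in these checks: True means the sources were found,
    # None means a matching request is still pending review, and False means
    # they are not in Factory at all -- hence the `good or good is None` tests.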

    def _check_project_and_request(self, project, target_package, src_srcinfo):
        good = self.factory._check_project(project, target_package, src_srcinfo.verifymd5)
        if good:
            return good
        good = self.factory._check_requests(project, target_package, src_srcinfo.verifymd5)
        if good or good is None:
            return good
        return False

    def check_one_request(self, req):
        self.review_messages = self.DEFAULT_REVIEW_MESSAGES.copy()
        self.needs_reviewteam = False
        self.needs_release_manager = False
        self.pending_factory_submission = False
        self.source_in_factory = None
        self.comment_log = []

        if len(req.actions) != 1:
            msg = "only one action per request please"
            self.review_messages['declined'] = msg
            return False

        request_ok = ReviewBot.ReviewBot.check_one_request(self, req)
        has_correct_maintainer = self.maintbot.check_one_request(req)

        self.logger.debug("review result: %s", request_ok)
        self.logger.debug("has_correct_maintainer: %s", has_correct_maintainer)
        if self.pending_factory_submission:
            self.logger.info("submission is waiting for a Factory request to complete")
        elif self.source_in_factory:
            self.logger.info("the submitted sources are in or accepted for Factory")
        elif self.source_in_factory is False:
            self.logger.info("the submitted sources are NOT in Factory")

        if request_ok is False:
            self.logger.info("NOTE: if you think the automated review was wrong here, please talk to the release team before reopening the request")
        elif self.needs_release_manager:
            self.logger.info("request needs review by release management")

        if self.comment_log:
            result = None
            if request_ok is None:
                state = 'seen'
            elif request_ok:
                state = 'done'
                result = 'accepted'
            else:
                state = 'done'
                result = 'declined'
            self.add_comment(req, '\n\n'.join(self.comment_log), state, result)
        self.comment_log = None

        if self.needs_release_manager:
            add_review = True
            for r in req.reviews:
                if r.by_group == self.release_manager_group and (r.state == 'new' or r.state == 'accepted'):
                    add_review = False
                    self.logger.debug("%s already is a reviewer", self.release_manager_group)
                    break
            if add_review:
                if self.add_review(req, by_group=self.release_manager_group) is not True:
                    self.review_messages['declined'] += '\nadding %s failed' % self.release_manager_group
                    return False

        if self.needs_reviewteam:
            add_review = True
            self.logger.info("%s needs review by opensuse-review-team"%req.reqid)
            for r in req.reviews:
                if r.by_group == 'opensuse-review-team':
                    add_review = False
                    self.logger.debug("opensuse-review-team already is a reviewer")
                    break
            if add_review:
                if self.add_review(req, by_group="opensuse-review-team") is not True:
                    self.review_messages['declined'] += '\nadding opensuse-review-team failed'
                    return False

        return request_ok

    # TODO: make generic, move to Reviewbot. Used by multiple bots
    def add_comment(self, req, msg, state, result=None):
        if not self.do_comments:
            return

        comment = "<!-- leaper state=%s%s -->\n" % (state, ' result=%s' % result if result else '')
        comment += "\n" + msg

        (comment_id, comment_state, comment_result, comment_text) = self.find_obs_request_comment(req, state)

        if comment_id is not None and state == comment_state:
            # count the number of lines as an approximation, to avoid spamming
            # requests with comments over slight wording changes in the code
            if len(comment_text.split('\n')) == len(comment.split('\n')):
                self.logger.debug("not worth the update, previous comment %s is state %s", comment_id, comment_state)
                return

        self.logger.debug("adding comment to %s, state %s result %s", req.reqid, state, result)
        self.logger.debug("message: %s", msg)
        if not self.dryrun:
            if comment_id is not None:
                self.commentapi.delete(comment_id)
            self.commentapi.add_comment(request_id=req.reqid, comment=str(comment))

    def find_obs_request_comment(self, req, state=None):
        """Return previous comments (should be one)."""
        if self.do_comments:
            comments = self.commentapi.get_comments(request_id=req.reqid)
            for c in comments.values():
                m = self.comment_marker_re.match(c['comment'])
                if m and (state is None or state == m.group('state')):
                    return c['id'], m.group('state'), m.group('result'), c['comment']
        return None, None, None, None

    def check_action__default(self, req, a):
        self.logger.info("unhandled request type %s"%a.type)
        self.needs_release_manager = True
        return True
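
# Quick round-trip of the leaper comment marker: build the line the same way
# add_comment() above does, then parse it back with the regex from __init__.
import re

comment_marker_re = re.compile(
    r'<!-- leaper state=(?P<state>done|seen)'
    r'(?: result=(?P<result>accepted|declined))? -->')

comment = "<!-- leaper state=%s%s -->\n" % ('done', ' result=%s' % 'accepted')
m = comment_marker_re.match(comment)
print(m.group('state'), m.group('result'))  # prints: done accepted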