def setUp(self):
    """Prepare a stub CommentAPI and two sample comments carrying this bot's marker."""
    self.api = CommentAPI('bogus')
    self.bot = type(self).__name__
    plain_marker = '<!-- {} -->\n\nshort comment'.format(self.bot)
    info_marker = '<!-- {} foo=bar distro=openSUSE -->\n\nshort comment'.format(self.bot)
    self.comments = {1: {'comment': plain_marker}, 2: {'comment': info_marker}}
예제 #2
0
    def test_select_comments(self):
        """Each select posts one additional comment describing the new requests."""
        comment_api = CommentAPI(self.api.apiurl)
        project = 'openSUSE:Factory:Staging:B'
        initial = comment_api.get_comments(project_name=project)

        # First select: gcc and wine.
        self.assertEqual(True, SelectCommand(self.api, project).perform(['gcc', 'wine']))
        after_first = comment_api.get_comments(project_name=project)
        first_comment = after_first[max(after_first)]
        # Exactly one new comment was posted...
        self.assertEqual(len(after_first), len(initial) + 1)
        # ...and it mentions the selected request.
        self.assertIn('request#123 for package gcc submitted by Admin', first_comment['comment'])

        # Second select: puppet.
        self.assertEqual(True, SelectCommand(self.api, project).perform(['puppet']))
        after_second = comment_api.get_comments(project_name=project)
        second_comment = after_second[max(after_second)]
        # One additional comment again, with different content.
        self.assertEqual(len(after_second) - 1, len(after_first))
        self.assertNotEqual(second_comment['comment'], first_comment['comment'])
        # The new comment covers only the new request, not the old one.
        self.assertFalse('request#123 for package gcc submitted by Admin' in second_comment['comment'])
        self.assertTrue('added request#321 for package puppet submitted by Admin' in second_comment['comment'])
예제 #3
0
    def test_select_comments(self):
        """Selecting again replaces the staging comment, keeping both selections."""
        comment_api = CommentAPI(self.api.apiurl)
        project = 'openSUSE:Factory:Staging:B'
        baseline = comment_api.get_comments(project_name=project)

        # First select
        self.assertEqual(True, SelectCommand(self.api).perform(project, ['gcc', 'wine']))
        after_first = comment_api.get_comments(project_name=project)
        first_comment = after_first[max(after_first)]
        # Only one comment is added
        self.assertEqual(len(after_first), len(baseline) + 1)
        # With the right content
        self.assertIn('Request#123 for package gcc submitted by @Admin', first_comment['comment'])

        # Second select
        self.assertEqual(True, SelectCommand(self.api).perform(project, ['puppet']))
        after_second = comment_api.get_comments(project_name=project)
        second_comment = after_second[max(after_second)]
        # The comment count is unchanged, but the comment itself was replaced.
        self.assertEqual(len(after_second), len(after_first))
        self.assertNotEqual(second_comment['comment'], first_comment['comment'])
        # The replacement carries both the old and the new selection.
        self.assertIn('Request#123 for package gcc submitted by @Admin', second_comment['comment'])
        self.assertIn('Request#321 for package puppet submitted by @Admin', second_comment['comment'])
예제 #4
0
class IgnoreCommand(object):
    MESSAGE = 'Ignored: removed from active backlog.'

    def __init__(self, api):
        # Staging API plus a comment client bound to the same OBS instance.
        self.api = api
        self.comment = CommentAPI(self.api.apiurl)

    def perform(self, requests, message=None):
        """
        Ignore a request from "list" and "adi" commands until unignored.
        """

        requests_ignored = self.api.get_ignored_requests()
        count_before = len(requests_ignored)

        for request_id in RequestFinder.find_sr(requests, self.api):
            if request_id in requests_ignored:
                print('{}: already ignored'.format(request_id))
                continue

            print('{}: ignored'.format(request_id))
            requests_ignored[request_id] = message
            self.comment.add_comment(request_id=str(request_id),
                                     comment=message if message else self.MESSAGE)

        added = len(requests_ignored) - count_before
        if added > 0:
            # Persist only when something actually changed.
            self.api.set_ignored_requests(requests_ignored)
            print('Ignored {} requests'.format(added))
        else:
            print('No new requests to ignore')

        return True
예제 #5
0
    def __init__(self,
                 apiurl=None,
                 dryrun=False,
                 logger=None,
                 user=None,
                 group=None):
        """Set up common review-bot state.

        apiurl: OBS API endpoint; dryrun: act without writing; user/group:
        identity whose open reviews this bot handles.
        """
        self.apiurl = apiurl
        # True when talking to the internal SUSE instance.
        # NOTE(review): assumes apiurl is a non-None string — confirm callers.
        self.ibs = apiurl.startswith('https://api.suse.de')
        self.dryrun = dryrun
        self.logger = logger
        self.review_user = user
        self.review_group = group
        self.requests = []
        self.review_messages = ReviewBot.DEFAULT_REVIEW_MESSAGES
        self._review_mode = 'normal'
        # Identity used as a fallback when handling reviews.
        self.fallback_user = None
        self.fallback_group = None
        self.comment_api = CommentAPI(self.apiurl)
        # Bot name defaults to the subclass name; used in the config keys below.
        self.bot_name = self.__class__.__name__
        self.only_one_action = False
        self.request_default_return = None
        self.comment_handler = False
        # Whether an override group may force this bot's review result.
        self.override_allow = True
        self.override_group_key = '{}-override-group'.format(
            self.bot_name.lower())
        # Minimum request age before processing (units not visible here — confirm).
        self.request_age_min_default = 0
        self.request_age_min_key = '{}-request-age-min'.format(
            self.bot_name.lower())
        self.lookup = PackageLookup(self.apiurl)

        self.load_config()
class IgnoreCommand(object):
    MESSAGE = 'Ignored: removed from active backlog.'

    def __init__(self, api):
        self.api = api
        # Comment client for posting the ignore note on each request.
        self.comment = CommentAPI(self.api.apiurl)

    def perform(self, requests, message=None):
        """
        Ignore a request from "list" and "adi" commands until unignored.
        """

        requests_ignored = self.api.get_ignored_requests()
        initial_count = len(requests_ignored)

        for request_id in RequestFinder.find_sr(requests, self.api):
            if request_id in requests_ignored:
                print('{}: already ignored'.format(request_id))
                continue

            print('{}: ignored'.format(request_id))
            requests_ignored[request_id] = message
            # `x if x else y` is equivalent to `x or y` here.
            note = message or self.MESSAGE
            self.comment.add_comment(request_id=str(request_id), comment=note)

        newly_ignored = len(requests_ignored) - initial_count
        if newly_ignored > 0:
            self.api.set_ignored_requests(requests_ignored)
            print('Ignored {} requests'.format(newly_ignored))
        else:
            print('No new requests to ignore')

        return True
예제 #7
0
    def test_select_comments(self):
        """A second selection replaces the staging comment instead of adding one."""
        c_api = CommentAPI(self.api.apiurl)
        staging = 'openSUSE:Factory:Staging:B'
        existing = c_api.get_comments(project_name=staging)

        # First select
        result = SelectCommand(self.api, staging).perform(['gcc', 'wine'])
        self.assertEqual(True, result)
        first_round = c_api.get_comments(project_name=staging)
        newest = sorted(first_round)[-1]
        first_comment = first_round[newest]
        # Only one comment is added
        self.assertEqual(len(first_round), len(existing) + 1)
        # With the right content
        self.assertTrue('Request#123 for package gcc submitted by @Admin' in
                        first_comment['comment'])

        # Second select
        result = SelectCommand(self.api, staging).perform(['puppet'])
        self.assertEqual(True, result)
        second_round = c_api.get_comments(project_name=staging)
        newest = sorted(second_round)[-1]
        second_comment = second_round[newest]
        # The number of comments remains, but they are different
        self.assertEqual(len(second_round), len(first_round))
        self.assertNotEqual(second_comment['comment'],
                            first_comment['comment'])
        # The new comment contains both old and new information
        self.assertTrue('Request#123 for package gcc submitted by @Admin' in
                        second_comment['comment'])
        self.assertTrue('Request#321 for package puppet submitted by @Admin' in
                        second_comment['comment'])
예제 #8
0
    def test_select_comments(self):
        """Each select appends one comment describing the newly staged requests."""
        self.wf.setup_rings()

        staging_b = self.wf.create_staging('B', freeze=True)

        c_api = CommentAPI(self.wf.api.apiurl)
        comments = c_api.get_comments(project_name=staging_b.name)

        r1 = self.wf.create_submit_request('devel:wine', 'wine')
        r2 = self.wf.create_submit_request('devel:gcc', 'gcc')

        # First select
        self.assertEqual(True, SelectCommand(self.wf.api, staging_b.name).perform(['gcc', 'wine']))
        first_select_comments = c_api.get_comments(project_name=staging_b.name)
        last_id = sorted(first_select_comments.keys())[-1]
        first_select_comment = first_select_comments[last_id]
        # Only one comment is added
        self.assertEqual(len(first_select_comments), len(comments) + 1)
        # With the right content
        expected = 'request#{} for package gcc submitted by Admin'.format(r2.reqid)
        self.assertTrue(expected in first_select_comment['comment'])

        # Second select
        r3 = self.wf.create_submit_request('devel:gcc', 'gcc8')
        self.assertEqual(True, SelectCommand(self.wf.api, staging_b.name).perform(['gcc8']))
        second_select_comments = c_api.get_comments(project_name=staging_b.name)
        last_id = sorted(second_select_comments.keys())[-1]
        second_select_comment = second_select_comments[last_id]
        # The number of comments increased by one
        self.assertEqual(len(second_select_comments) - 1, len(first_select_comments))
        self.assertNotEqual(second_select_comment['comment'], first_select_comment['comment'])
        # The new comment contains the new request, but not the old one.
        # Fixed typo 'gcz' -> 'gcc': the old string could never occur, so the
        # assertFalse was vacuous and verified nothing.
        self.assertFalse('request#{} for package gcc submitted by Admin'.format(r2.reqid) in second_select_comment['comment'])
        self.assertTrue('added request#{} for package gcc8 submitted by Admin'.format(r3.reqid) in second_select_comment['comment'])
    def __init__(self, *args, **kwargs):
        """Initialize the bot's openQA-related state on top of ReviewBot."""
        ReviewBot.ReviewBot.__init__(self, *args, **kwargs)

        self.force = False
        self.openqa = None
        self.commentapi = CommentAPI(self.apiurl)
        # Per-target caches of test builds and openQA job lists.
        self.update_test_builds = {}
        self.openqa_jobs = {}
예제 #10
0
 def __init__(self, *args, **kwargs):
     """Set up OpenQABot state on top of the base ReviewBot."""
     super(OpenQABot, self).__init__(*args, **kwargs)
     # Target repositories, per-project settings and API mapping; empty here,
     # presumably populated later — confirm against callers.
     self.tgt_repo = {}
     self.project_settings = {}
     self.api_map = {}
     # Fixed bot name, not derived from the class name.
     self.bot_name = 'openqa'
     self.force = False
     # openQA client handle; None until assigned elsewhere — TODO confirm.
     self.openqa = None
     self.commentapi = CommentAPI(self.apiurl)
    def setUp(self):
        """Create a ReviewBot fixture with a per-run unique bot name."""
        super(TestReviewBotComment, self).setUp()
        self.api = CommentAPI(self.apiurl)

        # Randomized suffix keeps repeated test runs from colliding.
        suffix = str(random.getrandbits(8))
        self.bot = '::'.join([type(self).__name__, suffix])
        self.review_bot = ReviewBot(self.apiurl, logger=logging.getLogger(self.bot))
        self.review_bot.bot_name = self.bot

        self.osc_user('factory-auto')
예제 #12
0
def remind_comment(apiurl, repeat_age, request_id, project, package=None):
    """Post a reminder comment on a request, mentioning its maintainers.

    If a previous reminder by this bot is younger than ``repeat_age`` days it
    is kept and nothing is posted; otherwise the old comment is deleted and a
    fresh, marker-tagged one is added.
    """
    comment_api = CommentAPI(apiurl)
    comments = comment_api.get_comments(request_id=request_id)
    comment, _ = comment_api.comment_find(comments, BOT_NAME)

    if comment:
        delta = datetime.utcnow() - comment['when']
        if delta.days < repeat_age:
            print(
                '  skipping due to previous reminder from {} days ago'.format(
                    delta.days))
            return

        # Repeat notification so remove old comment.
        try:
            comment_api.delete(comment['id'])
        except HTTPError as e:
            if e.code == 403:
                # Gracefully skip when previous reminder was by another user.
                print('  unable to remove previous reminder')
                return
            raise e

    userids = sorted(maintainers_get(apiurl, project, package))
    # Truthiness instead of len(): empty list means no maintainers to ping.
    if userids:
        users = ['@' + userid for userid in userids]
        message = '{}: {}'.format(', '.join(users), REMINDER)
    else:
        message = REMINDER
    print('  ' + message)
    message = comment_api.add_marker(message, BOT_NAME)
    comment_api.add_comment(request_id=request_id, comment=message)
예제 #13
0
def check_comment(apiurl, bot, **kwargs):
    """Return the age in seconds of ``bot``'s comment on the target, or False.

    kwargs select the comment target (passed straight to get_comments); with
    no target given, or no matching bot comment found, False is returned.
    """
    # Truthiness instead of len(): no target selectors means nothing to check.
    if not kwargs:
        return False

    api = CommentAPI(apiurl)
    comments = api.get_comments(**kwargs)
    comment = api.comment_find(comments, bot)[0]
    if comment:
        return (datetime.utcnow() - comment['when']).total_seconds()

    return False
예제 #14
0
def check_comment(apiurl, bot, **kwargs):
    """Age in seconds of the given bot's comment on the target, else False."""
    if not len(kwargs):
        return False

    api = CommentAPI(apiurl)
    # comment_find returns (comment, info); only the comment matters here.
    found, _ = api.comment_find(api.get_comments(**kwargs), bot)
    if not found:
        return False

    age = datetime.utcnow() - found['when']
    return age.total_seconds()
예제 #15
0
    def __init__(self, *args, **kwargs):
        """Initialize openQA bot state on top of the base ReviewBot."""
        super(OpenQABot, self).__init__(*args, **kwargs)
        # Target repositories, per-project settings and API mapping; empty
        # here, presumably filled in by the caller — confirm.
        self.tgt_repo = {}
        self.project_settings = {}
        self.api_map = {}

        self.force = False
        # openQA client handle; None until assigned elsewhere — TODO confirm.
        self.openqa = None
        self.commentapi = CommentAPI(self.apiurl)
        # Caches: test builds per target, repos awaiting results, job lists.
        self.update_test_builds = {}
        self.pending_target_repos = set()
        self.openqa_jobs = {}
예제 #16
0
 def setUp(self):
     """Create a local OBS factory workflow with the users, group and
     project that the comment tests operate on."""
     super(TestCommentOBS, self).setUp()
     self.wf = OBSLocal.FactoryWorkflow()
     self.wf.create_user('factory-auto')
     self.wf.create_user('repo-checker')
     self.wf.create_user('staging-bot')
     self.wf.create_group('factory-staging', ['staging-bot'])
     self.wf.create_project(PROJECT,
                            maintainer={'groups': ['factory-staging']})
     self.api = CommentAPI(self.apiurl)
     # Ensure different test runs operate in unique namespace.
     self.bot = '::'.join([type(self).__name__, str(random.getrandbits(8))])
예제 #17
0
    def setup_wf(self):
        """Build a staging workflow: rings, a frozen staging B, and one wine
        submit request selected into it; cache its comments for inspection.

        Returns the workflow object for further use by the test.
        """
        wf = OBSLocal.StagingWorkflow()
        wf.setup_rings()

        self.c_api = CommentAPI(wf.api.apiurl)

        staging_b = wf.create_staging('B', freeze=True)
        self.prj = staging_b.name

        self.winerq = wf.create_submit_request('devel:wine', 'wine', text='Hallo World')
        # The selection must succeed for the cached fixtures to be meaningful.
        self.assertEqual(True, SelectCommand(wf.api, self.prj).perform(['wine']))
        self.comments = self.c_api.get_comments(project_name=self.prj)
        return wf
예제 #18
0
def remind_comment(apiurl, repeat_age, request_id, project, package=None):
    """Post (or refresh) a maintainer reminder comment on a request."""
    comment_api = CommentAPI(apiurl)
    comments = comment_api.get_comments(request_id=request_id)
    comment, _ = comment_api.comment_find(comments, BOT_NAME)

    if comment:
        age_days = (datetime.utcnow() - comment['when']).days
        if age_days < repeat_age:
            print('  skipping due to previous reminder from {} days ago'.format(age_days))
            return

        # Repeat notification so remove old comment.
        try:
            comment_api.delete(comment['id'])
        except HTTPError as e:
            if e.code != 403:
                raise e
            # Gracefully skip when previous reminder was by another user.
            print('  unable to remove previous reminder')
            return

    userids = sorted(maintainers_get(apiurl, project, package))
    if len(userids):
        mentions = ', '.join('@' + userid for userid in userids)
        message = '{}: {}'.format(mentions, REMINDER)
    else:
        message = REMINDER
    print('  ' + message)
    comment_api.add_comment(request_id=request_id,
                            comment=comment_api.add_marker(message, BOT_NAME))
    def setUp(self):
        """Create the workflow, project, and a uniquely-named ReviewBot."""
        super(TestReviewBotComment, self).setUp()
        self.api = CommentAPI(self.apiurl)
        self.wf = OBSLocal.StagingWorkflow()
        self.wf.create_user('factory-auto')
        self.project = self.wf.create_project(PROJECT)

        # Ensure different test runs operate in unique namespace.
        self.bot = '::'.join([type(self).__name__, str(random.getrandbits(8))])
        self.review_bot = ReviewBot(self.apiurl,
                                    logger=logging.getLogger(self.bot))
        self.review_bot.bot_name = self.bot

        # Subsequent osc calls run as the reviewing user.
        self.osc_user('factory-auto')
예제 #20
0
    def setup_wf(self, description=''):
        """Build a factory workflow with one selected wine submit request.

        description: forwarded to the submit request. The OSRT:ProductVersion
        attribute type is created too — presumably required by the code under
        test; confirm against the accept path.

        Returns the workflow object.
        """
        wf = OBSLocal.FactoryWorkflow()
        wf.setup_rings()

        self.c_api = CommentAPI(wf.api.apiurl)

        staging_b = wf.create_staging('B', freeze=True)
        self.prj = staging_b.name

        self.winerq = wf.create_submit_request('devel:wine', 'wine', text='Hallo World', description=description)
        self.assertEqual(True, SelectCommand(wf.api, self.prj).perform(['wine']))
        self.comments = self.c_api.get_comments(project_name=self.prj)
        wf.create_attribute_type('OSRT', 'ProductVersion', 1)
        return wf
    def __init__(self, api, config):
        """Wire up the install checker for one staging API/project."""
        self.api = api
        # Project-level configuration looked up by target project name.
        self.config = conf.config[api.project]
        self.logger = logging.getLogger('InstallChecker')
        self.commentapi = CommentAPI(api.apiurl)

        # Optional space-separated list restricting the checked architectures.
        self.arch_whitelist = self.config.get('repo_checker-arch-whitelist')
        if self.arch_whitelist:
            self.arch_whitelist = set(self.arch_whitelist.split(' '))

        # Binaries whitelisted for the rings (empty set when unconfigured).
        self.ring_whitelist = set(self.config.get('repo_checker-binary-whitelist-ring', '').split(' '))

        self.cycle_packages = self.config.get('repo_checker-allowed-in-cycles')
        self.calculate_allowed_cycles()

        # Problems already present in the target project; presumably used to
        # filter out pre-existing failures — confirm in the check path.
        self.existing_problems = self.binary_list_existing_problem(api.project, api.cmain_repo)
예제 #22
0
    def __init__(self, *args, **kwargs):
        """Initialize the bot on top of ReviewBot.

        Accepts the base ReviewBot arguments plus two private keyword
        options, ``no_review`` and ``force``, which are consumed here so
        they never reach ReviewBot.__init__.
        """
        # pop() both consumes the option and supplies the default; the
        # `== True` comparison keeps the original strict semantics (any
        # other value counts as False).
        self.no_review = kwargs.pop('no_review', False) == True
        self.force = kwargs.pop('force', False) == True

        ReviewBot.ReviewBot.__init__(self, *args, **kwargs)

        # RPM transaction set for header inspection; signature checks disabled.
        self.ts = rpm.TransactionSet()
        self.ts.setVSFlags(rpm._RPMVSF_NOSIGNATURES)

        self.pkgcache = PkgCache(BINCACHE)

        # reports of source submission
        self.reports = []
        # textual report summary for use in accept/decline message
        # or comments
        self.text_summary = ''

        self.session = DB.db_session()

        # Mirror log records into the database session.
        self.dblogger = LogToDB(self.session)

        self.logger.addFilter(self.dblogger)

        self.commentapi = CommentAPI(self.apiurl)
 def setUp(self):
     """Prepare a stub CommentAPI plus two sample comments carrying this
     bot's HTML-comment marker (one bare, one with key=value info)."""
     self.api = CommentAPI('bogus')
     self.bot = type(self).__name__
     self.comments = {
         1: {'comment': '<!-- {} -->\n\nshort comment'.format(self.bot)},
         2: {'comment': '<!-- {} foo=bar distro=openSUSE -->\n\nshort comment'.format(self.bot)}
     }
예제 #24
0
 def __init__(self, *args, **kwargs):
     """Initialize OpenQABot, delegating common setup to ReviewBot."""
     super(OpenQABot, self).__init__(*args, **kwargs)
     # Empty containers for target repos / settings / API mapping;
     # presumably populated after construction — confirm callers.
     self.tgt_repo = {}
     self.project_settings = {}
     self.api_map = {}
     # Fixed bot name instead of the class-derived default.
     self.bot_name = 'openqa'
     self.force = False
     # openQA client; None until wired up elsewhere — TODO confirm.
     self.openqa = None
     self.commentapi = CommentAPI(self.apiurl)
예제 #25
0
class UnignoreCommand(object):
    MESSAGE = 'Unignored: returned to active backlog.'

    def __init__(self, api):
        self.api = api
        # Comment client for announcing the unignore on each request.
        self.comment = CommentAPI(self.api.apiurl)

    def perform(self, requests, cleanup=False):
        """
        Unignore a request by removing from ignore list.
        """

        requests_ignored = self.api.get_ignored_requests()
        before = len(requests_ignored)

        if len(requests) == 1 and requests[0] == 'all':
            # Special token: drop the whole ignore list at once.
            requests_ignored = {}
        else:
            for request_id in RequestFinder.find_sr(requests, self.api):
                if request_id not in requests_ignored:
                    continue
                print('{}: unignored'.format(request_id))
                del requests_ignored[request_id]
                self.comment.add_comment(request_id=str(request_id), comment=self.MESSAGE)

        if cleanup:
            now = datetime.now()
            # Also prune entries whose request left new/review state a while ago.
            for request_id in set(requests_ignored):
                request = get_request(self.api.apiurl, str(request_id))
                if request.state.name in ('new', 'review'):
                    continue
                age = now - dateutil.parser.parse(request.state.when)
                if age.days > 3:
                    print('Removing {} which was {} {} days ago'
                          .format(request_id, request.state.name, age.days))
                    del requests_ignored[request_id]

        removed = before - len(requests_ignored)
        if removed > 0:
            self.api.set_ignored_requests(requests_ignored)
            print('Unignored {} requests'.format(removed))
        else:
            print('No requests to unignore')

        return True
예제 #26
0
    def setUp(self):
        """Prepare CommentAPI plus a uniquely-named ReviewBot instance."""
        super(TestReviewBotComment, self).setUp()
        self.api = CommentAPI(self.apiurl)

        # Ensure different test runs operate in unique namespace.
        self.bot = '::'.join([type(self).__name__, str(random.getrandbits(8))])
        bot_logger = logging.getLogger(self.bot)
        self.review_bot = ReviewBot(self.apiurl, logger=bot_logger)
        self.review_bot.bot_name = self.bot

        self.osc_user('factory-auto')
예제 #27
0
class IgnoreCommand(object):
    MESSAGE = 'Ignored: removed from active backlog.'

    def __init__(self, api):
        self.api = api
        self.comment = CommentAPI(self.api.apiurl)

    def perform(self, requests, message=None):
        """
        Ignore a request from "list" and "adi" commands until unignored.
        """

        # Record and announce every matching submit request.
        for request_id in RequestFinder.find_sr(requests, self.api):
            print('{}: ignored'.format(request_id))
            # `x if x else y` is equivalent to `x or y`.
            note = message or self.MESSAGE
            self.api.add_ignored_request(request_id, note)
            self.comment.add_comment(request_id=str(request_id), comment=note)

        return True
예제 #28
0
    def test_accept_comments(self):
        """Accepting a staging clears its comments after writing a summary."""
        c_api = CommentAPI(self.api.apiurl)
        staging_c = 'openSUSE:Factory:Staging:C'
        before = c_api.get_comments(project_name=staging_c)

        # Accept staging C (containing apparmor and mariadb)
        self.assertEqual(True, AcceptCommand(self.api).perform(staging_c))

        # Comments are cleared up
        after = c_api.get_comments(project_name=staging_c)
        self.assertNotEqual(len(before), 0)
        self.assertEqual(len(after), 0)

        # But the summary comment was written at some point
        self.assertEqual(len(self.obs.comment_bodies), 1)
        body = self.obs.comment_bodies[0]
        self.assertTrue('The following packages have been submitted to openSUSE:Factory' in body)
        self.assertTrue('apparmor' in body)
        self.assertTrue('mariadb' in body)
    def create_comments(self, state):
        """Sync per-package Installcheck comments from the rebuild state.

        Collects problems per package/arch (skipping failures rebuilt less
        than two days ago), removes this user's comments for packages that no
        longer have problems, and (re)posts a marker-tagged comment per
        package that still does.
        """
        comments = dict()
        for source, details in state['check'].items():
            rebuild = dateutil.parser.parse(details["rebuild"])
            if datetime.now() - rebuild < timedelta(days=2):
                self.logger.debug(f"Ignore {source} - problem too recent")
                continue
            # source is a 4-component path ending in <arch>/<rpm>; the rpm
            # part may carry a ':'-suffix which is stripped here.
            _, _, arch, rpm = source.split('/')
            rpm = rpm.split(':')[0]
            comments.setdefault(rpm, {})
            comments[rpm][arch] = details['problem']

        # Remove our stale comments: packages with no current problems.
        url = makeurl(self.apiurl, ['comments', 'user'])
        root = ET.parse(http_GET(url)).getroot()
        for comment in root.findall('.//comment'):
            if comment.get('project') != self.project:
                continue
            if comment.get('package') in comments:
                continue
            self.logger.info("Removing comment for package {}".format(
                comment.get('package')))
            url = makeurl(self.apiurl, ['comment', comment.get('id')])
            http_DELETE(url)

        commentapi = CommentAPI(self.apiurl)
        MARKER = 'Installcheck'

        for package in comments:
            # Render one markdown section per architecture, problems sorted
            # for a stable comparison against the existing comment.
            newcomment = ''
            for arch in sorted(comments[package]):
                newcomment += f"\n\n**Installcheck problems for {arch}**\n\n"
                for problem in sorted(comments[package][arch]):
                    newcomment += "+ " + problem + "\n"

            newcomment = commentapi.add_marker(newcomment.strip(), MARKER)
            oldcomments = commentapi.get_comments(project_name=self.project,
                                                  package_name=package)
            oldcomment, _ = commentapi.comment_find(oldcomments, MARKER)
            # Unchanged content: keep the existing comment in place.
            if oldcomment and oldcomment['comment'] == newcomment:
                continue

            if oldcomment:
                commentapi.delete(oldcomment['id'])
            self.logger.debug("Adding comment to {}/{}".format(
                self.project, package))
            commentapi.add_comment(project_name=self.project,
                                   package_name=package,
                                   comment=newcomment)
예제 #30
0
 def setUp(self):
     """Create a local OBS staging workflow with the users, group and
     project that the comment tests operate on."""
     super(TestCommentOBS, self).setUp()
     self.wf = OBSLocal.StagingWorkflow()
     self.wf.create_user('factory-auto')
     self.wf.create_user('repo-checker')
     self.wf.create_user('staging-bot')
     self.wf.create_group('factory-staging', ['staging-bot'])
     self.wf.create_project(PROJECT, maintainer={'groups': ['factory-staging']})
     self.api = CommentAPI(self.apiurl)
     # Ensure different test runs operate in unique namespace.
     self.bot = '::'.join([type(self).__name__, str(random.getrandbits(8))])
예제 #31
0
class TestAccept(unittest.TestCase):

    def setup_vcr(self):
        """Prepare a staging workflow with one wine request selected into B."""
        wf = OBSLocal.StagingWorkflow()
        wf.setup_rings()

        self.c_api = CommentAPI(wf.api.apiurl)

        staging = wf.create_staging('B', freeze=True)
        self.prj = staging.name

        self.winerq = wf.create_submit_request('devel:wine', 'wine', text='Hallo World')
        self.assertEqual(True, SelectCommand(wf.api, self.prj).perform(['wine']))
        self.comments = self.c_api.get_comments(project_name=self.prj)
        self.assertGreater(len(self.comments), 0)
        return wf

    def test_accept_comments(self):
        """Accepting the staging removes its tracking comments."""
        wf = self.setup_vcr()

        self.assertEqual(True, AcceptCommand(wf.api).perform(self.prj))

        # Comments are cleared up
        remaining = self.c_api.get_comments(project_name=self.prj)
        self.assertEqual(len(remaining), 0)

    def test_accept_final_comment(self):
        """The acceptance summary comment is visible before cleanup runs."""
        wf = self.setup_vcr()

        # snipe out cleanup to see the comments before the final countdown
        wf.api.staging_deactivate = MagicMock(return_value=True)

        self.assertEqual(True, AcceptCommand(wf.api).perform(self.prj))

        after = self.c_api.get_comments(project_name=self.prj)
        self.assertGreater(len(after), len(self.comments))

        # The single freshly-added comment is the acceptance summary.
        added_id = (set(after) - set(self.comments)).pop()
        self.assertEqual(
            'Project "{}" accepted. The following packages have been submitted to openSUSE:Factory: wine.'.format(self.prj),
            after[added_id]['comment'])
예제 #32
0
    def __init__(self, *args, **kwargs):
        """Initialize the legal-review bot on top of ReviewBot."""
        ReviewBot.ReviewBot.__init__(self, *args, **kwargs)

        # Post review results as comments by default.
        self.do_comments = True
        self.legaldb = None
        self.commentapi = CommentAPI(self.apiurl)
        self.message = None
        # Request-reference prefix depends on the API flavor (self.ibs is set
        # by ReviewBot from the apiurl).  The original pre-assignment of None
        # was dead code, immediately overwritten by the if/else.
        self.apinick = 'ibs#' if self.ibs else 'obs#'
예제 #33
0
    def test_accept_comments(self):
        """Accept staging C and verify comment cleanup plus summary content."""
        comment_api = CommentAPI(self.api.apiurl)
        project = 'openSUSE:Factory:Staging:C'
        initial = comment_api.get_comments(project_name=project)

        # Accept staging C (containing apparmor and mariadb)
        self.assertEqual(True, AcceptCommand(self.api).perform(project))

        # All staging comments are removed afterwards.
        remaining = comment_api.get_comments(project_name=project)
        self.assertNotEqual(len(initial), 0)
        self.assertEqual(len(remaining), 0)

        # Exactly one summary comment was posted along the way.
        self.assertEqual(len(self.obs.comment_bodies), 1)
        summary = self.obs.comment_bodies[0]
        self.assertIn(
            'The following packages have been submitted to openSUSE:Factory',
            summary)
        self.assertIn('apparmor', summary)
        self.assertIn('mariadb', summary)
예제 #34
0
class IgnoreCommand(object):
    """Mark requests as ignored so the "list" and "adi" commands skip them."""

    def __init__(self, api):
        self.api = api
        self.comment = CommentAPI(self.api.apiurl)

    def perform(self, request_ids, message=None):
        """
        Ignore a request from "list" and "adi" commands until unignored.
        """

        requests_ignored = self.api.get_ignored_requests()
        length = len(requests_ignored)

        for request_id in request_ids:
            print('Processing {}'.format(request_id))
            check = self.check_and_comment(request_id, message)
            if check is not True:
                print('- {}'.format(check))
            # The ignored-requests dict is keyed by int while request_ids
            # arrive as strings; compare with the same (int) key we store,
            # otherwise already-ignored requests are never recognized.
            elif int(request_id) not in requests_ignored:
                requests_ignored[int(request_id)] = message

        diff = len(requests_ignored) - length
        if diff > 0:
            print('Ignoring {} requests'.format(diff))
            self.api.set_ignored_requests(requests_ignored)
        else:
            print('No new requests to ignore')

        return True

    def check_and_comment(self, request_id, message=None):
        """Validate that the request exists and targets our project.

        Optionally posts *message* as a comment on the request. Returns True
        on success or a short failure-reason string.
        """
        request = get_request(self.api.apiurl, request_id)
        if not request:
            return 'not found'
        if request.actions[0].tgt_project != self.api.project:
            return 'not targeting {}'.format(self.api.project)
        if message:
            self.comment.add_comment(request_id=request_id, comment=message)

        return True
예제 #35
0
    def __init__(self, *args, **kwargs):
        """Set up the checker: rpm transaction set, DB-backed logging, comments."""
        ReviewBot.ReviewBot.__init__(self, *args, **kwargs)

        self.no_review = False
        self.force = False

        # rpm transaction set used for header parsing only; skip signature checks.
        self.ts = rpm.TransactionSet()
        self.ts.setVSFlags(rpm._RPMVSF_NOSIGNATURES)

        # Per-submission reports plus a textual summary used in the
        # accept/decline message or in comments.
        self.reports = []
        self.text_summary = ''

        # Mirror log records into the database via a logging filter.
        self.session = DB.db_session()
        self.dblogger = LogToDB(self.session)
        self.logger.addFilter(self.dblogger)

        self.commentapi = CommentAPI(self.apiurl)
    def __init__(self, api, config):
        """Derive whitelists and ignore lists for install checks from *config*."""
        self.api = api
        self.logger = logging.getLogger('InstallChecker')
        self.commentapi = CommentAPI(api.apiurl)

        # Optional space-separated whitelist restricting checked architectures.
        arch_whitelist = config.get('repo_checker-arch-whitelist')
        self.arch_whitelist = set(arch_whitelist.split(' ')) if arch_whitelist else arch_whitelist

        ring = config.get('repo_checker-binary-whitelist-ring', '')
        self.ring_whitelist = set(ring.split(' '))

        self.cycle_packages = config.get('repo_checker-allowed-in-cycles')
        self.calculate_allowed_cycles()

        duplicated = config.get('installcheck-ignore-duplicated-binaries', '')
        self.ignore_duplicated = set(duplicated.split(' '))
        conflicts = config.get('installcheck-ignore-conflicts', '')
        self.ignore_conflicts = set(conflicts.split(' '))
        self.ignore_deletes = str2bool(config.get('installcheck-ignore-deletes', 'False'))
예제 #37
0
    def setUp(self):
        """Create a local staging workflow and a uniquely-named review bot."""
        super(TestReviewBotComment, self).setUp()
        self.api = CommentAPI(self.apiurl)
        self.wf = OBSLocal.StagingWorkflow()
        self.wf.create_user('factory-auto')
        self.project = self.wf.create_project(PROJECT)

        # Ensure different test runs operate in unique namespace.
        self.bot = '::'.join([type(self).__name__, str(random.getrandbits(8))])
        self.review_bot = ReviewBot(self.apiurl, logger=logging.getLogger(self.bot))
        self.review_bot.bot_name = self.bot

        self.osc_user('factory-auto')
예제 #38
0
    def setup_vcr(self):
        """Build a staging workflow with rings, a frozen staging 'B', and a wine
        submit request already selected into it.

        Stores the comment API, staging project name and the staging's current
        comments on ``self`` for later assertions; returns the workflow.
        """
        wf = OBSLocal.StagingWorkflow()
        wf.setup_rings()

        self.c_api = CommentAPI(wf.api.apiurl)

        staging_b = wf.create_staging('B', freeze=True)
        self.prj = staging_b.name

        self.winerq = wf.create_submit_request('devel:wine', 'wine', text='Hallo World')
        self.assertEqual(True, SelectCommand(wf.api, self.prj).perform(['wine']))
        # Selecting must have produced at least one comment on the staging.
        self.comments = self.c_api.get_comments(project_name=self.prj)
        self.assertGreater(len(self.comments), 0)
        return wf
예제 #39
0
    def __init__(self, *args, **kwargs):
        """Initialize bot defaults; review overrides are handled externally."""
        ReviewBot.ReviewBot.__init__(self, *args, **kwargs)

        self.do_comments = True
        self.legaldb = None
        self.legaldb_headers = {}
        self.commentapi = CommentAPI(self.apiurl)
        self.message = None
        # Request-reference prefix depends on which build service we talk to.
        self.apinick = 'ibs#' if self.ibs else 'obs#'
        self.override_allow = False  # Handled via external tool.
예제 #40
0
    def __init__(self, *args, **kwargs):
        """Set up the Leap review bot with its helper checkers and comment support."""
        ReviewBot.ReviewBot.__init__(self, *args, **kwargs)

        self.do_comments = True
        self.commentapi = CommentAPI(self.apiurl)

        # Delegated sub-checkers, constructed with the same arguments.
        self.maintbot = MaintenanceChecker(*args, **kwargs)
        # for FactorySourceChecker
        self.factory = FactorySourceChecker(*args, **kwargs)

        # Per-request review state flags.
        self.needs_reviewteam = False
        self.pending_factory_submission = False
        self.source_in_factory = None
        self.needs_release_manager = False
        self.release_manager_group = 'leap-reviewers'
        self.must_approve_version_updates = False
        self.must_approve_maintenance_updates = False

        # Marker embedded in bot comments to track review progress and outcome.
        self.comment_marker_re = re.compile(r'<!-- leaper state=(?P<state>done|seen)(?: result=(?P<result>accepted|declined))? -->')

        # Capture log output into comment_log for later use in comments.
        self.comment_log = None
        self.commentlogger = LogToString(self, 'comment_log')
        self.logger.addFilter(self.commentlogger)
예제 #41
0
    def __init__(self,
                 apiurl=None,
                 dryrun=False,
                 logger=None,
                 user=None,
                 group=None):
        """Base review bot state.

        :param apiurl: OBS API endpoint; api.suse.de URLs mark the internal instance
        :param dryrun: when True, reviews/comments are not written back
        :param logger: logger instance used for all bot output
        :param user: user to review as
        :param group: group to review as
        """
        self.apiurl = apiurl
        # Internal build service detected purely from the API URL.
        self.ibs = apiurl.startswith('https://api.suse.de')
        self.dryrun = dryrun
        self.logger = logger
        self.review_user = user
        self.review_group = group
        self.requests = []
        self.review_messages = ReviewBot.DEFAULT_REVIEW_MESSAGES
        self._review_mode = 'normal'
        self.fallback_user = None
        self.fallback_group = None
        self.comment_api = CommentAPI(self.apiurl)
        # Subclasses may override; used e.g. as the comment marker identity.
        self.bot_name = self.__class__.__name__
        self.only_one_action = False
        self.request_default_return = None
        self.comment_handler = False

        self.load_config()
    def __init__(self, api, config):
        """Prepare install-check state from per-project configuration."""
        self.api = api
        self.config = conf.config[api.project]
        self.logger = logging.getLogger('InstallChecker')
        self.commentapi = CommentAPI(api.apiurl)

        # Optional space-separated whitelist restricting checked architectures.
        arch_whitelist = self.config.get('repo_checker-arch-whitelist')
        self.arch_whitelist = set(arch_whitelist.split(' ')) if arch_whitelist else arch_whitelist

        ring = self.config.get('repo_checker-binary-whitelist-ring', '')
        self.ring_whitelist = set(ring.split(' '))

        self.cycle_packages = self.config.get('repo_checker-allowed-in-cycles')
        self.calculate_allowed_cycles()

        # Problems already present in the main repo are not held against requests.
        self.existing_problems = self.binary_list_existing_problem(api.project, api.cmain_repo)
        duplicated = self.config.get('installcheck-ignore-duplicated-binaries', '')
        self.ignore_duplicated = set(duplicated.split(' '))
예제 #43
0
    def __init__(self, *args, **kwargs):
        """Set up the checker: rpm transaction set, DB-backed logging, comments."""
        ReviewBot.ReviewBot.__init__(self, *args, **kwargs)

        self.no_review = False
        self.force = False

        # rpm transaction set used for header parsing; skip signature checks.
        self.ts = rpm.TransactionSet()
        self.ts.setVSFlags(rpm._RPMVSF_NOSIGNATURES)

        # reports of source submission
        self.reports = []
        # textual report summary for use in accept/decline message
        # or comments
        self.text_summary = ''

        self.session = DB.db_session()

        self.dblogger = LogToDB(self.session)

        # Mirror log records into the database via a logging filter.
        self.logger.addFilter(self.dblogger)

        self.commentapi = CommentAPI(self.apiurl)
예제 #44
0
    def __init__(self, apiurl=None, dryrun=False, logger=None, user=None, group=None):
        """Base review bot state (PEP8: no spaces around keyword defaults).

        :param apiurl: OBS API endpoint; api.suse.de URLs mark the internal instance
        :param dryrun: when True, reviews/comments are not written back
        :param logger: logger instance used for all bot output
        :param user: user to review as
        :param group: group to review as
        """
        self.apiurl = apiurl
        # Internal build service detected purely from the API URL.
        self.ibs = apiurl.startswith('https://api.suse.de')
        self.dryrun = dryrun
        self.logger = logger
        self.review_user = user
        self.review_group = group
        self.requests = []
        self.review_messages = ReviewBot.DEFAULT_REVIEW_MESSAGES
        self._review_mode = 'normal'
        self.fallback_user = None
        self.fallback_group = None
        self.comment_api = CommentAPI(self.apiurl)
        self.bot_name = self.__class__.__name__
        self.only_one_action = False
        self.request_default_return = None
        self.comment_handler = False
        # Reviews may be overridden via comments by members of this group.
        self.override_allow = True
        self.override_group_key = '{}-override-group'.format(self.bot_name.lower())
        self.lookup = PackageLookup(self.apiurl)

        self.load_config()
예제 #45
0
    def __init__(self, *args, **kwargs):
        """Pop bot-specific keyword arguments, then delegate to ReviewBot.

        Recognized kwargs (removed before the base-class call):
        force (only True enables), openqa (server URL for the client),
        do_comments (bool; None keeps the default of True).
        """
        self.force = False
        self.openqa = None
        self.do_comments = True
        # kwargs.pop replaces the previous `in`/`del` dance; semantics are
        # identical but each option is handled in one place.
        if kwargs.pop('force', None) is True:
            self.force = True
        if 'openqa' in kwargs:
            self.openqa = OpenQA_Client(server=kwargs.pop('openqa'))
        if 'do_comments' in kwargs:
            do_comments = kwargs.pop('do_comments')
            if do_comments is not None:
                self.do_comments = do_comments

        ReviewBot.ReviewBot.__init__(self, *args, **kwargs)

        self.logger.debug(self.do_comments)

        self.commentapi = CommentAPI(self.apiurl)
        self.update_test_builds = dict()
예제 #46
0
    def update_status_comments(self, project, command):
        """
        Refresh the status comments, used for notification purposes, based on
        the current list of requests. To ensure that all involved users
        (and nobody else) get notified, old status comments are deleted and
        a new one is created.
        :param project: project name
        :param command: name of the command to include in the message
        """

        # TODO: we need to discuss the best way to keep track of status
        # comments. Right now they are marked with an initial markdown
        # comment. Maybe a cleaner approach would be to store something
        # like 'last_status_comment_id' in the pseudometa. But the current
        # OBS API for adding comments doesn't return the id of the created
        # comment.

        comment_api = CommentAPI(self.apiurl)

        comments = comment_api.get_comments(project_name=project)
        for comment in comments.values():
            # TODO: update the comment removing the user mentions instead of
            # deleting the whole comment. But there is currently not call in
            # OBS API to update a comment
            if comment['comment'].startswith('<!--- osc staging'):
                comment_api.delete(comment['id'])
                break  # There can be only one! (if we keep deleting them)

        meta = self.get_prj_pseudometa(project)
        lines = ['<!--- osc staging %s --->' % command]
        lines.append('The list of requests tracked in %s has changed:\n' %
                     project)
        for req in meta['requests']:
            # NOTE(review): 'autor' (sic) appears to be the key actually stored
            # in the pseudometa; fixing the spelling here would break lookups —
            # confirm against the writer side before changing.
            author = req.get('autor', None)
            if not author:
                # Old style metadata
                author = get_request(self.apiurl, str(req['id'])).get_creator()
            lines.append('  * Request#%s for package %s submitted by @%s' %
                         (req['id'], req['package'], author))
        msg = '\n'.join(lines)
        comment_api.add_comment(project_name=project, comment=msg)
예제 #47
0
    def update_status_comments(self, project, command):
        """
        Refresh the status comments, used for notification purposes, based on
        the current list of requests. To ensure that all involved users
        (and nobody else) get notified, old status comments are deleted and
        a new one is created.
        :param project: project name
        :param command: name of the command to include in the message
        """

        # TODO: we need to discuss the best way to keep track of status
        # comments. Right now they are marked with an initial markdown
        # comment. Maybe a cleaner approach would be to store something
        # like 'last_status_comment_id' in the pseudometa. But the current
        # OBS API for adding comments doesn't return the id of the created
        # comment.

        comment_api = CommentAPI(self.apiurl)

        comments = comment_api.get_comments(project_name=project)
        for comment in comments.values():
            # TODO: update the comment removing the user mentions instead of
            # deleting the whole comment. But there is currently not call in
            # OBS API to update a comment
            if comment['comment'].startswith('<!--- osc staging'):
                comment_api.delete(comment['id'])
                break  # There can be only one! (if we keep deleting them)

        meta = self.get_prj_pseudometa(project)
        lines = ['<!--- osc staging %s --->' % command]
        lines.append('The list of requests tracked in %s has changed:\n' % project)
        for req in meta['requests']:
            # NOTE(review): 'autor' (sic) appears to be the key actually stored
            # in the pseudometa — confirm against the writer side before fixing.
            author = req.get('autor', None)
            if not author:
                # Old style metadata
                author = get_request(self.apiurl, str(req['id'])).get_creator()
            lines.append('  * Request#%s for package %s submitted by @%s' % (req['id'], req['package'], author))
        msg = '\n'.join(lines)
        comment_api.add_comment(project_name=project, comment=msg)
예제 #48
0
class OpenQABot(ReviewBot.ReviewBot):

    """Review bot that schedules openQA runs for maintenance updates and
    accepts/declines requests based on the test results.

    NOTE(review): the previous docstring ("check ABI of library packages")
    was an apparent copy/paste from a different bot.
    """

    def __init__(self, *args, **kwargs):
        """Initialize openQA-specific state; the openqa client is set later."""
        super(OpenQABot, self).__init__(*args, **kwargs)
        self.tgt_repo = {}           # target-repo settings keyed by openQA base URL
        self.project_settings = {}   # per-product update settings
        self.api_map = {}            # product prefix -> settings map
        self.bot_name = 'openqa'
        self.force = False
        self.openqa = None
        self.commentapi = CommentAPI(self.apiurl)

    def gather_test_builds(self):
        """For each target repo, record the latest openQA build and its jobs.

        Repos whose jobs are still in progress are marked pending so no new
        build is triggered for them later.
        """
        for prj, u in self.tgt_repo[self.openqa.baseurl].items():
            buildnr = 0
            cjob = 0
            for j in self.jobs_for_target(u):
                # avoid going backwards in job ID
                if cjob > int(j['id']):
                    continue
                buildnr = j['settings']['BUILD']
                cjob = int(j['id'])
            self.update_test_builds[prj] = buildnr
            jobs = self.jobs_for_target(u, build=buildnr)
            self.openqa_jobs[prj] = jobs
            if self.calculate_qa_status(jobs) == QA_INPROGRESS:
                self.pending_target_repos.add(prj)

    # reimplemention from baseclass
    # reimplemention from baseclass
    def check_requests(self):
        """Run one full pass: collect incident state, review all requests, then
        retrigger openQA runs for target repos that are not still pending."""

        # to be filled by repos of active
        self.incident_repos = dict()
        self.update_test_builds = {}
        self.pending_target_repos = set()
        self.openqa_jobs = {}

        if self.ibs:
            self.check_suse_incidents()
        else:
            self.check_opensuse_incidents()

        # first calculate the latest build number for current jobs
        self.gather_test_builds()

        super(OpenQABot, self).check_requests()

        # now make sure the jobs are for current repo
        for prj, u in self.tgt_repo[self.openqa.baseurl].items():
            if prj in self.pending_target_repos:
                self.logger.debug("Do not trigger for " + prj)
                continue
            self.trigger_build_for_target(prj, u)

    # check a set of repos for their primary checksums
    @staticmethod
    def calculate_repo_hash(repos, incidents):
        """Digest of the primary checksums of *repos* plus the open *incidents*.

        Bumping the salt below forces a fresh digest (and thus new test runs)
        without any repo change. HTTPError while fetching repomd.xml
        propagates to the caller.
        """
        m = md5.new()  # NOTE(review): py2-era `md5` module — confirm vs hashlib
        # if you want to force it, increase this number
        m.update('b')
        for url in repos:
            url += '/repodata/repomd.xml'
            # The previous `try: ... except HTTPError: raise` was a no-op
            # re-raise; letting the exception propagate is equivalent.
            root = ET.parse(osc.core.http_GET(url)).getroot()
            cs = root.find(
                './/{http://linux.duke.edu/metadata/repo}data[@type="primary"]/{http://linux.duke.edu/metadata/repo}checksum')
            m.update(cs.text)
        # now add the open incidents
        m.update(json.dumps(incidents, sort_keys=True))
        digest = m.hexdigest()
        open_incidents = sorted(incidents.keys())
        if open_incidents:
            digest += ':' + ','.join(open_incidents)
        return digest

    def is_incident_in_testing(self, incident):
        """Return the open maintenance_release request element for *incident*,
        or None when none exists (the incident is still in staging)."""
        # hard coded for now as we only run this code for SUSE Maintenance workflow
        project = 'SUSE:Maintenance:{}'.format(incident)

        xpath = "(state/@name='review') and (action/source/@project='{}' and action/@type='maintenance_release')".format(project)
        res = osc.core.search(self.apiurl, request=xpath)['request']
        # return the one and only (or None)
        return res.find('request')

    def calculate_incidents(self, incidents):
        """
        Get incident numbers from the SUSE:Maintenance:Test projects.

        :param incidents: dict mapping issue-variable kind to project name
        :return: list of (openQA variable name, comma-separated incident
            numbers) tuples — note: a list of tuples, not a dict as the old
            docstring claimed.
        """
        self.logger.debug("calculate_incidents: {}".format(pformat(incidents)))
        l_incidents = []
        for kind, prj in incidents.items():
            packages = osc.core.meta_get_packagelist(self.apiurl, prj)
            # Fresh name: the old code rebound the *incidents* parameter here,
            # shadowing the very dict being iterated.
            released = []
            # filter out incidents in staging
            for incident in packages:
                # remove patchinfo. prefix
                incident = incident.replace('_', '.').split('.')[1]
                req = self.is_incident_in_testing(incident)
                # without release request it's in staging
                if not req:
                    continue

                # skip kgraft patches from aggregation
                req_ = osc.core.Request()
                req_.read(req)
                src_prjs = {a.src_project for a in req_.actions}
                if SUSEUpdate.kgraft_target(self.apiurl, src_prjs.pop()):
                    self.logger.debug(
                        "calculate_incidents: Incident is kgraft - {} ".format(incident))
                    continue

                released.append(incident)

            l_incidents.append((kind + '_TEST_ISSUES', ','.join(released)))
        self.logger.debug("Calculate incidents:{}".format(pformat(l_incidents)))
        return l_incidents

    def jobs_for_target(self, data, build=None):
        """Query openQA for the latest relevant jobs matching *data*'s settings.

        With *build* given, restrict to that build; otherwise restrict to the
        configured test name.
        """
        settings = data['settings']
        query = {
            'distri': settings['DISTRI'],
            'version': settings['VERSION'],
            'arch': settings['ARCH'],
            'flavor': settings['FLAVOR'],
            'scope': 'relevant',
            'latest': '1',
        }
        if build:
            query['build'] = build
        else:
            query['test'] = data['test']
        self.logger.debug("Get jobs: {}".format(pformat(query)))
        return self.openqa.openqa_request('GET', 'jobs', query)['jobs']

    # we don't know the current BUILD and querying all jobs is too expensive
    # so we need to check for one known TEST first
    # if that job doesn't contain the proper hash, we trigger a new one
    # and then we know the build
    def trigger_build_for_target(self, prj, data):
        """Schedule a new openQA run for *prj* unless today's repo state was
        already tested.

        :param prj: target project name
        :param data: repo description with 'repos', 'settings' and optionally
            'incidents' keys
        """
        today = date.today().strftime("%Y%m%d")

        try:
            repohash = self.calculate_repo_hash(data['repos'], self.incident_repos.get(prj, {}))
        except HTTPError as e:
            # repo metadata not (yet) available - skip this round
            self.logger.debug("REPOHASH not calculated with response {}".format(e))
            return

        buildnr = None
        jobs = self.jobs_for_target(data)
        for job in jobs:
            if job['settings'].get('REPOHASH', '') == repohash:
                # take the last in the row
                buildnr = job['settings']['BUILD']
        self.update_test_builds[prj] = buildnr
        # ignore old build numbers, we want a fresh run every day
        # to find regressions in the tests and to get data about
        # randomly failing tests
        if buildnr and buildnr.startswith(today):
            return

        buildnr = 0

        # not found, then check for the next free build nr
        for job in jobs:
            build = job['settings']['BUILD']
            if build and build.startswith(today):
                try:
                    nr = int(build.split('-')[1])
                    if nr > buildnr:
                        buildnr = nr
                except ValueError:
                    continue

        buildnr = "{!s}-{:d}".format(today, buildnr + 1)

        s = data['settings']
        # now schedule it for real
        if 'incidents' in data.keys():
            for x, y in self.calculate_incidents(data['incidents']):
                s[x] = y
        s['BUILD'] = buildnr
        s['REPOHASH'] = repohash
        self.logger.debug("Prepared: {}".format(pformat(s)))
        if not self.dryrun:
            try:
                self.logger.info("Openqa isos POST {}".format(pformat(s)))
                self.openqa.openqa_request('POST', 'isos', data=s, retries=1)
            except Exception as e:
                # best effort: a failed POST is logged, not fatal
                self.logger.error(e)
        self.update_test_builds[prj] = buildnr

    def request_get_openqa_status(self, req):
        """Determine the openQA jobs and aggregate QA state for *req*.

        Only maintenance_release requests are considered. Repo-level jobs are
        only counted once they actually include this incident; otherwise the
        state is reported as still in progress.

        :return: (jobs, QA_* state) tuple
        """
        types = {a.type for a in req.actions}
        if 'maintenance_release' not in types:
            return [], QA_UNKNOWN

        src_prjs = {a.src_project for a in req.actions}
        if len(src_prjs) != 1:
            raise Exception("can't handle maintenance_release from different incidents")
        build = src_prjs.pop()
        incident_id = build.split(':')[-1]
        tgt_prjs = {a.tgt_project for a in req.actions}
        jobs = self.openqa_jobs.get(build, [])
        qa_status = self.calculate_qa_status(jobs)
        if qa_status == QA_UNKNOWN or qa_status == QA_INPROGRESS:
            return jobs, qa_status

        # check if the repo jobs include the incident
        repo_jobs = []
        for prj in sorted(tgt_prjs):
            repo_settings = self.tgt_repo.get(self.openqa.baseurl, {})
            if prj in repo_settings:
                repo_jobs += self.openqa_jobs[prj]
        for job in repo_jobs:
            foundissue = False
            for key, value in job['settings'].items():
                if key.endswith('_TEST_ISSUES'):
                    if incident_id in value.split(','):
                        foundissue = True
            if not foundissue:
                self.logger.info("Repo job {} not for {} - ignoring".format(job['id'], incident_id))
                return jobs, QA_INPROGRESS
            # print(foundissue, incident_id, json.dumps(job['settings'], indent=4))

        jobs += repo_jobs
        return jobs, self.calculate_qa_status(jobs)

    def calculate_qa_status(self, jobs=None):
        """Aggregate openQA job results into one of the QA_* states.

        Clones are skipped and only the newest job per name is kept; any
        running job yields QA_INPROGRESS, otherwise any failure yields
        QA_FAILED, else QA_PASSED.
        """
        if not jobs:
            return QA_UNKNOWN

        latest = {}
        has_failed = False
        in_progress = False

        for job in jobs:
            # superseded by a clone -> ignore
            if job['clone_id']:
                continue
            name = job['name']

            # keep only the newest job per name
            if name in latest and int(job['id']) < int(latest[name]['id']):
                continue
            latest[name] = job

            if job['state'] not in ('cancelled', 'done'):
                in_progress = True
            elif job['result'] not in ('passed', 'softfailed'):
                has_failed = True

        if not latest:
            return QA_UNKNOWN
        if in_progress:
            return QA_INPROGRESS
        return QA_FAILED if has_failed else QA_PASSED

    # escape markdown
    @staticmethod
    def emd(text):
        """Escape underscores in *text* so markdown renders them literally.

        (Parameter renamed from `str`, which shadowed the builtin.)
        """
        return text.replace('_', r'\_')

    @staticmethod
    def get_step_url(testurl, modulename):
        """Markdown link to the first failed step of *modulename* in a test."""
        fails_url = testurl + '/modules/{!s}/fails'.format(quote_plus(modulename))
        # openQA reports which step failed first; default to step 1.
        first_failed = requests.get(fails_url).json().get('first_failed_step', 1)
        return "[{!s}]({!s}#step/{!s}/{:d})".format(OpenQABot.emd(modulename), testurl, modulename, first_failed)

    @staticmethod
    def job_test_name(job):
        """Human-readable "TEST@MACHINE" label for an openQA job, markdown-escaped."""
        settings = job['settings']
        return "{!s}@{!s}".format(OpenQABot.emd(settings['TEST']), OpenQABot.emd(settings['MACHINE']))

    def summarize_one_openqa_job(self, job):
        """One markdown bullet describing *job*.

        Returns None while the job has no verdict yet, '' for
        passed/softfailed jobs (nothing to report), otherwise a markdown line
        linking the test and its failed modules.
        """
        testurl = osc.core.makeurl(self.openqa.baseurl, ['tests', str(job['id'])])
        # idiomatic `not in` (was: `not job['result'] in [...]`)
        if job['result'] not in ['passed', 'failed', 'softfailed']:
            rstring = job['result']
            if rstring == 'none':
                # still scheduled/running - no verdict yet
                return None
            return '\n- [{!s}]({!s}) is {!s}'.format(self.job_test_name(job), testurl, rstring)

        modstrings = []
        for module in job['modules']:
            if module['result'] != 'failed':
                continue
            modstrings.append(self.get_step_url(testurl, module['name']))

        if modstrings:
            return '\n- [{!s}]({!s}) failed in {!s}'.format(self.job_test_name(job), testurl, ','.join(modstrings))
        elif job['result'] == 'failed':  # rare case: fail without module fails
            return '\n- [{!s}]({!s}) failed'.format(self.job_test_name(job), testurl)
        return ''

    def summarize_openqa_jobs(self, jobs):
        """Render a markdown summary of *jobs*, grouped by job group + flavor.

        Each group gets a header link to the openQA overview plus counts of
        passed/failed/unfinished tests and one bullet per failure.
        """
        groups = {}
        for job in jobs:
            gl = "{!s}@{!s}".format(self.emd(job['group']), self.emd(job['settings']['FLAVOR']))
            if gl not in groups:
                groupurl = osc.core.makeurl(self.openqa.baseurl, ['tests', 'overview'],
                                            {'version': job['settings']['VERSION'],
                                             'groupid': job['group_id'],
                                             'flavor': job['settings']['FLAVOR'],
                                             'distri': job['settings']['DISTRI'],
                                             'build': job['settings']['BUILD'],
                                             })
                groups[gl] = {'title': "__Group [{!s}]({!s})__\n".format(gl, groupurl),
                              'passed': 0, 'unfinished': 0, 'failed': []}

            job_summary = self.summarize_one_openqa_job(job)
            if job_summary is None:
                # no verdict yet -> unfinished
                groups[gl]['unfinished'] = groups[gl]['unfinished'] + 1
                continue
            # None vs ''
            if not len(job_summary):
                groups[gl]['passed'] = groups[gl]['passed'] + 1
                continue
            # if there is something to report, hold the request
            # TODO: what is this ?
            # qa_state = QA_FAILED
            # gmsg = groups[gl]

            groups[gl]['failed'].append(job_summary)

        msg = ''
        for group in sorted(groups.keys()):
            msg += "\n\n" + groups[group]['title']
            infos = []
            if groups[group]['passed']:
                infos.append("{:d} tests passed".format(groups[group]['passed']))
            if len(groups[group]['failed']):
                infos.append("{:d} tests failed".format(len(groups[group]['failed'])))
            if groups[group]['unfinished']:
                infos.append("{:d} unfinished tests".format(groups[group]['unfinished']))
            msg += "(" + ', '.join(infos) + ")\n"
            for fail in groups[group]['failed']:
                msg += fail
        return msg.rstrip('\n')

    def check_one_request(self, req):
        """Review a single request based on its aggregate openQA status.

        Returns True to accept, False to decline, None to leave the review
        open (still in progress, waiting for a build, or internal error).
        """
        try:
            jobs, qa_state = self.request_get_openqa_status(req)
            self.logger.debug("request %s state %s", req.reqid, qa_state)
            msg = None
            if qa_state == QA_UNKNOWN:
                # NOTE(review): '.action/source' looks like it should be
                # './/action/source' — confirm against ElementTree path syntax.
                incident_id = req.to_xml().findall('.action/source')[0].get('project').split(":")[-1]
                if not jobs and incident_id not in self.wait_for_build:
                    msg = "no openQA tests defined"
                    self.comment_write(state='done', message=msg, request=req, result='accepted')
                    return True
                else:
                    self.logger.debug("request {} waits for build".format(req.reqid))
            elif qa_state == QA_FAILED or qa_state == QA_PASSED:
                if qa_state == QA_PASSED:
                    msg = "openQA tests passed\n"
                    result = 'accepted'
                    ret = True
                else:
                    msg = "openQA tests problematic\n"
                    result = 'declined'
                    ret = False

                msg += self.summarize_openqa_jobs(jobs)
                self.comment_write(state='done', message=msg, result=result, request=req)
                return ret
            elif qa_state == QA_INPROGRESS:
                self.logger.info("request %s still in progress", req.reqid)
            else:
                # was: Exception("unknown QA state %d", qa_state) — the second
                # argument was never interpolated; format the message properly.
                raise Exception("unknown QA state %d" % qa_state)

        except Exception:
            import traceback
            self.logger.error("unhandled exception in openQA Bot")
            self.logger.error(traceback.format_exc())
            return None

        # in progress / waiting for build: leave the review open
        return

    def find_obs_request_comment(self, request_id=None, project_name=None):
        """Return previous comments (should be one)."""
        existing = self.commentapi.get_comments(request_id=request_id, project_name=project_name)
        comment, info = self.commentapi.comment_find(existing, self.bot_name)
        if not comment:
            return {}
        # we only care for two fields
        return {'id': comment['id'], 'revision': info['revision']}

    def check_product_arch(self, job, product_prefix, pmap, arch):
        """Build openQA POST payloads for one product/arch combination.

        Returns [] when the incident does not touch a matching channel, or
        when the incident build is not known yet (the incident is then queued
        in wait_for_build for a later run).
        """
        need = False
        settings = {'VERSION': pmap['version']}
        settings['ARCH'] = arch if arch else 'x86_64'
        settings['DISTRI'] = pmap.get('distri', 'sle')
        issues = pmap.get('issues', {})
        issues['OS_TEST_ISSUES'] = issues.get('OS_TEST_ISSUES', product_prefix)
        required_issue = pmap.get('required_issue', False)
        for key, prefix in issues.items():
            # self.logger.debug("KP {} {}".format(key, prefix) + str(job))
            channel = prefix
            if arch:
                channel += arch
            # incident ships this channel -> record it under the issue variable
            if channel in job['channels']:
                settings[key] = str(job['id'])
                need = True
        if required_issue:
            if required_issue not in settings:
                need = False

        if not need:
            return []

        product_key = product_prefix
        if arch:
            product_key += arch
        update = self.project_settings[product_key]
        update.apiurl = self.apiurl
        update.logger = self.logger
        posts = []
        for j in update.settings(
                update.maintenance_project + ':' + str(job['id']),
                product_key):
            if not job.get('openqa_build'):
                job['openqa_build'] = update.get_max_revision(job)
            if not job.get('openqa_build'):
                # build not known yet - postpone this incident
                self.wait_for_build.add(str(job['id']))
                return []
            self.incident_repos.setdefault(product_key, dict())[
                str(job['id'])] = job.get('openqa_build')
            j['BUILD'] += '.' + str(job['openqa_build'])
            j.update(settings)
            # kGraft jobs can have different version
            if 'real_version' in j:
                j['VERSION'] = j['real_version']
                del j['real_version']
            posts.append(j)
        return posts

    def check_product(self, job, product_prefix):
        """Collect openQA POST payloads for *job* across the product's archs."""
        pmap = self.api_map[product_prefix]
        # With no archs configured, run the arch check once with arch=None.
        archs = pmap['archs'] if 'archs' in pmap else [None]
        posts = []
        for arch in archs:
            posts += self.check_product_arch(job, product_prefix, pmap, arch)

        self.logger.debug("Pmap: {} Posts: {}".format(pmap, posts))
        return posts

    def incident_openqa_jobs(self, s):
        """Latest relevant openQA jobs matching the given settings dict *s*."""
        query = {
            'distri': s['DISTRI'],
            'version': s['VERSION'],
            'arch': s['ARCH'],
            'flavor': s['FLAVOR'],
            'build': s['BUILD'],
            'scope': 'relevant',
            'latest': '1',
        }
        return self.openqa.openqa_request('GET', 'jobs', query)['jobs']

    # for SUSE we use mesh, for openSUSE we limit the jobs to open release requests
    def check_opensuse_incidents(self):
        """Schedule openQA jobs for every open openSUSE maintenance release request.

        Also records the incident numbers per target project into that
        project's OS_TEST_ISSUES setting.
        """
        # NOTE: this local was previously named `requests`, shadowing the
        # `requests` HTTP module used by sibling methods.
        release_requests = dict()  # collecting unique requests
        self.wait_for_build = set()
        for prj in self.tgt_repo[self.openqa.baseurl].keys():
            for r in self.ids_project(prj, 'maintenance_release'):
                release_requests[r.reqid] = r

        # to be stored in settings
        issues = dict()
        for reqid in sorted(release_requests.keys()):
            req = release_requests[reqid]
            types = set(a.type for a in req.actions)
            if 'maintenance_release' not in types:
                continue

            src_prjs = set(a.src_project for a in req.actions)
            if len(src_prjs) != 1:
                raise Exception("can't handle maintenance_release from different incidents")
            build = src_prjs.pop()
            incident_id = build.split(':')[-1]
            tgt_prjs = set()
            for a in req.actions:
                prj = a.tgt_project
                # ignore e.g. Backports
                if prj not in self.project_settings:
                    continue

                issues.setdefault(prj, set()).add(incident_id)
                tgt_prjs.add(prj)

            self.test_job({'project': build, 'id': incident_id, 'channels': list(tgt_prjs)})

        for prj in self.tgt_repo[self.openqa.baseurl].keys():
            s = self.tgt_repo[self.openqa.baseurl][prj]['settings']
            s['OS_TEST_ISSUES'] = ','.join(sorted(issues.get(prj, set())))

    def check_suse_incidents(self):
        """Walk all active SMELT incidents and schedule openQA jobs for each."""
        self.wait_for_build = set()
        active = requests.get('https://maintenance.suse.de/api/incident/active/').json()
        for inc in active:
            self.logger.info("Incident number: {}".format(inc))

            mesh_job = requests.get('https://maintenance.suse.de/api/incident/' + inc).json()

            # finished incidents need no further testing
            if mesh_job['meta']['state'] in ['final', 'gone']:
                continue
            # required in mesh_job: project, id, channels
            self.test_job(mesh_job['base'])

    def test_job(self, mesh_job):
        """Schedule (or re-check) openQA jobs for one incident.

        *mesh_job* must carry at least 'project', 'id' and 'channels'.
        Posts new isos to openQA when no jobs exist yet for the current
        build, then writes a state comment on the incident project.
        """
        self.logger.debug("Called test_job with: {}".format(mesh_job))
        incident_project = str(mesh_job['project'])
        try:
            comment_info = self.find_obs_request_comment(project_name=incident_project)
        except HTTPError as e:
            self.logger.debug("Couldn't load comments - {}".format(e))
            return
        # build number recorded in our previous comment; acts as a marker
        # against re-posting isos for the same build
        comment_build = str(comment_info.get('revision', ''))

        openqa_posts = []
        for prod in self.api_map.keys():
            self.logger.debug("{} -- product in apimap".format(prod))
            openqa_posts += self.check_product(mesh_job, prod)
        openqa_jobs = []
        for s in openqa_posts:
            if 'skip_job' in s:
                self.wait_for_build.add(str(mesh_job['id']))
                continue
            jobs = self.incident_openqa_jobs(s)
            # take the project comment as marker for not posting jobs
            if not len(jobs) and comment_build != str(mesh_job['openqa_build']):
                if self.dryrun:
                    self.logger.info('WOULD POST:{}'.format(pformat(json.dumps(s, sort_keys=True))))
                else:
                    self.logger.info("Posted: {}".format(pformat(json.dumps(s, sort_keys=True))))
                    self.openqa.openqa_request('POST', 'isos', data=s, retries=1)
                    # re-query so freshly scheduled jobs are included below
                    openqa_jobs += self.incident_openqa_jobs(s)
            else:
                self.logger.info("{} got {}".format(pformat(s), len(jobs)))
                openqa_jobs += jobs

        self.openqa_jobs[incident_project] = openqa_jobs

        if len(openqa_jobs) == 0:
            self.logger.debug("No openqa jobs defined")
            return
        # print openqa_jobs
        msg = self.summarize_openqa_jobs(openqa_jobs)
        state = 'seen'
        result = 'none'
        qa_status = self.calculate_qa_status(openqa_jobs)
        if qa_status == QA_PASSED:
            result = 'accepted'
            state = 'done'
        if qa_status == QA_FAILED:
            result = 'declined'
            state = 'done'
        # record state/result plus the build we acted on so the next run can
        # skip re-posting for the same build (see comment_build above)
        self.comment_write(project=str(incident_project), state=state,
                           result=result, message=msg,
                           info_extra={'revision': str(mesh_job.get('openqa_build'))})
class TestComment(unittest.TestCase):
    """Unit tests for CommentAPI marker handling and truncation."""

    def setUp(self):
        self.api = CommentAPI('bogus')
        self.bot = type(self).__name__
        plain_marked = '<!-- {} -->\n\nshort comment'.format(self.bot)
        info_marked = '<!-- {} foo=bar distro=openSUSE -->\n\nshort comment'.format(self.bot)
        self.comments = {
            1: {'comment': plain_marked},
            2: {'comment': info_marked},
        }

    def test_truncate(self):
        comment = "string of text"
        # every target length must be hit exactly on plain text
        for length in xrange(len(comment) + 1):
            shortened = self.api.truncate(comment, length=length)
            print(shortened)
            self.assertEqual(len(shortened), length)

    def test_truncate_pre(self):
        comment = """
Some text.

<pre>
bar
mar
car
</pre>

## section 2

<pre>
more
lines
than
you
can
handle
</pre>
""".strip()

        suffix_len = len('...\n</pre>')
        for length in xrange(len(comment) + suffix_len):
            shortened = self.api.truncate(comment, length=length)
            print('=' * 80)
            print(shortened)
            self.assertTrue(len(shortened) <= length, '{} <= {}'.format(len(shortened), length))
            # opening and closing pre tags must stay balanced
            self.assertEqual(shortened.count('<pre>'), shortened.count('</pre>'))
            # no partially truncated html tag may survive
            self.assertFalse(len(re.findall(r'</?\w+[^\w>]', shortened)))
            tag_count = shortened.count('<pre>') + shortened.count('</pre>')
            self.assertEqual(tag_count, shortened.count('<'))
            self.assertEqual(tag_count, shortened.count('>'))

    def test_add_marker(self):
        plain = self.api.add_marker(COMMENT, self.bot)
        self.assertEqual(plain, self.comments[1]['comment'])

        with_info = self.api.add_marker(COMMENT, self.bot, COMMENT_INFO)
        self.assertEqual(with_info, self.comments[2]['comment'])

    def test_remove_marker(self):
        # unmarked text passes through untouched
        self.assertEqual(self.api.remove_marker(COMMENT), COMMENT)
        # both marker variants strip back to the bare comment
        self.assertEqual(self.api.remove_marker(self.comments[1]['comment']), COMMENT)
        self.assertEqual(self.api.remove_marker(self.comments[2]['comment']), COMMENT)

    def test_comment_find(self):
        comment, info = self.api.comment_find(self.comments, self.bot)
        self.assertEqual(comment, self.comments[1])

        comment, info = self.api.comment_find(self.comments, self.bot, COMMENT_INFO)
        self.assertEqual(comment, self.comments[2])
        self.assertEqual(info, COMMENT_INFO)

        # a partial info dict still matches and yields the full stored info
        info_partial = dict(COMMENT_INFO)
        del info_partial['foo']
        comment, info = self.api.comment_find(self.comments, self.bot, info_partial)
        self.assertEqual(comment, self.comments[2])
        self.assertEqual(info, COMMENT_INFO)
class TestCommentOBS(OBSLocalTestCase):
    """Integration tests for CommentAPI against a local OBS instance."""

    def setUp(self):
        super(TestCommentOBS, self).setUp()
        self.api = CommentAPI(self.apiurl)
        # Ensure different test runs operate in unique namespace.
        self.bot = '::'.join([type(self).__name__, str(random.getrandbits(8))])

    def test_basic(self):
        """Add, find and delete a single marked comment."""
        self.osc_user('staging-bot')

        self.assertFalse(self.comments_filtered(self.bot)[0])

        self.assertTrue(self.api.add_comment(
            project_name=PROJECT, comment=self.api.add_marker(COMMENT, self.bot)))
        comment, _ = self.comments_filtered(self.bot)
        self.assertTrue(comment)

        self.assertTrue(self.api.delete(comment['id']))
        self.assertFalse(self.comments_filtered(self.bot)[0])

    def test_delete_nested(self):
        """delete_from() must remove a comment together with its replies."""
        self.osc_user('staging-bot')
        comment_marked = self.api.add_marker(COMMENT, self.bot)

        # Allow for existing comments by basing assertion on delta from initial count.
        comment_count = len(self.api.get_comments(project_name=PROJECT))
        self.assertFalse(self.comments_filtered(self.bot)[0])

        self.assertTrue(self.api.add_comment(project_name=PROJECT, comment=comment_marked))
        comment, _ = self.comments_filtered(self.bot)
        self.assertTrue(comment)

        for _ in range(3):
            self.assertTrue(self.api.add_comment(
                project_name=PROJECT, comment=comment_marked, parent_id=comment['id']))

        comments = self.api.get_comments(project_name=PROJECT)
        parented_count = 0
        for comment in comments.values():
            if comment['parent']:
                parented_count += 1

        self.assertEqual(parented_count, 3)
        self.assertTrue(len(comments) == comment_count + 4)

        self.api.delete_from(project_name=PROJECT)
        self.assertFalse(len(self.api.get_comments(project_name=PROJECT)))

    def test_delete_batch(self):
        """delete_from_where_user() only removes the given user's comments."""
        users = ['factory-auto', 'repo-checker', 'staging-bot']
        for user in users:
            self.osc_user(user)
            # (removed an unused `from osc import conf` that was re-imported
            # on every loop iteration and never referenced)
            bot = '::'.join([self.bot, user])
            comment = self.api.add_marker(COMMENT, bot)

            self.assertFalse(self.comments_filtered(bot)[0])
            self.assertTrue(self.api.add_comment(project_name=PROJECT, comment=comment))
            self.assertTrue(self.comments_filtered(bot)[0])

        # Allow for existing comments by basing assertion on delta from initial count.
        comment_count = len(self.api.get_comments(project_name=PROJECT))
        self.assertTrue(comment_count >= len(users))

        self.api.delete_from_where_user(users[0], project_name=PROJECT)
        self.assertTrue(len(self.api.get_comments(project_name=PROJECT)) == comment_count - 1)

        self.api.delete_from(project_name=PROJECT)
        self.assertFalse(len(self.api.get_comments(project_name=PROJECT)))

    def comments_filtered(self, bot):
        """Return (comment, info) for the newest comment carrying *bot*'s marker."""
        comments = self.api.get_comments(project_name=PROJECT)
        return self.api.comment_find(comments, bot)
 def setUp(self):
     super(TestCommentOBS, self).setUp()
     self.api = CommentAPI(self.apiurl)
     # Ensure different test runs operate in unique namespace.
     self.bot = '::'.join([type(self).__name__, str(random.getrandbits(8))])
예제 #52
0
class AcceptCommand(object):
    """Accept a reviewed staging project: close its requests, maintain
    link-package containers, notify via comments and disable the build."""

    def __init__(self, api):
        # api: staging API facade; comment: helper for OBS project comments
        self.api = api
        self.comment = CommentAPI(self.api.apiurl)

    def find_new_requests(self, project):
        """Return [{'id': int, 'packages': [str]}] for every request in state
        'new' that submits to *project*."""
        query = "match=state/@name='new'+and+(action/target/@project='{}'+and+action/@type='submit')".format(project)
        url = self.api.makeurl(['search', 'request'], query)

        f = http_GET(url)
        root = ET.parse(f).getroot()

        rqs = []
        for rq in root.findall('request'):
            pkgs = []
            actions = rq.findall('action')
            for action in actions:
                targets = action.findall('target')
                for t in targets:
                    pkgs.append(str(t.get('package')))

            rqs.append({'id': int(rq.get('id')), 'packages': pkgs})
        return rqs

    def perform(self, project, force=False):
        """Accept the staging project for review and submit to Factory /
        openSUSE 13.2 ...

        Then disable the build to disabled
        :param project: staging project we are working with
        :param force: accept even when the project status check fails

        """

        status = self.api.check_project_status(project)

        if not status:
            print('The project "{}" is not yet acceptable.'.format(project))
            if not force:
                return False

        meta = self.api.get_prj_pseudometa(project)
        requests = []
        packages = []
        for req in meta['requests']:
            self.api.rm_from_prj(project, request_id=req['id'], msg='ready to accept')
            requests.append(req['id'])
            packages.append(req['package'])
            msg = 'Accepting staging review for {}'.format(req['package'])
            print(msg)

            # remember the spec files before accepting so create_new_links
            # can detect containers that disappear with this submission
            oldspecs = self.api.get_filelist_for_package(pkgname=req['package'],
                                                         project=self.api.project,
                                                         extension='spec')
            change_request_state(self.api.apiurl,
                                 str(req['id']),
                                 'accepted',
                                 message='Accept to %s' % self.api.project)
            self.create_new_links(self.api.project, req['package'], oldspecs)

        # A single comment should be enough to notify everybody, since
        # they are already mentioned in the comments created by
        # select/unselect
        pkg_list = ", ".join(packages)
        cmmt = 'Project "{}" accepted.' \
               ' The following packages have been submitted to {}: {}.'.format(project,
                                                                               self.api.project,
                                                                               pkg_list)
        self.comment.add_comment(project_name=project, comment=cmmt)

        # XXX CAUTION - AFAIK the 'accept' command is expected to clean the messages here.
        self.comment.delete_from(project_name=project)

        # stop building the accepted staging (and its :DVD flavor if any)
        self.api.build_switch_prj(project, 'disable')
        if self.api.item_exists(project + ':DVD'):
            self.api.build_switch_prj(project + ':DVD', 'disable')

        return True

    def cleanup(self, project):
        """Delete leftover packages from *project* except the configured
        no-cleanup list; returns False when the project does not exist."""
        if not self.api.item_exists(project):
            return False

        pkglist = self.api.list_packages(project)
        clean_list = set(pkglist) - set(self.api.cstaging_nocleanup)

        for package in clean_list:
            print "[cleanup] deleted %s/%s" % (project, package)
            delete_package(self.api.apiurl, project, package, force=True, msg="autocleanup")
        return True

    def accept_other_new(self):
        """Accept all 'new' submit requests against the main (and nonfree)
        project directly; returns True when anything was accepted."""
        changed = False
        rqlist = self.find_new_requests(self.api.project)
        if self.api.cnonfree:
            rqlist += self.find_new_requests(self.api.cnonfree)

        for req in rqlist:
            oldspecs = self.api.get_filelist_for_package(pkgname=req['packages'][0], project=self.api.project, extension='spec')
            print 'Accepting request %d: %s' % (req['id'], ','.join(req['packages']))
            change_request_state(self.api.apiurl, str(req['id']), 'accepted', message='Accept to %s' % self.api.project)
            # Check if all .spec files of the package we just accepted has a package container to build
            self.create_new_links(self.api.project, req['packages'][0], oldspecs)
            changed = True

        return changed

    def create_new_links(self, project, pkgname, oldspeclist):
        """Sync link-package containers with the .spec files of *pkgname*:
        delete containers whose spec vanished, create links for new specs."""
        filelist = self.api.get_filelist_for_package(pkgname=pkgname, project=project, extension='spec')
        removedspecs = set(oldspeclist) - set(filelist)
        for spec in removedspecs:
            # Deleting all the packages that no longer have a .spec file
            url = self.api.makeurl(['source', project, spec[:-5]])
            print "Deleting package %s from project %s" % (spec[:-5], project)
            try:
                http_DELETE(url)
            except urllib2.HTTPError, err:
                if err.code == 404:
                    # the package link was not yet created, which was likely a mistake from earlier
                    pass
                else:
                    # If the package was there bug could not be delete, raise the error
                    raise
        if len(filelist) > 1:
            # There is more than one .spec file in the package; link package containers as needed
            origmeta = self.api.load_file_content(project, pkgname, '_meta')
            for specfile in filelist:
                package = specfile[:-5]  # stripping .spec off the filename gives the packagename
                if package == pkgname:
                    # This is the original package and does not need to be linked to itself
                    continue
                # Check if the target package already exists, if it does not, we get a HTTP error 404 to catch
                if not self.api.item_exists(project, package):
                    print "Creating new package %s linked to %s" % (package, pkgname)
                    # new package does not exist. Let's link it with new metadata
                    newmeta = re.sub(r'(<package.*name=.){}'.format(pkgname),
                                     r'\1{}'.format(package),
                                     origmeta)
                    newmeta = re.sub(r'<devel.*>',
                                     r'<devel package="{}"/>'.format(pkgname),
                                     newmeta)
                    newmeta = re.sub(r'<bcntsynctag>.*</bcntsynctag>',
                                     r'',
                                     newmeta)
                    newmeta = re.sub(r'</package>',
                                     r'<bcntsynctag>{}</bcntsynctag></package>'.format(pkgname),
                                     newmeta)
                    self.api.save_file_content(project, package, '_meta', newmeta)
                    link = "<link package=\"{}\" cicount=\"copy\" />".format(pkgname)
                    self.api.save_file_content(project, package, '_link', link)
        return True
class OpenQABot(ReviewBot.ReviewBot):
    """ check ABI of library packages
    """
    def __init__(self, *args, **kwargs):
        """Set up bot state on top of the generic ReviewBot."""
        ReviewBot.ReviewBot.__init__(self, *args, **kwargs)

        self.force = False
        self.openqa = None
        self.commentapi = CommentAPI(self.apiurl)
        # per target project: newest BUILD value / list of scheduled jobs
        self.update_test_builds = {}
        self.openqa_jobs = {}

    def gather_test_builds(self):
        """Record the newest BUILD per target repo plus its openQA jobs."""
        for prj, repo_config in TARGET_REPO_SETTINGS[self.openqa.baseurl].items():
            newest_build = 0
            newest_id = 0
            for job in self.jobs_for_target(repo_config):
                # avoid going backwards in job ID
                if newest_id > int(job['id']):
                    continue
                newest_build = job['settings']['BUILD']
                newest_id = int(job['id'])
            self.update_test_builds[prj] = newest_build
            jobs = self.jobs_for_target(repo_config, build=newest_build)
            self.openqa_jobs[prj] = jobs
            if self.calculate_qa_status(jobs) == QA_INPROGRESS:
                self.pending_target_repos.add(prj)

    # reimplemention from baseclass
    def check_requests(self):
        """Main driver: sync incident jobs, review requests that already have
        openQA jobs, then retrigger target-repo test runs."""

        if self.apiurl.endswith('.suse.de'):
            self.check_suse_incidents()

        # first calculate the latest build number for current jobs
        self.pending_target_repos = set()
        self.gather_test_builds()

        started = []
        # then check progress on running incidents
        for req in self.requests:
            jobs = self.request_get_openqa_jobs(req,
                                                incident=True,
                                                test_repo=True)
            ret = self.calculate_qa_status(jobs)
            if ret != QA_UNKNOWN:
                started.append(req)

        # temporarily narrow self.requests so the base class only reviews
        # requests whose openQA jobs already exist, then restore it
        all_requests = self.requests
        self.requests = started
        ReviewBot.ReviewBot.check_requests(self)

        self.requests = all_requests

        skipped_one = False
        # now make sure the jobs are for current repo
        for prj, u in TARGET_REPO_SETTINGS[self.openqa.baseurl].items():
            if prj in self.pending_target_repos:
                skipped_one = True
                continue
            self.trigger_build_for_target(prj, u)

        # do not schedule new incidents unless we finished
        # last wave
        if skipped_one:
            return

        ReviewBot.ReviewBot.check_requests(self)

    def check_action_maintenance_release(self, req, a):
        """Review hook for maintenance_release actions; effectively a no-op
        (always returns None) pending the TODO below."""
        # we only look at the binaries of the patchinfo
        if a.src_package != 'patchinfo':
            return None

        if a.tgt_project not in PROJECT_OPENQA_SETTINGS:
            self.logger.warn("not handling %s" % a.tgt_project)
            return None

        # TODO - this needs to be moved
        return None

        # NOTE(review): everything below is unreachable because of the
        # unconditional return above, and it references names (`update`,
        # `settings`) that are not defined in this scope — kept only until
        # the TODO relocation happens.
        packages = []
        # patchinfo collects the binaries and is build for an
        # unpredictable architecture so we need iterate over all
        url = osc.core.makeurl(
            self.apiurl,
            ('build', a.src_project, a.tgt_project.replace(':', '_')))
        root = ET.parse(osc.core.http_GET(url)).getroot()
        for arch in [n.attrib['name'] for n in root.findall('entry')]:
            query = {'nosource': 1}
            url = osc.core.makeurl(
                self.apiurl,
                ('build', a.src_project, a.tgt_project.replace(
                    ':', '_'), arch, a.src_package),
                query=query)

            root = ET.parse(osc.core.http_GET(url)).getroot()

            for binary in root.findall('binary'):
                m = pkgname_re.match(binary.attrib['filename'])
                if m:
                    # can't use arch here as the patchinfo mixes all
                    # archs
                    packages.append(
                        Package(m.group('name'), m.group('version'),
                                m.group('release')))

        if not packages:
            raise Exception("no packages found")

        update.calculate_lastest_good_updates(self.openqa, settings)

        return None

    # check a set of repos for their primary checksums
    @staticmethod
    def calculate_repo_hash(repos):
        """Return one md5 hex digest combining the primary-metadata checksums
        of all *repos*; changes whenever any repo content changes.

        Uses hashlib instead of the deprecated (and removed-in-Python-3)
        `md5` module, and encodes explicitly so it works on bytes-strict
        hash implementations.
        """
        import hashlib  # local import: replaces the legacy `md5` module
        m = hashlib.md5()
        # if you want to force it, increase this number
        m.update('b'.encode('utf-8'))
        for url in repos:
            url += '/repodata/repomd.xml'
            root = ET.parse(osc.core.http_GET(url)).getroot()
            cs = root.find(
                './/{http://linux.duke.edu/metadata/repo}data[@type="primary"]/{http://linux.duke.edu/metadata/repo}checksum'
            )
            m.update(cs.text.encode('utf-8'))
        return m.hexdigest()

    def is_incident_in_testing(self, incident):
        """Look up the open release request for *incident* (None if staged)."""
        # hard coded for now as we only run this code for SUSE Maintenance workflow
        project = 'SUSE:Maintenance:{}'.format(incident)
        xpath = ("(state/@name='review') and "
                 "(action/source/@project='{}' and action/@type='maintenance_release')").format(project)
        matches = osc.core.search(self.apiurl, request=xpath)['request']
        # return the one and only (or None)
        return matches.find('request')

    def calculate_incidents(self, incidents):
        """
        get incident numbers from SUSE:Maintenance:Test project
        returns a list of (openQA var name, comma-separated numbers) tuples

        NOTE: the original rebound the `incidents` parameter (and the
        `incident` loop variable) inside the loop that iterates over it;
        distinct names are used here to avoid that shadowing.
        """
        l_incidents = []
        for kind, prj in incidents.items():
            packages = osc.core.meta_get_packagelist(self.apiurl, prj)
            released = []
            # filter out incidents in staging
            for package in packages:
                # remove patchinfo. prefix
                incident = package.replace('_', '.').split('.')[1]
                req = self.is_incident_in_testing(incident)
                # without release request it's in staging
                if req is None:
                    continue

                # skip kgraft patches from aggregation
                req_ = osc.core.Request()
                req_.read(req)
                src_prjs = set(a.src_project for a in req_.actions)
                if SUSEUpdate.kgraft_target(self.apiurl, src_prjs.pop()):
                    continue

                released.append(incident)

            l_incidents.append((kind + '_TEST_ISSUES', ','.join(released)))

        return l_incidents

    def jobs_for_target(self, data, build=None):
        """Fetch relevant openQA jobs for the target repo described by *data*."""
        s = data['settings'][0]
        query = dict(
            distri=s['DISTRI'],
            version=s['VERSION'],
            arch=s['ARCH'],
            flavor=s['FLAVOR'],
            scope='relevant',
            latest='1',
        )
        # a concrete build pins the query; otherwise match by test name
        if build:
            query['build'] = build
        else:
            query['test'] = data['test']
        return self.openqa.openqa_request('GET', 'jobs', query)['jobs']

    # we don't know the current BUILD and querying all jobs is too expensive
    # so we need to check for one known TEST first
    # if that job doesn't contain the proper hash, we trigger a new one
    # and then we know the build
    def trigger_build_for_target(self, prj, data):
        """Schedule a fresh openQA run for target repo *prj* unless a run
        with the current repo hash already happened today."""
        today = date.today().strftime("%Y%m%d")
        repohash = self.calculate_repo_hash(data['repos'])
        buildnr = None
        j = self.jobs_for_target(data)
        for job in j:
            if job['settings'].get('REPOHASH', '') == repohash:
                # take the last in the row
                buildnr = job['settings']['BUILD']
        self.update_test_builds[prj] = buildnr
        # ignore old build numbers, we want a fresh run every day
        # to find regressions in the tests and to get data about
        # randomly failing tests
        if buildnr and buildnr.startswith(today):
            return

        buildnr = 0

        # not found, then check for the next free build nr
        for job in j:
            build = job['settings']['BUILD']
            if build and build.startswith(today):
                try:
                    nr = int(build.split('-')[1])
                    if nr > buildnr:
                        buildnr = nr
                except BaseException:
                    continue

        # build numbers follow the YYYYMMDD-N pattern
        buildnr = "%s-%d" % (today, buildnr + 1)

        for s in data['settings']:
            # now schedule it for real
            if 'incidents' in data.keys():
                for x, y in self.calculate_incidents(data['incidents']):
                    s[x] = y
            s['BUILD'] = buildnr
            s['REPOHASH'] = repohash
            self.logger.debug(pformat(s))
            if not self.dryrun:
                try:
                    self.openqa.openqa_request('POST',
                                               'isos',
                                               data=s,
                                               retries=1)
                except Exception as e:
                    # scheduling failures are logged but not fatal
                    self.logger.debug(e)
        self.update_test_builds[prj] = buildnr

    def check_source_submission(self, src_project, src_package, src_rev,
                                dst_project, dst_package):
        """Delegate plain submit reviews to the base class unchanged."""
        ReviewBot.ReviewBot.check_source_submission(self, src_project,
                                                    src_package, src_rev,
                                                    dst_project, dst_package)

    def request_get_openqa_jobs(self, req, incident=True, test_repo=False):
        """Collect the openQA jobs relevant to *req*.

        Returns None for requests without a maintenance_release action.
        """
        types = set(a.type for a in req.actions)
        if 'maintenance_release' not in types:
            return None

        src_prjs = set(a.src_project for a in req.actions)
        if len(src_prjs) != 1:
            raise Exception(
                "can't handle maintenance_release from different incidents"
            )
        build = src_prjs.pop()
        tgt_prjs = set(a.tgt_project for a in req.actions)
        jobs = []
        if incident:
            jobs += self.openqa_jobs[build]
        # repo settings are invariant across target projects
        repo_settings = TARGET_REPO_SETTINGS.get(self.openqa.baseurl, {})
        for prj in sorted(tgt_prjs):
            if test_repo and prj in repo_settings:
                jobs += self.openqa_jobs[prj]

        return jobs

    def calculate_qa_status(self, jobs=None):
        """Fold a list of openQA jobs into a single QA_* verdict."""
        if not jobs:
            return QA_UNKNOWN

        latest = dict()
        in_progress = False
        has_failed = False
        for job in jobs:
            # cloned jobs are superseded by their clone
            if job['clone_id']:
                continue
            name = job['name']
            # skip jobs older than one we already saw for this test name
            if name in latest and int(job['id']) < int(latest[name]['id']):
                continue
            latest[name] = job
            if job['state'] not in ('cancelled', 'done'):
                in_progress = True
            elif job['result'] != 'passed' and job['result'] != 'softfailed':
                has_failed = True

        if not latest:
            return QA_UNKNOWN
        if in_progress:
            return QA_INPROGRESS
        if has_failed:
            return QA_FAILED

        return QA_PASSED

    def add_comment(self, msg, state, request_id=None, result=None):
        """Leave (or refresh) the bot's state comment on a request."""
        if not self.do_comments:
            return

        result_part = ' result=%s' % result if result else ''
        comment = "<!-- openqa state=%s%s -->\n" % (state, result_part)
        comment += "\n" + msg

        info = self.find_obs_request_comment(request_id=request_id)
        comment_id = info.get('id', None)

        # skip same-state updates of identical line count to avoid churn
        if state == info.get('state', 'missing'):
            if info['comment'].count('\n') == comment.count('\n'):
                self.logger.debug(
                    "not worth the update, previous comment %s is state %s",
                    comment_id, info['state'])
                return

        self.logger.debug("adding comment to %s, state %s result %s",
                          request_id, state, result)
        self.logger.debug("message: %s", msg)
        if not self.dryrun:
            # replace rather than append: drop the previous bot comment first
            if comment_id is not None:
                self.commentapi.delete(comment_id)
            self.commentapi.add_comment(request_id=request_id,
                                        comment=str(comment))

    # escape markdown
    @staticmethod
    def emd(text):
        """Escape underscores so *text* renders literally in markdown.

        (Parameter was previously named `str`, shadowing the builtin; the
        replacement string is now a raw literal instead of the invalid
        escape sequence '\\_'.)
        """
        return text.replace('_', r'\_')

    def get_step_url(self, testurl, modulename):
        """Markdown link pointing at the first failed step of a module."""
        failurl = testurl + '/modules/%s/fails' % modulename
        details = requests.get(failurl).json()
        # fall back to step 1 when openQA reports no failing step
        step = details.get('first_failed_step', 1)
        return "[%s](%s#step/%s/%d)" % (self.emd(modulename), testurl,
                                        modulename, step)

    def job_test_name(self, job):
        """Human-readable 'TEST@MACHINE' label for an openQA job."""
        settings = job['settings']
        return "%s@%s" % (self.emd(settings['TEST']), self.emd(settings['MACHINE']))

    def summarize_one_openqa_job(self, job):
        """One markdown bullet for *job*: None if unfinished, '' if passed."""
        testurl = osc.core.makeurl(self.openqa.baseurl,
                                   ['tests', str(job['id'])])
        result = job['result']
        if result not in ['passed', 'failed', 'softfailed']:
            # 'none' means the job has not produced a result yet
            if result == 'none':
                return None
            return '\n- [%s](%s) is %s' % (self.job_test_name(job), testurl,
                                           result)

        failed_modules = [
            self.get_step_url(testurl, module['name'])
            for module in job['modules'] if module['result'] == 'failed'
        ]

        if failed_modules:
            return '\n- [%s](%s) failed in %s' % (
                self.job_test_name(job), testurl, ','.join(failed_modules))
        if result == 'failed':  # rare case: fail without module fails
            return '\n- [%s](%s) failed' % (self.job_test_name(job), testurl)
        return ''

    def summarize_openqa_jobs(self, jobs):
        """Build a markdown summary of *jobs*, grouped by job group + flavor.

        Each group section lists passed/failed/unfinished counts followed by
        one bullet per failed job.  (Two dead assignments from the original
        — `qa_state = QA_FAILED` and `gmsg = groups[gl]` — were removed; the
        values were never read.)
        """
        groups = dict()
        for job in jobs:
            gl = "%s@%s" % (self.emd(
                job['group']), self.emd(job['settings']['FLAVOR']))
            if gl not in groups:
                groupurl = osc.core.makeurl(
                    self.openqa.baseurl, ['tests', 'overview'], {
                        'version': job['settings']['VERSION'],
                        'groupid': job['group_id'],
                        'flavor': job['settings']['FLAVOR'],
                        'distri': job['settings']['DISTRI'],
                        'build': job['settings']['BUILD'],
                    })
                groups[gl] = {
                    'title': "__Group [%s](%s)__\n" % (gl, groupurl),
                    'passed': 0,
                    'unfinished': 0,
                    'failed': []
                }

            job_summary = self.summarize_one_openqa_job(job)
            # None means the job is still running
            if job_summary is None:
                groups[gl]['unfinished'] = groups[gl]['unfinished'] + 1
                continue
            # '' (as opposed to None) means the job passed
            if not len(job_summary):
                groups[gl]['passed'] = groups[gl]['passed'] + 1
                continue
            # anything else is a failure bullet
            groups[gl]['failed'].append(job_summary)

        msg = ''
        for group in sorted(groups.keys()):
            msg += "\n\n" + groups[group]['title']
            infos = []
            if groups[group]['passed']:
                infos.append("%d tests passed" % groups[group]['passed'])
            if len(groups[group]['failed']):
                infos.append("%d tests failed" % len(groups[group]['failed']))
            if groups[group]['unfinished']:
                infos.append("%d unfinished tests" %
                             groups[group]['unfinished'])
            msg += "(" + ', '.join(infos) + ")\n"
            for fail in groups[group]['failed']:
                msg += fail

        return msg

    def check_one_request(self, req):
        """Review one request against its openQA results.

        Returns True to accept, False to decline, and None when no decision
        can be made yet (tests in progress, waiting for the test repo, or an
        unexpected error occurred).  Side effects: may post/delete comments
        and set the review state to 'new' while tests are running.
        """
        ret = None

        try:
            jobs = self.request_get_openqa_jobs(req)
            qa_state = self.calculate_qa_status(jobs)
            self.logger.debug("request %s state %s", req.reqid, qa_state)
            msg = None
            if self.force or qa_state == QA_UNKNOWN:
                ret = ReviewBot.ReviewBot.check_one_request(self, req)
                jobs = self.request_get_openqa_jobs(req)

                if self.force:
                    # make sure to delete previous comments if we're forcing
                    info = self.find_obs_request_comment(request_id=req.reqid)
                    if 'id' in info:
                        self.logger.debug("deleting old comment %s",
                                          info['id'])
                        if not self.dryrun:
                            self.commentapi.delete(info['id'])

                if not jobs:
                    msg = "no openQA tests defined"
                    self.add_comment(msg,
                                     'done',
                                     request_id=req.reqid,
                                     result='accepted')
                    ret = True
                else:
                    # no notification until the result is done
                    osc.core.change_review_state(
                        self.apiurl,
                        req.reqid,
                        newstate='new',
                        by_group=self.review_group,
                        by_user=self.review_user,
                        message='now testing in openQA')
            elif qa_state == QA_FAILED or qa_state == QA_PASSED:
                # don't take test repo results into the calculation of total
                # this is for humans to decide which incident broke the test repo
                jobs += self.request_get_openqa_jobs(req,
                                                     incident=False,
                                                     test_repo=True)
                if self.calculate_qa_status(jobs) == QA_INPROGRESS:
                    self.logger.debug(
                        "incident tests for request %s are done, but need to wait for test repo",
                        req.reqid)
                    return
                if qa_state == QA_PASSED:
                    msg = "openQA tests passed\n"
                    result = 'accepted'
                    ret = True
                else:
                    msg = "openQA tests problematic\n"
                    result = 'declined'
                    ret = False

                msg += self.summarize_openqa_jobs(jobs)
                self.add_comment(msg,
                                 'done',
                                 result=result,
                                 request_id=req.reqid)
            elif qa_state == QA_INPROGRESS:
                self.logger.debug("request %s still in progress", req.reqid)
            else:
                # was: Exception("unknown QA state %d", qa_state) — the args
                # tuple was never %-formatted, so the state never appeared
                raise Exception("unknown QA state %d" % qa_state)

        except Exception:
            import traceback
            self.logger.error("unhandled exception in openQA Bot")
            self.logger.error(traceback.format_exc())
            ret = None

        return ret

    def find_obs_request_comment(self, request_id=None, project_name=None):
        """Return the bot's previous marker comment (should be at most one).

        The result dict carries id/state/result/comment/revision, or is empty
        when commenting is disabled or no marker comment exists.
        """
        if not self.do_comments:
            return {}
        comments = self.commentapi.get_comments(request_id=request_id,
                                                project_name=project_name)
        for entry in comments.values():
            match = comment_marker_re.match(entry['comment'])
            if match is None:
                continue
            return {
                'id': entry['id'],
                'state': match.group('state'),
                'result': match.group('result'),
                'comment': entry['comment'],
                'revision': match.group('revision')
            }
        return {}

    def check_product(self, job, product_prefix):
        """Build openQA POST settings for one product prefix and incident job.

        job: incident dict providing at least 'id' and 'channels'; its
        'openqa_build' key is filled in on first use.  Returns a list of
        settings dicts ready to POST to openQA (empty when nothing matches or
        the build revision cannot be determined).
        """
        pmap = API_MAP[product_prefix]
        posts = []
        for arch in pmap['archs']:
            need = False
            settings = {
                'VERSION': pmap['version'],
                'ARCH': arch,
                'DISTRI': 'sle'
            }
            # Work on a copy: the original mutated the 'issues' dict stored
            # inside the module-level API_MAP, leaking state between calls.
            issues = dict(pmap.get('issues', {}))
            issues['OS_TEST_ISSUES'] = product_prefix
            for key, prefix in issues.items():
                if prefix + arch in job['channels']:
                    settings[key] = str(job['id'])
                    need = True
            if need:
                u = PROJECT_OPENQA_SETTINGS[product_prefix + arch]
                u.apiurl = self.apiurl
                for s in u.settings(
                        u.maintenance_project() + ':' + str(job['id']),
                        product_prefix + arch, []):
                    if job.get('openqa_build') is None:
                        job['openqa_build'] = u.get_max_revision(job)
                    if job.get('openqa_build') is None:
                        # revision lookup failed — nothing sensible to post
                        return []
                    s['BUILD'] += '.' + str(job['openqa_build'])
                    s.update(settings)
                    posts.append(s)
        return posts

    def incident_openqa_jobs(self, s):
        """Query openQA for the latest relevant jobs matching settings dict s."""
        query = {
            'distri': s['DISTRI'],
            'version': s['VERSION'],
            'arch': s['ARCH'],
            'flavor': s['FLAVOR'],
            'build': s['BUILD'],
            'scope': 'relevant',
            'latest': '1'
        }
        response = self.openqa.openqa_request('GET', 'jobs', query)
        return response['jobs']

    def check_suse_incidents(self):
        """Poll maintenance.suse.de for active incidents and test each one.

        Incidents whose meta state is 'final' or 'gone' are skipped; the
        others are handed to test_job() via their 'base' dict.
        """
        for inc in requests.get(
                'https://maintenance.suse.de/api/incident/active/').json():
            # if not inc in ['5219']: continue
            # if not inc.startswith('52'): continue
            print inc
            # continue
            job = requests.get('https://maintenance.suse.de/api/incident/' +
                               inc).json()
            if job['meta']['state'] in ['final', 'gone']:
                continue
            # required in job: project, id, channels
            self.test_job(job['base'])

    def test_job(self, job):
        """Schedule/collect openQA jobs for one incident and comment the result.

        job is an incident 'base' dict providing at least 'project', 'id' and
        'channels' (see check_suse_incidents).  Posts missing openQA jobs,
        gathers existing ones, and maintains a marker comment on the incident
        project with the aggregated state.
        """
        incident_project = str(job['project'])
        # The bot's previous marker comment (if any) doubles as state storage.
        comment_info = self.find_obs_request_comment(
            project_name=incident_project)
        comment_id = comment_info.get('id', None)
        comment_build = str(comment_info.get('revision', ''))

        openqa_posts = []
        for prod in API_MAP.keys():
            openqa_posts += self.check_product(job, prod)
        openqa_jobs = []
        for s in openqa_posts:
            jobs = self.incident_openqa_jobs(s)
            # take the project comment as marker for not posting jobs
            if not len(jobs) and comment_build != str(job['openqa_build']):
                if self.dryrun:
                    print 'WOULD POST', json.dumps(s, sort_keys=True)
                else:
                    ret = self.openqa.openqa_request('POST',
                                                     'isos',
                                                     data=s,
                                                     retries=1)
                    openqa_jobs += self.incident_openqa_jobs(s)
            else:
                print s, 'got', len(jobs)
                openqa_jobs += jobs
        self.openqa_jobs[incident_project] = openqa_jobs
        if len(openqa_jobs) == 0:
            self.logger.debug("No openqa jobs defined")
            return
        # print openqa_jobs
        msg = self.summarize_openqa_jobs(openqa_jobs)
        # 'seen'/'none' until all jobs finished, then 'done' with a verdict.
        state = 'seen'
        result = 'none'
        qa_status = self.calculate_qa_status(openqa_jobs)
        if qa_status == QA_PASSED:
            result = 'accepted'
            state = 'done'
        if qa_status == QA_FAILED:
            result = 'declined'
            state = 'done'
        # Marker line lets find_obs_request_comment recognize our comment later.
        comment = "<!-- openqa state=%s result=%s revision=%s -->\n" % (
            state, result, job.get('openqa_build'))
        comment += "\nCC @coolo\n" + msg

        if comment_id and state != 'done':
            self.logger.debug("%s is already commented, wait until done",
                              incident_project)
            return
        if comment_info.get('comment', '') == comment:
            self.logger.debug("%s comment did not change", incident_project)
            return

        self.logger.debug("adding comment to %s, state %s", incident_project,
                          state)
        #self.logger.debug("message: %s", msg)
        if not self.dryrun:
            # Replace rather than edit: delete the old marker comment first.
            if comment_id is not None:
                self.commentapi.delete(comment_id)
            self.commentapi.add_comment(project_name=str(incident_project),
                                        comment=str(comment))
예제 #54
0
 def __init__(self, api):
     self.api = api
     self.comment = CommentAPI(api.apiurl)
예제 #55
0
class OpenQAReport(object):
    """Posts and maintains an openQA status comment on staging projects.

    Reads a staging project's status (broken packages plus openQA job
    results) via the staging API and mirrors it into an OBS comment marked
    with the '<!-- openQA status -->' signature.
    """
    def __init__(self, api):
        # api: staging API object; only apiurl, copenqa, cstaging, project,
        # makeurl and retried_GET are used here.
        self.api = api
        self.comment = CommentAPI(api.apiurl)

    def _package_url(self, package):
        """Markdown link to the live build log of a broken package."""
        link = "https://build.opensuse.org/package/live_build_log/%s/%s/%s/%s"
        link = link % (package["project"], package["package"], package["repository"], package["arch"])
        text = "[%s](%s)" % (package["arch"], link)
        return text

    def _openQA_url(self, job):
        """Markdown link to an openQA job, labelled with the last '-' part of its name."""
        test_name = job["name"].split("-")[-1]
        link = "%s/tests/%s" % (self.api.copenqa, job["id"])
        text = "[%s](%s)" % (test_name, link)
        return text

    def _openQA_module_url(self, job, module):
        """Markdown link to step 1 of a (failed) openQA module."""
        link = "%s/tests/%s/modules/%s/steps/1" % (self.api.copenqa, job["id"], module["name"])
        text = "[%s](%s)" % (module["name"], link)
        return text

    def old_enough(self, _date):
        """True when _date (naive UTC datetime) is at least MARGIN_HOURS old."""
        time_delta = datetime.utcnow() - _date
        safe_margin = timedelta(hours=MARGIN_HOURS)
        return safe_margin <= time_delta

    def get_info(self, project):
        """Fetch the JSON status of a staging project (full or short name)."""
        # Accept both '<cstaging>:X' and plain 'X'.
        _prefix = "{}:".format(self.api.cstaging)
        if project.startswith(_prefix):
            project = project.replace(_prefix, "")

        query = {"format": "json"}
        url = self.api.makeurl(("project", "staging_projects", self.api.project, project), query=query)
        info = json.load(self.api.retried_GET(url))
        return info

    def get_broken_package_status(self, info):
        """Broken packages of the project plus its subproject (if any)."""
        status = info["broken_packages"]
        subproject = info["subproject"]
        if subproject:
            status.extend(subproject["broken_packages"])
        return status

    def get_openQA_status(self, info):
        """openQA jobs of the project plus its subproject (if any)."""
        status = info["openqa_jobs"]
        subproject = info["subproject"]
        if subproject:
            status.extend(subproject["openqa_jobs"])
        return status

    def is_there_openqa_comment(self, project):
        """Return True if there is a previous comment."""
        signature = "<!-- openQA status -->"
        comments = self.comment.get_comments(project_name=project)
        comment = [c for c in comments.values() if signature in c["comment"]]
        return len(comment) > 0

    def update_status_comment(self, project, report, force=False):
        """Replace the signed status comment when the report changed.

        A stale differing comment is only replaced once it is old_enough().
        With force=True the new comment is posted regardless.  Multiple
        signed comments are reported as an error and left alone.
        """
        signature = "<!-- openQA status -->"
        report = "%s\n%s" % (signature, str(report))

        write_comment = False

        comments = self.comment.get_comments(project_name=project)
        comment = [c for c in comments.values() if signature in c["comment"]]
        if comment and len(comment) > 1:
            print "ERROR. There are more than one openQA status comment in %s" % project
            # for c in comment:
            #     self.comment.delete(c['id'])
            # write_comment = True
        elif comment and comment[0]["comment"] != report and self.old_enough(comment[0]["when"]):
            self.comment.delete(comment[0]["id"])
            write_comment = True
        elif not comment:
            write_comment = True

        if write_comment or force:
            if osc.conf.config["debug"]:
                print "Updating comment"
            self.comment.add_comment(project_name=project, comment=report)

    def _report_broken_packages(self, info):
        """Markdown list of build failures grouped by package, capped at MAX_LINES."""
        broken_package_status = self.get_broken_package_status(info)

        # Group packages by name
        groups = defaultdict(list)
        for package in broken_package_status:
            groups[package["package"]].append(package)

        failing_lines = [
            "* Build failed %s (%s)" % (key, ", ".join(self._package_url(p) for p in value))
            for key, value in groups.iteritems()
        ]

        report = "\n".join(failing_lines[:MAX_LINES])
        if len(failing_lines) > MAX_LINES:
            report += "* and more (%s) ..." % (len(failing_lines) - MAX_LINES)
        return report

    def _report_openQA(self, info):
        """Return (markdown report, any_test_failed) for the project's openQA jobs."""
        failing_lines, green_lines = [], []

        openQA_status = self.get_openQA_status(info)
        for job in openQA_status:
            test_name = job["name"].split("-")[-1]
            fails = [
                "  * %s (%s)" % (test_name, self._openQA_module_url(job, module))
                for module in job["modules"]
                if module["result"] == "failed"
            ]

            if fails:
                failing_lines.extend(fails)
            else:
                green_lines.append(self._openQA_url(job))

        failing_report, green_report = "", ""
        if failing_lines:
            failing_report = "* Failing openQA tests:\n" + "\n".join(failing_lines[:MAX_LINES])
            if len(failing_lines) > MAX_LINES:
                failing_report += "\n  * and more (%s) ..." % (len(failing_lines) - MAX_LINES)
        if green_lines:
            green_report = "* Succeeding tests:" + ", ".join(green_lines[:MAX_LINES])
            if len(green_lines) > MAX_LINES:
                green_report += ", and more (%s) ..." % (len(green_lines) - MAX_LINES)

        return "\n".join((failing_report, green_report)), bool(failing_lines)

    def report(self, project):
        """Compute and publish the status comment for one staging project."""
        info = self.get_info(project)

        # Some staging projects do not have info like
        # openSUSE:Factory:Staging:Gcc49
        if not info:
            return

        if info["overall_state"] == "empty":
            return

        # The 'unacceptable' status means that the project will be
        # replaced soon. Better do not disturb with noise.
        if info["overall_state"] == "unacceptable":
            return

        report_broken_packages = self._report_broken_packages(info)
        report_openQA, some_openqa_fail = self._report_openQA(info)

        if report_broken_packages or some_openqa_fail:
            report = "\n\n".join((report_broken_packages, report_openQA))
            report = report.strip()
            if report:
                if osc.conf.config["debug"]:
                    print project
                    print "-" * len(project)
                    print report
                self.update_status_comment(project, report)
            # NOTE(review): this branch looks unreachable — report is only
            # empty when both parts are empty, but then the enclosing
            # condition is False.  Confirm intent before removing.
            elif not info["overall_state"] == "acceptable" and self.is_there_openqa_comment(project):
                report = "Congratulations! All fine now."
                if osc.conf.config["debug"]:
                    print project
                    print "-" * len(project)
                    print report
                self.update_status_comment(project, report, force=True)
class InstallChecker(object):
    def __init__(self, api, config):
        """Set up the checker for one target project (api.project).

        NOTE(review): the 'config' parameter is unused; settings are read
        from osc's global conf.config for api.project instead — confirm
        whether it can be dropped at the call sites.
        """
        self.api = api
        self.config = conf.config[api.project]
        self.logger = logging.getLogger('InstallChecker')
        self.commentapi = CommentAPI(api.apiurl)

        # Optional space-separated whitelist restricting checked architectures.
        self.arch_whitelist = self.config.get('repo_checker-arch-whitelist')
        if self.arch_whitelist:
            self.arch_whitelist = set(self.arch_whitelist.split(' '))

        # Binaries allowed to be uninstallable in non-adi (letter) stagings.
        self.ring_whitelist = set(self.config.get('repo_checker-binary-whitelist-ring', '').split(' '))

        self.cycle_packages = self.config.get('repo_checker-allowed-in-cycles')
        self.calculate_allowed_cycles()

        # Problems already present in the target project are not re-reported.
        self.existing_problems = self.binary_list_existing_problem(api.project, api.cmain_repo)
        self.ignore_duplicated = set(self.config.get('installcheck-ignore-duplicated-binaries', '').split(' '))

    def check_required_by(self, fileinfo, provides, requiredby, built_binaries, comments):
        """Check whether a capability required by another binary stays satisfiable.

        Returns True when the requiring binary is being deleted too, or when a
        surviving binary provides the same capability; False when the delete
        would leave 'requiredby' with an unresolvable dependency.  Findings
        are appended to 'comments'.
        """
        if requiredby.get('name') in built_binaries:
            return True
        # Strip version constraints like '>= 1.2' — only the capability name matters.
        provide = provides.get('dep')
        provide = provide.split(' ')[0]
        comments.append('{} provides {} required by {}'.format(fileinfo.find('name').text, provide, requiredby.get('name')))
        # was: api.makeurl — 'api' is undefined in this scope (NameError at
        # runtime); every other method uses self.api
        url = self.api.makeurl(['build', self.api.project, self.api.cmain_repo, 'x86_64', '_repository', requiredby.get('name') + '.rpm'],
                               {'view': 'fileinfo_ext'})
        reverse_fileinfo = ET.parse(osc.core.http_GET(url)).getroot()
        for require in reverse_fileinfo.findall('requires_ext'):
            # strip version constraints here too
            dep = require.get('dep').split(' ')[0]
            if dep != provide:
                continue
            for provided_by in require.findall('providedby'):
                if provided_by.get('name') in built_binaries:
                    continue
                comments.append('  also provided by {} -> ignoring'.format(provided_by.get('name')))
                return True
        comments.append('Error: missing alternative provides for {}'.format(provide))
        return False

    def check_delete_request(self, req, to_ignore, comments):
        """Validate that a delete request leaves no dangling dependencies.

        req: dict with at least 'package'.  to_ignore: packages whitelisted
        via project comments.  comments: list collecting human-readable
        findings.  Returns True when the delete is acceptable.
        """
        package = req['package']
        if package in to_ignore:
            self.logger.info('Delete request for package {} ignored'.format(package))
            return True

        # All binaries built by the package being deleted.
        built_binaries = set()
        file_infos = []
        for fileinfo in fileinfo_ext_all(self.api.apiurl, self.api.project, self.api.cmain_repo, 'x86_64', package):
            built_binaries.add(fileinfo.find('name').text)
            file_infos.append(fileinfo)

        result = True
        for fileinfo in file_infos:
            for provides in fileinfo.findall('provides_ext'):
                for requiredby in provides.findall('requiredby[@name]'):
                    result = result and self.check_required_by(fileinfo, provides, requiredby, built_binaries, comments)

        # was: depends_on(api.apiurl, ...) — 'api' is undefined in this scope;
        # use self.api like the rest of the class
        what_depends_on = depends_on(self.api.apiurl, self.api.project, self.api.cmain_repo, [package], True)

        # filter out dependency on package itself (happens with eg
        # java bootstrapping itself with previous build)
        if package in what_depends_on:
            what_depends_on.remove(package)

        if len(what_depends_on):
            comments.append('{} is still a build requirement of:\n\n- {}'.format(
                package, '\n- '.join(sorted(what_depends_on))))
            return False

        return result

    def packages_to_ignore(self, project):
        """Collect package names from 'installcheck: ignore ...' project comments."""
        ignore_re = re.compile(r'^installcheck: ignore (?P<args>.*)$', re.MULTILINE)
        comments = self.commentapi.get_comments(project_name=project)

        # The last matching comment wins; for now authorship is not checked.
        ignored = []
        for entry in comments.values():
            found = ignore_re.search(entry['comment'].replace('\r', ''))
            if found is None:
                continue
            # both commas and spaces are accepted as separators
            ignored = found.group('args').strip().replace(',', ' ').split(' ')
        return ignored

    def staging(self, project, force=False):
        """Run install, cycle and duplicate-binary checks for one staging project.

        Posts per-architecture 'installcheck' status reports and returns True
        when everything passed, False otherwise.  With force=True the checks
        run even when all status reports are already filled in.
        """
        api = self.api

        repository = self.api.cmain_repo

        # fetch the build ids at the beginning - mirroring takes a while
        buildids = {}
        try:
            architectures = self.target_archs(project, repository)
        except HTTPError as e:
            if e.code == 404:
                # adi disappear all the time, so don't worry
                return False
            raise e

        all_done = True
        for arch in architectures:
            pra = '{}/{}/{}'.format(project, repository, arch)
            buildid = self.buildid(project, repository, arch)
            if not buildid:
                self.logger.error('No build ID in {}'.format(pra))
                return False
            buildids[arch] = buildid
            url = self.report_url(project, repository, arch, buildid)
            try:
                root = ET.parse(osc.core.http_GET(url)).getroot()
                check = root.find('check[@name="installcheck"]/state')
                if check is not None and check.text != 'pending':
                    self.logger.info('{} already "{}", ignoring'.format(pra, check.text))
                else:
                    all_done = False
            except HTTPError:
                self.logger.info('{} has no status report'.format(pra))
                all_done = False

        if all_done and not force:
            return True

        repository_pairs = repository_path_expand(api.apiurl, project, repository)

        result = True

        status = api.project_status(project)
        if not status:
            self.logger.error('no project status for {}'.format(project))
            return False

        result_comment = []

        # Delete requests are validated against reverse dependencies first.
        to_ignore = self.packages_to_ignore(project)
        meta = api.load_prj_pseudometa(status['description'])
        for req in meta['requests']:
            if req['type'] == 'delete':
                result = result and self.check_delete_request(req, to_ignore, result_comment)

        for arch in architectures:
            # hit the first repository in the target project (if existant)
            target_pair = None
            directories = []
            for pair_project, pair_repository in repository_pairs:
                # ignore repositories only inherited for config
                if repository_arch_state(self.api.apiurl, pair_project, pair_repository, arch):
                    if not target_pair and pair_project == api.project:
                        target_pair = [pair_project, pair_repository]

                    directories.append(self.mirror(pair_project, pair_repository, arch))

            if not api.is_adi_project(project):
                # For "leaky" ring packages in letter stagings, where the
                # repository setup does not include the target project, that are
                # not intended to to have all run-time dependencies satisfied.
                whitelist = self.ring_whitelist
            else:
                whitelist = self.existing_problems

            # use a non-mutating union: the original '|=' modified
            # self.ring_whitelist / self.existing_problems in place, so the
            # per-project ignore list leaked across architectures and runs
            whitelist = whitelist | set(to_ignore)

            check = self.cycle_check(project, repository, arch)
            if not check.success:
                self.logger.warning('Cycle check failed')
                result_comment.append(check.comment)
                result = False

            check = self.install_check(target_pair, arch, directories, None, whitelist)
            if not check.success:
                self.logger.warning('Install check failed')
                result_comment.append(check.comment)
                result = False

        duplicates = duplicated_binaries_in_repo(self.api.apiurl, project, repository)
        # remove white listed duplicates
        for arch in list(duplicates):
            for binary in self.ignore_duplicated:
                duplicates[arch].pop(binary, None)
            if not len(duplicates[arch]):
                del duplicates[arch]
        if len(duplicates):
            self.logger.warning('Found duplicated binaries')
            result_comment.append(yaml.dump(duplicates, default_flow_style=False))
            result = False

        if result:
            self.report_state('success', self.gocd_url(), project, repository, buildids)
        else:
            result_comment.insert(0, 'Generated from {}\n'.format(self.gocd_url()))
            self.report_state('failure', self.upload_failure(project, result_comment), project, repository, buildids)
            self.logger.warning('Not accepting {}'.format(project))
            return False

        return result

    def upload_failure(self, project, comment):
        """Store the failure report in home:repo-checker and return a viewable URL."""
        body = '\n'.join(comment)
        print(project, body)
        put_url = self.api.makeurl(['source', 'home:repo-checker', 'reports', project])
        osc.core.http_PUT(put_url, data=body)

        # the human-readable view lives on the build.* host, not api.*
        web_host = self.api.apiurl.replace('api.', 'build.')
        return '{}/package/view_file/home:repo-checker/reports/{}'.format(web_host, project)

    def report_state(self, state, report_url, project, repository, buildids):
        """Post one status report per target architecture (last arch carries the verdict)."""
        archs = self.target_archs(project, repository)
        for arch in archs:
            is_last = arch == archs[-1]
            self.report_pipeline(state, report_url, project, repository,
                                 arch, buildids[arch], is_last)

    def gocd_url(self):
        """Return the console URL of the current GoCD job, or a placeholder
        when not running under GoCD."""
        server = os.environ.get('GO_SERVER_URL')
        if not server:
            # placeholder :)
            return 'http://stephan.kulow.org/'
        base = server.replace(':8154', '')
        path = '/tab/build/detail/{}/{}/{}/{}/{}#tab-console'.format(os.environ.get('GO_PIPELINE_NAME'),
                            os.environ.get('GO_PIPELINE_COUNTER'),
                            os.environ.get('GO_STAGE_NAME'),
                            os.environ.get('GO_STAGE_COUNTER'),
                            os.environ.get('GO_JOB_NAME'))
        return base + path

    def buildid(self, project, repository, architecture):
        """Fetch the current build id of one repository, or False when absent."""
        url = self.api.makeurl(['build', project, repository, architecture],
                               {'view': 'status'})
        root = ET.parse(osc.core.http_GET(url)).getroot()
        node = root.find('buildid')
        return node.text if node is not None else False

    def report_url(self, project, repository, architecture, buildid):
        """URL of the status-report resource for one build."""
        parts = ['status_reports', 'built', project, repository,
                 architecture, 'reports', buildid]
        return self.api.makeurl(parts)

    def report_pipeline(self, state, report_url, project, repository, architecture, buildid, is_last):
        """POST one 'installcheck' status report; exits the process on HTTP failure."""
        url = self.report_url(project, repository, architecture, buildid)
        # Only the last architecture carries a real failure; earlier ones are
        # marked success so they count as visited without duplicate failures
        # (pending goes in for all of them).
        if state == 'failure' and not is_last:
            state = 'success'

        xml = self.check_xml(report_url, state, 'installcheck')
        try:
            osc.core.http_POST(url, data=xml)
        except HTTPError:
            print('failed to post status to ' + url)
            sys.exit(1)

    def check_xml(self, url, state, name):
        """Serialize a status-report <check> element (url omitted when falsy)."""
        root = ET.Element('check')
        if url:
            ET.SubElement(root, 'url').text = url
        ET.SubElement(root, 'state').text = state
        ET.SubElement(root, 'name').text = name
        return ET.tostring(root)

    def target_archs(self, project, repository):
        """Architectures to check, filtered by the optional whitelist.

        Reverse-sorted so that x86_64 is checked first.
        """
        archs = target_archs(self.api.apiurl, project, repository)

        if self.arch_whitelist:
            archs = [arch for arch in archs if arch in self.arch_whitelist]

        return sorted(archs, reverse=True)

    @memoize(ttl=60, session=True, add_invalidate=True)
    def mirror(self, project, repository, arch):
        """Call bs_mirrorfull script to mirror packages.

        Returns the local cache directory.  Results are memoized for 60s;
        the add_invalidate hook allows _invalidate_all() to reset the cache
        (used when repo corruption is detected).
        """
        directory = os.path.join(CACHEDIR, project, repository, arch)
        if not os.path.exists(directory):
            os.makedirs(directory)

        script = os.path.join(SCRIPT_PATH, 'bs_mirrorfull')
        path = '/'.join((project, repository, arch))
        # /public endpoint: anonymous read access, no credentials needed
        url = '{}/public/build/{}'.format(self.api.apiurl, path)
        parts = ['LC_ALL=C', 'perl', script, '--nodebug', url, directory]
        parts = [pipes.quote(part) for part in parts]

        self.logger.info('mirroring {}'.format(path))
        # os.system returns non-zero when the mirror script failed
        if os.system(' '.join(parts)):
            raise Exception('failed to mirror {}'.format(path))

        return directory

    @memoize(session=True)
    def binary_list_existing_problem(self, project, repository):
        """Determine which binaries are mentioned in repo_checker output.

        Loads the stored project-only report from the pseudometa package and
        returns the set of binary names found in its install-check sections;
        empty set (with a warning) when no report exists yet.
        """
        binaries = set()

        filename = self.project_pseudometa_file_name(project, repository)
        content = project_pseudometa_file_load(self.api.apiurl, project, filename)
        if not content:
            self.logger.warning('no project_only run from which to extract existing problems')
            return binaries

        sections = self.install_check_parse(content)
        for section in sections:
            for binary in section.binaries:
                # BINARY_REGEX extracts the bare package name from an rpm filename
                match = re.match(BINARY_REGEX, binary)
                if match:
                    binaries.add(match.group('name'))

        return binaries

    def install_check(self, target_project_pair, arch, directories,
                      ignore=None, whitelist=None, parse=False, no_filter=False):
        """Run repo_checker.pl over mirrored repositories for one architecture.

        target_project_pair: (project, repository) used when parsing results.
        directories: local mirror paths handed to the checker.
        ignore: optional iterable of rpm names to skip entirely.
        whitelist: optional iterable of binary names allowed to be broken.
        parse: additionally feed failures into install_check_sections_group.
        no_filter: pass --no-filter through to repo_checker.pl.
        Returns a CheckResult; on failure its comment holds markdown output.
        """
        # default changed from 'whitelist=[]' — mutable default argument
        whitelist = whitelist if whitelist is not None else []
        self.logger.info('install check: start (ignore:{}, whitelist:{}, parse:{}, no_filter:{})'.format(
            bool(ignore), len(whitelist), parse, no_filter))

        with tempfile.NamedTemporaryFile() as ignore_file:
            # Print ignored rpms on separate lines in ignore file.
            if ignore:
                for item in ignore:
                    ignore_file.write(item + '\n')
                ignore_file.flush()

            # Invoke repo_checker.pl to perform an install check.
            script = os.path.join(SCRIPT_PATH, 'repo_checker.pl')
            parts = ['LC_ALL=C', 'perl', script, arch, ','.join(directories),
                     '-f', ignore_file.name, '-w', ','.join(whitelist)]
            if no_filter:
                parts.append('--no-filter')

            parts = [pipes.quote(part) for part in parts]
            p = subprocess.Popen(' '.join(parts), shell=True,
                                 stdout=subprocess.PIPE,
                                 stderr=subprocess.PIPE, close_fds=True)
            stdout, stderr = p.communicate()

        if p.returncode:
            self.logger.info('install check: failed')
            if p.returncode == 126:
                # sentinel exit code from repo_checker.pl signalling a corrupt mirror
                self.logger.warning('mirror cache reset due to corruption')
                self._invalidate_all()
            elif parse:
                # Parse output for later consumption for posting comments.
                sections = self.install_check_parse(stdout)
                self.install_check_sections_group(
                    target_project_pair[0], target_project_pair[1], arch, sections)

            # Format output as markdown comment.
            parts = []

            stdout = stdout.decode('utf-8').strip()
            if stdout:
                parts.append(stdout + '\n')
            # NOTE(review): stderr is not decoded before strip/join — fine on
            # Python 2, would mix bytes and str on Python 3; confirm target.
            stderr = stderr.strip()
            if stderr:
                parts.append(stderr + '\n')

            header = '### [install check & file conflicts for {}]'.format(arch)
            return CheckResult(False, header + '\n\n' + ('\n' + ('-' * 80) + '\n\n').join(parts))

        self.logger.info('install check: passed')
        return CheckResult(True, None)

    def install_check_sections_group(self, project, repository, arch, sections):
        """Attribute parsed install-check sections to their source packages.

        Each section is appended to self.package_results for every package
        that owns one of the section's binaries.
        """
        _, binary_map = package_binary_list(self.api.apiurl, project, repository, arch)

        for section in sections:
            # If switch to creating bugs likely makes sense to join packages to
            # form grouping key and create shared bugs for conflicts.
            # Only binaries present in binary_map are mapped; unknown binaries
            # were encountered once in the wild, see:
            # https://lists.opensuse.org/opensuse-buildservice/2017-08/msg00035.html
            affected = {binary_map[name] for name in section.binaries if name in binary_map}
            for owner in affected:
                self.package_results.setdefault(owner, []).append(section)

    def install_check_parse(self, output):
        """Yield InstallSection chunks parsed from install-check output.

        A section opens on a non-indented line matching INSTALL_REGEX and
        accumulates every following indented line as its text.
        """
        binaries = None  # binaries of the section currently being collected
        chunk = None     # accumulated text of that section

        for line in output.splitlines(True):
            if line.startswith(' '):
                # Continuation line: extend the open section, if any.
                if binaries:
                    chunk += line
                continue

            # A non-indented line closes any open section.
            if binaries:
                yield InstallSection(binaries, chunk)

            match = re.match(INSTALL_REGEX, line)
            if match:
                # Remove empty groups since regex matches different patterns.
                binaries = [group for group in match.groups() if group is not None]
                chunk = line
            else:
                binaries = None

        # Flush the trailing section at end of output.
        if binaries:
            yield InstallSection(binaries, chunk)

    def calculate_allowed_cycles(self):
        """Populate self.allowed_cycles from the cycle_packages setting.

        cycle_packages is a ';'-separated list of ','-separated package
        groups; each group becomes one allowed cycle (a list of names).
        """
        if self.cycle_packages:
            self.allowed_cycles = [group.split(',')
                                   for group in self.cycle_packages.split(';')]
        else:
            self.allowed_cycles = []

    def cycle_check(self, project, repository, arch):
        """Check build dependency cycles against the allowed-cycle list.

        Every package that participates in a cycle and is not covered by
        self.allowed_cycles produces one failure message.

        :return: CheckResult(success, newline-joined messages or None).
        """
        self.logger.info('cycle check: start %s/%s/%s' % (project, repository, arch))
        messages = []

        depinfo = builddepinfo(self.api.apiurl, project, repository, arch, order = False)
        for cycle in depinfo.findall('cycle'):
            for node in cycle.findall('package'):
                name = node.text
                # A package is acceptable when any allowed cycle contains it.
                if any(name in allowed for allowed in self.allowed_cycles):
                    continue
                members = [p.text for p in cycle.findall('package')]
                messages.append('Package {} appears in cycle {}'.format(name, '/'.join(members)))

        if messages:
            # New cycles, post comment.
            self.logger.info('cycle check: failed')
            return CheckResult(False, '\n'.join(messages) + '\n')

        self.logger.info('cycle check: passed')
        return CheckResult(True, None)

    def project_pseudometa_file_name(self, project, repository):
        """Return the pseudometa file name for project/repository.

        The repository suffix is only appended when the project config does
        not declare a main-repo.
        """
        if Config.get(self.api.apiurl, project).get('main-repo'):
            return 'repo_checker'
        return 'repo_checker.' + repository
class StagingReport(object):
    """Build and maintain a markdown status comment on a staging project.

    The comment summarizes broken package builds and external check states.
    It is refreshed (delete + re-add) only when the content changed and the
    previous comment is old enough, to avoid notification spam.
    """

    def __init__(self, api):
        self.api = api
        self.comment = CommentAPI(api.apiurl)

    def _package_url(self, package):
        """Return a markdown link to the live build log of a failed build."""
        link = '/package/live_build_log/%s/%s/%s/%s'
        link = link % (package['project'],
                       package['package'],
                       package['repository'],
                       package['arch'])
        text = '[%s](%s)' % (package['arch'], link)
        return text

    def old_enough(self, _date):
        """Return True when _date lies at least MARGIN_HOURS in the past.

        NOTE(review): compares against naive datetime.utcnow(); assumes
        stored comment timestamps are naive UTC — confirm against CommentAPI.
        """
        time_delta = datetime.utcnow() - _date
        safe_margin = timedelta(hours=MARGIN_HOURS)
        return safe_margin <= time_delta

    def update_status_comment(self, project, report, force=False, only_replace=False):
        """Replace the marked status comment on project when appropriate.

        A new comment is written when the text changed and the existing
        comment is old enough, or always when force is set. When no previous
        comment exists, only_replace suppresses creating a fresh one.
        """
        report = self.comment.add_marker(report, MARKER)
        comments = self.comment.get_comments(project_name=project)
        comment, _ = self.comment.comment_find(comments, MARKER)
        if comment:
            write_comment = (report != comment['comment'] and self.old_enough(comment['when']))
        else:
            write_comment = not only_replace

        if write_comment or force:
            if osc.conf.config['debug']:
                print('Updating comment')
            if comment:
                # Comments cannot be edited in place; delete then re-add.
                self.comment.delete(comment['id'])
            self.comment.add_comment(project_name=project, comment=report)

    def _report_broken_packages(self, info):
        """Return a markdown bullet list of failed builds grouped by package."""
        broken_package_status = info['broken_packages']

        # Group packages by name
        groups = defaultdict(list)
        for package in broken_package_status:
            groups[package['package']].append(package)

        failing_lines = [
            '* Build failed %s (%s)' % (key, ', '.join(self._package_url(p) for p in value))
            # items() instead of Python-2-only iteritems(); consistent with
            # links_state.items() used in report_checks() below.
            for key, value in groups.items()
        ]

        report = '\n'.join(failing_lines[:MAX_LINES])
        if len(failing_lines) > MAX_LINES:
            # Leading newline so the overflow bullet starts on its own line
            # instead of being glued to the last joined line.
            report += '\n* and more (%s) ...' % (len(failing_lines) - MAX_LINES)
        return report

    def report_checks(self, info):
        """Summarize external check results grouped by state.

        :return: (markdown text, failure flag); failure is True when any
            non-success state is present.
        """
        failing_lines, green_lines = [], []

        links_state = {}
        for check in info['checks']:
            links_state.setdefault(check['state'], [])
            links_state[check['state']].append('[{}]({})'.format(check['name'], check['url']))

        lines = []
        failure = False
        for state, links in links_state.items():
            if len(links) > MAX_LINES:
                # Truncate long link lists and note how many were omitted.
                extra = len(links) - MAX_LINES
                links = links[:MAX_LINES]
                links.append('and {} more...'.format(extra))

            lines.append('- {}'.format(state))
            if state != 'success':
                # Failures are listed one per line for visibility.
                lines.extend(['  - {}'.format(link) for link in links])
                failure = True
            else:
                # Successes are compacted onto the state line.
                lines[-1] += ': {}'.format(', '.join(links))

        return '\n'.join(lines).strip(), failure

    def report(self, project, aggregate=True, force=False):
        """Generate and post the status report comment for project."""
        info = self.api.project_status(project, aggregate)

        # Do not attempt to process projects without staging info, or projects
        # in a pending state that will change before settling. This avoids
        # intermediate notifications that may end up being spammy and for
        # long-lived stagings where checks may be re-triggered multiple times
        # and thus enter pending state (not seen on first run) which is not
        # useful to report.
        if not info or not self.api.project_status_final(info):
            return

        report_broken_packages = self._report_broken_packages(info)
        report_checks, check_failure = self.report_checks(info)

        if report_broken_packages or check_failure:
            if report_broken_packages:
                report_broken_packages = 'Broken:\n\n' + report_broken_packages
            if report_checks:
                report_checks = 'Checks:\n\n' + report_checks
            report = '\n\n'.join((report_broken_packages, report_checks))
            report = report.strip()
            only_replace = False
        else:
            report = 'Congratulations! All fine now.'
            only_replace = True

        self.update_status_comment(project, report, force=force, only_replace=only_replace)

        if osc.conf.config['debug']:
            print(project)
            print('-' * len(project))
            print(report)
# Example #58
class AcceptCommand(object):
    """Accept a ready staging project and submit its requests onward."""

    def __init__(self, api):
        # Staging API wrapper used for all OBS interactions.
        self.api = api
        # Comment API bound to the same apiurl for project comments.
        self.comment = CommentAPI(self.api.apiurl)

    def find_new_requests(self, project):
        """Search OBS for new submit requests targeting project.

        :return: list of dicts of the form
            {'id': int, 'packages': [str, ...]}.
        """
        query = "match=state/@name='new'+and+(action/target/@project='{}'+and+action/@type='submit')".format(project)
        url = self.api.makeurl(['search', 'request'], query)

        root = ET.parse(http_GET(url)).getroot()

        results = []
        for request in root.findall('request'):
            # Collect the target package of every action of the request.
            packages = [str(target.get('package'))
                        for action in request.findall('action')
                        for target in action.findall('target')]
            results.append({'id': int(request.get('id')), 'packages': packages})
        return results

    def perform(self, project):
        """
        Accept the staging LETTER for review and submit to factory
        Then disable the build to disabled
        :param project: staging project we are working with
        """

        # Bail out early unless every check on the staging project passed.
        status = self.api.check_project_status(project)

        if not status:
            print('The project "{}" is not yet acceptable.'.format(project))
            return False

        meta = self.api.get_prj_pseudometa(project)
        requests = []
        packages = []
        # First pass: detach each request from the staging project and record
        # its id and package for the following steps.
        for req in meta['requests']:
            self.api.rm_from_prj(project, request_id=req['id'], msg='ready to accept')
            requests.append(req['id'])
            packages.append(req['package'])
            msg = 'Accepting staging review for {}'.format(req['package'])
            print(msg)

        # Second pass: accept the requests themselves so the submissions land.
        for req in requests:
            change_request_state(self.api.apiurl, str(req), 'accepted', message='Accept to factory')

        # A single comment should be enough to notify everybody, since they are
        # already mentioned in the comments created by select/unselect
        pkg_list = ", ".join(packages)
        cmmt = 'Project "{}" accepted. The following packages have been submitted to factory: {}.'.format(project, pkg_list)
        self.comment.add_comment(project_name=project, comment=cmmt)

        # XXX CAUTION - AFAIK the 'accept' command is expected to clean the messages here.
        self.comment.delete_from(project_name=project)

        # Staging is done: stop building the project and its :DVD sibling.
        self.api.build_switch_prj(project, 'disable')
        if self.api.project_exists(project + ':DVD'):
            self.api.build_switch_prj(project + ':DVD', 'disable')

        return True

    def accept_other_new(self):
        """Accept all pending new submit requests targeting the project.

        :return: True when at least one request was accepted.
        """
        changed = False
        for req in self.find_new_requests('openSUSE:{}'.format(self.api.opensuse)):
            # print() call form for consistency with the rest of the file
            # (Python 2 `print` statement is used nowhere else here) and for
            # Python 3 compatibility.
            print('Accepting request %d: %s' % (req['id'], ','.join(req['packages'])))
            change_request_state(self.api.apiurl, str(req['id']), 'accepted', message='Accept to factory')
            changed = True

        return changed

    def update_factory_version(self):
        """Update openSUSE (Factory, 13.2, ...)  version if is necessary."""
        project = 'openSUSE:{}'.format(self.api.opensuse)
        url = self.api.makeurl(['source', project, '_product', 'openSUSE.product'])

        # Fetch the current product file and compute today's YYYYMMDD stamp.
        product = http_GET(url).read()
        curr_version = date.today().strftime('%Y%m%d')
        # Substitute the 8-digit <version> element with today's date.
        new_product = re.sub(r'<version>\d{8}</version>', '<version>%s</version>' % curr_version, product)

        if product != new_product:
            # Write back only when the version actually changed.
            http_PUT(url + '?comment=Update+version', data=new_product)