def remind_comment(apiurl, repeat_age, request_id, project, package=None):
    """Post a reminder comment on a request, at most once per repeat_age days.

    :param apiurl: OBS API URL used for comment and maintainer lookups.
    :param repeat_age: minimum number of days between reminders; a younger
        existing reminder is left untouched.
    :param request_id: id of the request to remind on.
    :param project: project whose maintainers are @-mentioned.
    :param package: optional package for a more specific maintainer lookup.
    """
    comment_api = CommentAPI(apiurl)
    comments = comment_api.get_comments(request_id=request_id)
    comment, _ = comment_api.comment_find(comments, BOT_NAME)

    if comment:
        delta = datetime.utcnow() - comment['when']
        if delta.days < repeat_age:
            print(' skipping due to previous reminder from {} days ago'.format(delta.days))
            return

        # Repeat notification so remove old comment.
        try:
            comment_api.delete(comment['id'])
        except HTTPError as e:
            if e.code == 403:
                # Gracefully skip when previous reminder was by another user.
                print(' unable to remove previous reminder')
                return
            # Bare raise preserves the original traceback.
            raise

    userids = sorted(maintainers_get(apiurl, project, package))
    if userids:
        users = ['@' + userid for userid in userids]
        message = '{}: {}'.format(', '.join(users), REMINDER)
    else:
        message = REMINDER
    print(' ' + message)

    message = comment_api.add_marker(message, BOT_NAME)
    comment_api.add_comment(request_id=request_id, comment=message)
def remind_comment(apiurl, repeat_age, request_id, project, package=None):
    """Post a reminder comment on a request, at most once per repeat_age days.

    :param apiurl: OBS API URL used for comment and maintainer lookups.
    :param repeat_age: minimum number of days between reminders; a younger
        existing reminder is left untouched.
    :param request_id: id of the request to remind on.
    :param project: project whose maintainers are @-mentioned.
    :param package: optional package for a more specific maintainer lookup.
    """
    comment_api = CommentAPI(apiurl)
    comments = comment_api.get_comments(request_id=request_id)
    comment, _ = comment_api.comment_find(comments, BOT_NAME)

    if comment:
        delta = datetime.utcnow() - comment['when']
        if delta.days < repeat_age:
            print(' skipping due to previous reminder from {} days ago'.format(delta.days))
            return

        # Repeat notification so remove old comment.
        try:
            comment_api.delete(comment['id'])
        except HTTPError as e:
            if e.code == 403:
                # Gracefully skip when previous reminder was by another user.
                print(' unable to remove previous reminder')
                return
            # Bare raise preserves the original traceback.
            raise

    userids = sorted(maintainers_get(apiurl, project, package))
    if userids:
        users = ['@' + userid for userid in userids]
        message = '{}: {}'.format(', '.join(users), REMINDER)
    else:
        message = REMINDER
    print(' ' + message)

    message = comment_api.add_marker(message, BOT_NAME)
    comment_api.add_comment(request_id=request_id, comment=message)
def create_comments(self, state):
    """Sync per-package installcheck comments with the current problem state.

    Builds the set of packages that still have (non-recent) problems, removes
    this user's comments for packages no longer in that set, then writes or
    refreshes one marked comment per remaining package.
    """
    by_package = dict()
    for source, details in state['check'].items():
        rebuild = dateutil.parser.parse(details["rebuild"])
        # A very recent rebuild may still clear itself up; don't nag yet.
        if datetime.now() - rebuild < timedelta(days=2):
            self.logger.debug(f"Ignore {source} - problem too recent")
            continue
        _, _, arch, rpm = source.split('/')
        rpm = rpm.split(':')[0]
        by_package.setdefault(rpm, {})[arch] = details['problem']

    # Drop our stale comments for packages that no longer have problems.
    url = makeurl(self.apiurl, ['comments', 'user'])
    root = ET.parse(http_GET(url)).getroot()
    for comment in root.findall('.//comment'):
        if comment.get('project') != self.project:
            continue
        if comment.get('package') in by_package:
            continue
        self.logger.info("Removing comment for package {}".format(comment.get('package')))
        url = makeurl(self.apiurl, ['comment', comment.get('id')])
        http_DELETE(url)

    commentapi = CommentAPI(self.apiurl)
    MARKER = 'Installcheck'

    for package in by_package:
        pieces = []
        for arch in sorted(by_package[package]):
            pieces.append(f"\n\n**Installcheck problems for {arch}**\n\n")
            for problem in sorted(by_package[package][arch]):
                pieces.append("+ " + problem + "\n")
        newcomment = commentapi.add_marker(''.join(pieces).strip(), MARKER)

        oldcomments = commentapi.get_comments(project_name=self.project, package_name=package)
        oldcomment, _ = commentapi.comment_find(oldcomments, MARKER)
        # Identical text: nothing to do; changed text: replace the old comment.
        if oldcomment and oldcomment['comment'] == newcomment:
            continue
        if oldcomment:
            commentapi.delete(oldcomment['id'])
        self.logger.debug("Adding comment to {}/{}".format(self.project, package))
        commentapi.add_comment(project_name=self.project, package_name=package, comment=newcomment)
class TestComment(unittest.TestCase):
    """Unit tests for CommentAPI marker handling and truncation."""

    def setUp(self):
        # 'bogus' apiurl: these tests never talk to a server.
        self.api = CommentAPI('bogus')
        self.bot = type(self).__name__
        # Expected marked-comment texts: 1 = marker only, 2 = marker with info keys.
        self.comments = {
            1: {'comment': '<!-- {} -->\n\nshort comment'.format(self.bot)},
            2: {'comment': '<!-- {} foo=bar distro=openSUSE -->\n\nshort comment'.format(self.bot)}
        }

    def test_truncate(self):
        # Plain text truncation must hit the requested length exactly.
        comment = 'string of text'
        for i in range(len(comment) + 1):
            truncated = self.api.truncate(comment, length=i)
            print(truncated)
            self.assertEqual(len(truncated), i)

    def test_truncate_pre(self):
        # NOTE(review): literal reproduced as found; the original was presumably
        # a multi-line string whose newlines were collapsed — verify upstream.
        comment = """ Some text. <pre> bar mar car </pre> ## section 2 <pre> more lines than you can handle </pre> """.strip()

        # Extra headroom for the '...\n</pre>' suffix truncate() may append.
        for i in range(len(comment) + len('...\n</pre>')):
            truncated = self.api.truncate(comment, length=i)
            print('=' * 80)
            print(truncated)
            # Never longer than requested, and <pre> tags always balanced.
            self.assertTrue(len(truncated) <= i, '{} <= {}'.format(len(truncated), i))
            self.assertEqual(truncated.count('<pre>'), truncated.count('</pre>'))
            # No partially-emitted tags survive truncation.
            self.assertFalse(len(re.findall(r'</?\w+[^\w>]', truncated)))
            tag_count = truncated.count('<pre>') + truncated.count('</pre>')
            self.assertEqual(tag_count, truncated.count('<'))
            self.assertEqual(tag_count, truncated.count('>'))

    def test_add_marker(self):
        # Marker without info.
        comment_marked = self.api.add_marker(COMMENT, self.bot)
        self.assertEqual(comment_marked, self.comments[1]['comment'])

        # Marker with info key=value pairs.
        comment_marked = self.api.add_marker(COMMENT, self.bot, COMMENT_INFO)
        self.assertEqual(comment_marked, self.comments[2]['comment'])

    def test_remove_marker(self):
        # Removing a marker from unmarked text is a no-op.
        comment = self.api.remove_marker(COMMENT)
        self.assertEqual(comment, COMMENT)

        comment = self.api.remove_marker(self.comments[1]['comment'])
        self.assertEqual(comment, COMMENT)

        comment = self.api.remove_marker(self.comments[2]['comment'])
        self.assertEqual(comment, COMMENT)

    def test_comment_find(self):
        # Marker-only search matches the info-less comment.
        comment, info = self.api.comment_find(self.comments, self.bot)
        self.assertEqual(comment, self.comments[1])

        # Full info match.
        comment, info = self.api.comment_find(self.comments, self.bot, COMMENT_INFO)
        self.assertEqual(comment, self.comments[2])
        self.assertEqual(info, COMMENT_INFO)

        # Partial info still matches and returns the full stored info.
        info_partial = dict(COMMENT_INFO)
        del info_partial['foo']
        comment, info = self.api.comment_find(self.comments, self.bot, info_partial)
        self.assertEqual(comment, self.comments[2])
        self.assertEqual(info, COMMENT_INFO)
class TestCommentOBS(OBSLocal.TestCase):
    """Integration tests for CommentAPI against a local OBS instance."""

    def setUp(self):
        super(TestCommentOBS, self).setUp()
        self.wf = OBSLocal.FactoryWorkflow()
        for user in ('factory-auto', 'repo-checker', 'staging-bot'):
            self.wf.create_user(user)
        self.wf.create_group('factory-staging', ['staging-bot'])
        self.wf.create_project(PROJECT, maintainer={'groups': ['factory-staging']})
        self.api = CommentAPI(self.apiurl)
        # Ensure different test runs operate in unique namespace.
        self.bot = '::'.join([type(self).__name__, str(random.getrandbits(8))])

    def tearDown(self):
        self.osc_user('Admin')
        del self.wf

    def test_basic(self):
        self.osc_user('staging-bot')

        # No marked comment to start with.
        self.assertFalse(self.comments_filtered(self.bot)[0])

        # Add one, find it, delete it, and confirm it is gone.
        marked = self.api.add_marker(COMMENT, self.bot)
        self.assertTrue(self.api.add_comment(project_name=PROJECT, comment=marked))
        found, _ = self.comments_filtered(self.bot)
        self.assertTrue(found)
        self.assertTrue(self.api.delete(found['id']))
        self.assertFalse(self.comments_filtered(self.bot)[0])

    def test_delete_nested(self):
        self.osc_user('staging-bot')
        marked = self.api.add_marker(COMMENT, self.bot)
        # Allow for existing comments by basing assertion on delta from initial count.
        initial_count = len(self.api.get_comments(project_name=PROJECT))
        self.assertFalse(self.comments_filtered(self.bot)[0])

        self.assertTrue(self.api.add_comment(project_name=PROJECT, comment=marked))
        root_comment, _ = self.comments_filtered(self.bot)
        self.assertTrue(root_comment)

        # Attach three replies beneath the root comment.
        for _ in range(3):
            self.assertTrue(self.api.add_comment(project_name=PROJECT, comment=marked, parent_id=root_comment['id']))

        comments = self.api.get_comments(project_name=PROJECT)
        reply_count = sum(1 for c in comments.values() if c['parent'])
        self.assertEqual(reply_count, 3)
        self.assertTrue(len(comments) == initial_count + 4)

        # delete_from must remove the whole tree, replies included.
        self.api.delete_from(project_name=PROJECT)
        self.assertFalse(len(self.api.get_comments(project_name=PROJECT)))

    def test_delete_batch(self):
        users = ['factory-auto', 'repo-checker', 'staging-bot']
        for user in users:
            self.osc_user(user)
            print('logged in as ', user)
            user_bot = '::'.join([self.bot, user])
            marked = self.api.add_marker(COMMENT, user_bot)
            self.assertFalse(self.comments_filtered(user_bot)[0])
            self.assertTrue(self.api.add_comment(project_name=PROJECT, comment=marked))
            self.assertTrue(self.comments_filtered(user_bot)[0])

        # Allow for existing comments by basing assertion on delta from initial count.
        initial_count = len(self.api.get_comments(project_name=PROJECT))
        self.assertTrue(initial_count >= len(users))

        # Removing one user's comments drops exactly one comment.
        self.api.delete_from_where_user(users[0], project_name=PROJECT)
        self.assertTrue(len(self.api.get_comments(project_name=PROJECT)) == initial_count - 1)

        # Removing unfiltered drops the rest.
        self.api.delete_from(project_name=PROJECT)
        self.assertFalse(len(self.api.get_comments(project_name=PROJECT)))

    def comments_filtered(self, bot):
        """Return (comment, info) for the comment carrying bot's marker, if any."""
        all_comments = self.api.get_comments(project_name=PROJECT)
        return self.api.comment_find(all_comments, bot)
class PkglistComments(object):
    """Handling staging comments of diffs"""

    def __init__(self, apiurl):
        self.apiurl = apiurl
        self.comment = CommentAPI(apiurl)

    def read_summary_file(self, file):
        """Parse a 'pkg:group' summary file into {pkg: [group, ...]}."""
        ret = dict()
        with open(file, 'r') as f:
            for line in f:
                pkg, group = line.strip().split(':')
                ret.setdefault(pkg, [])
                ret[pkg].append(group)
        return ret

    def write_summary_file(self, file, content):
        """Write {pkg: [group, ...]} back out as sorted 'pkg:group' lines."""
        output = []
        for pkg in sorted(content):
            for group in sorted(content[pkg]):
                output.append(f"{pkg}:{group}")

        with open(file, 'w') as f:
            for line in sorted(output):
                f.write(line + '\n')

    # NOTE: method name typo ('calculcate') kept for API compatibility.
    def calculcate_package_diff(self, old_file, new_file):
        """Return a markdown report of package/group changes, or None if none.

        The report has Remove/Move/Add sections, each wrapping its package
        list in a fenced code block.
        """
        old_file = self.read_summary_file(old_file)
        new_file = self.read_summary_file(new_file)

        # remove common part
        keys = list(old_file.keys())
        for key in keys:
            if new_file.get(key, []) == old_file[key]:
                del new_file[key]
                del old_file[key]

        if not old_file and not new_file:
            return None

        # Group removed packages by the comma-joined groups they leave.
        removed = dict()
        for pkg in old_file:
            old_groups = old_file[pkg]
            if new_file.get(pkg):
                continue
            removekey = ','.join(old_groups)
            removed.setdefault(removekey, [])
            removed[removekey].append(pkg)

        report = ''
        for rm in sorted(removed.keys()):
            report += f"**Remove from {rm}**\n\n```\n"
            paragraph = ', '.join(removed[rm])
            report += "\n".join(textwrap.wrap(paragraph, width=90, break_long_words=False, break_on_hyphens=False))
            report += "\n```\n\n"

        # Group moved packages by their 'from ... to ...' transition.
        moved = dict()
        for pkg in old_file:
            old_groups = old_file[pkg]
            new_groups = new_file.get(pkg)
            if not new_groups:
                continue
            movekey = ','.join(old_groups) + ' to ' + ','.join(new_groups)
            moved.setdefault(movekey, [])
            moved[movekey].append(pkg)

        for move in sorted(moved.keys()):
            report += f"**Move from {move}**\n\n```\n"
            paragraph = ', '.join(moved[move])
            report += "\n".join(textwrap.wrap(paragraph, width=90, break_long_words=False, break_on_hyphens=False))
            report += "\n```\n\n"

        # Group newly-added packages by their target group list.
        added = dict()
        for pkg in new_file:
            if pkg in old_file:
                continue
            addkey = ','.join(new_file[pkg])
            added.setdefault(addkey, [])
            added[addkey].append(pkg)

        for group in sorted(added):
            report += f"**Add to {group}**\n\n```\n"
            paragraph = ', '.join(added[group])
            report += "\n".join(textwrap.wrap(paragraph, width=90, break_long_words=False, break_on_hyphens=False))
            report += "\n```\n\n"

        return report.strip()

    def handle_package_diff(self, project, old_file, new_file):
        """Post/refresh the diff comment on project; return 0 if handled, 1 if pending.

        Returns 0 when there is no diff, when the comment was (re)written, or
        when a reply saying 'ignore'/'approve' exists; 1 when an unchanged diff
        comment is still awaiting a decision.
        """
        comments = self.comment.get_comments(project_name=project)
        comment, _ = self.comment.comment_find(comments, MARKER)

        report = self.calculcate_package_diff(old_file, new_file)
        if not report:
            # Diff disappeared: retract any stale comment.
            if comment:
                self.comment.delete(comment['id'])
            return 0
        report = self.comment.add_marker(report, MARKER)

        if comment:
            write_comment = report != comment['comment']
        else:
            write_comment = True
        if write_comment:
            # Replace rather than edit: delete the old comment first.
            if comment:
                self.comment.delete(comment['id'])
            self.comment.add_comment(project_name=project, comment=report)
        else:
            # Unchanged diff: check replies for an operator decision.
            for c in comments.values():
                if c['parent'] == comment['id']:
                    ct = c['comment']
                    if ct.startswith('ignore ') or ct == 'ignore':
                        print(c)
                        return 0
                    if ct.startswith('approve ') or ct == 'approve':
                        print(c)
                        return 0
        return 1

    def is_approved(self, comment, comments):
        """Return the user who replied 'approve' to comment, or None."""
        if not comment:
            return None

        for c in comments.values():
            if c['parent'] == comment['id']:
                ct = c['comment']
                if ct.startswith('approve ') or ct == 'approve':
                    return c['who']
        return None

    def parse_title(self, line):
        """Parse a '**Add/Move/Remove ...**' report heading into a command dict."""
        m = re.match(r'\*\*Add to (.*)\*\*', line)
        if m:
            return {'cmd': 'add', 'to': m.group(1).split(','), 'pkgs': []}
        m = re.match(r'\*\*Move from (.*) to (.*)\*\*', line)
        if m:
            return {'cmd': 'move', 'from': m.group(1).split(','), 'to': m.group(2).split(','), 'pkgs': []}
        m = re.match(r'\*\*Remove from (.*)\*\*', line)
        if m:
            return {'cmd': 'remove', 'from': m.group(1).split(','), 'pkgs': []}
        return None

    def parse_sections(self, comment):
        """Parse a report comment back into a list of command sections."""
        current_section = None
        sections = []
        in_quote = False
        for line in comment.split('\n'):
            if line.startswith('**'):
                if current_section:
                    sections.append(current_section)
                current_section = self.parse_title(line)
                continue
            # ``` fences toggle package-list mode.
            if line.startswith("```"):
                in_quote = not in_quote
                continue
            if in_quote:
                for pkg in line.split(','):
                    pkg = pkg.strip()
                    if pkg:
                        current_section['pkgs'].append(pkg)
        if current_section:
            sections.append(current_section)
        return sections

    def apply_move(self, content, section):
        """Move section's packages from their old groups to the new ones in content."""
        for pkg in section['pkgs']:
            pkg_content = content[pkg]
            for group in section['from']:
                try:
                    pkg_content.remove(group)
                except ValueError:
                    # Summary no longer matches the approved diff: abort.
                    logging.error(f"Can't remove {pkg} from {group}, not there. Mismatch.")
                    sys.exit(1)
            for group in section['to']:
                pkg_content.append(group)
            content[pkg] = pkg_content

    def apply_add(self, content, section):
        """Add section's packages to the target groups in content."""
        for pkg in section['pkgs']:
            content.setdefault(pkg, [])
            content[pkg] += section['to']

    def apply_remove(self, content, section):
        """Remove section's packages from their groups in content."""
        for pkg in section['pkgs']:
            pkg_content = content[pkg]
            for group in section['from']:
                try:
                    pkg_content.remove(group)
                except ValueError:
                    # Summary no longer matches the approved diff: abort.
                    logging.error(f"Can't remove {pkg} from {group}, not there. Mismatch.")
                    sys.exit(1)
            content[pkg] = pkg_content

    def apply_commands(self, filename, sections):
        """Apply parsed sections to the summary file in place."""
        content = self.read_summary_file(filename)
        for section in sections:
            if section['cmd'] == 'move':
                self.apply_move(content, section)
            elif section['cmd'] == 'add':
                self.apply_add(content, section)
            elif section['cmd'] == 'remove':
                self.apply_remove(content, section)
        self.write_summary_file(filename, content)

    def format_pkgs(self, pkgs):
        """Render a package list as an indented, wrapped changelog paragraph."""
        text = ', '.join(pkgs)
        return "  " + "\n  ".join(textwrap.wrap(text, width=68, break_long_words=False, break_on_hyphens=False)) + "\n\n"

    def format_move(self, section):
        gfrom = ','.join(section['from'])
        gto = ','.join(section['to'])
        text = f"  * Move from {gfrom} to {gto}:\n"
        return text + self.format_pkgs(section['pkgs'])

    def format_add(self, section):
        gto = ','.join(section['to'])
        text = f"  * Add to {gto}:\n"
        return text + self.format_pkgs(section['pkgs'])

    def format_remove(self, section):
        gfrom = ','.join(section['from'])
        text = f"  * Remove from {gfrom}:\n"
        return text + self.format_pkgs(section['pkgs'])

    def apply_changes(self, filename, sections, approver):
        """Prepend a changelog entry (credited to approver) describing sections."""
        text = "-------------------------------------------------------------------\n"
        now = datetime.datetime.utcnow()
        date = now.strftime("%a %b %d %H:%M:%S UTC %Y")
        # Resolve the approver's real name and email from OBS.
        url = makeurl(self.apiurl, ['person', approver])
        root = ET.parse(http_GET(url))
        realname = root.find('realname').text
        email = root.find('email').text
        text += f"{date} - {realname} <{email}>\n\n- Approved changes to summary-staging.txt\n"
        for section in sections:
            if section['cmd'] == 'move':
                text += self.format_move(section)
            elif section['cmd'] == 'add':
                text += self.format_add(section)
            elif section['cmd'] == 'remove':
                text += self.format_remove(section)
        # Write new entry + old content to a temp file, then rename over.
        with open(filename + '.new', 'w') as writer:
            writer.write(text)
            with open(filename, 'r') as reader:
                for line in reader:
                    writer.write(line)
        os.rename(filename + '.new', filename)

    def check_staging_accept(self, project, target):
        """If the diff comment on project is approved, commit the changes to target."""
        comments = self.comment.get_comments(project_name=project)
        comment, _ = self.comment.comment_find(comments, MARKER)
        approver = self.is_approved(comment, comments)
        if not approver:
            return
        sections = self.parse_sections(comment['comment'])
        with tempfile.TemporaryDirectory() as tmpdirname:
            checkout_package(self.apiurl, target, '000package-groups', expand_link=True, outdir=tmpdirname)
            self.apply_commands(tmpdirname + '/summary-staging.txt', sections)
            self.apply_changes(tmpdirname + '/package-groups.changes', sections, approver)
            package = Package(tmpdirname)
            package.commit(msg='Approved packagelist changes', skip_local_service_run=True)
class ReviewBot(object):
    """
    A generic obs request reviewer
    Inherit from this class and implement check functions for each action type:

    def check_action_<type>(self, req, action):
        return (None|True|False)
    """

    # NOTE(review): class definition continues beyond this excerpt.

    DEFAULT_REVIEW_MESSAGES = {'accepted': 'ok', 'declined': 'review failed'}
    REVIEW_CHOICES = ('normal', 'no', 'accept', 'accept-onpass', 'fallback-onfail', 'fallback-always')

    COMMENT_MARKER_REGEX = re.compile(r'<!-- (?P<bot>[^ ]+) state=(?P<state>[^ ]+)(?: result=(?P<result>[^ ]+))? -->')

    # map of default config entries
    config_defaults = {
        # list of tuples (prefix, apiurl, submitrequestprefix)
        # set this if the obs instance maps another instance into it's
        # namespace
        'project_namespace_api_map': [
            ('openSUSE.org:', 'https://api.opensuse.org', 'obsrq'),
        ],
    }

    def __init__(self, apiurl=None, dryrun=False, logger=None, user=None, group=None):
        self.apiurl = apiurl
        self.ibs = apiurl.startswith('https://api.suse.de')
        self.dryrun = dryrun
        self.logger = logger
        self.review_user = user
        self.review_group = group
        self.requests = []
        self.review_messages = ReviewBot.DEFAULT_REVIEW_MESSAGES
        self._review_mode = 'normal'
        self.fallback_user = None
        self.fallback_group = None
        self.comment_api = CommentAPI(self.apiurl)
        self.bot_name = self.__class__.__name__
        self.only_one_action = False
        self.request_default_return = None
        self.comment_handler = False
        self.override_allow = True
        self.override_group_key = '{}-override-group'.format(self.bot_name.lower())
        self.request_age_min_default = 0
        self.request_age_min_key = '{}-request-age-min'.format(self.bot_name.lower())
        self.lookup = PackageLookup(self.apiurl)

        self.load_config()

    def _load_config(self, handle=None):
        """Build a BotConfig namedtuple from YAML in handle, falling back to defaults."""
        d = self.__class__.config_defaults
        y = yaml.safe_load(handle) if handle is not None else {}
        return namedtuple('BotConfig', sorted(d.keys()))(*[y.get(p, d[p]) for p in sorted(d.keys())])

    def load_config(self, filename=None):
        """Load bot config from filename, or defaults when no file is given."""
        if filename:
            with open(filename, 'r') as fh:
                self.config = self._load_config(fh)
        else:
            self.config = self._load_config()

    def has_staging(self, project):
        """Return True when project answers the staging_projects API, else False."""
        try:
            url = osc.core.makeurl(self.apiurl, ('staging', project, 'staging_projects'))
            osc.core.http_GET(url)
            return True
        except HTTPError as e:
            # 404 simply means no staging; anything else is a real error.
            if e.code != 404:
                self.logger.error('ERROR in URL %s [%s]' % (url, e))
                raise
            pass
        return False

    def staging_api(self, project):
        """Return (and cache) a StagingAPI for project."""
        # Allow for the Staging subproject to be passed directly from config
        # which should be stripped before initializing StagingAPI. This allows
        # for NonFree subproject to utilize StagingAPI for main project.
        if project.endswith(':Staging'):
            project = project[:-8]

        if project not in self.staging_apis:
            Config.get(self.apiurl, project)
            self.staging_apis[project] = StagingAPI(self.apiurl, project)

        return self.staging_apis[project]

    @property
    def review_mode(self):
        return self._review_mode

    @review_mode.setter
    def review_mode(self, value):
        # Only values from REVIEW_CHOICES are accepted.
        if value not in self.REVIEW_CHOICES:
            raise Exception("invalid review option: %s" % value)
        self._review_mode = value

    def set_request_ids(self, ids):
        """Fetch the given request ids (with full history) into self.requests."""
        for rqid in ids:
            u = osc.core.makeurl(self.apiurl, ['request', rqid], {'withfullhistory': '1'})
            r = osc.core.http_GET(u)
            root = ET.parse(r).getroot()
            req = osc.core.Request()
            req.read(root)
            self.requests.append(req)

    # function called before requests are reviewed
    def prepare_review(self):
        pass

    def check_requests(self):
        """Review every queued request; return 1 if any check raised, else 0."""
        self.staging_apis = {}

        # give implementations a chance to do something before single requests
        self.prepare_review()
        return_value = 0

        for req in self.requests:
            self.logger.info("checking %s" % req.reqid)
            self.request = req

            with sentry_sdk.configure_scope() as scope:
                scope.set_extra('request.id', self.request.reqid)

            # XXX: this is a hack. Annotating the request with staging_project.
            # OBS itself should provide an API for that but that's currently not the case
            # https://github.com/openSUSE/openSUSE-release-tools/pull/2377
            if not hasattr(req, 'staging_project'):
                staging_project = None
                for r in req.reviews:
                    if r.state == 'new' and r.by_project and ":Staging:" in r.by_project:
                        staging_project = r.by_project
                        break
                setattr(req, 'staging_project', staging_project)

            try:
                good = self.check_one_request(req)
            except Exception as e:
                # A crashing check must not stop the loop; report and continue.
                good = None
                import traceback
                traceback.print_exc()
                return_value = 1
                sentry_sdk.capture_exception(e)

            # review_mode can force-ignore or force-accept the verdict.
            if self.review_mode == 'no':
                good = None
            elif self.review_mode == 'accept':
                good = True

            if good is None:
                self.logger.info("%s ignored" % req.reqid)
            elif good:
                self._set_review(req, 'accepted')
            elif self.review_mode != 'accept-onpass':
                self._set_review(req, 'declined')

        return return_value

    @memoize(session=True)
    def request_override_check_users(self, project):
        """Determine users allowed to override review in a comment command."""
        config = Config.get(self.apiurl, project)

        users = []
        group = config.get('staging-group')
        if group:
            users += group_members(self.apiurl, group)

        if self.override_group_key:
            override_group = config.get(self.override_group_key)
            if override_group:
                users += group_members(self.apiurl, override_group)

        return users

    def request_override_check(self, force=False):
        """Check for a comment command requesting review override."""
        if not force and not self.override_allow:
            return None

        for args, who in self.request_commands('override'):
            message = 'overridden by {}'.format(who)
            # Bare 'override' defaults to accept.
            override = args[1] if len(args) >= 2 else 'accept'
            if override == 'accept':
                self.review_messages['accepted'] = message
                return True

            if override == 'decline':
                self.review_messages['declined'] = message
                return False

    def request_commands(self, command, who_allowed=None, request=None, action=None, include_description=True):
        """Yield (args, who) for each matching comment command on the request."""
        if not request:
            request = self.request

        if not action:
            action = self.action

        if not who_allowed:
            who_allowed = self.request_override_check_users(action.tgt_project)

        comments = self.comment_api.get_comments(request_id=request.reqid)
        if include_description:
            # The request description itself may carry a command.
            request_comment = self.comment_api.request_as_comment_dict(request)
            comments[request_comment['id']] = request_comment

        yield from self.comment_api.command_find(comments, self.review_user, command, who_allowed)

    def _set_review(self, req, state):
        """Apply the accepted/declined verdict to the request's review."""
        doit = self.can_accept_review(req.reqid)
        if doit is None:
            self.logger.info("can't change state, %s does not have the reviewer" % (req.reqid))

        newstate = state

        by_user = self.fallback_user
        by_group = self.fallback_group

        msg = self.review_messages[state] if state in self.review_messages else state
        self.logger.info("%s %s: %s" % (req.reqid, state, msg))

        if state == 'declined':
            if self.review_mode == 'fallback-onfail':
                # Hand a failed review to a human instead of declining outright.
                self.logger.info("%s needs fallback reviewer" % req.reqid)
                self.add_review(req, by_group=by_group, by_user=by_user, msg="Automated review failed. Needs fallback reviewer.")
                newstate = 'accepted'
        elif self.review_mode == 'fallback-always':
            self.add_review(req, by_group=by_group, by_user=by_user, msg='Adding fallback reviewer')

        if doit == True:
            self.logger.debug("setting %s to %s" % (req.reqid, state))
            if not self.dryrun:
                try:
                    osc.core.change_review_state(apiurl=self.apiurl, reqid=req.reqid, newstate=newstate, by_group=self.review_group, by_user=self.review_user, message=msg)
                except HTTPError as e:
                    if e.code != 403:
                        raise e
                    self.logger.info('unable to change review state (likely superseded or revoked)')
        else:
            self.logger.debug("%s review not changed" % (req.reqid))

    # allow_duplicate=True should only be used if it makes sense to force a
    # re-review in a scenario where the bot adding the review will rerun.
    # Normally a declined review will automatically be reopened along with the
    # request and any other bot reviews already added will not be touched unless
    # the issuing bot is rerun which does not fit normal workflow.
    def add_review(self, req, by_group=None, by_user=None, by_project=None, by_package=None, msg=None, allow_duplicate=False):
        """Add a review to req for the given reviewer, skipping duplicates."""
        query = {'cmd': 'addreview'}
        if by_group:
            query['by_group'] = by_group
        elif by_user:
            query['by_user'] = by_user
        elif by_project:
            query['by_project'] = by_project
            if by_package:
                query['by_package'] = by_package
        else:
            raise osc.oscerr.WrongArgs("missing by_*")

        for r in req.reviews:
            if (r.by_group == by_group and
                    r.by_project == by_project and
                    r.by_package == by_package and
                    r.by_user == by_user and
                    # Only duplicate when allow_duplicate and state != new.
                    (not allow_duplicate or r.state == 'new')):
                del query['cmd']
                self.logger.debug('skipped adding duplicate review for {}'.format('/'.join(query.values())))
                return

        u = osc.core.makeurl(self.apiurl, ['request', req.reqid], query)
        if self.dryrun:
            self.logger.info('POST %s' % u)
            return

        if self.multiple_actions:
            # Prefix the message with the action key so multi-action output is separable.
            key = request_action_key(self.action)
            msg = yaml.dump({key: msg}, default_flow_style=False)

        try:
            r = osc.core.http_POST(u, data=msg)
        except HTTPError as e:
            if e.code != 403:
                raise e
            del query['cmd']
            self.logger.info('unable to add review {} with message: {}'.format(query, msg))
            return

        code = ET.parse(r).getroot().attrib['code']
        if code != 'ok':
            raise Exception('non-ok return code: {}'.format(code))

    def devel_project_review_add(self, request, project, package, message='adding devel project review'):
        """Add a review by the devel project/package; False when none is found."""
        devel_project, devel_package = devel_project_fallback(self.apiurl, project, package)
        if not devel_project:
            self.logger.warning('no devel project found for {}/{}'.format(project, package))
            return False

        self.add_review(request, by_project=devel_project, by_package=devel_package, msg=message)

        return True

    def devel_project_review_ensure(self, request, project, package, message='submitter not devel maintainer'):
        """Add a devel project review only when one is actually needed."""
        if not self.devel_project_review_needed(request, project, package):
            self.logger.debug('devel project review not needed')
            return True

        return self.devel_project_review_add(request, project, package, message)

    def devel_project_review_needed(self, request, project, package):
        """True unless the creator or an existing reviewer is a maintainer."""
        author = request.get_creator()
        maintainers = set(maintainers_get(self.apiurl, project, package))

        if author in maintainers:
            return False

        # Carried over from maintbot, but seems haphazard.
        for review in request.reviews:
            if review.by_user in maintainers:
                return False

        return True

    def check_one_request(self, req):
        """
        check all actions in one request.

        calls helper functions for each action type

        return None if nothing to do, True to accept, False to reject
        """
        if len(req.actions) > 1:
            if self.only_one_action:
                self.review_messages['declined'] = 'Only one action per request supported'
                return False

            # Will cause added reviews and overall review message to include
            # each actions message prefixed by an action key.
            self.multiple_actions = True
            review_messages_multi = {}
        else:
            self.multiple_actions = False

            # Copy original values to revert changes made to them.
            self.review_messages = self.DEFAULT_REVIEW_MESSAGES.copy()

        if self.comment_handler is not False:
            self.comment_handler_add()

        overall = True
        for a in req.actions:
            if self.multiple_actions:
                self.review_messages = self.DEFAULT_REVIEW_MESSAGES.copy()

            # Store in-case sub-classes need direct access to original values.
            self.action = a
            key = request_action_key(a)

            with sentry_sdk.configure_scope() as scope:
                scope.set_extra('action.key', key)

            # A comment override command beats the per-action check.
            override = self.request_override_check()
            if override is not None:
                ret = override
            else:
                func = getattr(self, self.action_method(a))
                ret = func(req, a)

            # In the case of multiple actions take the "lowest" result where the
            # order from lowest to highest is: False, None, True.
            if overall is not False:
                if ((overall is True and ret is not True) or (overall is None and ret is False)):
                    overall = ret

            if self.multiple_actions and ret is not None:
                message_key = self.review_message_key(ret)
                review_messages_multi[key] = self.review_messages[message_key]

        message_key = self.review_message_key(overall)
        if self.multiple_actions:
            message_combined = yaml.dump(review_messages_multi, default_flow_style=False)
            self.review_messages[message_key] = message_combined
        elif type(self.review_messages[message_key]) is dict:
            self.review_messages[message_key] = yaml.dump(self.review_messages[message_key], default_flow_style=False)

        return overall

    def action_method(self, action):
        """Resolve the most specific check_action_* method name for action."""
        method_prefix = 'check_action'
        method_type = action.type
        method_suffix = None

        if method_type == 'delete':
            # Delete actions are dispatched by target kind.
            method_suffix = 'project'
            if action.tgt_package is not None:
                method_suffix = 'package'
            elif action.tgt_repository is not None:
                method_suffix = 'repository'

        if method_suffix:
            method = '_'.join([method_prefix, method_type, method_suffix])
            if hasattr(self, method):
                return method

        method = '_'.join([method_prefix, method_type])
        if hasattr(self, method):
            return method

        # Fall back to the generic handler.
        method_type = '_default'
        return '_'.join([method_prefix, method_type])

    def review_message_key(self, result):
        return 'accepted' if result else 'declined'

    def check_action_maintenance_incident(self, req, a):
        if action_is_patchinfo(a):
            self.logger.debug('ignoring patchinfo action')
            return True

        # Duplicate src_package as tgt_package since prior to assignment to a
        # specific incident project there is no target package (odd API). After
        # assignment it is still assumed the target will match the source. Since
        # the ultimate goal is the tgt_releaseproject the incident is treated
        # similar to staging in that the intermediate result is not the final
        # and thus the true target project (ex. openSUSE:Maintenance) is not
        # used for check_source_submission().
        tgt_package = a.src_package
        if a.tgt_releaseproject is not None:
            suffix = '.' + a.tgt_releaseproject.replace(':', '_')
            if tgt_package.endswith(suffix):
                tgt_package = tgt_package[:-len(suffix)]

        # Note tgt_releaseproject (product) instead of tgt_project (maintenance).
        return self.check_source_submission(a.src_project, a.src_package, a.src_rev, a.tgt_releaseproject, tgt_package)

    def check_action_maintenance_release(self, req, a):
        pkgname = a.src_package
        if action_is_patchinfo(a):
            self.logger.debug('ignoring patchinfo action')
            return True

        linkpkg = self._get_linktarget_self(a.src_project, pkgname)
        if linkpkg is not None:
            pkgname = linkpkg
        # packages in maintenance have links to the target. Use that
        # to find the real package name
        (linkprj, linkpkg) = self._get_linktarget(a.src_project, pkgname)
        if linkpkg is None or linkprj is None or linkprj != a.tgt_project:
            self.logger.warning("%s/%s is not a link to %s" % (a.src_project, pkgname, a.tgt_project))
            return self.check_source_submission(a.src_project, a.src_package, a.src_rev, a.tgt_project, a.tgt_package)
        else:
            pkgname = linkpkg

        return self.check_source_submission(a.src_project, a.src_package, None, a.tgt_project, pkgname)

    def check_action_submit(self, req, a):
        return self.check_source_submission(a.src_project, a.src_package, a.src_rev, a.tgt_project, a.tgt_package)

    def check_action__default(self, req, a):
        # Disable any comment handler to avoid making a comment even if
        # comment_write() is called by another bot wrapping __default().
        self.comment_handler_remove()

        message = 'unhandled request type {}'.format(a.type)
        self.logger.info(message)
        self.review_messages['accepted'] += ': ' + message
        return self.request_default_return

    def check_source_submission(self, src_project, src_package, src_rev, target_project, target_package):
        """ default implemention does nothing """
        self.logger.info("%s/%s@%s -> %s/%s" % (src_project, src_package, src_rev, target_project, target_package))
        return None

    @staticmethod
    @memoize(session=True)
    def _get_sourceinfo(apiurl, project, package, rev=None):
        """Fetch the source 'info' view XML for project/package, or None on error."""
        query = {'view': 'info'}
        if rev is not None:
            query['rev'] = rev
        url = osc.core.makeurl(apiurl, ('source', project, package), query=query)
        try:
            return ET.parse(osc.core.http_GET(url)).getroot()
        except (HTTPError, URLError):
            return None

    def get_originproject(self, project, package, rev=None):
        """Return the originproject from sourceinfo, or None."""
        root = ReviewBot._get_sourceinfo(self.apiurl, project, package, rev)
        if root is None:
            return None

        originproject = root.find('originproject')
        if originproject is not None:
            return originproject.text

        return None

    def get_sourceinfo(self, project, package, rev=None):
        """Return a SourceInfo namedtuple of key attributes, or None."""
        root = ReviewBot._get_sourceinfo(self.apiurl, project, package, rev)
        if root is None:
            return None

        props = ('package', 'rev', 'vrev', 'srcmd5', 'lsrcmd5', 'verifymd5')
        return namedtuple('SourceInfo', props)(*[root.get(p) for p in props])

    # TODO: what if there is more than _link?
    def _get_linktarget_self(self, src_project, src_package):
        """ if it's a link to a package in the same project return the name of the package"""
        prj, pkg = self._get_linktarget(src_project, src_package)
        if prj is None or prj == src_project:
            return pkg

    def _get_linktarget(self, src_project, src_package):
        """Return (project, package) of the link target, or (None, None)."""
        query = {}
        url = osc.core.makeurl(self.apiurl, ('source', src_project, src_package), query=query)
        try:
            root = ET.parse(osc.core.http_GET(url)).getroot()
        except HTTPError:
            return (None, None)

        if root is not None:
            linkinfo = root.find("linkinfo")
            if linkinfo is not None:
                return (linkinfo.get('project'), linkinfo.get('package'))

        return (None, None)

    def _has_open_review_by(self, root, by_what, reviewer):
        """True if reviewer has a 'new' review, False if only closed, None if none."""
        states = set([review.get('state') for review in root.findall('review') if review.get(by_what) == reviewer])
        if not states:
            return None
        elif 'new' in states:
            return True
        return False

    def can_accept_review(self, request_id):
        """return True if there is a new review for the specified reviewer"""
        states = set()
        url = osc.core.makeurl(self.apiurl, ('request', str(request_id)))
        try:
            root = ET.parse(osc.core.http_GET(url)).getroot()
            if self.review_user and self._has_open_review_by(root, 'by_user', self.review_user):
                return True
            if self.review_group and self._has_open_review_by(root, 'by_group', self.review_group):
                return True
        except HTTPError as e:
            print('ERROR in URL %s [%s]' % (url, e))
        return False

    def set_request_ids_search_review(self):
        """Queue all requests with an open review for this bot's user/group."""
        review = None
        if self.review_user:
            # NOTE(review): '******' looks like a sanitizer-scrubbed '%s'
            # placeholder — as written the % formatting would fail; verify
            # against upstream before relying on this path.
            review = "@by_user='******' and @state='new'" % self.review_user
        if self.review_group:
            review = osc.core.xpath_join(review, "@by_group='%s' and @state='new'" % self.review_group)
        url = osc.core.makeurl(self.apiurl, ('search', 'request'), {'match': "state/@name='review' and review[%s]" % review, 'withfullhistory': 1})
        root = ET.parse(osc.core.http_GET(url)).getroot()

        self.requests = []

        for request in root.findall('request'):
            req = osc.core.Request()
            req.read(request)
            self.requests.append(req)

    # also
used by openqabot def ids_project(self, project, typename): url = osc.core.makeurl( self.apiurl, ('search', 'request'), { 'match': "(state/@name='review' or state/@name='new') and (action/target/@project='%s' and action/@type='%s')" % (project, typename), 'withfullhistory': 1 }) root = ET.parse(osc.core.http_GET(url)).getroot() ret = [] for request in root.findall('request'): req = osc.core.Request() req.read(request) ret.append(req) return ret def set_request_ids_project(self, project, typename): self.requests = self.ids_project(project, typename) def comment_handler_add(self, level=logging.INFO): """Add handler to start recording log messages for comment.""" self.comment_handler = CommentFromLogHandler(level) self.logger.addHandler(self.comment_handler) def comment_handler_remove(self): self.logger.removeHandler(self.comment_handler) def comment_handler_lines_deduplicate(self): self.comment_handler.lines = list( OrderedDict.fromkeys(self.comment_handler.lines)) def comment_write(self, state='done', result=None, project=None, package=None, request=None, message=None, identical=False, only_replace=False, info_extra=None, info_extra_identical=True, bot_name_suffix=None): """Write comment if not similar to previous comment and replace old one. The state, result, and info_extra (dict) are combined to create the info that is passed to CommentAPI methods for creating a marker and finding previous comments. self.bot_name, which defaults to class, will be used as the primary matching key. When info_extra_identical is set to False info_extra will not be included when finding previous comments to compare message against. A comment from the same bot will be replaced when a new comment is written. The only_replace flag will restrict to only writing a comment if a prior one is being replaced. This can be useful for writing a final comment that indicates a change from previous uncompleted state, but only makes sense to post if a prior comment was posted. 
The project, package, and request variables control where the comment is placed. If no value is given the default is the request being reviewed. If no message is provided the content will be extracted from self.comment_handler.line which is provided by CommentFromLogHandler. To use this call comment_handler_add() at the point which messages should start being collected. Alternatively the self.comment_handler setting may be set to True to automatically set one on each request. The previous comment body line count is compared to see if too similar to bother posting another comment which is useful for avoiding re-posting comments that contain irrelevant minor changes. To force an exact match use the identical flag to replace any non-identical comment body. """ if project: kwargs = {'project_name': project} if package: kwargs['package_name'] = package else: if request is None: request = self.request kwargs = {'request_id': request.reqid} debug_key = '/'.join(kwargs.values()) if message is None: if not len(self.comment_handler.lines): self.logger.debug( 'skipping empty comment for {}'.format(debug_key)) return message = '\n\n'.join(self.comment_handler.lines) bot_name = self.bot_name if bot_name_suffix: bot_name = '::'.join([bot_name, bot_name_suffix]) info = {'state': state, 'result': result} if info_extra and info_extra_identical: info.update(info_extra) comments = self.comment_api.get_comments(**kwargs) comment, _ = self.comment_api.comment_find(comments, bot_name, info) if info_extra and not info_extra_identical: # Add info_extra once comment has already been matched. info.update(info_extra) message = self.comment_api.add_marker(message, bot_name, info) message = self.comment_api.truncate(message.strip()) if (comment is not None and (( identical and # Remove marker from comments since handled during comment_find(). 
self.comment_api.remove_marker(comment['comment']) == self.comment_api.remove_marker(message)) or (not identical and comment['comment'].count('\n') == message.count('\n')))): # Assume same state/result and number of lines in message is duplicate. self.logger.debug( 'previous comment too similar on {}'.format(debug_key)) return if comment is None: self.logger.debug( 'broadening search to include any state on {}'.format( debug_key)) comment, _ = self.comment_api.comment_find(comments, bot_name) if comment is not None: self.logger.debug( 'removing previous comment on {}'.format(debug_key)) if not self.dryrun: self.comment_api.delete(comment['id']) elif only_replace: self.logger.debug( 'no previous comment to replace on {}'.format(debug_key)) return self.logger.debug('adding comment to {}: {}'.format( debug_key, message)) if not self.dryrun: self.comment_api.add_comment(comment=message, **kwargs) self.comment_handler_remove() def _check_matching_srcmd5(self, project, package, rev, history_limit=5): """check if factory sources contain the package and revision. 
check head and history""" self.logger.debug("checking %s in %s" % (package, project)) try: osc.core.show_package_meta(self.apiurl, project, package) except (HTTPError, URLError): self.logger.debug("new package") return None si = self.get_sourceinfo(project, package) if rev == si.verifymd5: self.logger.debug("srcmd5 matches") return True if history_limit: self.logger.debug("%s not the latest version, checking history", rev) u = osc.core.makeurl(self.apiurl, ['source', project, package, '_history'], {'limit': history_limit}) try: r = osc.core.http_GET(u) except HTTPError as e: self.logger.debug("package has no history!?") return None root = ET.parse(r).getroot() # we need this complicated construct as obs doesn't honor # the 'limit' parameter use above for obs interconnect: # https://github.com/openSUSE/open-build-service/issues/2545 for revision, i in zip(reversed(root.findall('revision')), count()): node = revision.find('srcmd5') if node is None: continue self.logger.debug("checking %s" % node.text) if node.text == rev: self.logger.debug("got it, rev %s" % revision.get('rev')) return True if i == history_limit: break self.logger.debug("srcmd5 not found in history either") return False def request_age_wait(self, age_min=None, request=None, target_project=None): if not request: request = self.request if not target_project: target_project = self.action.tgt_project if age_min is None or isinstance(age_min, str): key = self.request_age_min_key if age_min is None else age_min age_min = int( Config.get(self.apiurl, target_project).get(key, self.request_age_min_default)) age = request_age(request).total_seconds() if age < age_min: self.logger.info( 'skipping {} of age {:.2f}s since it is younger than {}s'. format(request.reqid, age, age_min)) return True return False
class StagingReport(object):
    """Builds and maintains a marker-tagged status comment on a staging project."""

    def __init__(self, api):
        self.api = api
        self.comment = CommentAPI(api.apiurl)

    def _package_url(self, package):
        """Return a markdown link to the live build log of a broken package."""
        link = '/package/live_build_log/%s/%s/%s/%s'
        link = link % (package.get('project'), package.get('package'),
                       package.get('repository'), package.get('arch'))
        text = '[%s](%s)' % (package.get('arch'), link)
        return text

    def old_enough(self, _date):
        """True when _date lies at least MARGIN_HOURS in the past (UTC, naive)."""
        time_delta = datetime.utcnow() - _date
        safe_margin = timedelta(hours=MARGIN_HOURS)
        return safe_margin <= time_delta

    def update_status_comment(self, project, report, force=False, only_replace=False):
        """Replace the bot's previous comment on project with report.

        Posts only when the text changed and the previous comment is old
        enough (anti-spam), unless force is set. With only_replace, nothing
        is posted when no previous comment exists.
        """
        report = self.comment.add_marker(report, MARKER)
        comments = self.comment.get_comments(project_name=project)
        comment, _ = self.comment.comment_find(comments, MARKER)
        if comment:
            write_comment = (report != comment['comment'] and self.old_enough(comment['when']))
        else:
            write_comment = not only_replace
        if write_comment or force:
            if osc.conf.config['debug']:
                print('Updating comment')
            if comment:
                self.comment.delete(comment['id'])
            self.comment.add_comment(project_name=project, comment=report)

    def _report_broken_packages(self, info):
        """Render failed builds as markdown bullets, grouped by package name."""
        # Group packages by name
        groups = defaultdict(list)
        for package in info.findall('broken_packages/package'):
            groups[package.get('package')].append(package)

        failing_lines = [
            '* Build failed %s (%s)' % (key, ', '.join(self._package_url(p) for p in value))
            for key, value in groups.items()
        ]

        report = '\n'.join(failing_lines[:MAX_LINES])
        if len(failing_lines) > MAX_LINES:
            # Fixed: start the overflow marker on its own line; the join above
            # does not end with a newline, so the bullet used to fuse onto the
            # previous one.
            report += '\n* and more (%s) ...' % (len(failing_lines) - MAX_LINES)

        return report

    def report_checks(self, info):
        """Summarize check results grouped by state.

        Returns (markdown, failure) where failure is True when any
        non-success state was seen.
        """
        links_state = {}
        for check in info.findall('checks/check'):
            state = check.find('state').text
            links_state.setdefault(state, [])
            links_state[state].append('[{}]({})'.format(check.get('name'), check.find('url').text))

        lines = []
        failure = False
        for state, links in links_state.items():
            if len(links) > MAX_LINES:
                extra = len(links) - MAX_LINES
                links = links[:MAX_LINES]
                links.append('and {} more...'.format(extra))

            lines.append('- {}'.format(state))
            if state != 'success':
                lines.extend([' - {}'.format(link) for link in links])
                failure = True
            else:
                lines[-1] += ': {}'.format(', '.join(links))

        return '\n'.join(lines).strip(), failure

    def report(self, project, force=False):
        """Build and post the full status comment for a staging project."""
        info = self.api.project_status(project)

        # Do not attempt to process projects without staging info, or projects
        # in a pending state that will change before settling. This avoids
        # intermediate notifications that may end up being spammy and for
        # long-lived stagings where checks may be re-triggered multiple times
        # and thus enter pending state (not seen on first run) which is not
        # useful to report.
        if info is None or not self.api.project_status_final(info):
            return

        report_broken_packages = self._report_broken_packages(info)
        report_checks, check_failure = self.report_checks(info)

        if report_broken_packages or check_failure:
            if report_broken_packages:
                report_broken_packages = 'Broken:\n\n' + report_broken_packages
            if report_checks:
                report_checks = 'Checks:\n\n' + report_checks
            report = '\n\n'.join((report_broken_packages, report_checks))
            report = report.strip()
            only_replace = False
        else:
            report = 'Congratulations! All fine now.'
            only_replace = True

        report = self.cc_list(project, info) + report
        self.update_status_comment(project, report, force=force, only_replace=only_replace)

        if osc.conf.config['debug']:
            print(project)
            print('-' * len(project))
            print(report)

    def cc_list(self, project, info):
        """Return a 'Submitters: @a @b\\n\\n' prefix for adi projects, else ''."""
        if not self.api.is_adi_project(project):
            return ""

        ccs = set()
        for req in info.findall('staged_requests/request'):
            ccs.add("@" + req.get('creator'))

        # local renamed from 'str' to avoid shadowing the builtin
        cc_line = "Submitters: " + " ".join(sorted(ccs)) + "\n\n"
        return cc_line
class ReviewBot(object):
    """
    A generic obs request reviewer
    Inherit from this class and implement check functions for each action type:

    def check_action_<type>(self, req, action):
        return (None|True|False)
    """

    # Default review messages keyed by outcome; copied per request so
    # handlers may append details without mutating the class default.
    DEFAULT_REVIEW_MESSAGES = { 'accepted' : 'ok', 'declined': 'review failed' }
    # Valid values for the review_mode property.
    REVIEW_CHOICES = ('normal', 'no', 'accept', 'accept-onpass', 'fallback-onfail', 'fallback-always')

    # Matches the hidden HTML marker CommentAPI embeds in bot comments.
    COMMENT_MARKER_REGEX = re.compile(r'<!-- (?P<bot>[^ ]+) state=(?P<state>[^ ]+)(?: result=(?P<result>[^ ]+))? -->')

    # map of default config entries
    config_defaults = {
        # list of tuples (prefix, apiurl, submitrequestprefix)
        # set this if the obs instance maps another instance into it's
        # namespace
        'project_namespace_api_map' : [
            ('openSUSE.org:', 'https://api.opensuse.org', 'obsrq'),
        ],
    }

    def __init__(self, apiurl = None, dryrun = False, logger = None, user = None, group = None):
        self.apiurl = apiurl
        # NOTE(review): presumably distinguishes the internal SUSE OBS
        # instance; raises AttributeError when apiurl is None -- confirm.
        self.ibs = apiurl.startswith('https://api.suse.de')
        self.dryrun = dryrun
        self.logger = logger
        self.review_user = user
        self.review_group = group
        self.requests = []
        self.review_messages = ReviewBot.DEFAULT_REVIEW_MESSAGES
        self._review_mode = 'normal'
        self.fallback_user = None
        self.fallback_group = None
        self.comment_api = CommentAPI(self.apiurl)
        self.bot_name = self.__class__.__name__
        self.only_one_action = False
        self.request_default_return = None
        self.comment_handler = False
        self.override_allow = True
        self.override_group_key = '{}-override-group'.format(self.bot_name.lower())

        self.load_config()

    def _load_config(self, handle = None):
        """Parse YAML from handle (or use defaults) into a BotConfig namedtuple."""
        d = self.__class__.config_defaults
        y = yaml.safe_load(handle) if handle is not None else {}
        return namedtuple('BotConfig', sorted(d.keys()))(*[ y.get(p, d[p]) for p in sorted(d.keys()) ])

    def load_config(self, filename = None):
        """Load bot configuration from filename, or fall back to defaults."""
        if filename:
            with open(filename, 'r') as fh:
                self.config = self._load_config(fh)
        else:
            self.config = self._load_config()

    def staging_api(self, project):
        """Return a cached StagingAPI for project, creating it on first use."""
        if project not in self.staging_apis:
            config = Config(project)
            self.staging_apis[project] = StagingAPI(self.apiurl, project)
            config.apply_remote(self.staging_apis[project])
            self.staging_config[project] = conf.config[project].copy()
        return self.staging_apis[project]

    @property
    def review_mode(self):
        return self._review_mode

    @review_mode.setter
    def review_mode(self, value):
        if value not in self.REVIEW_CHOICES:
            raise Exception("invalid review option: %s"%value)
        self._review_mode = value

    def set_request_ids(self, ids):
        """Fetch the given request ids (with full history) into self.requests."""
        for rqid in ids:
            u = osc.core.makeurl(self.apiurl, [ 'request', rqid ], { 'withfullhistory' : '1' })
            r = osc.core.http_GET(u)
            root = ET.parse(r).getroot()
            req = osc.core.Request()
            req.read(root)
            self.requests.append(req)

    # function called before requests are reviewed
    def prepare_review(self):
        pass

    def check_requests(self):
        """Review every request in self.requests and set review states.

        Per request: an override comment wins over the check result; the
        review_mode then decides whether/how the result is applied.
        """
        self.staging_apis = {}
        self.staging_config = {}

        # give implementations a chance to do something before single requests
        self.prepare_review()
        for req in self.requests:
            self.logger.info("checking %s"%req.reqid)
            self.request = req

            override = self.request_override_check(req)
            if override is not None:
                good = override
            else:
                good = self.check_one_request(req)

            if self.review_mode == 'no':
                good = None
            elif self.review_mode == 'accept':
                good = True

            if good is None:
                self.logger.info("%s ignored"%req.reqid)
            elif good:
                self._set_review(req, 'accepted')
            elif self.review_mode != 'accept-onpass':
                self._set_review(req, 'declined')

    @memoize(session=True)
    def request_override_check_users(self, project):
        """Determine users allowed to override review in a comment command."""
        self.staging_api(project)
        config = self.staging_config[project]

        users = []
        group = config.get('staging-group')
        if group:
            users += group_members(self.apiurl, group)

        if self.override_group_key:
            override_group = config.get(self.override_group_key)
            if override_group:
                users += group_members(self.apiurl, override_group)

        return users

    def request_override_check(self, request):
        """Check for a comment command requesting review override."""
        if not self.override_allow:
            return None

        comments = self.comment_api.get_comments(request_id=request.reqid)
        users = self.request_override_check_users(request.actions[0].tgt_project)
        for args, who in self.comment_api.command_find(
                comments, self.review_user, 'override', users):
            message = 'overridden by {}'.format(who)
            # NOTE(review): assumes command_find yields args with a second
            # element ('override <accept|decline>'); an argument-less command
            # would raise IndexError here -- confirm command_find's contract.
            override = args[1] or None
            if override == 'accept':
                self.review_messages['accepted'] = message
                return True

            if override == 'decline':
                self.review_messages['declined'] = message
                return False

    def _set_review(self, req, state):
        """Apply the review result to the request, honoring the review_mode
        fallback options and dryrun."""
        doit = self.can_accept_review(req.reqid)
        if doit is None:
            self.logger.info("can't change state, %s does not have the reviewer"%(req.reqid))

        newstate = state

        by_user = self.fallback_user
        by_group = self.fallback_group

        msg = self.review_messages[state] if state in self.review_messages else state
        self.logger.info("%s %s: %s"%(req.reqid, state, msg))

        if state == 'declined':
            if self.review_mode == 'fallback-onfail':
                self.logger.info("%s needs fallback reviewer"%req.reqid)
                self.add_review(req, by_group=by_group, by_user=by_user,
                                msg="Automated review failed. Needs fallback reviewer.")
                newstate = 'accepted'
        elif self.review_mode == 'fallback-always':
            self.add_review(req, by_group=by_group, by_user=by_user, msg='Adding fallback reviewer')

        if doit == True:
            self.logger.debug("setting %s to %s"%(req.reqid, state))
            if not self.dryrun:
                osc.core.change_review_state(apiurl = self.apiurl,
                                             reqid = req.reqid, newstate = newstate,
                                             by_group=self.review_group,
                                             by_user=self.review_user, message=msg)
        else:
            self.logger.debug("%s review not changed"%(req.reqid))

    # allow_duplicate=True should only be used if it makes sense to force a
    # re-review in a scenario where the bot adding the review will rerun.
    # Normally a declined review will automatically be reopened along with the
    # request and any other bot reviews already added will not be touched unless
    # the issuing bot is rerun which does not fit normal workflow.
def add_review(self, req, by_group=None, by_user=None, by_project=None, by_package=None, msg=None, allow_duplicate=False): query = { 'cmd': 'addreview' } if by_group: query['by_group'] = by_group elif by_user: query['by_user'] = by_user elif by_project: query['by_project'] = by_project if by_package: query['by_package'] = by_package else: raise osc.oscerr.WrongArgs("missing by_*") for r in req.reviews: if (r.by_group == by_group and r.by_project == by_project and r.by_package == by_package and r.by_user == by_user and # Only duplicate when allow_duplicate and state != new. (not allow_duplicate or r.state == 'new')): del query['cmd'] self.logger.debug('skipped adding duplicate review for {}'.format( '/'.join(query.values()))) return u = osc.core.makeurl(self.apiurl, ['request', req.reqid], query) if self.dryrun: self.logger.info('POST %s' % u) return r = osc.core.http_POST(u, data=msg) code = ET.parse(r).getroot().attrib['code'] if code != 'ok': raise Exception('non-ok return code: {}'.format(code)) def check_one_request(self, req): """ check all actions in one request. calls helper functions for each action type return None if nothing to do, True to accept, False to reject """ # Copy original values to revert changes made to them. 
self.review_messages = self.DEFAULT_REVIEW_MESSAGES.copy() if self.only_one_action and len(req.actions) != 1: self.review_messages['declined'] = 'Only one action per request' return False if self.comment_handler is not False: self.comment_handler_add() overall = None for a in req.actions: fn = 'check_action_%s'%a.type if not hasattr(self, fn): fn = 'check_action__default' func = getattr(self, fn) ret = func(req, a) if ret == False or overall is None and ret is not None: overall = ret return overall @staticmethod def _is_patchinfo(pkgname): return pkgname == 'patchinfo' or pkgname.startswith('patchinfo.') def check_action_maintenance_incident(self, req, a): dst_package = a.src_package # Ignoring patchinfo package for checking if self._is_patchinfo(a.src_package): self.logger.info("package is patchinfo, ignoring") return None # dirty obs crap if a.tgt_releaseproject is not None: ugly_suffix = '.'+a.tgt_releaseproject.replace(':', '_') if dst_package.endswith(ugly_suffix): dst_package = dst_package[:-len(ugly_suffix)] return self.check_source_submission(a.src_project, a.src_package, a.src_rev, a.tgt_releaseproject, dst_package) def check_action_maintenance_release(self, req, a): pkgname = a.src_package if self._is_patchinfo(pkgname): return None linkpkg = self._get_linktarget_self(a.src_project, pkgname) if linkpkg is not None: pkgname = linkpkg # packages in maintenance have links to the target. 
Use that # to find the real package name (linkprj, linkpkg) = self._get_linktarget(a.src_project, pkgname) if linkpkg is None or linkprj is None or linkprj != a.tgt_project: self.logger.error("%s/%s is not a link to %s"%(a.src_project, pkgname, a.tgt_project)) return False else: pkgname = linkpkg return self.check_source_submission(a.src_project, a.src_package, None, a.tgt_project, pkgname) def check_action_submit(self, req, a): return self.check_source_submission(a.src_project, a.src_package, a.src_rev, a.tgt_project, a.tgt_package) def check_action__default(self, req, a): # Disable any comment handler to avoid making a comment even if # comment_write() is called by another bot wrapping __default(). self.comment_handler_remove() message = 'unhandled request type {}'.format(a.type) self.logger.error(message) self.review_messages['accepted'] += ': ' + message return self.request_default_return def check_source_submission(self, src_project, src_package, src_rev, target_project, target_package): """ default implemention does nothing """ self.logger.info("%s/%s@%s -> %s/%s"%(src_project, src_package, src_rev, target_project, target_package)) return None @staticmethod @memoize(session=True) def _get_sourceinfo(apiurl, project, package, rev=None): query = { 'view': 'info' } if rev is not None: query['rev'] = rev url = osc.core.makeurl(apiurl, ('source', project, package), query=query) try: return ET.parse(osc.core.http_GET(url)).getroot() except (urllib2.HTTPError, urllib2.URLError): return None def get_originproject(self, project, package, rev=None): root = ReviewBot._get_sourceinfo(self.apiurl, project, package, rev) if root is None: return None originproject = root.find('originproject') if originproject is not None: return originproject.text return None def get_sourceinfo(self, project, package, rev=None): root = ReviewBot._get_sourceinfo(self.apiurl, project, package, rev) if root is None: return None props = ('package', 'rev', 'vrev', 'srcmd5', 'lsrcmd5', 
'verifymd5') return namedtuple('SourceInfo', props)(*[ root.get(p) for p in props ]) # TODO: what if there is more than _link? def _get_linktarget_self(self, src_project, src_package): """ if it's a link to a package in the same project return the name of the package""" prj, pkg = self._get_linktarget(src_project, src_package) if prj is None or prj == src_project: return pkg def _get_linktarget(self, src_project, src_package): query = {} url = osc.core.makeurl(self.apiurl, ('source', src_project, src_package), query=query) try: root = ET.parse(osc.core.http_GET(url)).getroot() except urllib2.HTTPError: return (None, None) if root is not None: linkinfo = root.find("linkinfo") if linkinfo is not None: return (linkinfo.get('project'), linkinfo.get('package')) return (None, None) def _has_open_review_by(self, root, by_what, reviewer): states = set([review.get('state') for review in root.findall('review') if review.get(by_what) == reviewer]) if not states: return None elif 'new' in states: return True return False def can_accept_review(self, request_id): """return True if there is a new review for the specified reviewer""" states = set() url = osc.core.makeurl(self.apiurl, ('request', str(request_id))) try: root = ET.parse(osc.core.http_GET(url)).getroot() if self.review_user and self._has_open_review_by(root, 'by_user', self.review_user): return True if self.review_group and self._has_open_review_by(root, 'by_group', self.review_group): return True except urllib2.HTTPError as e: print('ERROR in URL %s [%s]' % (url, e)) return False def set_request_ids_search_review(self): review = None if self.review_user: review = "@by_user='******' and @state='new'" % self.review_user if self.review_group: review = osc.core.xpath_join(review, "@by_group='%s' and @state='new'" % self.review_group) url = osc.core.makeurl(self.apiurl, ('search', 'request'), { 'match': "state/@name='review' and review[%s]" % review, 'withfullhistory': 1 } ) root = 
ET.parse(osc.core.http_GET(url)).getroot() self.requests = [] for request in root.findall('request'): req = osc.core.Request() req.read(request) self.requests.append(req) # also used by openqabot def ids_project(self, project, typename): url = osc.core.makeurl(self.apiurl, ('search', 'request'), { 'match': "(state/@name='review' or state/@name='new') and (action/target/@project='%s' and action/@type='%s')" % (project, typename), 'withfullhistory': 1 }) root = ET.parse(osc.core.http_GET(url)).getroot() ret = [] for request in root.findall('request'): req = osc.core.Request() req.read(request) ret.append(req) return ret def set_request_ids_project(self, project, typename): self.requests = self.ids_project(project, typename) def comment_handler_add(self, level=logging.INFO): """Add handler to start recording log messages for comment.""" self.comment_handler = CommentFromLogHandler(level) self.logger.addHandler(self.comment_handler) def comment_handler_remove(self): self.logger.removeHandler(self.comment_handler) def comment_handler_lines_deduplicate(self): self.comment_handler.lines = list(OrderedDict.fromkeys(self.comment_handler.lines)) def comment_write(self, state='done', result=None, project=None, package=None, request=None, message=None, identical=False, only_replace=False, info_extra=None, info_extra_identical=True, bot_name_suffix=None): """Write comment if not similar to previous comment and replace old one. The state, result, and info_extra (dict) are combined to create the info that is passed to CommentAPI methods for creating a marker and finding previous comments. self.bot_name, which defaults to class, will be used as the primary matching key. When info_extra_identical is set to False info_extra will not be included when finding previous comments to compare message against. A comment from the same bot will be replaced when a new comment is written. The only_replace flag will restrict to only writing a comment if a prior one is being replaced. 
This can be useful for writing a final comment that indicates a change from previous uncompleted state, but only makes sense to post if a prior comment was posted. The project, package, and request variables control where the comment is placed. If no value is given the default is the request being reviewed. If no message is provided the content will be extracted from self.comment_handler.line which is provided by CommentFromLogHandler. To use this call comment_handler_add() at the point which messages should start being collected. Alternatively the self.comment_handler setting may be set to True to automatically set one on each request. The previous comment body line count is compared to see if too similar to bother posting another comment which is useful for avoiding re-posting comments that contain irrelevant minor changes. To force an exact match use the identical flag to replace any non-identical comment body. """ if project: kwargs = {'project_name': project} if package: kwargs['package_name'] = package else: if request is None: request = self.request kwargs = {'request_id': request.reqid} debug_key = '/'.join(kwargs.values()) if message is None: if not len(self.comment_handler.lines): self.logger.debug('skipping empty comment for {}'.format(debug_key)) return message = '\n\n'.join(self.comment_handler.lines) bot_name = self.bot_name if bot_name_suffix: bot_name = '::'.join([bot_name, bot_name_suffix]) info = {'state': state, 'result': result} if info_extra and info_extra_identical: info.update(info_extra) comments = self.comment_api.get_comments(**kwargs) comment, _ = self.comment_api.comment_find(comments, bot_name, info) if info_extra and not info_extra_identical: # Add info_extra once comment has already been matched. 
info.update(info_extra) message = self.comment_api.add_marker(message, bot_name, info) message = self.comment_api.truncate(message.strip()) if (comment is not None and ((identical and # Remove marker from comments since handled during comment_find(). self.comment_api.remove_marker(comment['comment']) == self.comment_api.remove_marker(message)) or (not identical and comment['comment'].count('\n') == message.count('\n'))) ): # Assume same state/result and number of lines in message is duplicate. self.logger.debug('previous comment too similar on {}'.format(debug_key)) return if comment is None: self.logger.debug('broadening search to include any state on {}'.format(debug_key)) comment, _ = self.comment_api.comment_find(comments, bot_name) if comment is not None: self.logger.debug('removing previous comment on {}'.format(debug_key)) if not self.dryrun: self.comment_api.delete(comment['id']) elif only_replace: self.logger.debug('no previous comment to replace on {}'.format(debug_key)) return self.logger.debug('adding comment to {}: {}'.format(debug_key, message)) if not self.dryrun: self.comment_api.add_comment(comment=message, **kwargs) self.comment_handler_remove()
class StagingReport(object):
    """Maintains a marker-tagged staging status comment (legacy dict-based API)."""

    def __init__(self, api):
        self.api = api
        self.comment = CommentAPI(api.apiurl)

    def _package_url(self, package):
        """Return a markdown link to the live build log of a broken package."""
        link = '/package/live_build_log/%s/%s/%s/%s'
        link = link % (package['project'], package['package'], package['repository'], package['arch'])
        text = '[%s](%s)' % (package['arch'], link)
        return text

    def old_enough(self, _date):
        """True when _date lies at least MARGIN_HOURS in the past (UTC, naive)."""
        time_delta = datetime.utcnow() - _date
        safe_margin = timedelta(hours=MARGIN_HOURS)
        return safe_margin <= time_delta

    def update_status_comment(self, project, report, force=False, only_replace=False):
        """Replace the bot's previous comment on project with report.

        Posts only when the text changed and the previous comment is old
        enough, unless force is set. With only_replace, nothing is posted
        when no previous comment exists.
        """
        report = self.comment.add_marker(report, MARKER)
        comments = self.comment.get_comments(project_name=project)
        comment, _ = self.comment.comment_find(comments, MARKER)
        if comment:
            write_comment = (report != comment['comment'] and self.old_enough(comment['when']))
        else:
            write_comment = not only_replace
        if write_comment or force:
            if osc.conf.config['debug']:
                print('Updating comment')
            if comment:
                self.comment.delete(comment['id'])
            self.comment.add_comment(project_name=project, comment=report)

    def _report_broken_packages(self, info):
        """Render failed builds as markdown bullets, grouped by package name."""
        broken_package_status = info['broken_packages']

        # Group packages by name
        groups = defaultdict(list)
        for package in broken_package_status:
            groups[package['package']].append(package)

        failing_lines = [
            '* Build failed %s (%s)' % (key, ', '.join(self._package_url(p) for p in value))
            # Fixed: .items() instead of the Python-2-only .iteritems().
            for key, value in groups.items()
        ]

        report = '\n'.join(failing_lines[:MAX_LINES])
        if len(failing_lines) > MAX_LINES:
            # Fixed: start the overflow marker on its own line; the join above
            # does not end with a newline.
            report += '\n* and more (%s) ...' % (len(failing_lines) - MAX_LINES)

        return report

    def report_checks(self, info):
        """Summarize check results grouped by state.

        Returns (markdown, failure) where failure is True when any
        non-success state was seen.
        """
        # (dropped unused locals failing_lines/green_lines)
        links_state = {}
        for check in info['checks']:
            links_state.setdefault(check['state'], [])
            links_state[check['state']].append('[{}]({})'.format(check['name'], check['url']))

        lines = []
        failure = False
        for state, links in links_state.items():
            if len(links) > MAX_LINES:
                extra = len(links) - MAX_LINES
                links = links[:MAX_LINES]
                links.append('and {} more...'.format(extra))

            lines.append('- {}'.format(state))
            if state != 'success':
                lines.extend([' - {}'.format(link) for link in links])
                failure = True
            else:
                lines[-1] += ': {}'.format(', '.join(links))

        return '\n'.join(lines).strip(), failure

    def report(self, project, aggregate=True, force=False):
        """Build and post the full status comment for a staging project."""
        info = self.api.project_status(project, aggregate)

        # Do not attempt to process projects without staging info, or projects
        # in a pending state that will change before settling. This avoids
        # intermediate notifications that may end up being spammy and for
        # long-lived stagings where checks may be re-triggered multiple times
        # and thus enter pending state (not seen on first run) which is not
        # useful to report.
        if not info or not self.api.project_status_final(info):
            return

        report_broken_packages = self._report_broken_packages(info)
        report_checks, check_failure = self.report_checks(info)

        if report_broken_packages or check_failure:
            if report_broken_packages:
                report_broken_packages = 'Broken:\n\n' + report_broken_packages
            if report_checks:
                report_checks = 'Checks:\n\n' + report_checks
            report = '\n\n'.join((report_broken_packages, report_checks))
            report = report.strip()
            only_replace = False
        else:
            report = 'Congratulations! All fine now.'
            only_replace = True

        self.update_status_comment(project, report, force=force, only_replace=only_replace)

        if osc.conf.config['debug']:
            print(project)
            print('-' * len(project))
            print(report)
class TestCommentOBS(OBSLocalTestCase):
    """Integration tests for CommentAPI against a local OBS instance."""

    def setUp(self):
        super(TestCommentOBS, self).setUp()
        self.api = CommentAPI(self.apiurl)
        # Ensure different test runs operate in unique namespace.
        self.bot = '::'.join([type(self).__name__, str(random.getrandbits(8))])

    def test_basic(self):
        """Comments can be added, found via their marker, and deleted."""
        self.osc_user('staging-bot')
        self.assertFalse(self.comments_filtered(self.bot)[0])

        self.assertTrue(self.api.add_comment(
            project_name=PROJECT, comment=self.api.add_marker(COMMENT, self.bot)))
        comment, _ = self.comments_filtered(self.bot)
        self.assertTrue(comment)

        self.assertTrue(self.api.delete(comment['id']))
        self.assertFalse(self.comments_filtered(self.bot)[0])

    def test_delete_nested(self):
        """delete_from() removes a comment tree including nested replies."""
        self.osc_user('staging-bot')
        comment_marked = self.api.add_marker(COMMENT, self.bot)

        # Allow for existing comments by basing assertion on delta from initial count.
        comment_count = len(self.api.get_comments(project_name=PROJECT))
        self.assertFalse(self.comments_filtered(self.bot)[0])

        self.assertTrue(self.api.add_comment(project_name=PROJECT, comment=comment_marked))
        comment, _ = self.comments_filtered(self.bot)
        self.assertTrue(comment)

        # Loop index is irrelevant; only the reply count matters.
        for _ in range(3):
            self.assertTrue(self.api.add_comment(
                project_name=PROJECT, comment=comment_marked, parent_id=comment['id']))

        comments = self.api.get_comments(project_name=PROJECT)
        parented_count = 0
        for comment in comments.values():
            if comment['parent']:
                parented_count += 1

        self.assertEqual(parented_count, 3)
        # One root comment plus three nested replies were added.
        self.assertEqual(len(comments), comment_count + 4)

        self.api.delete_from(project_name=PROJECT)
        self.assertFalse(len(self.api.get_comments(project_name=PROJECT)))

    def test_delete_batch(self):
        """delete_from_where_user() removes only the given user's comments."""
        users = ['factory-auto', 'repo-checker', 'staging-bot']
        for user in users:
            self.osc_user(user)
            bot = '::'.join([self.bot, user])
            comment = self.api.add_marker(COMMENT, bot)

            self.assertFalse(self.comments_filtered(bot)[0])
            self.assertTrue(self.api.add_comment(project_name=PROJECT, comment=comment))
            self.assertTrue(self.comments_filtered(bot)[0])

        # Allow for existing comments by basing assertion on delta from initial count.
        comment_count = len(self.api.get_comments(project_name=PROJECT))
        self.assertGreaterEqual(comment_count, len(users))

        self.api.delete_from_where_user(users[0], project_name=PROJECT)
        self.assertEqual(len(self.api.get_comments(project_name=PROJECT)), comment_count - 1)

        self.api.delete_from(project_name=PROJECT)
        self.assertFalse(len(self.api.get_comments(project_name=PROJECT)))

    def comments_filtered(self, bot):
        """Return (comment, info) for the first comment carrying bot's marker."""
        comments = self.api.get_comments(project_name=PROJECT)
        return self.api.comment_find(comments, bot)
class TestComment(unittest.TestCase):
    """Unit tests for CommentAPI marker/truncate helpers (no server needed)."""

    def setUp(self):
        self.api = CommentAPI('bogus')
        self.bot = type(self).__name__
        self.comments = {
            1: {'comment': '<!-- {} -->\n\nshort comment'.format(self.bot)},
            2: {'comment': '<!-- {} foo=bar distro=openSUSE -->\n\nshort comment'.format(self.bot)}
        }

    def test_truncate(self):
        """truncate() honors every possible target length exactly."""
        comment = "string of text"
        # Python 3: xrange was removed; range is lazy already.
        for i in range(len(comment) + 1):
            truncated = self.api.truncate(comment, length=i)
            print(truncated)
            self.assertEqual(len(truncated), i)

    def test_truncate_pre(self):
        """truncate() keeps <pre> tags balanced and never overshoots length."""
        comment = """
Some text.

<pre>
bar
mar
car
</pre>

## section 2

<pre>
more
lines
than
you
can
handle
</pre>
""".strip()

        # Python 3: xrange was removed; range is lazy already.
        for i in range(len(comment) + len('...\n</pre>')):
            truncated = self.api.truncate(comment, length=i)
            print('=' * 80)
            print(truncated)
            self.assertTrue(len(truncated) <= i, '{} <= {}'.format(len(truncated), i))
            # Opening and closing pre tags must always be balanced.
            self.assertEqual(truncated.count('<pre>'), truncated.count('</pre>'))
            # No partially-truncated tags may remain.
            self.assertFalse(len(re.findall(r'</?\w+[^\w>]', truncated)))
            tag_count = truncated.count('<pre>') + truncated.count('</pre>')
            self.assertEqual(tag_count, truncated.count('<'))
            self.assertEqual(tag_count, truncated.count('>'))

    def test_add_marker(self):
        """add_marker() embeds the bot name and optional info in a marker."""
        comment_marked = self.api.add_marker(COMMENT, self.bot)
        self.assertEqual(comment_marked, self.comments[1]['comment'])

        comment_marked = self.api.add_marker(COMMENT, self.bot, COMMENT_INFO)
        self.assertEqual(comment_marked, self.comments[2]['comment'])

    def test_remove_marker(self):
        """remove_marker() strips the marker and is a no-op without one."""
        comment = self.api.remove_marker(COMMENT)
        self.assertEqual(comment, COMMENT)

        comment = self.api.remove_marker(self.comments[1]['comment'])
        self.assertEqual(comment, COMMENT)

        comment = self.api.remove_marker(self.comments[2]['comment'])
        self.assertEqual(comment, COMMENT)

    def test_comment_find(self):
        """comment_find() matches by bot name and (partial) info dict."""
        comment, info = self.api.comment_find(self.comments, self.bot)
        self.assertEqual(comment, self.comments[1])

        comment, info = self.api.comment_find(self.comments, self.bot, COMMENT_INFO)
        self.assertEqual(comment, self.comments[2])
        self.assertEqual(info, COMMENT_INFO)

        # A subset of the info keys is sufficient to match.
        info_partial = dict(COMMENT_INFO)
        del info_partial['foo']
        comment, info = self.api.comment_find(self.comments, self.bot, info_partial)
        self.assertEqual(comment, self.comments[2])
        self.assertEqual(info, COMMENT_INFO)
class ReviewBot(object):
    """
    A generic obs request reviewer
    Inherit from this class and implement check functions for each action type:

    def check_action_<type>(self, req, action):
        return (None|True|False)
    """

    DEFAULT_REVIEW_MESSAGES = {'accepted': 'ok', 'declined': 'review failed'}
    REVIEW_CHOICES = ('normal', 'no', 'accept', 'accept-onpass', 'fallback-onfail', 'fallback-always')

    COMMENT_MARKER_REGEX = re.compile(r'<!-- (?P<bot>[^ ]+) state=(?P<state>[^ ]+)(?: result=(?P<result>[^ ]+))? -->')

    # map of default config entries
    config_defaults = {
        # list of tuples (prefix, apiurl, submitrequestprefix)
        # set this if the obs instance maps another instance into it's
        # namespace
        'project_namespace_api_map': [
            ('openSUSE.org:', 'https://api.opensuse.org', 'obsrq'),
        ],
    }

    def __init__(self, apiurl=None, dryrun=False, logger=None, user=None, group=None):
        self.apiurl = apiurl
        self.ibs = apiurl.startswith('https://api.suse.de')
        self.dryrun = dryrun
        self.logger = logger
        self.review_user = user
        self.review_group = group
        self.requests = []
        # Copy the class-level defaults: request_override_check() and
        # check_action__default() mutate review_messages, and aliasing the
        # class attribute would contaminate every other instance.
        self.review_messages = ReviewBot.DEFAULT_REVIEW_MESSAGES.copy()
        self._review_mode = 'normal'
        self.fallback_user = None
        self.fallback_group = None
        self.comment_api = CommentAPI(self.apiurl)
        self.bot_name = self.__class__.__name__
        self.only_one_action = False
        self.request_default_return = None
        self.comment_handler = False
        self.override_allow = True
        self.override_group_key = '{}-override-group'.format(self.bot_name.lower())
        self.lookup = PackageLookup(self.apiurl)

        self.load_config()

    def _load_config(self, handle=None):
        """Parse YAML config from handle, falling back to config_defaults."""
        d = self.__class__.config_defaults
        y = yaml.safe_load(handle) if handle is not None else {}
        return namedtuple('BotConfig', sorted(d.keys()))(*[y.get(p, d[p]) for p in sorted(d.keys())])

    def load_config(self, filename=None):
        """Load bot configuration from filename or use built-in defaults."""
        if filename:
            with open(filename, 'r') as fh:
                self.config = self._load_config(fh)
        else:
            self.config = self._load_config()

    def staging_api(self, project):
        """Return a cached StagingAPI for project (one per main project)."""
        # Allow for the Staging subproject to be passed directly from config
        # which should be stripped before initializing StagingAPI. This allows
        # for NonFree subproject to utilize StagingAPI for main project.
        if project.endswith(':Staging'):
            project = project[:-8]

        if project not in self.staging_apis:
            Config.get(self.apiurl, project)
            self.staging_apis[project] = StagingAPI(self.apiurl, project)

        return self.staging_apis[project]

    @property
    def review_mode(self):
        return self._review_mode

    @review_mode.setter
    def review_mode(self, value):
        if value not in self.REVIEW_CHOICES:
            raise Exception("invalid review option: %s" % value)
        self._review_mode = value

    def set_request_ids(self, ids):
        """Fetch the given request ids (with full history) into self.requests."""
        for rqid in ids:
            u = osc.core.makeurl(self.apiurl, ['request', rqid], {'withfullhistory': '1'})
            r = osc.core.http_GET(u)
            root = ET.parse(r).getroot()
            req = osc.core.Request()
            req.read(root)
            self.requests.append(req)

    # function called before requests are reviewed
    def prepare_review(self):
        pass

    def check_requests(self):
        """Review all queued requests and set review state accordingly."""
        self.staging_apis = {}

        # give implementations a chance to do something before single requests
        self.prepare_review()
        for req in self.requests:
            self.logger.info("checking %s" % req.reqid)
            self.request = req

            # An override comment command takes precedence over the check.
            override = self.request_override_check(req)
            if override is not None:
                good = override
            else:
                try:
                    good = self.check_one_request(req)
                except Exception:
                    # A crashing check must not abort the whole run; treat the
                    # request as "ignored" and continue with the next one.
                    good = None

                    import traceback
                    traceback.print_exc()

            if self.review_mode == 'no':
                good = None
            elif self.review_mode == 'accept':
                good = True

            if good is None:
                self.logger.info("%s ignored" % req.reqid)
            elif good:
                self._set_review(req, 'accepted')
            elif self.review_mode != 'accept-onpass':
                self._set_review(req, 'declined')

    @memoize(session=True)
    def request_override_check_users(self, project):
        """Determine users allowed to override review in a comment command."""
        config = Config.get(self.apiurl, project)

        users = []
        group = config.get('staging-group')
        if group:
            users += group_members(self.apiurl, group)

        if self.override_group_key:
            override_group = config.get(self.override_group_key)
            if override_group:
                users += group_members(self.apiurl, override_group)

        return users

    def request_override_check(self, request, force=False):
        """Check for a comment command requesting review override."""
        if not force and not self.override_allow:
            return None

        comments = self.comment_api.get_comments(request_id=request.reqid)
        users = self.request_override_check_users(request.actions[0].tgt_project)
        for args, who in self.comment_api.command_find(
                comments, self.review_user, 'override', users):
            message = 'overridden by {}'.format(who)
            override = args[1] if len(args) >= 2 else 'accept'
            if override == 'accept':
                self.review_messages['accepted'] = message
                return True

            if override == 'decline':
                self.review_messages['declined'] = message
                return False

    def _set_review(self, req, state):
        """Apply the accepted/declined review result, honoring review_mode."""
        doit = self.can_accept_review(req.reqid)
        if doit is None:
            self.logger.info("can't change state, %s does not have the reviewer" % (req.reqid))

        newstate = state

        by_user = self.fallback_user
        by_group = self.fallback_group

        msg = self.review_messages[state] if state in self.review_messages else state
        self.logger.info("%s %s: %s" % (req.reqid, state, msg))

        if state == 'declined':
            if self.review_mode == 'fallback-onfail':
                self.logger.info("%s needs fallback reviewer" % req.reqid)
                self.add_review(req, by_group=by_group, by_user=by_user,
                                msg="Automated review failed. Needs fallback reviewer.")
                newstate = 'accepted'
        elif self.review_mode == 'fallback-always':
            self.add_review(req, by_group=by_group, by_user=by_user, msg='Adding fallback reviewer')

        if doit is True:
            self.logger.debug("setting %s to %s" % (req.reqid, state))
            if not self.dryrun:
                osc.core.change_review_state(apiurl=self.apiurl, reqid=req.reqid,
                                             newstate=newstate, by_group=self.review_group,
                                             by_user=self.review_user, message=msg)
        else:
            self.logger.debug("%s review not changed" % (req.reqid))

    # allow_duplicate=True should only be used if it makes sense to force a
    # re-review in a scenario where the bot adding the review will rerun.
    # Normally a declined review will automatically be reopened along with the
    # request and any other bot reviews already added will not be touched unless
    # the issuing bot is rerun which does not fit normal workflow.
    def add_review(self, req, by_group=None, by_user=None, by_project=None, by_package=None,
                   msg=None, allow_duplicate=False):
        """Add a review to req addressed by group, user, or project/package."""
        query = {
            'cmd': 'addreview'
        }
        if by_group:
            query['by_group'] = by_group
        elif by_user:
            query['by_user'] = by_user
        elif by_project:
            query['by_project'] = by_project
            if by_package:
                query['by_package'] = by_package
        else:
            raise osc.oscerr.WrongArgs("missing by_*")

        for r in req.reviews:
            if (r.by_group == by_group and
                r.by_project == by_project and
                r.by_package == by_package and
                r.by_user == by_user and
                # Only duplicate when allow_duplicate and state != new.
                (not allow_duplicate or r.state == 'new')):
                del query['cmd']
                self.logger.debug('skipped adding duplicate review for {}'.format(
                    '/'.join(query.values())))
                return

        u = osc.core.makeurl(self.apiurl, ['request', req.reqid], query)
        if self.dryrun:
            self.logger.info('POST %s' % u)
            return

        r = osc.core.http_POST(u, data=msg)
        code = ET.parse(r).getroot().attrib['code']
        if code != 'ok':
            raise Exception('non-ok return code: {}'.format(code))

    def devel_project_review_add(self, request, project, package, message='adding devel project review'):
        """Add a review by the devel project of package; False on failure."""
        devel_project, devel_package = devel_project_fallback(self.apiurl, project, package)
        if not devel_project:
            self.logger.warning('no devel project found for {}/{}'.format(project, package))
            return False

        try:
            self.add_review(request, by_project=devel_project, by_package=devel_package, msg=message)
        except HTTPError as e:
            # could happen when the bot is not actually a reviewer and has no permissions
            if e.code != 403:
                # Bare re-raise preserves the original traceback.
                raise
            self.logger.error('failed to add devel project review for {}/{}'.format(devel_project, devel_package))
            return False

        return True

    def devel_project_review_ensure(self, request, project, package, message='submitter not devel maintainer'):
        """Add a devel project review only when one is actually needed."""
        if not self.devel_project_review_needed(request, project, package):
            self.logger.debug('devel project review not needed')
            return True

        return self.devel_project_review_add(request, project, package, message)

    def devel_project_review_needed(self, request, project, package):
        """Return True unless the author or an existing reviewer maintains package."""
        author = request.get_creator()
        maintainers = set(maintainers_get(self.apiurl, project, package))

        if author in maintainers:
            return False

        # Carried over from maintbot, but seems haphazard.
        for review in request.reviews:
            if review.by_user in maintainers:
                return False

        return True

    def check_one_request(self, req):
        """
        check all actions in one request.

        calls helper functions for each action type

        return None if nothing to do, True to accept, False to reject
        """

        # Copy original values to revert changes made to them.
        self.review_messages = self.DEFAULT_REVIEW_MESSAGES.copy()

        if self.only_one_action and len(req.actions) != 1:
            self.review_messages['declined'] = 'Only one action per request supported'
            return False

        if self.comment_handler is not False:
            self.comment_handler_add()

        overall = True
        for a in req.actions:
            # Store in-case sub-classes need direct access to original values.
            self.action = a

            func = getattr(self, self.action_method(a))
            ret = func(req, a)

            # In the case of multiple actions take the "lowest" result where the
            # order from lowest to highest is: False, None, True.
            if overall is not False:
                if ((overall is True and ret is not True) or
                    (overall is None and ret is False)):
                    overall = ret

        return overall

    def action_method(self, action):
        """Resolve the most specific check_action_* method name for action."""
        method_prefix = 'check_action'
        method_type = action.type
        method_suffix = None

        if method_type == 'delete':
            method_suffix = 'project'
            if action.tgt_package is not None:
                method_suffix = 'package'
            elif action.tgt_repository is not None:
                method_suffix = 'repository'

        if method_suffix:
            method = '_'.join([method_prefix, method_type, method_suffix])
            if hasattr(self, method):
                return method

        method = '_'.join([method_prefix, method_type])
        if hasattr(self, method):
            return method

        # Fall back to the generic handler when no specific method exists.
        method_type = '_default'
        return '_'.join([method_prefix, method_type])

    @staticmethod
    def _is_patchinfo(pkgname):
        """Return True for the patchinfo package or its variants."""
        return pkgname == 'patchinfo' or pkgname.startswith('patchinfo.')

    def check_action_maintenance_incident(self, req, a):
        if self._is_patchinfo(a.src_package):
            self.logger.debug('ignoring patchinfo action')
            return True

        # Duplicate src_package as tgt_package since prior to assignment to a
        # specific incident project there is no target package (odd API). After
        # assignment it is still assumed the target will match the source. Since
        # the ultimate goal is the tgt_releaseproject the incident is treated
        # similar to staging in that the intermediate result is not the final
        # and thus the true target project (ex. openSUSE:Maintenance) is not
        # used for check_source_submission().
        tgt_package = a.src_package
        if a.tgt_releaseproject is not None:
            suffix = '.' + a.tgt_releaseproject.replace(':', '_')
            if tgt_package.endswith(suffix):
                tgt_package = tgt_package[:-len(suffix)]

        # Note tgt_releaseproject (product) instead of tgt_project (maintenance).
        return self.check_source_submission(a.src_project, a.src_package, a.src_rev,
                                            a.tgt_releaseproject, tgt_package)

    def check_action_maintenance_release(self, req, a):
        pkgname = a.src_package
        if self._is_patchinfo(pkgname):
            self.logger.debug('ignoring patchinfo action')
            return True

        linkpkg = self._get_linktarget_self(a.src_project, pkgname)
        if linkpkg is not None:
            pkgname = linkpkg
        # packages in maintenance have links to the target. Use that
        # to find the real package name
        (linkprj, linkpkg) = self._get_linktarget(a.src_project, pkgname)
        if linkpkg is None or linkprj is None or linkprj != a.tgt_project:
            self.logger.warning("%s/%s is not a link to %s" % (a.src_project, pkgname, a.tgt_project))
            return self.check_source_submission(a.src_project, a.src_package, a.src_rev,
                                                a.tgt_project, a.tgt_package)
        else:
            pkgname = linkpkg
        return self.check_source_submission(a.src_project, a.src_package, None, a.tgt_project, pkgname)

    def check_action_submit(self, req, a):
        return self.check_source_submission(a.src_project, a.src_package, a.src_rev,
                                            a.tgt_project, a.tgt_package)

    def check_action__default(self, req, a):
        # Disable any comment handler to avoid making a comment even if
        # comment_write() is called by another bot wrapping __default().
        self.comment_handler_remove()

        message = 'unhandled request type {}'.format(a.type)
        self.logger.error(message)
        self.review_messages['accepted'] += ': ' + message
        return self.request_default_return

    def check_source_submission(self, src_project, src_package, src_rev, target_project, target_package):
        """ default implemention does nothing """
        self.logger.info("%s/%s@%s -> %s/%s" % (src_project, src_package, src_rev,
                                                target_project, target_package))
        return None

    @staticmethod
    @memoize(session=True)
    def _get_sourceinfo(apiurl, project, package, rev=None):
        """Fetch the source info XML for a package, or None on error."""
        query = {'view': 'info'}
        if rev is not None:
            query['rev'] = rev
        url = osc.core.makeurl(apiurl, ('source', project, package), query=query)
        try:
            return ET.parse(osc.core.http_GET(url)).getroot()
        except (HTTPError, URLError):
            return None

    def get_originproject(self, project, package, rev=None):
        """Return the origin project of package, or None if unknown."""
        root = ReviewBot._get_sourceinfo(self.apiurl, project, package, rev)
        if root is None:
            return None

        originproject = root.find('originproject')
        if originproject is not None:
            return originproject.text

        return None

    def get_sourceinfo(self, project, package, rev=None):
        """Return a SourceInfo namedtuple for package, or None if unknown."""
        root = ReviewBot._get_sourceinfo(self.apiurl, project, package, rev)
        if root is None:
            return None

        props = ('package', 'rev', 'vrev', 'srcmd5', 'lsrcmd5', 'verifymd5')
        return namedtuple('SourceInfo', props)(*[root.get(p) for p in props])

    # TODO: what if there is more than _link?
    def _get_linktarget_self(self, src_project, src_package):
        """ if it's a link to a package in the same project return the name of the package"""
        prj, pkg = self._get_linktarget(src_project, src_package)
        if prj is None or prj == src_project:
            return pkg

    def _get_linktarget(self, src_project, src_package):
        """Return (project, package) the source links to, or (None, None)."""
        query = {}
        url = osc.core.makeurl(self.apiurl, ('source', src_project, src_package), query=query)
        try:
            root = ET.parse(osc.core.http_GET(url)).getroot()
        except HTTPError:
            return (None, None)

        if root is not None:
            linkinfo = root.find("linkinfo")
            if linkinfo is not None:
                return (linkinfo.get('project'), linkinfo.get('package'))

        return (None, None)

    def _has_open_review_by(self, root, by_what, reviewer):
        """Return True if reviewer has a 'new' review, False if only closed ones, None if none."""
        states = {review.get('state') for review in root.findall('review')
                  if review.get(by_what) == reviewer}
        if not states:
            return None
        elif 'new' in states:
            return True
        return False

    def can_accept_review(self, request_id):
        """return True if there is a new review for the specified reviewer"""
        url = osc.core.makeurl(self.apiurl, ('request', str(request_id)))
        try:
            root = ET.parse(osc.core.http_GET(url)).getroot()
            if self.review_user and self._has_open_review_by(root, 'by_user', self.review_user):
                return True
            if self.review_group and self._has_open_review_by(root, 'by_group', self.review_group):
                return True
        except HTTPError as e:
            print('ERROR in URL %s [%s]' % (url, e))
        return False

    def set_request_ids_search_review(self):
        """Populate self.requests with all requests having an open review for this bot."""
        review = None
        if self.review_user:
            # Restore the %s placeholder: without it the % operator raises
            # TypeError ("not all arguments converted") whenever review_user
            # is set.
            review = "@by_user='%s' and @state='new'" % self.review_user
        if self.review_group:
            review = osc.core.xpath_join(review, "@by_group='%s' and @state='new'" % self.review_group)
        url = osc.core.makeurl(self.apiurl, ('search', 'request'),
                               {'match': "state/@name='review' and review[%s]" % review,
                                'withfullhistory': 1})
        root = ET.parse(osc.core.http_GET(url)).getroot()

        self.requests = []

        for request in root.findall('request'):
            req = osc.core.Request()
            req.read(request)
            self.requests.append(req)

    # also used by openqabot
    def ids_project(self, project, typename):
        """Return all new/review requests of the given type targeting project."""
        url = osc.core.makeurl(self.apiurl, ('search', 'request'),
                               {'match': "(state/@name='review' or state/@name='new') and (action/target/@project='%s' and action/@type='%s')" % (project, typename),
                                'withfullhistory': 1})
        root = ET.parse(osc.core.http_GET(url)).getroot()

        ret = []

        for request in root.findall('request'):
            req = osc.core.Request()
            req.read(request)
            ret.append(req)
        return ret

    def set_request_ids_project(self, project, typename):
        self.requests = self.ids_project(project, typename)

    def comment_handler_add(self, level=logging.INFO):
        """Add handler to start recording log messages for comment."""
        self.comment_handler = CommentFromLogHandler(level)
        self.logger.addHandler(self.comment_handler)

    def comment_handler_remove(self):
        self.logger.removeHandler(self.comment_handler)

    def comment_handler_lines_deduplicate(self):
        # Preserve order while dropping duplicate lines.
        self.comment_handler.lines = list(OrderedDict.fromkeys(self.comment_handler.lines))

    def comment_write(self, state='done', result=None, project=None, package=None,
                      request=None, message=None, identical=False, only_replace=False,
                      info_extra=None, info_extra_identical=True, bot_name_suffix=None):
        """Write comment if not similar to previous comment and replace old one.

        The state, result, and info_extra (dict) are combined to create the info
        that is passed to CommentAPI methods for creating a marker and finding
        previous comments. self.bot_name, which defaults to class, will be used
        as the primary matching key. When info_extra_identical is set to False
        info_extra will not be included when finding previous comments to
        compare message against.

        A comment from the same bot will be replaced when a new comment is
        written. The only_replace flag will restrict to only writing a comment
        if a prior one is being replaced. This can be useful for writing a final
        comment that indicates a change from previous uncompleted state, but
        only makes sense to post if a prior comment was posted.

        The project, package, and request variables control where the comment
        is placed. If no value is given the default is the request being
        reviewed.

        If no message is provided the content will be extracted from
        self.comment_handler.line which is provided by CommentFromLogHandler. To
        use this call comment_handler_add() at the point which messages should
        start being collected. Alternatively the self.comment_handler setting
        may be set to True to automatically set one on each request.

        The previous comment body line count is compared to see if too similar
        to bother posting another comment which is useful for avoiding
        re-posting comments that contain irrelevant minor changes. To force an
        exact match use the identical flag to replace any non-identical
        comment body.
        """
        if project:
            kwargs = {'project_name': project}
            if package:
                kwargs['package_name'] = package
        else:
            if request is None:
                request = self.request
            kwargs = {'request_id': request.reqid}
        debug_key = '/'.join(kwargs.values())

        if message is None:
            if not len(self.comment_handler.lines):
                self.logger.debug('skipping empty comment for {}'.format(debug_key))
                return
            message = '\n\n'.join(self.comment_handler.lines)

        bot_name = self.bot_name
        if bot_name_suffix:
            bot_name = '::'.join([bot_name, bot_name_suffix])

        info = {'state': state, 'result': result}
        if info_extra and info_extra_identical:
            info.update(info_extra)

        comments = self.comment_api.get_comments(**kwargs)
        comment, _ = self.comment_api.comment_find(comments, bot_name, info)

        if info_extra and not info_extra_identical:
            # Add info_extra once comment has already been matched.
            info.update(info_extra)

        message = self.comment_api.add_marker(message, bot_name, info)
        message = self.comment_api.truncate(message.strip())

        if (comment is not None and
            ((identical and
              # Remove marker from comments since handled during comment_find().
              self.comment_api.remove_marker(comment['comment']) ==
              self.comment_api.remove_marker(message)) or
             (not identical and comment['comment'].count('\n') == message.count('\n')))
        ):
            # Assume same state/result and number of lines in message is duplicate.
            self.logger.debug('previous comment too similar on {}'.format(debug_key))
            return

        if comment is None:
            self.logger.debug('broadening search to include any state on {}'.format(debug_key))
            comment, _ = self.comment_api.comment_find(comments, bot_name)
        if comment is not None:
            self.logger.debug('removing previous comment on {}'.format(debug_key))
            if not self.dryrun:
                self.comment_api.delete(comment['id'])
        elif only_replace:
            self.logger.debug('no previous comment to replace on {}'.format(debug_key))
            return

        self.logger.debug('adding comment to {}: {}'.format(debug_key, message))
        if not self.dryrun:
            self.comment_api.add_comment(comment=message, **kwargs)

        self.comment_handler_remove()

    def _check_matching_srcmd5(self, project, package, rev, history_limit=5):
        """check if factory sources contain the package and revision. check head and history"""
        self.logger.debug("checking %s in %s" % (package, project))
        try:
            osc.core.show_package_meta(self.apiurl, project, package)
        except (HTTPError, URLError):
            self.logger.debug("new package")
            return None

        si = self.get_sourceinfo(project, package)
        if rev == si.verifymd5:
            self.logger.debug("srcmd5 matches")
            return True

        if history_limit:
            self.logger.debug("%s not the latest version, checking history", rev)
            u = osc.core.makeurl(self.apiurl, ['source', project, package, '_history'],
                                 {'limit': history_limit})
            try:
                r = osc.core.http_GET(u)
            except HTTPError:
                self.logger.debug("package has no history!?")
                return None

            root = ET.parse(r).getroot()
            # we need this complicated construct as obs doesn't honor
            # the 'limit' parameter use above for obs interconnect:
            # https://github.com/openSUSE/open-build-service/issues/2545
            for i, revision in enumerate(reversed(root.findall('revision'))):
                node = revision.find('srcmd5')
                if node is None:
                    continue
                self.logger.debug("checking %s" % node.text)
                if node.text == rev:
                    self.logger.debug("got it, rev %s" % revision.get('rev'))
                    return True
                if i == history_limit:
                    break

            self.logger.debug("srcmd5 not found in history either")
            return False
class PkgListGen(ToolBase.ToolBase): def __init__(self): ToolBase.ToolBase.__init__(self) self.logger = logging.getLogger(__name__) self.comment = CommentAPI(self.apiurl) self.reset() def reset(self): # package -> supportatus self.packages = dict() self.groups = dict() self._supportstatus = None self.input_dir = '.' self.output_dir = '.' self.lockjobs = dict() self.ignore_broken = False self.unwanted = set() self.output = None self.locales = set() self.filtered_architectures = None self.dry_run = False self.all_architectures = None def filter_architectures(self, architectures): self.filtered_architectures = sorted(list(set(architectures) & set(self.all_architectures))) def _load_supportstatus(self): # XXX fn = os.path.join(self.input_dir, 'supportstatus.txt') self._supportstatus = dict() if os.path.exists(fn): with open(fn, 'r') as fh: for line in fh: # pkg, status fields = line.rstrip().split(' ') if len(fields) > 1: self._supportstatus[fields[0]] = fields[1] def supportstatus(self, package): if self._supportstatus is None: self._load_supportstatus() return self._supportstatus.get(package) def _load_group_file(self, fn): output = None unwanted = None with open(fn, 'r') as fh: self.logger.debug('reading %s', fn) for groupname, group in yaml.safe_load(fh).items(): if groupname == 'OUTPUT': output = group continue if groupname == 'UNWANTED': unwanted = set(group) continue g = Group(groupname, self) g.parse_yml(group) return output, unwanted def group_input_files(self): return glob.glob(os.path.join(self.input_dir, 'group*.yml')) def load_all_groups(self): for fn in self.group_input_files(): o, u = self._load_group_file(fn) if o: if self.output is not None: raise Exception('OUTPUT defined multiple times') self.output = o if u: self.unwanted |= u # required to generate release spec files (only) def write_group_stubs(self): archs = ['*'] + self.all_architectures for name in self.groups: group = self.groups[name] group.solved_packages = dict() fn = 
'{}.group'.format(group.name) with open(os.path.join(self.output_dir, fn), 'w') as fh: for arch in archs: x = group.toxml(arch, group.ignore_broken, None) x = ET.tostring(x, pretty_print=True, encoding='unicode') fh.write(x) def write_all_groups(self): self._check_supplements() summary = dict() archs = ['*'] + self.all_architectures for name in self.groups: group = self.groups[name] if not group.solved: continue summary[name] = group.summary() fn = '{}.group'.format(group.name) with open(os.path.join(self.output_dir, fn), 'w') as fh: comment = group.comment for arch in archs: x = group.toxml(arch, group.ignore_broken, comment) # only comment first time comment = None x = ET.tostring(x, pretty_print=True, encoding='unicode') x = re.sub(r'\s*<!-- reason:', ' <!-- reason:', x) fh.write(x) return summary def solve_module(self, groupname, includes, excludes, use_recommends): g = self.groups[groupname] importants = set() for i in includes: name = i if isinstance(i, dict): name = list(i)[0] if i[name] != 'support': importants.add(name) else: importants.add(name) g.inherit(self.groups[name]) g.solve(use_recommends) for e in excludes: g.ignore(self.groups[e]) for i in importants: group = self.groups[i] for arch in group.packages: if arch not in g.solved_packages: continue for package in group.packages[arch]: if package[0] in g.solved_packages[arch]: continue if package[0] not in g.solved_packages['*']: self.logger.error(f'Missing {package[0]} in {groupname} for {arch}') def expand_repos(self, project, repo='standard'): return repository_path_expand(self.apiurl, project, repo) def _check_supplements(self): tocheck = set() tocheck_locales = set() for arch in self.filtered_architectures: pool = self.prepare_pool(arch, True) sel = pool.Selection() for s in pool.solvables_iter(): sel.add_raw(solv.Job.SOLVER_SOLVABLE, s.id) for s in sel.solvables(): for dep in s.lookup_deparray(solv.SOLVABLE_SUPPLEMENTS): for d in dep.str().split(' '): if d.startswith('namespace:modalias') or 
d.startswith('namespace:filesystem'): tocheck.add(s.name) for locale in self.locales: id = pool.str2id('locale({})'.format(locale)) for s in pool.whatprovides(id): tocheck_locales.add(s.name) all_grouped = set() for g in self.groups.values(): if g.solved: for arch in g.solved_packages.keys(): if g.solved_packages[arch]: all_grouped.update(g.solved_packages[arch]) for p in tocheck - all_grouped: self.logger.warning('package %s has supplements but is not grouped', p) for p in tocheck_locales - all_grouped: self.logger.warning('package %s provides supported locale but is not grouped', p) def prepare_pool(self, arch, ignore_conflicts): pool = solv.Pool() # the i586 DVD is really a i686 one if arch == 'i586': pool.setarch('i686') else: pool.setarch(arch) self.lockjobs[arch] = [] solvables = set() for project, reponame in self.repos: repo = pool.add_repo(project) # check back the repo state to avoid suprises state = repository_arch_state(self.apiurl, project, reponame, arch) if state is None: continue s = f'repo-{project}-{reponame}-{arch}-{state}.solv' if not repo.add_solv(s): raise MismatchedRepoException('failed to add repo {}/{}/{}'.format(project, reponame, arch)) for solvable in repo.solvables_iter(): if ignore_conflicts: solvable.unset(solv.SOLVABLE_CONFLICTS) solvable.unset(solv.SOLVABLE_OBSOLETES) # only take the first solvable in the repo chain if not self.use_newest_version and solvable.name in solvables: self.lockjobs[arch].append(pool.Job(solv.Job.SOLVER_SOLVABLE | solv.Job.SOLVER_LOCK, solvable.id)) solvables.add(solvable.name) pool.addfileprovides() pool.createwhatprovides() for locale in self.locales: pool.set_namespaceproviders(solv.NAMESPACE_LANGUAGE, pool.Dep(locale), True) return pool # parse file and merge all groups def _parse_unneeded(self, filename): filename = os.path.join(self.input_dir, filename) if not os.path.isfile(filename): return set() fh = open(filename, 'r') self.logger.debug('reading %s', filename) result = set() for group in 
yaml.safe_load(fh).values(): result.update(group) return result # the unsorted group is special and will contain all the rest for # the FTP tree. We filter it with unneeded though to create a # unsorted.yml file for release manager review def _collect_unsorted_packages(self, modules, unsorted): unneeded_regexps = [re.compile(r'\A' + r + r'\Z') for r in self._parse_unneeded('unneeded.yml')] packages = dict() if unsorted: unsorted.solved_packages = dict() unsorted.solved_packages['*'] = dict() for arch in self.filtered_architectures: pool = self.prepare_pool(arch, False) pool.Selection() archpacks = [s.name for s in pool.solvables_iter()] # copy filtered = list(archpacks) for r in unneeded_regexps: filtered = [p for p in filtered if not r.match(p)] # convert to set filtered = set(filtered) - self.unwanted for g in modules: if unsorted and g == unsorted: continue for a in ('*', arch): filtered -= set(g.solved_packages[a]) for package in filtered: packages.setdefault(package, []).append(arch) if unsorted: archpacks = set(archpacks) unsorted.solved_packages[arch] = dict() for g in modules: archpacks -= set(g.solved_packages[arch]) archpacks -= set(g.solved_packages['*']) unsorted.solved_packages[arch] = dict() for p in archpacks: unsorted.solved_packages[arch][p] = None if unsorted: common = None for arch in self.filtered_architectures: if common is None: common = set(unsorted.solved_packages[arch]) continue common &= set(unsorted.solved_packages[arch]) for p in common: unsorted.solved_packages['*'][p] = None for arch in self.filtered_architectures: del unsorted.solved_packages[arch][p] with open(os.path.join(self.output_dir, 'unsorted.yml'), 'w') as fh: fh.write('unsorted:\n') for p in sorted(packages): fh.write(' - ') fh.write(p) if len(packages[p]) != len(self.filtered_architectures): fh.write(': [') fh.write(','.join(sorted(packages[p]))) fh.write(']') reason = self._find_reason(p, modules) if reason: fh.write(' # ' + reason) fh.write(' \n') # give a hint if the 
package is related to a group def _find_reason(self, package, modules): # go through the modules multiple times to find the "best" for g in modules: if package in g.recommends: return 'recommended by ' + g.recommends[package] for g in modules: if package in g.suggested: return 'suggested by ' + g.suggested[package] for g in modules: if package in g.develpkgs: return 'devel package of ' + g.develpkgs[package] return None def update_one_repo(self, project, repo, arch, solv_file, solv_file_hash): # Either hash changed or new, so remove any old hash files. file_utils.unlink_list(None, glob.glob(solv_file + '::*')) d = os.path.join(CACHEDIR, project, repo, arch) if not os.path.exists(d): os.makedirs(d) self.logger.debug('updating %s', d) # only there to parse the repos bs_mirrorfull = os.path.join(SCRIPT_PATH, '..', 'bs_mirrorfull') args = [bs_mirrorfull] args.append('--nodebug') args.append('{}/public/build/{}/{}/{}'.format(self.apiurl, project, repo, arch)) args.append(d) with subprocess.Popen(args, stdout=subprocess.PIPE) as p: for line in p.stdout: self.logger.info(line.decode('utf-8').rstrip()) if p.wait() != 0: raise Exception("Mirroring repository failed") files = [os.path.join(d, f) for f in os.listdir(d) if f.endswith('.rpm')] suffix = f'.{os.getpid()}.tmp' fh = open(solv_file + suffix, 'w') p = subprocess.Popen( ['rpms2solv', '-m', '-', '-0'], stdin=subprocess.PIPE, stdout=fh) p.communicate(bytes('\0'.join(files), 'utf-8')) fh.close() if p.wait() != 0: raise Exception("rpm2solv failed") os.rename(solv_file + suffix, solv_file) # Create hash file now that solv creation is complete. open(solv_file_hash, 'a').close() def update_repos(self, architectures): for project, repo in self.repos: for arch in architectures: # Fetch state before mirroring in-case it changes during download. 
def create_weakremovers(self, target, target_config, directory, output):
    """Write ``Provides: weakremover(...)`` lines to *output* for packages
    that existed in old released repos (susetags dumps in *directory*) but
    are no longer provided by the current repos.

    Packages dropped only on some architectures are wrapped in
    ``%ifarch``/``%endif`` blocks.  Raises MismatchedRepoException when a
    current repo's solv file cannot be loaded.
    """
    drops = dict()
    dropped_repos = dict()

    # close the config handle deterministically instead of leaking it
    with open(os.path.join(directory, 'config.yml')) as fh:
        root = yaml.safe_load(fh)
    for item in root:
        key = list(item)[0]
        # cast 15.1 to string :)
        key = str(key)

        oldrepos = set()
        for suffix in ['xz', 'zst']:
            oldrepos |= set(glob.glob(os.path.join(directory, f"{key}_*.packages.{suffix}")))
            oldrepos |= set(glob.glob(os.path.join(directory, f"{key}.packages.{suffix}")))
        for oldrepo in sorted(oldrepos):
            pool = solv.Pool()
            pool.setarch()

            # we need some progress in the debug output - or gocd gets nervous
            self.logger.debug('checking {}'.format(oldrepo))
            oldsysrepo = file_utils.add_susetags(pool, oldrepo)

            for arch in self.all_architectures:
                for project, repo in self.repos:
                    # check back the repo state to avoid suprises
                    state = repository_arch_state(self.apiurl, project, repo, arch)
                    if state is None:
                        self.logger.debug(f'Skipping {project}/{repo}/{arch}')
                        # BUGFIX: without this continue, state=None was baked into
                        # the solv filename and add_solv failed despite "Skipping"
                        continue
                    fn = f'repo-{project}-{repo}-{arch}-{state}.solv'
                    r = pool.add_repo('/'.join([project, repo]))
                    if not r.add_solv(fn):
                        raise MismatchedRepoException('failed to add repo {}/{}/{}.'.format(project, repo, arch))

            pool.createwhatprovides()

            accepted_archs = set(self.all_architectures)
            accepted_archs.add('noarch')

            for s in oldsysrepo.solvables_iter():
                oldarch = s.arch
                if oldarch == 'i686':
                    oldarch = 'i586'
                if oldarch not in accepted_archs:
                    continue

                # is the package still provided by a current repo (same arch or noarch)?
                haveit = False
                for s2 in pool.whatprovides(s.nameid):
                    if s2.repo == oldsysrepo or s.nameid != s2.nameid:
                        continue
                    newarch = s2.arch
                    if newarch == 'i686':
                        newarch = 'i586'
                    if oldarch != newarch and newarch != 'noarch' and oldarch != 'noarch':
                        continue
                    haveit = True
                    break
                if haveit:
                    continue

                # check for already obsoleted packages
                nevr = pool.rel2id(s.nameid, s.evrid, solv.REL_EQ)
                for s2 in pool.whatmatchesdep(solv.SOLVABLE_OBSOLETES, nevr):
                    if s2.repo == oldsysrepo:
                        continue
                    haveit = True
                    break
                if haveit:
                    continue

                if s.name not in drops:
                    drops[s.name] = {'repo': key, 'archs': set()}
                if oldarch == 'noarch':
                    drops[s.name]['archs'] |= set(self.all_architectures)
                else:
                    drops[s.name]['archs'].add(oldarch)
            dropped_repos[key] = 1
            del pool

    for repo in sorted(dropped_repos):
        repo_output = False
        exclusives = dict()
        for name in sorted(drops):
            if drops[name]['repo'] != repo:
                continue
            if drops[name]['archs'] == set(self.all_architectures):
                # dropped everywhere: plain weakremover line
                if not repo_output:
                    print('#', repo, file=output)
                    repo_output = True
                print('Provides: weakremover({})'.format(name), file=output)
            else:
                # dropped only on some arches: batch by the arch set
                jarch = ' '.join(sorted(drops[name]['archs']))
                exclusives.setdefault(jarch, []).append(name)
        for arch in sorted(exclusives):
            if not repo_output:
                print('#', repo, file=output)
                repo_output = True
            print('%ifarch {}'.format(arch), file=output)
            for name in sorted(exclusives[arch]):
                print('Provides: weakremover({})'.format(name), file=output)
            print('%endif', file=output)
    output.flush()
def read_summary_file(self, file):
    """Parse a summary file of ``package:group`` lines into a mapping of
    package name -> list of groups it appears in."""
    summary = dict()
    with open(file, 'r') as handle:
        for entry in handle:
            name, group = entry.strip().split(':')
            summary.setdefault(name, []).append(group)
    return summary

def calculcate_package_diff(self, old_file, new_file):
    """Render a markdown report describing how packages moved between two
    summary files (removed / moved / added sections).

    Returns None when the summaries are identical.
    """
    previous = self.read_summary_file(old_file)
    current = self.read_summary_file(new_file)

    # Strip the unchanged entries from both sides.
    for name in list(previous.keys()):
        if current.get(name, []) == previous[name]:
            del current[name]
            del previous[name]

    if not previous and not current:
        return None

    def _fenced(names):
        # 90-column wrapped, comma separated package list
        joined = ', '.join(names)
        return "\n".join(textwrap.wrap(joined, width=90,
                                       break_long_words=False, break_on_hyphens=False))

    report = ''

    removed = dict()
    for name, groups in previous.items():
        if current.get(name):
            continue
        removed.setdefault(','.join(groups), []).append(name)
    for key in sorted(removed.keys()):
        report += f"**Remove from {key}**\n\n```\n" + _fenced(removed[key]) + "\n```\n\n"

    moved = dict()
    for name, groups in previous.items():
        destination = current.get(name)
        if not destination:
            continue
        moved.setdefault(','.join(groups) + ' to ' + ','.join(destination), []).append(name)
    for key in sorted(moved.keys()):
        report += f"**Move from {key}**\n\n```\n" + _fenced(moved[key]) + "\n```\n\n"

    added = dict()
    for name in current:
        if name in previous:
            continue
        added.setdefault(','.join(current[name]), []).append(name)
    for key in sorted(added):
        report += f"**Add to {key}**\n\n```\n" + _fenced(added[key]) + "\n```\n\n"

    return report.strip()
def handle_package_diff(self, project, old_file, new_file):
    """Maintain the package-diff comment on *project*.

    Returns 0 when the summaries are identical (removing any stale comment),
    1 when a diff exists (posting or refreshing the comment as needed).
    """
    existing = self.comment.get_comments(project_name=project)
    previous, _ = self.comment.comment_find(existing, MARKER)

    report = self.calculcate_package_diff(old_file, new_file)
    if not report:
        # No difference: retire any stale comment.
        if previous:
            self.comment.delete(previous['id'])
        return 0

    report = self.comment.add_marker(report, MARKER)

    needs_update = (not previous) or report != previous['comment']
    if needs_update:
        if previous:
            self.comment.delete(previous['id'])
        self.comment.add_comment(project_name=project, comment=report)
    else:
        # Report unchanged: surface direct replies to the existing comment.
        for reply in existing.values():
            if reply['parent'] == previous['id']:
                print(reply)
    return 1
def solve_project(self, ignore_unresolvable=False, ignore_recommended=False, locale=None, locales_from=None):
    """Solve every group listed in ``self.output`` and write the results.

    Returns the value of ``self.write_all_groups()`` (presumably a mapping of
    group -> packages; callers iterate it — TODO confirm against
    write_all_groups), or None when ``self.output`` is not set.
    """
    self.load_all_groups()
    if not self.output:
        self.logger.error('OUTPUT not defined')
        return

    if ignore_unresolvable:
        self.ignore_broken = True
    global_use_recommends = not ignore_recommended
    if locale:
        self.locales |= set(locale.split(' '))
    if locales_from:
        # pull additional locales from an XML linguas list in the input dir
        with open(os.path.join(self.input_dir, locales_from), 'r') as fh:
            root = ET.parse(fh).getroot()
            self.locales |= set([lang.text for lang in root.findall('.//linguas/language')])

    modules = []
    # the yml parser makes an array out of everything, so
    # we loop a bit more than what we support
    for group in self.output:
        groupname = list(group)[0]
        settings = group[groupname]
        if not settings:  # e.g. unsorted
            settings = {}
        includes = settings.get('includes', [])
        excludes = settings.get('excludes', [])
        use_recommends = settings.get('recommends', global_use_recommends)
        self.solve_module(groupname, includes, excludes, use_recommends)
        g = self.groups[groupname]
        # the default is a little double negated but Factory has ignore_broken
        # as default and we only disable it for single groups (for now)
        g.ignore_broken = not settings.get('require_all', not self.ignore_broken)
        g.conflicts = settings.get('conflicts', [])
        g.default_support_status = settings.get('default-support', 'unsupported')
        modules.append(g)

    # not defined for openSUSE
    overlap = self.groups.get('overlap')
    for module in modules:
        module.check_dups(modules, overlap)
        module.collect_devel_packages()
        module.filter_already_selected(modules)

    if overlap:
        # solve the overlap group itself, then strip its packages from
        # every other (non-ignored) module
        ignores = [x.name for x in overlap.ignored]
        self.solve_module(overlap.name, [], ignores, use_recommends=False)
        overlapped = set(overlap.solved_packages['*'])
        for arch in self.filtered_architectures:
            overlapped |= set(overlap.solved_packages[arch])
        for module in modules:
            if module.name == 'overlap' or module in overlap.ignored:
                continue
            for arch in ['*'] + self.filtered_architectures:
                for p in overlapped:
                    module.solved_packages[arch].pop(p, None)

    self._collect_unsorted_packages(modules, self.groups.get('unsorted'))
    return self.write_all_groups()
def strip_medium_from_staging(self, path):
    # staging projects don't need source and debug medium - and the glibc source
    # rpm conflicts between standard and bootstrap_copy repository causing the
    # product builder to fail
    medium = re.compile('name="(DEBUG|SOURCE)MEDIUM"')
    for name in glob.glob(os.path.join(path, '*.kiwi')):
        # context managers close the handles deterministically (they were leaked before)
        with open(name) as fh:
            lines = [x for x in fh.readlines() if not medium.search(x)]
        with open(name, 'w') as fh:
            fh.writelines(lines)

def build_stub(self, destination, extension):
    """Write a minimal stub.<extension> spec so single files are not built twice."""
    with open(os.path.join(destination, '.'.join(['stub', extension])), 'w+') as f:
        f.write('# prevent building single {} files twice\n'.format(extension))
        f.write('Name: stub\n')
        f.write('Version: 0.0\n')

def commit_package(self, path):
    """Commit the osc working copy at *path*; in dry-run mode only log the diff."""
    if self.dry_run:
        package = Package(path)
        for i in package.get_diff():
            logging.info(''.join(i))
    else:
        # No proper API function to perform the same operation.
        # Run without shell=True and with cwd so paths containing spaces or
        # shell metacharacters cannot break (or inject into) the command.
        logging.debug(subprocess.check_output(
            ['osc', 'addremove'], cwd=path, encoding='utf-8'))
        package = Package(path)
        package.commit(msg='Automatic update', skip_local_service_run=True)
def replace_product_version(self, product_file, product_version):
    """Substitute the empty ``<version></version>`` tag in *product_file*
    with the given version string, in place."""
    product_version = '<version>{}</version>'.format(product_version)
    # with-blocks close both handles (they were leaked before)
    with open(product_file) as fh:
        lines = fh.readlines()
    new_lines = [line.replace('<version></version>', product_version) for line in lines]
    with open(product_file, 'w') as fh:
        fh.write(''.join(new_lines))

def update_and_solve_target(self, api, target_project, target_config, main_repo, project,
                            scope, force, no_checkout, only_release_packages, stop_after_solve):
    """Check out the pkglistgen packages for *project*, refresh repos, solve
    all groups and commit the regenerated product/release packages.

    Returns early when the product/release packages had to be revived, when a
    build is already in progress (unless *force*), or after solving when
    *stop_after_solve* is set.
    """
    self.all_architectures = target_config.get('pkglistgen-archs').split(' ')
    self.use_newest_version = str2bool(target_config.get('pkglistgen-use-newest-version', 'False'))
    self.repos = self.expand_repos(project, main_repo)
    logging.debug('[{}] {}/{}: update and solve'.format(scope, project, main_repo))

    group = target_config.get('pkglistgen-group', '000package-groups')
    product = target_config.get('pkglistgen-product', '000product')
    release = target_config.get('pkglistgen-release', '000release-packages')
    oldrepos = target_config.get('pkglistgen-repos', '000update-repos')

    url = api.makeurl(['source', project])
    packages = ET.parse(http_GET(url)).getroot()
    if packages.find('entry[@name="{}"]'.format(product)) is None:
        if not self.dry_run:
            undelete_package(api.apiurl, project, product, 'revive')
        # TODO disable build.
        logging.info('{} undeleted, skip dvd until next cycle'.format(product))
        return
    elif not force:
        root = ET.fromstringlist(show_results_meta(api.apiurl, project, product,
                                                   repository=[main_repo], multibuild=True))
        if len(root.xpath('result[@state="building"]')) or len(root.xpath('result[@state="dirty"]')):
            logging.info('{}/{} build in progress'.format(project, product))
            return
    drop_list = api.item_exists(project, oldrepos)
    checkout_list = [group, product, release]
    if drop_list and not only_release_packages:
        checkout_list.append(oldrepos)

    if packages.find('entry[@name="{}"]'.format(release)) is None:
        if not self.dry_run:
            undelete_package(api.apiurl, project, release, 'revive')
        logging.info('{} undeleted, skip dvd until next cycle'.format(release))
        return

    # Cache dir specific to hostname and project.
    host = urlparse(api.apiurl).hostname
    cache_dir = CacheManager.directory('pkglistgen', host, project)

    if not no_checkout:
        if os.path.exists(cache_dir):
            shutil.rmtree(cache_dir)
        os.makedirs(cache_dir)

    group_dir = os.path.join(cache_dir, group)
    product_dir = os.path.join(cache_dir, product)
    release_dir = os.path.join(cache_dir, release)
    oldrepos_dir = os.path.join(cache_dir, oldrepos)

    self.input_dir = group_dir
    self.output_dir = product_dir

    for package in checkout_list:
        if no_checkout:
            logging.debug('Skipping checkout of {}/{}'.format(project, package))
            continue
        checkout_package(api.apiurl, project, package, expand_link=True,
                         prj_dir=cache_dir, outdir=os.path.join(cache_dir, package))

    file_utils.unlink_all_except(release_dir, ['weakremovers.inc'])
    if not only_release_packages:
        file_utils.unlink_all_except(product_dir)
    ignore_list = ['supportstatus.txt', 'summary-staging.txt', 'package-groups.changes']
    ignore_list += self.group_input_files()
    file_utils.copy_directory_contents(group_dir, product_dir, ignore_list)
    file_utils.change_extension(product_dir, '.spec.in', '.spec')
    file_utils.change_extension(product_dir, '.product.in', '.product')

    logging.debug('-> do_update')
    # make sure we only calculcate existant architectures
    self.filter_architectures(target_archs(api.apiurl, project, main_repo))
    self.update_repos(self.filtered_architectures)

    if only_release_packages:
        self.load_all_groups()
        self.write_group_stubs()
    else:
        summary = self.solve_project(ignore_unresolvable=str2bool(target_config.get('pkglistgen-ignore-unresolvable')),
                                     ignore_recommended=str2bool(target_config.get('pkglistgen-ignore-recommended')),
                                     locale=target_config.get('pkglistgen-locale'),
                                     locales_from=target_config.get('pkglistgen-locales-from'))

    if stop_after_solve:
        return

    if drop_list and not only_release_packages:
        weakremovers_file = os.path.join(release_dir, 'weakremovers.inc')
        try:
            # with-block closes the output file (it was left open before)
            with open(weakremovers_file, 'w') as output:
                self.create_weakremovers(project, target_config, oldrepos_dir, output=output)
        except MismatchedRepoException:
            logging.error("Failed to create weakremovers.inc due to mismatch in repos - project most likey started building again.")
            return

    delete_products = target_config.get('pkglistgen-delete-products', '').split(' ')
    file_utils.unlink_list(product_dir, delete_products)

    logging.debug('-> product service')
    product_version = attribute_value_load(api.apiurl, project, 'ProductVersion')
    if not product_version:
        # for stagings the product version doesn't matter (I hope)
        product_version = '1'
    for product_file in glob.glob(os.path.join(product_dir, '*.product')):
        self.replace_product_version(product_file, product_version)
        logging.debug(subprocess.check_output(
            [PRODUCT_SERVICE, product_file, product_dir, project], encoding='utf-8'))

    for delete_kiwi in target_config.get('pkglistgen-delete-kiwis-{}'.format(scope), '').split(' '):
        delete_kiwis = glob.glob(os.path.join(product_dir, delete_kiwi))
        file_utils.unlink_list(product_dir, delete_kiwis)
    if scope == 'staging':
        self.strip_medium_from_staging(product_dir)

    spec_files = glob.glob(os.path.join(product_dir, '*.spec'))
    file_utils.move_list(spec_files, release_dir)
    inc_files = glob.glob(os.path.join(group_dir, '*.inc'))
    # filter special inc file
    # BUGFIX: exclude weakremovers.inc here (the `not` was missing); moving it
    # unconditionally contradicted the do-not-overwrite guard just below
    inc_files = filter(lambda file: not file.endswith('weakremovers.inc'), inc_files)
    file_utils.move_list(inc_files, release_dir)

    # do not overwrite weakremovers.inc if it exists
    # we will commit there afterwards if needed
    if os.path.exists(os.path.join(group_dir, 'weakremovers.inc')) and \
            not os.path.exists(os.path.join(release_dir, 'weakremovers.inc')):
        file_utils.move_list([os.path.join(group_dir, 'weakremovers.inc')], release_dir)

    file_utils.multibuild_from_glob(release_dir, '*.spec')
    self.build_stub(release_dir, 'spec')
    self.commit_package(release_dir)

    if only_release_packages:
        return

    file_utils.multibuild_from_glob(product_dir, '*.kiwi')
    self.build_stub(product_dir, 'kiwi')

    reference_summary = os.path.join(group_dir, f'summary-{scope}.txt')
    if os.path.isfile(reference_summary):
        summary_file = os.path.join(product_dir, f'summary-{scope}.txt')
        output = []
        for group in summary:
            for package in sorted(summary[group]):
                output.append(f'{package}:{group}')
        with open(summary_file, 'w') as f:
            for line in sorted(output):
                f.write(line + '\n')

    self.commit_package(product_dir)

    if os.path.isfile(reference_summary):
        return self.handle_package_diff(project, reference_summary, summary_file)