def test_get_memoize_reset(self):
    """Ensure memoize_session_reset() properly forces re-fetch of config."""
    self.assertEqual('remote-indeed', Config.get(obs.APIURL, obs.PROJECT)['remote-only'])

    attribute_value_save(obs.APIURL, obs.PROJECT, 'Config', 'remote-only = new value\n')
    memoize_session_reset()

    self.assertEqual('new value', Config.get(obs.APIURL, obs.PROJECT)['remote-only'])
def test_get_memoize_reset(self):
    """Ensure memoize_session_reset() properly forces re-fetch of config."""
    wf = self.setup_vcr()

    self.assertEqual('remote-indeed', Config.get(wf.apiurl, wf.project)['remote-only'])

    attribute_value_save(wf.apiurl, wf.project, 'Config', 'remote-only = new value\n')
    memoize_session_reset()

    self.assertEqual('new value', Config.get(wf.apiurl, wf.project)['remote-only'])
def staging_api(self, project):
    # Allow for the Staging subproject to be passed directly from config
    # which should be stripped before initializing StagingAPI. This allows
    # for NonFree subproject to utilize StagingAPI for main project.
    if project.endswith(':Staging'):
        project = project[:-8]

    if project not in self.staging_apis:
        Config.get(self.apiurl, project)
        self.staging_apis[project] = StagingAPI(self.apiurl, project)

    return self.staging_apis[project]
def check_action_submit(self, request, action):
    repository_pairs = self.request_repository_pairs(request, action)
    if not isinstance(repository_pairs, list):
        return repository_pairs

    # use project_only results by default as reference
    whitelist = None
    config = Config.get(self.apiurl, action.tgt_project)
    staging = config.get('staging')
    arch_whitelist = config.get('repo_checker-arch-whitelist')
    cycle_packages = config.get('repo_checker-allowed-in-cycles')
    if staging:
        api = self.staging_api(staging)
        if not api.is_adi_project(repository_pairs[0][0]):
            # For "leaky" ring packages in letter stagings, where the
            # repository setup does not include the target project, that are
            # not intended to have all run-time dependencies satisfied.
            whitelist = set(config.get('repo_checker-binary-whitelist-ring', '').split(' '))

    state_hash = self.repository_state(repository_pairs, True)
    if not self.repository_check(repository_pairs, state_hash, True,
                                 arch_whitelist=arch_whitelist,
                                 whitelist=whitelist,
                                 cycle_packages=cycle_packages):
        return None

    self.review_messages['accepted'] = 'cycle and install check passed'
    return True
def mail_send(apiurl, project, to, subject, body, from_key='maintainer',
              followup_to_key='release-list', dry=False):
    from email.mime.text import MIMEText
    import email.utils
    import smtplib

    config = Config.get(apiurl, project)
    msg = MIMEText(body)
    msg['Message-ID'] = email.utils.make_msgid()
    msg['Date'] = email.utils.formatdate(localtime=1)
    if from_key is None:
        msg['From'] = entity_email(apiurl, conf.get_apiurl_usr(apiurl), include_name=True)
    else:
        msg['From'] = config['mail-{}'.format(from_key)]
    if '@' not in to:
        to = config['mail-{}'.format(to)]
    msg['To'] = to

    followup_to = config.get('mail-{}'.format(followup_to_key))
    if followup_to:
        msg['Mail-Followup-To'] = followup_to

    msg['Subject'] = subject

    if dry:
        print(msg.as_string())
        return

    s = smtplib.SMTP(config.get('mail-relay', 'relay.suse.de'))
    s.sendmail(msg['From'], [msg['To']], msg.as_string())
    s.quit()
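# A minimal dry-run sketch of calling mail_send() above. The API URL, project,
# and recipient key are illustrative assumptions, not values from any real
# configuration; with dry=True the composed message is only printed and no
# SMTP relay is contacted.
def mail_send_dry_run_example():
    mail_send('https://api.opensuse.org', 'openSUSE:Factory', 'release-list',
              'Example subject', 'Example body text.', dry=True)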
def check_one_request(self, req):
    config = Config.get(self.apiurl, req.actions[0].tgt_project)

    self.needs_legal_review = False
    self.needs_reviewteam = False
    self.needs_release_manager = False
    self.pending_factory_submission = False
    self.source_in_factory = None
    self.do_check_maintainer_review = not self.ibs
    self.packages = {}

    request_ok = ReviewBot.ReviewBot.check_one_request(self, req)

    self.logger.debug("review result: %s", request_ok)
    if self.pending_factory_submission:
        self.logger.info("submission is waiting for a Factory request to complete")
        creator = req.get_creator()
        bot_name = self.bot_name.lower()
        if self.automatic_submission and creator != bot_name:
            self.logger.info('@{}: this request would have been automatically created by {} '
                             'after the Factory submission was accepted in order to alleviate '
                             'the need to manually create requests for packages sourced from '
                             'Factory'.format(creator, bot_name))
    elif self.source_in_factory:
        self.logger.info("perfect. the submitted sources are in or accepted for Factory")
    elif self.source_in_factory == False:
        self.logger.warn("the submitted sources are NOT in Factory")

    if request_ok == False:
        self.logger.info("NOTE: if you think the automated review was wrong here, "
                         "please talk to the release team before reopening the request")

    if self.do_comments:
        result = None
        if request_ok is None:
            state = 'seen'
        elif request_ok:
            state = 'done'
            result = 'accepted'
        else:
            state = 'done'
            result = 'declined'
        self.comment_write(state, result)

    add_review_groups = []
    if self.needs_release_manager:
        add_review_groups.append(self.release_manager_group or
                                 config.get(self.override_group_key))
    if self.needs_reviewteam:
        add_review_groups.append(self.review_team_group or
                                 config.get('review-team'))
    if self.needs_legal_review:
        add_review_groups.append(self.legal_review_group or
                                 config.get('legal-review-group'))
    if self.needs_check_source and self.check_source_group is not None:
        add_review_groups.append(self.check_source_group)

    for group in add_review_groups:
        if group is None:
            continue
        self.logger.info("{0} needs review by [{1}](/group/show/{1})".format(req.reqid, group))
        self.add_review(req, by_group=group)

    return request_ok
def target_project_config(self, project):
    # Load project config and allow for remote entries.
    config = Config.get(self.apiurl, project)

    self.single_action_require = str2bool(config.get('check-source-single-action-require', 'False'))
    self.ignore_devel = not str2bool(config.get('devel-project-enforce', 'False'))
    self.in_air_rename_allow = str2bool(config.get('check-source-in-air-rename-allow', 'False'))
    self.add_review_team = str2bool(config.get('check-source-add-review-team', 'True'))
    self.review_team = config.get('review-team')
    self.staging_group = config.get('staging-group')
    self.repo_checker = config.get('repo-checker')
    self.devel_whitelist = config.get('devel-whitelist', '').split()
    self.skip_add_reviews = False

    if self.action.type == 'maintenance_incident':
        # The workflow effectively enforces the names to match and the
        # parent code sets target_package from source_package so this check
        # becomes useless and awkward to perform.
        self.in_air_rename_allow = True

        # The target project will be set to product and thus inherit
        # settings, but override since real target is not product.
        self.single_action_require = False

        # It might make sense to supersede maintbot, but for now.
        self.skip_add_reviews = True
def project_pseudometa_file_name(self, project, repository):
    filename = 'repo_checker'

    main_repo = Config.get(self.api.apiurl, project).get('main-repo')
    if not main_repo:
        filename += '.' + repository

    return filename
def policy_get_preprocess(apiurl, origin, policy):
    project = origin.rstrip('~')
    config_project = Config.get(apiurl, project)
    policy['pending_submission_allowed_reviews'] = filter(None, [
        config_resolve_variable(v, config_project, 'config_source')
        for v in policy['pending_submission_allowed_reviews']])

    return policy
def request_repository_pairs(self, request, action):
    if str2bool(Config.get(self.apiurl, action.tgt_project).get('repo_checker-project-skip', 'False')):
        # Do not change message as this should only occur in requests
        # targeting multiple projects such as in maintenance workflow in
        # which the message should be set by other actions.
        self.logger.debug('skipping review of action targeting {}'.format(action.tgt_project))
        return True

    repository = self.project_repository(action.tgt_project)
    if not repository:
        self.review_messages['declined'] = ERROR_REPO_SPECIFIED.format(action.tgt_project)
        return False

    repository_pairs = []
    # Assumes maintenance_release target project has staging disabled.
    staging = Config.get(self.apiurl, action.tgt_project).get('staging')
    if staging:
        api = self.staging_api(staging)
        stage_info = api.packages_staged.get(action.tgt_package)
        if not stage_info or str(stage_info['rq_id']) != str(request.reqid):
            self.logger.info('{} not staged'.format(request.reqid))
            return None

        if not self.force and not self.staging_build_failure_check(api, stage_info['prj']):
            self.logger.info('{} not ready due to staging build failure(s)'.format(request.reqid))
            return None

        # Staging setup is convoluted and thus the repository setup does not
        # contain a path to the target project. Instead the ports repository
        # is used to import the target prjconf. As such the staging group
        # repository must be explicitly layered on top of target project.
        repository_pairs.append([stage_info['prj'], repository])
        repository_pairs.extend(repository_path_expand(self.apiurl, action.tgt_project, repository))
    else:
        # Find a repository which links to target project "main" repository.
        repository = repository_path_search(
            self.apiurl, action.src_project, action.tgt_project, repository)

        if not repository:
            self.review_messages['declined'] = ERROR_REPO_SPECIFIED.format(action.tgt_project)
            return False

        repository_pairs.extend(repository_path_expand(self.apiurl, action.src_project, repository))

    return repository_pairs
def target_archs(self, project, repository):
    archs = target_archs(self.apiurl, project, repository)

    # Check for arch whitelist and use intersection.
    whitelist = Config.get(self.apiurl, project).get('repo_checker-arch-whitelist')
    if whitelist:
        archs = list(set(whitelist.split(' ')).intersection(set(archs)))

    # Trick to prioritize x86_64.
    return sorted(archs, reverse=True)
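# A small, self-contained illustration of the whitelist intersection and the
# reverse sort used by target_archs() above; the architecture lists here are
# hypothetical examples.
def arch_whitelist_example():
    archs = ['i586', 'ppc64le', 'x86_64']
    whitelist = 'x86_64 aarch64'
    # The intersection keeps only whitelisted architectures; the reverse sort
    # places x86_64 ahead of the remaining entries.
    return sorted(set(whitelist.split(' ')).intersection(set(archs)), reverse=True)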
def project_only(self, project, post_comments=False):
    repository = self.project_repository(project)
    if not repository:
        self.logger.error(ERROR_REPO_SPECIFIED.format(project))
        return

    config = Config.get(self.apiurl, project)
    arch_whitelist = config.get('repo_checker-arch-whitelist')

    repository_pairs = repository_path_expand(self.apiurl, project, repository)
    state_hash = self.repository_state(repository_pairs, False)
    self.repository_check(repository_pairs, state_hash, False, bool(post_comments),
                          arch_whitelist=arch_whitelist)
def binary_whitelist(self, override_pair, overridden_pair, arch):
    whitelist = self.binary_list_existing_problem(overridden_pair[0], overridden_pair[1])

    staging = Config.get(self.apiurl, overridden_pair[0]).get('staging')
    if staging:
        additions = self.staging_api(staging).get_prj_pseudometa(
            override_pair[0]).get('config', {})
        prefix = 'repo_checker-binary-whitelist'
        for key in [prefix, '-'.join([prefix, arch])]:
            whitelist.update(additions.get(key, '').split(' '))

    return set(filter(None, whitelist))
def request_override_check_users(self, project):
    """Determine users allowed to override review in a comment command."""
    config = Config.get(self.apiurl, project)

    users = []
    group = config.get('staging-group')
    if group:
        users += group_members(self.apiurl, group)

    if self.override_group_key:
        override_group = config.get(self.override_group_key)
        if override_group:
            users += group_members(self.apiurl, override_group)

    return users
def project_repository(self, project):
    repository = Config.get(self.apiurl, project).get('main-repo')
    if not repository:
        self.logger.debug('no main-repo defined for {}'.format(project))

        search_project = 'openSUSE:Factory'
        for search_repository in ('snapshot', 'standard'):
            repository = repository_path_search(
                self.apiurl, project, search_project, search_repository)

            if repository:
                self.logger.debug('found chain to {}/{} via {}'.format(
                    search_project, search_repository, repository))
                break

    return repository
def config_resolve(apiurl, project, config):
    defaults = POLICY_DEFAULTS.copy()
    defaults_workarounds = POLICY_DEFAULTS.copy()

    origins_original = config_origin_list(config)

    config_project = Config.get(apiurl, project)
    config_resolve_variables(config, config_project)

    origins = config['origins']
    i = 0
    while i < len(origins):
        origin = origins[i].keys()[0]
        values = origins[i][origin]

        if origin == '*':
            del origins[i]
            defaults.update(values)
            defaults_workarounds.update(values)
            config_resolve_apply(config, values, until='*')
        elif origin == '*~':
            del origins[i]
            defaults_workarounds.update(values)
            config_resolve_create_workarounds(config, values, origins_original)
            config_resolve_apply(config, values, workaround=True, until='*~')
        elif '*' in origin:
            # Does not allow for family + workaround expansion (ie. foo*~).
            del origins[i]
            config_resolve_create_family(apiurl, project, config, i, origin, values)
        elif origin.endswith('~'):
            values_new = deepcopy(defaults_workarounds)
            values_new.update(values)
            values.update(values_new)
            i += 1
        else:
            values_new = deepcopy(defaults)
            values_new.update(values)
            values.update(values_new)
            i += 1

    return config
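# A hedged illustration of the shape of data config_resolve() above operates
# on: config['origins'] is a list of single-key dicts where '*' provides
# defaults, '*~' provides workaround defaults, and concrete origins inherit
# from them. The project names and the policy key shown are assumptions for
# illustration only.
EXAMPLE_ORIGINS_CONFIG = {
    'origins': [
        {'*': {'automatic_updates': True}},
        {'openSUSE:Factory': {}},
        {'openSUSE:Leap:15.0:Update~': {'automatic_updates': False}},
    ],
}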
def package_comments(self, project, repository):
    self.logger.info('{} package comments'.format(len(self.package_results)))

    for package, sections in self.package_results.items():
        if str2bool(Config.get(self.apiurl, project).get('repo_checker-package-comment-devel', 'False')):
            bot_name_suffix = project
            comment_project, comment_package = devel_project_fallback(self.apiurl, project, package)
            if comment_project is None or comment_package is None:
                self.logger.warning('unable to find devel project for {}'.format(package))
                continue

            message = 'The version of this package in [`{project}`](/package/show/{project}/{package}) ' \
                      'has installation issues and may not be installable:'.format(
                          project=project, package=package)
        else:
            bot_name_suffix = repository
            comment_project = project
            comment_package = package
            message = 'This package has installation issues and may not be installable from the `{}` ' \
                      'repository:'.format(repository)

        # Sort sections by text to group binaries together.
        sections = sorted(sections, key=lambda s: s.text)
        message += '\n\n<pre>\n{}\n</pre>'.format(
            '\n'.join([section.text for section in sections]).strip())

        # Generate a hash based on the binaries involved and the number of
        # sections. This eliminates version or release changes from causing
        # an update to the comment while still updating on relevant changes.
        binaries = set()
        for section in sections:
            binaries.update(section.binaries)
        info = ';'.join(['::'.join(sorted(binaries)), str(len(sections))])
        reference = hashlib.sha1(info).hexdigest()[:7]

        # Post comment on package in order to notify maintainers.
        self.comment_write(state='seen', result=reference, bot_name_suffix=bot_name_suffix,
                           project=comment_project, package=comment_package, message=message)
def setUp(self):
    self.obs = obs.OBS()
    Config(obs.APIURL, obs.PROJECT)
    self.api = StagingAPI(obs.APIURL, obs.PROJECT)
def staging_api(args):
    Config(args.project)
    return StagingAPI(osc.conf.config['apiurl'], args.project)
def do_update_and_solve(self, subcmd, opts): """${cmd_name}: update and solve for given scope ${cmd_usage} ${cmd_option_list} """ if not opts.project: raise ValueError('project is required') if opts.scope not in self.SCOPES: raise ValueError('scope must be one of: {}'.format(', '.join( self.SCOPES))) if opts.scope == 'all': for scope in self.SCOPES[1:]: opts.scope = scope self.do_update_and_solve(subcmd, copy.deepcopy(opts)) return # Store target project as opts.project will contain subprojects. target_project = opts.project config = Config(target_project) apiurl = conf.config['apiurl'] api = StagingAPI(apiurl, target_project) config.apply_remote(api) target_config = conf.config[target_project] archs_key = 'pkglistgen-archs' if opts.scope != 'ports' else 'pkglistgen-archs-ports' if archs_key in target_config: self.options.architectures = target_config.get(archs_key).split( ' ') main_repo = target_config['main-repo'] if opts.scope == 'target': self.options.repos = ['/'.join([target_project, main_repo])] self.update_and_solve_target(apiurl, target_project, target_config, main_repo, opts) return elif opts.scope == 'ports': # TODO Continue supporting #1297, but should be abstracted. main_repo = 'ports' opts.project += ':Ports' self.options.repos = ['/'.join([opts.project, main_repo])] self.update_and_solve_target(apiurl, target_project, target_config, main_repo, opts) return elif opts.scope == 'rings': opts.project = api.rings[1] self.options.repos = [ '/'.join([api.rings[1], main_repo]), '/'.join([api.rings[0], main_repo]), ] self.update_and_solve_target(apiurl, target_project, target_config, main_repo, opts) opts.project = api.rings[2] self.options.repos.insert(0, '/'.join([api.rings[2], main_repo])) self.update_and_solve_target(apiurl, target_project, target_config, main_repo, opts, skip_release=True) return elif opts.scope == 'staging': letters = api.get_staging_projects_short() for letter in letters: opts.project = api.prj_from_short(letter) self.options.repos = ['/'.join([opts.project, main_repo])] if not api.is_staging_bootstrapped(opts.project): self.options.repos.append('/'.join( [opts.project, 'bootstrap_copy'])) # DVD project first since it depends on main. if api.rings: opts_dvd = copy.deepcopy(opts) opts_dvd.project += ':DVD' self.options.repos.insert( 0, '/'.join([opts_dvd.project, main_repo])) self.update_and_solve_target(apiurl, target_project, target_config, main_repo, opts_dvd, skip_release=True) self.options.repos.pop(0) self.update_and_solve_target(apiurl, target_project, target_config, main_repo, opts) return
def do_staging(self, subcmd, opts, *args): """${cmd_name}: Commands to work with staging projects "accept" will accept all requests in openSUSE:Factory:Staging:<LETTER> (into Factory) "check" will check if all packages are links without changes "cleanup_rings" will try to cleanup rings content and print out problems "freeze" will freeze the sources of the project's links (not affecting the packages actually in) "list" will pick the requests not in rings "select" will add requests to the project "unselect" will remove from the project - pushing them back to the backlog Usage: osc staging accept LETTER osc staging check [--old] REPO osc staging cleanup_rings osc staging freeze PROJECT... osc staging list osc staging select [--no-freeze] [--move [--from PROJECT]] LETTER REQUEST... osc staging unselect REQUEST... """ if opts.version: self._print_version() # verify the argument counts match the commands if len(args) == 0: raise oscerr.WrongArgs('No command given, see "osc help staging"!') cmd = args[0] if cmd in ('accept', 'freeze'): min_args, max_args = 1, None elif cmd == 'check': min_args, max_args = 0, 2 elif cmd == 'select': min_args, max_args = 1, None if not opts.add: min_args = 2 elif cmd == 'unselect': min_args, max_args = 1, None elif cmd == 'adi': min_args, max_args = None, None elif cmd in ('list', 'cleanup_rings'): min_args, max_args = 0, 0 else: raise oscerr.WrongArgs('Unknown command: %s' % cmd) if len(args) - 1 < min_args: raise oscerr.WrongArgs('Too few arguments.') if max_args is not None and len(args) - 1 > max_args: raise oscerr.WrongArgs('Too many arguments.') # Init the OBS access and configuration opts.project = self._full_project_name(opts.project) opts.apiurl = self.get_api_url() opts.verbose = False Config(opts.project) with OBSLock(opts.apiurl, opts.project): api = StagingAPI(opts.apiurl, opts.project) # call the respective command and parse args by need if cmd == 'check': prj = args[1] if len(args) > 1 else None CheckCommand(api).perform(prj, opts.old) elif cmd == 'freeze': for prj in args[1:]: FreezeCommand(api).perform(api.prj_from_letter(prj)) elif cmd == 'accept': cmd = AcceptCommand(api) for prj in args[1:]: if not cmd.perform(api.prj_from_letter(prj)): return cmd.accept_other_new() cmd.update_factory_version() if api.item_exists(api.crebuild): cmd.sync_buildfailures() elif cmd == 'unselect': UnselectCommand(api).perform(args[1:]) elif cmd == 'select': tprj = api.prj_from_letter(args[1]) if opts.add: api.mark_additional_packages(tprj, [opts.add]) else: SelectCommand(api, tprj).perform(args[2:], opts.move, opts.from_, opts.no_freeze) elif cmd == 'cleanup_rings': CleanupRings(api).perform() elif cmd == 'list': ListCommand(api).perform() elif cmd == 'adi': AdiCommand(api).perform(args[1:])
def staging_api(args):
    Config(args.project)
    api = StagingAPI(osc.conf.config['apiurl'], args.project)
    staging = '%s:Staging' % api.project
    return (api, staging)
def __init__(self, project):
    self.project = project
    self.apiurl = osc.conf.config['apiurl']

    Config(self.apiurl, self.project)
    self.api = StagingAPI(self.apiurl, self.project)
def setUp(self):
    self.obs = OBS()
    Config(APIURL, PROJECT)
def project_pseudometa_package(apiurl, project):
    package = Config.get(apiurl, project).get('pseudometa_package', '00Meta')
    if '/' in package:
        project, package = package.split('/', 2)

    return project, package
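# A short sketch of the two forms handled by project_pseudometa_package()
# above; the project names used here are placeholders. Without an override the
# default package '00Meta' in the passed project is returned, while a
# "project/package" config value redirects both parts.
def pseudometa_package_example(apiurl):
    # e.g. ('openSUSE:Factory', '00Meta') when no pseudometa_package is set,
    # or ('openSUSE:Factory:Staging', 'dashboard') when the config value is
    # 'openSUSE:Factory:Staging/dashboard'.
    return project_pseudometa_package(apiurl, 'openSUSE:Factory')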
parser.add_argument('-p', '--project', type=str, default='Factory',
                    help='openSUSE version to make the check (Factory, 13.2)')
parser.add_argument('-d', '--debug', action='store_true', default=False,
                    help='enable debug information')
args = parser.parse_args()

osc.conf.get_config()
osc.conf.config['debug'] = args.debug

if args.force:
    MARGIN_HOURS = 0

config = Config(args.project)
api = StagingAPI(osc.conf.config['apiurl'], args.project)
config.apply_remote(api)

openQA = OpenQAReport(api)
if args.staging:
    openQA.report(api.prj_from_letter(args.staging))
else:
    for staging in api.get_staging_projects():
        openQA.report(staging)
def load_config(self, project=None):
    if project is None:
        project = self.project
    self.config = Config(APIURL, project)
def do_staging(self, subcmd, opts, *args): """${cmd_name}: Commands to work with staging projects ${cmd_option_list} "accept" will accept all requests in $PROJECT:Staging:<LETTER> into $PROJECT If openSUSE:* project, requests marked ready from adi stagings will also be accepted. "acheck" will check if it is safe to accept new staging projects As $PROJECT is syncing the right package versions between /standard, /totest and /snapshot, it is important that the projects are clean prior to a checkin round. "adi" will list already staged requests, stage new requests, and supersede requests where applicable. New adi stagings will be created for new packages based on the grouping options used. The default grouping is by source project. When adi stagings are ready the request will be marked ready, unstaged, and the adi staging deleted. "check" will check if all packages are links without changes "cleanup_rings" will try to cleanup rings content and print out problems "freeze" will freeze the sources of the project's links while not affecting the source packages "frozenage" will show when the respective staging project was last frozen "ignore" will ignore a request from "list" and "adi" commands until unignored "unignore" will remove from requests from ignore list If the --cleanup flag is included then all ignored requests that were changed from state new or review more than 3 days ago will be removed. "list" will list/supersede requests for ring packages or all if no rings. The package list is used to limit what requests are superseded when called with the --supersede option. "repair" will attempt to repair the state of a request that has been corrupted. "select" will add requests to the project Stagings are expected to be either in short-hand or the full project name. For example letter or named stagings can be specified simply as A, B, Gcc6, etc, while adi stagings can be specified as adi:1, adi:2, etc. Currently, adi stagings are not supported in proposal mode. Requests may either be the target package or the request ID. When using --filter-by or --group-by the xpath will be applied to the request node as returned by OBS. Several values will supplement the normal request node. - ./action/target/@devel_project: the devel project for the package - ./action/target/@ring: the ring to which the package belongs - ./@ignored: either false or the provided message Some useful examples: --filter-by './action/target[starts-with(@package, "yast-")]' --filter-by './action/target/[@devel_project="YaST:Head"]' --filter-by './action/target[starts-with(@ring, "1")]' --filter-by '@id!="1234567"' --group-by='./action/target/@devel_project' --group-by='./action/target/@ring' Multiple filter-by or group-by options may be used at the same time. Note that when using proposal mode, multiple stagings to consider may be provided in addition to a list of requests by which to filter. A more complex example: select --group-by='./action/target/@devel_project' A B C 123 456 789 This will separate the requests 123, 456, 789 by devel project and only consider stagings A, B, or C, if available, for placement. No arguments is also a valid choice and will propose all non-ignored requests into the first available staging. Note that bootstrapped stagings are only used when either required or no other stagings are available. Another useful example is placing all open requests into a specific letter staging with: select A Interactive mode allows the proposal to be modified before application. 
"unselect" will remove from the project - pushing them back to the backlog "unlock" will remove the staging lock in case it gets stuck Usage: osc staging accept [--force] [--no-cleanup] [LETTER...] osc staging acheck osc staging adi [--move] [--by-develproject] [--split] REQUEST... osc staging check [--old] REPO osc staging cleanup_rings osc staging freeze [--no-boostrap] PROJECT... osc staging frozenage PROJECT... osc staging ignore [-m MESSAGE] REQUEST... osc staging unignore [--cleanup] REQUEST...|all osc staging list [--supersede] [PACKAGE...] osc staging select [--no-freeze] [--move [--from PROJECT] STAGING REQUEST... osc staging select [--no-freeze] [[--interactive] [--filter-by...] [--group-by...]] [STAGING...] [REQUEST...] osc staging unselect REQUEST... osc staging unlock osc staging repair REQUEST... """ if opts.version: self._print_version() # verify the argument counts match the commands if len(args) == 0: raise oscerr.WrongArgs('No command given, see "osc help staging"!') cmd = args[0] if cmd in ('freeze', 'frozenage', 'repair'): min_args, max_args = 1, None elif cmd == 'check': min_args, max_args = 0, 1 elif cmd == 'select': min_args, max_args = 0, None elif cmd == 'unselect': min_args, max_args = 1, None elif cmd == 'adi': min_args, max_args = 0, None elif cmd == 'ignore': min_args, max_args = 1, None elif cmd == 'unignore': min_args, max_args = 0, None elif cmd in ('list', 'accept'): min_args, max_args = 0, None elif cmd in ('cleanup_rings', 'acheck'): min_args, max_args = 0, 0 elif cmd == 'unlock': min_args, max_args = 0, 0 else: raise oscerr.WrongArgs('Unknown command: %s' % cmd) if len(args) - 1 < min_args: raise oscerr.WrongArgs('Too few arguments.') if max_args is not None and len(args) - 1 > max_args: raise oscerr.WrongArgs('Too many arguments.') # Init the OBS access and configuration opts.project = self._full_project_name(opts.project) opts.apiurl = self.get_api_url() opts.verbose = False Config(opts.project) if opts.wipe_cache: Cache.delete_all() lock = OBSLock(opts.apiurl, opts.project) if cmd == 'unlock': lock.release() return with lock: api = StagingAPI(opts.apiurl, opts.project) # call the respective command and parse args by need if cmd == 'check': prj = args[1] if len(args) > 1 else None CheckCommand(api).perform(prj, opts.old) elif cmd == 'freeze': for prj in args[1:]: FreezeCommand(api).perform(api.prj_from_letter(prj), copy_bootstrap=opts.bootstrap) elif cmd == 'frozenage': for prj in args[1:]: print("%s last frozen %0.1f days ago" % (api.prj_from_letter(prj), api.days_since_last_freeze(api.prj_from_letter(prj)))) elif cmd == 'acheck': # Is it safe to accept? Meaning: /totest contains what it should and is not dirty version_totest = api.get_binary_version(api.project, "openSUSE-release.rpm", repository="totest", arch="x86_64") if version_totest: version_openqa = api.load_file_content( "%s:Staging" % api.project, "dashboard", "version_totest") totest_dirty = api.is_repo_dirty(api.project, 'totest') print( "version_openqa: %s / version_totest: %s / totest_dirty: %s\n" % (version_openqa, version_totest, totest_dirty)) else: print("acheck is unavailable in %s!\n" % (api.project)) elif cmd == 'accept': # Is it safe to accept? 
Meaning: /totest contains what it should and is not dirty version_totest = api.get_binary_version(api.project, "openSUSE-release.rpm", repository="totest", arch="x86_64") if version_totest is None or opts.force: # SLE does not have a totest_version or openqa_version - ignore it version_openqa = version_totest totest_dirty = False else: version_openqa = api.load_file_content( "%s:Staging" % api.project, "dashboard", "version_totest") totest_dirty = api.is_repo_dirty(api.project, 'totest') if version_openqa == version_totest and not totest_dirty: cmd = AcceptCommand(api) for prj in args[1:]: if not cmd.perform(api.prj_from_letter(prj), opts.force): return if not opts.no_cleanup: if api.item_exists(api.prj_from_letter(prj)): cmd.cleanup(api.prj_from_letter(prj)) if api.item_exists("%s:DVD" % api.prj_from_letter(prj)): cmd.cleanup("%s:DVD" % api.prj_from_letter(prj)) if opts.project.startswith('openSUSE:'): cmd.accept_other_new() cmd.update_factory_version() if api.item_exists(api.crebuild): cmd.sync_buildfailures() else: print("Not safe to accept: /totest is not yet synced") elif cmd == 'unselect': UnselectCommand(api).perform(args[1:]) elif cmd == 'select': # Include list of all stagings in short-hand and by full name. existing_stagings = api.get_staging_projects_short(None) existing_stagings += [ p for p in api.get_staging_projects() if not p.endswith(':DVD') ] stagings = [] requests = [] for arg in args[1:]: # Since requests may be given by either request ID or package # name and stagings may include multi-letter special stagings # there is no easy way to distinguish between stagings and # requests in arguments. Therefore, check if argument is in the # list of short-hand and full project name stagings, otherwise # consider it a request. This also allows for special stagings # with the same name as package, but the staging will be assumed # first time around. The current practice seems to be to start a # special staging with a capital letter which makes them unique. # lastly adi stagings are consistently prefix with adi: which # also makes it consistent to distinguish them from request IDs. 
if arg in existing_stagings and arg not in stagings: stagings.append(api.extract_staging_short(arg)) elif arg not in requests: requests.append(arg) if len(stagings) != 1 or len( requests) == 0 or opts.filter_by or opts.group_by: if opts.move or opts.from_: print( '--move and --from must be used with explicit staging and request list' ) return splitter = RequestSplitter(api, api.get_open_requests(), in_ring=True) if len(requests) > 0: splitter.filter_add_requests(requests) if len(splitter.filters) == 0: splitter.filter_add( './action[not(@type="add_role" or @type="change_devel")]' ) splitter.filter_add('@ignored="false"') if opts.filter_by: for filter_by in opts.filter_by: splitter.filter_add(filter_by) if opts.group_by: for group_by in opts.group_by: splitter.group_by(group_by) splitter.split() result = splitter.propose_assignment(stagings) if result is not True: print('Failed to generate proposal: {}'.format(result)) return proposal = splitter.proposal if len(proposal) == 0: print('Empty proposal') return if opts.interactive: with tempfile.NamedTemporaryFile(suffix='.yml') as temp: temp.write( yaml.safe_dump(splitter.proposal, default_flow_style=False) + '\n\n') temp.write( '# move requests between stagings or comment/remove them\n' ) temp.write('# change the target staging for a group\n') temp.write('# stagings\n') temp.write('# - considered: {}\n'.format(', '.join( sorted(splitter.stagings_considerable.keys())))) temp.write('# - remaining: {}\n'.format(', '.join( sorted(splitter.stagings_available.keys())))) temp.flush() editor = os.getenv('EDITOR') if not editor: editor = 'xdg-open' return_code = subprocess.call([editor, temp.name]) proposal = yaml.safe_load(open(temp.name).read()) print(yaml.safe_dump(proposal, default_flow_style=False)) print('Accept proposal? [y/n] (y): ', end='') response = raw_input().lower() if response != '' and response != 'y': print('Quit') return for group in sorted(proposal.keys()): g = proposal[group] if not g['requests']: # Skipping since all request removed, presumably in interactive. continue print('Staging {}'.format(g['staging'])) # SelectCommand expects strings. request_ids = map(str, g['requests'].keys()) target_project = api.prj_from_short(g['staging']) SelectCommand(api, target_project) \ .perform(request_ids, opts.move, opts.from_, opts.no_freeze) else: target_project = api.prj_from_short(stagings[0]) if opts.add: api.mark_additional_packages(target_project, [opts.add]) else: SelectCommand(api, target_project) \ .perform(requests, opts.move, opts.from_, opts.no_freeze) elif cmd == 'cleanup_rings': CleanupRings(api).perform() elif cmd == 'ignore': IgnoreCommand(api).perform(args[1:], opts.message) elif cmd == 'unignore': UnignoreCommand(api).perform(args[1:], opts.cleanup) elif cmd == 'list': ListCommand(api).perform(args[1:], supersede=opts.supersede) elif cmd == 'adi': AdiCommand(api).perform(args[1:], move=opts.move, by_dp=opts.by_develproject, split=opts.split) elif cmd == 'repair': RepairCommand(api).perform(args[1:])
def setUp(self):
    self.obs = OBS()
    Config(APIURL, PROJECT)
    self.api = StagingAPI(APIURL, PROJECT)
def staging_api(args):
    apiurl = osc.conf.config['apiurl']
    Config(apiurl, args.project)
    return StagingAPI(apiurl, args.project)
                    default='openSUSE:Factory',
                    help='project to check (ex. openSUSE:Factory, openSUSE:Leap:15.1)')
parser.add_argument('-d', '--debug', action='store_true', default=False,
                    help='enable debug information')
parser.add_argument('-A', '--apiurl', metavar='URL', help='API URL')
args = parser.parse_args()

osc.conf.get_config(override_apiurl=args.apiurl)
osc.conf.config['debug'] = args.debug
apiurl = osc.conf.config['apiurl']
config = Config.get(apiurl, args.project)
api = StagingAPI(apiurl, args.project)
staging_report = InstallChecker(api, config)

if args.debug:
    logging.basicConfig(level=logging.DEBUG)
else:
    logging.basicConfig(level=logging.INFO)

if args.staging:
    if not staging_report.staging(api.prj_from_short(args.staging), force=True):
        sys.exit(1)
else:
    for staging in api.get_staging_projects():
        if api.is_adi_project(staging):
def repository_check(self, repository_pairs, state_hash, simulate_merge,
                     whitelist=None, arch_whitelist=None, post_comments=False,
                     cycle_packages=None):
    comment = []
    project, repository = repository_pairs[0]
    self.logger.info('checking {}/{}@{}[{}]'.format(
        project, repository, state_hash, len(repository_pairs)))

    archs = self.target_archs(project, repository, arch_whitelist)

    new_pairs = []
    for pair in repository_pairs:
        has_all = True
        for arch in archs:
            if not repository_arch_state(self.apiurl, pair[0], pair[1], arch):
                has_all = False
                break
        # ignore repositories only inherited for config
        if has_all:
            new_pairs.append(pair)
    repository_pairs = new_pairs

    published = repositories_published(self.apiurl, repository_pairs, archs)

    if not self.force:
        if state_hash == self.repository_state_last(project, repository, simulate_merge):
            self.logger.info('{} build unchanged'.format(project))
            # TODO keep track of skipped count for cycle summary
            return None

        # For submit style requests, want to process if top layer is done,
        # but not mark review as final until all layers are published.
        if published is not True and (not simulate_merge or published[0] == project):
            # Require all layers to be published except when the top layer
            # is published in a simulate merge (allows quicker feedback with
            # potentially incorrect results for staging).
            self.logger.info('{}/{} not published'.format(published[0], published[1]))
            return None

    # Drop non-published repository information and thus reduce to boolean.
    published = published is True

    if not simulate_merge:
        # Top of pseudometa file.
        comment.append(state_hash)

        if post_comments:
            # Stores parsed install_check() results grouped by package.
            self.package_results = {}

    if not len(archs):
        self.logger.debug('{} has no relevant architectures'.format(project))
        return None

    result = True
    for arch in archs:
        directories = []
        for pair_project, pair_repository in repository_pairs:
            directories.append(self.mirror(pair_project, pair_repository, arch))

        if simulate_merge:
            ignore = self.simulated_merge_ignore(repository_pairs[0], repository_pairs[1], arch)
            if not whitelist:
                whitelist = self.binary_whitelist(repository_pairs[0], repository_pairs[1], arch)

            results = {
                'cycle': self.cycle_check(repository_pairs[0][0], repository_pairs[0][1],
                                          arch, cycle_packages),
                'install': self.install_check(
                    repository_pairs[1], arch, directories, ignore, whitelist),
            }
        else:
            # Only products themselves will want no-filter or perhaps
            # projects working on cleaning up a product.
            no_filter = str2bool(Config.get(self.apiurl, project).get('repo_checker-no-filter'))
            results = {
                'cycle': CheckResult(True, None),
                'install': self.install_check(repository_pairs[0], arch, directories,
                                              parse=post_comments, no_filter=no_filter),
            }

        if not all(result.success for _, result in results.items()):
            # Not all checks passed, build comment.
            result = False
            self.result_comment(repository, arch, results, comment)

    if simulate_merge:
        info_extra = {'build': state_hash}
        if not result:
            # Some checks in group did not pass, post comment.
            # Avoid identical comments with different build hash during
            # target project build phase. Once published update regardless.
            self.comment_write(state='seen', result='failed', project=project,
                               message='\n'.join(comment).strip(), identical=True,
                               info_extra=info_extra, info_extra_identical=published,
                               bot_name_suffix=repository)
        else:
            # Post passed comment only if previous failed comment.
            text = 'Previously reported problems have been resolved.'
            self.comment_write(state='done', result='passed', project=project,
                               message=text, identical=True, only_replace=True,
                               info_extra=info_extra, bot_name_suffix=repository)
    else:
        text = '\n'.join(comment).strip()
        if not self.dryrun:
            filename = self.project_pseudometa_file_name(project, repository)
            project_pseudometa_file_ensure(
                self.apiurl, project, filename, text + '\n', 'repo_checker project_only run')
        else:
            print(text)

        if post_comments:
            self.package_comments(project, repository)

    if result and not published:
        # Wait for the complete stack to build before positive result.
        self.logger.debug('demoting result from accept to ignore due to non-published layer')
        result = None

    return result
def do_staging(self, subcmd, opts, *args): """${cmd_name}: Commands to work with staging projects ${cmd_option_list} "accept" will accept all requests in the given stagings. Without argument, it accepts all acceptable stagings. "adi" will list already staged requests, stage new requests, and supersede requests where applicable. New adi stagings will be created for new packages based on the grouping options used. The default grouping is by source project. When adi stagings are empty, they are deleted. "check" will check if all packages are links without changes "check_local_links" lists local links that don't match multispec package "check_duplicate_binaries" list binaries provided by multiple packages "cleanup_rings" will try to cleanup rings content and print out problems "rebase" (or "freeze") will freeze the sources of the project's links while not affecting the source packages "frozenage" will show when the respective staging project was last frozen "ignore" will ignore a request from "list" and "adi" commands until unignored "unignore" will remove from requests from ignore list If the --cleanup flag is included then all ignored requests that were changed from state new or review more than 3 days ago will be removed. "list" will list/supersede requests for ring packages or all if no rings. "lock" acquire a hold on the project in order to execute multiple commands and prevent others from interrupting. An example: lock -m "checkin round" list --supersede adi accept A B C D E unlock Each command will update the lock to keep it up-to-date. "repair" will attempt to repair the state of a request that has been corrupted. Use the --cleanup flag to include all untracked requests. "select" will add requests to the project Stagings are expected to be either in short-hand or the full project name. For example letter or named stagings can be specified simply as A, B, Gcc6, etc, while adi stagings can be specified as adi:1, adi:2, etc. Currently, adi stagings are not supported in proposal mode. Requests may either be the target package or the request ID. When using --filter-by or --group-by the xpath will be applied to the request node as returned by OBS. Use the following on a current request to see the XML structure. osc api /request/1337 A number of additional values will supplement the normal request node. - ./action/target/@devel_project: the devel project for the package - ./action/target/@devel_project_super: super devel project if relevant - ./action/target/@ring: the ring to which the package belongs - ./@aged: either True or False based on splitter-request-age-threshold - ./@ignored: either False or the provided message Some useful examples: --filter-by './action/target[starts-with(@package, "yast-")]' --filter-by './action/target/[@devel_project="YaST:Head"]' --filter-by './action/target[starts-with(@ring, "1")]' --filter-by '@id!="1234567"' --filter-by 'contains(description, "#Portus")' --group-by='./action/target/@devel_project' --group-by='./action/target/@ring' Multiple filter-by or group-by options may be used at the same time. Note that when using proposal mode, multiple stagings to consider may be provided in addition to a list of requests by which to filter. A more complex example: select --group-by='./action/target/@devel_project' A B C 123 456 789 This will separate the requests 123, 456, 789 by devel project and only consider stagings A, B, or C, if available, for placement. 
No arguments is also a valid choice and will propose all non-ignored requests into the first available staging. Note that bootstrapped stagings are only used when either required or no other stagings are available. Another useful example is placing all open requests into a specific letter staging with: select A Built in strategies may be specified as well. For example: select --strategy devel select --strategy quick select --strategy special select --strategy super The default is none and custom is used with any filter-by or group-by arguments are provided. To merge applicable requests into an existing staging. select --merge A To automatically try all available strategies. select --try-strategies These concepts can be combined and interactive mode allows the proposal to be modified before it is executed. Moving requests can be accomplished using the --move flag. For example, to move already staged pac1 and pac2 to staging B use the following. select --move B pac1 pac2 The staging in which the requests are staged will automatically be determined and the requests will be removed from that staging and placed in the specified staging. Related to this, the --filter-from option may be used in conjunction with --move to only move requests already staged in a specific staging. This can be useful if a staging master is responsible for a specific set of packages and wants to move them into a different staging when they were already placed in a mixed staging. For example, if one had a file with a list of packages the following would move any of them found in staging A to staging B. select --move --filter-from A B $(< package.list) "unselect" will remove from the project - pushing them back to the backlog If a message is included the requests will be ignored first. Use the --cleanup flag to include all obsolete requests. "unlock" will remove the staging lock in case it gets stuck or a manual hold If a command lock gets stuck while a hold is placed on a project the unlock command will need to be run twice since there are two layers of locks. "rebuild" will rebuild broken packages in the given stagings or all The rebuild command will only trigger builds for packages with less than 3 failures since the last success or if the build log indicates a stall. If the force option is included the rebuild checks will be ignored and all packages failing to build will be triggered. "setprio" will set priority of requests withing stagings If no stagings are specified all stagings will be used. The default priority is important, but the possible values are: "critical", "important", "moderate" or "low". "supersede" will supersede requests were applicable. A request list can be used to limit what is superseded. Usage: osc staging accept [--force] [--no-cleanup] [STAGING...] osc staging adi [--move] [--by-develproject] [--split] [REQUEST...] osc staging check [STAGING...] osc staging check_duplicate_binaries osc staging check_local_links osc staging cleanup_rings osc staging rebase|freeze [--no-bootstrap] STAGING... osc staging frozenage [STAGING...] osc staging ignore [-m MESSAGE] REQUEST... osc staging unignore [--cleanup] [REQUEST...|all] osc staging list [--supersede] osc staging lock [-m MESSAGE] osc staging select [--no-freeze] [--move [--filter-from STAGING]] STAGING REQUEST... osc staging select [--no-freeze] [--interactive|--non-interactive] [--filter-by...] [--group-by...] [--merge] [--try-strategies] [--strategy] [STAGING...] [REQUEST...] osc staging unselect [--cleanup] [-m MESSAGE] [REQUEST...] 
osc staging unlock osc staging rebuild [--force] [STAGING...] osc staging repair [--cleanup] [REQUEST...] osc staging setprio [STAGING...] [priority] osc staging supersede [REQUEST...] """ if opts.version: self._print_version() # verify the argument counts match the commands if len(args) == 0: raise oscerr.WrongArgs('No command given, see "osc help staging"!') cmd = args[0] if cmd in ( 'accept', 'adi', 'check', 'config', 'frozenage', 'unignore', 'select', 'unselect', 'rebuild', 'repair', 'supersede', ): min_args, max_args = 0, None elif cmd in ( 'freeze', 'rebase', 'setprio', 'ignore', ): min_args, max_args = 1, None elif cmd in ( 'check_duplicate_binaries', 'check_local_links', 'cleanup_rings', 'list', 'lock', 'unlock', ): min_args, max_args = 0, 0 else: raise oscerr.WrongArgs('Unknown command: %s' % cmd) args = clean_args(args) if len(args) - 1 < min_args: raise oscerr.WrongArgs('Too few arguments.') if max_args is not None and len(args) - 1 > max_args: raise oscerr.WrongArgs('Too many arguments.') # Allow for determining project from osc store. if not opts.project: if core.is_project_dir('.'): opts.project = core.store_read_project('.') else: opts.project = 'Factory' # Cache the remote config fetch. Cache.init() # Init the OBS access and configuration opts.project = self._full_project_name(opts.project) opts.apiurl = self.get_api_url() opts.verbose = False Config(opts.apiurl, opts.project) colorama.init(autoreset=True, strip=(opts.no_color or not bool(int(conf.config.get('staging.color', True))))) # Allow colors to be changed. for name in dir(Fore): if not name.startswith('_'): # .oscrc requires keys to be lower-case. value = conf.config.get('staging.color.' + name.lower()) if value: setattr(Fore, name, ansi.code_to_chars(value)) sentry_init(opts.apiurl, {'osc_plugin': subcmd}) if opts.wipe_cache: Cache.delete_all() api = StagingAPI(opts.apiurl, opts.project) needed = lock_needed(cmd, opts) with OBSLock(opts.apiurl, opts.project, reason=cmd, needed=needed) as lock: # call the respective command and parse args by need if cmd == 'check': if len(args) == 1: CheckCommand(api).perform(None) else: for prj in args[1:]: CheckCommand(api).perform(prj) print() elif cmd == 'check_duplicate_binaries': CheckDuplicateBinariesCommand(api).perform(opts.save) elif cmd == 'check_local_links': AcceptCommand(api).check_local_links() elif cmd == 'freeze' or cmd == 'rebase': for prj in args[1:]: prj = api.prj_from_short(prj) print(Fore.YELLOW + prj) FreezeCommand(api).perform(prj, copy_bootstrap=opts.bootstrap) elif cmd == 'frozenage': projects = api.get_staging_projects_short() if len(args) == 1 else args[1:] for prj in projects: prj = api.prj_from_letter(prj) print('{} last frozen {}{:.1f} days ago'.format( Fore.YELLOW + prj + Fore.RESET, Fore.GREEN if api.prj_frozen_enough(prj) else Fore.RED, api.days_since_last_freeze(prj))) elif cmd == 'accept': cmd = AcceptCommand(api) cmd.accept_all(args[1:], opts.force, not opts.no_cleanup) elif cmd == 'unselect': UnselectCommand(api).perform(args[1:], opts.cleanup, opts.message) elif cmd == 'select': # Include list of all stagings in short-hand and by full name. existing_stagings = api.get_staging_projects_short(None) existing_stagings += api.get_staging_projects() stagings = [] requests = [] for arg in args[1:]: # Since requests may be given by either request ID or package # name and stagings may include multi-letter special stagings # there is no easy way to distinguish between stagings and # requests in arguments. 
Therefore, check if argument is in the # list of short-hand and full project name stagings, otherwise # consider it a request. This also allows for special stagings # with the same name as package, but the staging will be assumed # first time around. The current practice seems to be to start a # special staging with a capital letter which makes them unique. # lastly adi stagings are consistently prefix with adi: which # also makes it consistent to distinguish them from request IDs. if arg in existing_stagings and arg not in stagings: stagings.append(api.extract_staging_short(arg)) elif arg not in requests: requests.append(arg) if len(stagings) != 1 or len(requests) == 0 or opts.filter_by or opts.group_by: if opts.move or opts.filter_from: print('--move and --filter-from must be used with explicit staging and request list') return open_requests = api.get_open_requests({'withhistory': 1}) if len(open_requests) == 0: print('No open requests to consider') return splitter = RequestSplitter(api, open_requests, in_ring=True) considerable = splitter.stagings_load(stagings) if considerable == 0: print('No considerable stagings on which to act') return if opts.merge: splitter.merge() if opts.try_strategies: splitter.strategies_try() if len(requests) > 0: splitter.strategy_do('requests', requests=requests) if opts.strategy: splitter.strategy_do(opts.strategy) elif opts.filter_by or opts.group_by: kwargs = {} if opts.filter_by: kwargs['filters'] = opts.filter_by if opts.group_by: kwargs['groups'] = opts.group_by splitter.strategy_do('custom', **kwargs) else: if opts.merge: # Merge any none strategies before final none strategy. splitter.merge(strategy_none=True) splitter.strategy_do('none') splitter.strategy_do_non_bootstrapped('none') proposal = splitter.proposal if len(proposal) == 0: print('Empty proposal') return if opts.interactive: with tempfile.NamedTemporaryFile(mode='w', suffix='.yml') as temp: temp.write(yaml.safe_dump(splitter.proposal, default_flow_style=False) + '\n\n') if len(splitter.requests): temp.write('# remaining requests:\n') for request in splitter.requests: temp.write('# {}: {}\n'.format( request.get('id'), request.find('action/target').get('package'))) temp.write('\n') temp.write('# move requests between stagings or comment/remove them\n') temp.write('# change the target staging for a group\n') temp.write('# remove the group, requests, staging, or strategy to skip\n') temp.write('# stagings\n') if opts.merge: temp.write('# - mergeable: {}\n' .format(', '.join(sorted(splitter.stagings_mergeable + splitter.stagings_mergeable_none)))) temp.write('# - considered: {}\n' .format(', '.join(sorted(splitter.stagings_considerable)))) temp.write('# - remaining: {}\n' .format(', '.join(sorted(splitter.stagings_available)))) temp.flush() editor = os.getenv('EDITOR') if not editor: editor = 'xdg-open' return_code = subprocess.call(editor.split(' ') + [temp.name]) proposal = yaml.safe_load(open(temp.name).read()) # Filter invalidated groups from proposal. keys = ['group', 'requests', 'staging', 'strategy'] for group, info in sorted(proposal.items()): for key in keys: if not info.get(key): del proposal[group] break print(yaml.safe_dump(proposal, default_flow_style=False)) print('Accept proposal? [y/n] (y): ', end='') if opts.non_interactive: print('y') else: response = input().lower() if response != '' and response != 'y': print('Quit') return for group, info in sorted(proposal.items()): print('Staging {} in {}'.format(group, info['staging'])) # SelectCommand expects strings. 
request_ids = map(str, info['requests'].keys()) target_project = api.prj_from_short(info['staging']) # TODO: Find better place for splitter info # if 'merge' not in info: # Assume that the original splitter_info is desireable # and that this staging is simply manual followup. # api.set_splitter_info_in_prj_pseudometa(target_project, info['group'], info['strategy']) SelectCommand(api, target_project) \ .perform(request_ids, no_freeze=opts.no_freeze) else: target_project = api.prj_from_short(stagings[0]) filter_from = api.prj_from_short(opts.filter_from) if opts.filter_from else None SelectCommand(api, target_project) \ .perform(requests, opts.move, filter_from, opts.no_freeze) elif cmd == 'cleanup_rings': CleanupRings(api).perform() elif cmd == 'ignore': IgnoreCommand(api).perform(args[1:], opts.message) elif cmd == 'unignore': UnignoreCommand(api).perform(args[1:], opts.cleanup) elif cmd == 'list': ListCommand(api).perform(supersede=opts.supersede) elif cmd == 'lock': lock.hold(opts.message) elif cmd == 'adi': AdiCommand(api).perform(args[1:], move=opts.move, by_dp=opts.by_develproject, split=opts.split) elif cmd == 'rebuild': RebuildCommand(api).perform(args[1:], opts.force) elif cmd == 'repair': RepairCommand(api).perform(args[1:], opts.cleanup) elif cmd == 'setprio': stagings = [] priority = None priorities = ['critical', 'important', 'moderate', 'low'] for arg in args[1:]: if arg in priorities: priority = arg else: stagings.append(arg) PrioCommand(api).perform(stagings, priority) elif cmd == 'supersede': SupersedeCommand(api).perform(args[1:]) elif cmd == 'unlock': lock.release(force=True)
def do_staging(self, subcmd, opts, *args): """${cmd_name}: Commands to work with staging projects ${cmd_option_list} "accept" will accept all requests in $PROJECT:Staging:<LETTER> into $PROJECT If openSUSE:* project, requests marked ready from adi stagings will also be accepted. "acheck" will check if it is safe to accept new staging projects As $PROJECT is syncing the right package versions between /standard, /totest and /snapshot, it is important that the projects are clean prior to a checkin round. "adi" will list already staged requests, stage new requests, and supersede requests where applicable. New adi stagings will be created for new packages based on the grouping options used. The default grouping is by source project. When adi stagings are ready the request will be marked ready, unstaged, and the adi staging deleted. "check" will check if all packages are links without changes "check_duplicate_binaries" list binaries provided by multiple packages "config" will modify or view staging specific configuration Target project level configuration that applies to all stagings can be found in the $PROJECT:Staging/dashboard container in file "config". Both configuration locations follow the .oscrc format (space separated list). config Print all staging configuration. config key Print the value of key for stagings. conf key value... Set the value of key for stagings. config --clear Clear all staging configuration. config --clear key Clear (unset) a single key from staging configuration config --append key value... Append value to existing value or set if no existing value. All of the above may be restricted to a set of stagings. The staging configuration is automatically cleared anytime staging psuedometa is cleared (accept, or unstage all requests). The keys that may be set in staging configuration are: - repo_checker-binary-whitelist[-arch]: appended to target project list "cleanup_rings" will try to cleanup rings content and print out problems "freeze" will freeze the sources of the project's links while not affecting the source packages "frozenage" will show when the respective staging project was last frozen "ignore" will ignore a request from "list" and "adi" commands until unignored "unignore" will remove from requests from ignore list If the --cleanup flag is included then all ignored requests that were changed from state new or review more than 3 days ago will be removed. "list" will list/supersede requests for ring packages or all if no rings. "lock" acquire a hold on the project in order to execute multiple commands and prevent others from interrupting. An example: lock -m "checkin round" list --supersede adi accept A B C D E unlock Each command will update the lock to keep it up-to-date. "repair" will attempt to repair the state of a request that has been corrupted. Use the --cleanup flag to include all untracked requests. "select" will add requests to the project Stagings are expected to be either in short-hand or the full project name. For example letter or named stagings can be specified simply as A, B, Gcc6, etc, while adi stagings can be specified as adi:1, adi:2, etc. Currently, adi stagings are not supported in proposal mode. Requests may either be the target package or the request ID. When using --filter-by or --group-by the xpath will be applied to the request node as returned by OBS. Several values will supplement the normal request node. 
- ./action/target/@devel_project: the devel project for the package - ./action/target/@ring: the ring to which the package belongs - ./@ignored: either false or the provided message Some useful examples: --filter-by './action/target[starts-with(@package, "yast-")]' --filter-by './action/target[@devel_project="YaST:Head"]' --filter-by './action/target[starts-with(@ring, "1")]' --filter-by '@id!="1234567"' --group-by='./action/target/@devel_project' --group-by='./action/target/@ring' Multiple filter-by or group-by options may be used at the same time. Note that when using proposal mode, multiple stagings to consider may be provided in addition to a list of requests by which to filter. A more complex example: select --group-by='./action/target/@devel_project' A B C 123 456 789 This will separate the requests 123, 456, 789 by devel project and only consider stagings A, B, or C, if available, for placement. Passing no arguments is also valid and will propose all non-ignored requests into the first available staging. Note that bootstrapped stagings are only used when either required or no other stagings are available. Another useful example is placing all open requests into a specific letter staging with: select A Built-in strategies may be specified as well. For example: select --strategy devel select --strategy special select --strategy super The default is none, and custom is used when any filter-by or group-by arguments are provided. To merge applicable requests into an existing staging: select --merge A To automatically try all available strategies: select --try-strategies These concepts can be combined and interactive mode allows the proposal to be modified before it is executed. "unselect" will remove requests from the project - pushing them back to the backlog If a message is included the requests will be ignored first. Use the --cleanup flag to include all obsolete requests. "unlock" will remove the staging lock in case it gets stuck or a manual hold If a command lock gets stuck while a hold is placed on a project the unlock command will need to be run twice since there are two layers of locks. "rebuild" will rebuild broken packages in the given stagings or all The rebuild command will only trigger builds for packages with fewer than 3 failures since the last success or if the build log indicates a stall. If the force option is included the rebuild checks will be ignored and all packages failing to build will be triggered. "supersede" will supersede requests where applicable. A request list can be used to limit what is superseded. Usage: osc staging accept [--force] [--no-cleanup] [LETTER...] osc staging acheck osc staging adi [--move] [--by-develproject] [--split] [REQUEST...] osc staging check [--old] [STAGING...] osc staging check_duplicate_binaries osc staging config [--append] [--clear] [STAGING...] [key] [value] osc staging cleanup_rings osc staging freeze [--no-bootstrap] STAGING... osc staging frozenage [STAGING...] osc staging ignore [-m MESSAGE] REQUEST... osc staging unignore [--cleanup] [REQUEST...|all] osc staging list [--supersede] osc staging lock [-m MESSAGE] osc staging select [--no-freeze] [--move [--from STAGING]] [--add PACKAGE] STAGING REQUEST... osc staging select [--no-freeze] [--interactive|--non-interactive] [--filter-by...] [--group-by...] [--merge] [--try-strategies] [--strategy] [STAGING...] [REQUEST...] osc staging unselect [--cleanup] [-m MESSAGE] [REQUEST...] osc staging unlock osc staging rebuild [--force] [STAGING...] osc staging repair [--cleanup] [REQUEST...] 
osc staging setprio [STAGING...] osc staging supersede [REQUEST...] """ if opts.version: self._print_version() # verify the argument counts match the commands if len(args) == 0: raise oscerr.WrongArgs('No command given, see "osc help staging"!') cmd = args[0] if cmd in ( 'accept', 'adi', 'check', 'config', 'frozenage', 'unignore', 'select', 'unselect', 'rebuild', 'repair', 'setprio', 'supersede', ): min_args, max_args = 0, None elif cmd in ( 'freeze', 'ignore', ): min_args, max_args = 1, None elif cmd in ( 'acheck', 'check_duplicate_binaries', 'cleanup_rings', 'list', 'lock', 'unlock', ): min_args, max_args = 0, 0 else: raise oscerr.WrongArgs('Unknown command: %s' % cmd) args = clean_args(args) if len(args) - 1 < min_args: raise oscerr.WrongArgs('Too few arguments.') if max_args is not None and len(args) - 1 > max_args: raise oscerr.WrongArgs('Too many arguments.') # Allow for determining project from osc store. if not opts.project: if core.is_project_dir('.'): opts.project = core.store_read_project('.') else: opts.project = 'Factory' # Init the OBS access and configuration opts.project = self._full_project_name(opts.project) opts.apiurl = self.get_api_url() opts.verbose = False config = Config(opts.project) colorama.init( autoreset=True, strip=(opts.no_color or not bool(int(conf.config.get('staging.color', True))))) # Allow colors to be changed. for name in dir(Fore): if not name.startswith('_'): # .oscrc requires keys to be lower-case. value = conf.config.get('staging.color.' + name.lower()) if value: setattr(Fore, name, ansi.code_to_chars(value)) if opts.wipe_cache: Cache.delete_all() needed = lock_needed(cmd, opts) with OBSLock(opts.apiurl, opts.project, reason=cmd, needed=needed) as lock: api = StagingAPI(opts.apiurl, opts.project) config.apply_remote(api) # call the respective command and parse args by need if cmd == 'check': if len(args) == 1: CheckCommand(api).perform(None, opts.old) else: for prj in args[1:]: CheckCommand(api).perform(prj, opts.old) print() elif cmd == 'check_duplicate_binaries': CheckDuplicateBinariesCommand(api).perform(opts.save) elif cmd == 'config': projects = set() key = value = None stagings = api.get_staging_projects_short(None) + \ api.get_staging_projects(include_dvd=False) for arg in args[1:]: if arg in stagings: projects.add(api.prj_from_short(arg)) elif key is None: key = arg elif value is None: value = arg else: value += ' ' + arg if not len(projects): projects = api.get_staging_projects(include_dvd=False) ConfigCommand(api).perform(projects, key, value, opts.append, opts.clear) elif cmd == 'freeze': for prj in args[1:]: prj = api.prj_from_short(prj) print(Fore.YELLOW + prj) FreezeCommand(api).perform(prj, copy_bootstrap=opts.bootstrap) elif cmd == 'frozenage': projects = api.get_staging_projects_short() if len( args) == 1 else args[1:] for prj in projects: prj = api.prj_from_letter(prj) print('{} last frozen {}{:.1f} days ago'.format( Fore.YELLOW + prj + Fore.RESET, Fore.GREEN if api.prj_frozen_enough(prj) else Fore.RED, api.days_since_last_freeze(prj))) elif cmd == 'acheck': # Is it safe to accept? 
Meaning: /totest contains what it should and is not dirty version_totest = api.get_binary_version(api.project, "openSUSE-release.rpm", repository="totest", arch="x86_64") if version_totest: version_openqa = api.dashboard_content_load('version_totest') totest_dirty = api.is_repo_dirty(api.project, 'totest') print( "version_openqa: %s / version_totest: %s / totest_dirty: %s\n" % (version_openqa, version_totest, totest_dirty)) else: print("acheck is unavailable in %s!\n" % (api.project)) elif cmd == 'accept': # Is it safe to accept? Meaning: /totest contains what it should and is not dirty version_totest = api.get_binary_version(api.project, "openSUSE-release.rpm", repository="totest", arch="x86_64") if version_totest is None or opts.force: # SLE does not have a totest_version or openqa_version - ignore it version_openqa = version_totest totest_dirty = False else: version_openqa = api.dashboard_content_load('version_totest') totest_dirty = api.is_repo_dirty(api.project, 'totest') if version_openqa == version_totest and not totest_dirty: cmd = AcceptCommand(api) for prj in args[1:]: if cmd.perform(api.prj_from_letter(prj), opts.force): cmd.reset_rebuild_data(prj) else: return if not opts.no_cleanup: if api.item_exists(api.prj_from_letter(prj)): cmd.cleanup(api.prj_from_letter(prj)) if api.item_exists("%s:DVD" % api.prj_from_letter(prj)): cmd.cleanup("%s:DVD" % api.prj_from_letter(prj)) cmd.accept_other_new() if opts.project.startswith('openSUSE:'): cmd.update_factory_version() if api.item_exists(api.crebuild): cmd.sync_buildfailures() else: print("Not safe to accept: /totest is not yet synced") elif cmd == 'unselect': if opts.message: print('Ignoring requests first') IgnoreCommand(api).perform(args[1:], opts.message) UnselectCommand(api).perform(args[1:], opts.cleanup) elif cmd == 'select': # Include list of all stagings in short-hand and by full name. existing_stagings = api.get_staging_projects_short(None) existing_stagings += api.get_staging_projects(include_dvd=False) stagings = [] requests = [] for arg in args[1:]: # Since requests may be given by either request ID or package # name and stagings may include multi-letter special stagings # there is no easy way to distinguish between stagings and # requests in arguments. Therefore, check if argument is in the # list of short-hand and full project name stagings, otherwise # consider it a request. This also allows for special stagings # with the same name as a package, but the staging will be assumed # the first time around. The current practice seems to be to start a # special staging with a capital letter which makes them unique. # Lastly, adi stagings are consistently prefixed with adi:, which # also makes it easy to distinguish them from request IDs. 
if arg in existing_stagings and arg not in stagings: stagings.append(api.extract_staging_short(arg)) elif arg not in requests: requests.append(arg) if len(stagings) != 1 or len( requests) == 0 or opts.filter_by or opts.group_by: if opts.move or opts.from_: print( '--move and --from must be used with explicit staging and request list' ) return open_requests = api.get_open_requests({'withhistory': 1}) if len(open_requests) == 0: print('No open requests to consider') return splitter = RequestSplitter(api, open_requests, in_ring=True) considerable = splitter.stagings_load(stagings) if considerable == 0: print('No considerable stagings on which to act') return if opts.merge: splitter.merge() if opts.try_strategies: splitter.strategies_try() if len(requests) > 0: splitter.strategy_do('requests', requests=requests) if opts.strategy: splitter.strategy_do(opts.strategy) elif opts.filter_by or opts.group_by: kwargs = {} if opts.filter_by: kwargs['filters'] = opts.filter_by if opts.group_by: kwargs['groups'] = opts.group_by splitter.strategy_do('custom', **kwargs) else: if opts.merge: # Merge any none strategies before final none strategy. splitter.merge(strategy_none=True) splitter.strategy_do('none') splitter.strategy_do_non_bootstrapped('none') proposal = splitter.proposal if len(proposal) == 0: print('Empty proposal') return if opts.interactive: with tempfile.NamedTemporaryFile(suffix='.yml') as temp: temp.write( yaml.safe_dump(splitter.proposal, default_flow_style=False) + '\n\n') if len(splitter.requests): temp.write('# remaining requests:\n') for request in splitter.requests: temp.write('# {}: {}\n'.format( request.get('id'), request.find('action/target').get( 'package'))) temp.write('\n') temp.write( '# move requests between stagings or comment/remove them\n' ) temp.write('# change the target staging for a group\n') temp.write( '# remove the group, requests, staging, or strategy to skip\n' ) temp.write('# stagings\n') if opts.merge: temp.write('# - mergeable: {}\n'.format(', '.join( sorted(splitter.stagings_mergeable + splitter.stagings_mergeable_none)))) temp.write('# - considered: {}\n'.format(', '.join( sorted(splitter.stagings_considerable)))) temp.write('# - remaining: {}\n'.format(', '.join( sorted(splitter.stagings_available)))) temp.flush() editor = os.getenv('EDITOR') if not editor: editor = 'xdg-open' return_code = subprocess.call( editor.split(' ') + [temp.name]) proposal = yaml.safe_load(open(temp.name).read()) # Filter invalidated groups from proposal. keys = ['group', 'requests', 'staging', 'strategy'] for group, info in sorted(proposal.items()): for key in keys: if not info.get(key): del proposal[group] break print(yaml.safe_dump(proposal, default_flow_style=False)) print('Accept proposal? [y/n] (y): ', end='') if opts.non_interactive: print('y') else: response = raw_input().lower() if response != '' and response != 'y': print('Quit') return for group, info in sorted(proposal.items()): print('Staging {} in {}'.format(group, info['staging'])) # SelectCommand expects strings. request_ids = map(str, info['requests'].keys()) target_project = api.prj_from_short(info['staging']) if 'merge' not in info: # Assume that the original splitter_info is desirable # and that this staging is simply manual followup. 
api.set_splitter_info_in_prj_pseudometa( target_project, info['group'], info['strategy']) SelectCommand(api, target_project) \ .perform(request_ids, no_freeze=opts.no_freeze) else: target_project = api.prj_from_short(stagings[0]) if opts.add: api.mark_additional_packages(target_project, [opts.add]) else: SelectCommand(api, target_project) \ .perform(requests, opts.move, opts.from_, opts.no_freeze) elif cmd == 'cleanup_rings': CleanupRings(api).perform() elif cmd == 'ignore': IgnoreCommand(api).perform(args[1:], opts.message) elif cmd == 'unignore': UnignoreCommand(api).perform(args[1:], opts.cleanup) elif cmd == 'list': ListCommand(api).perform(supersede=opts.supersede) elif cmd == 'lock': lock.hold(opts.message) elif cmd == 'adi': AdiCommand(api).perform(args[1:], move=opts.move, by_dp=opts.by_develproject, split=opts.split) elif cmd == 'rebuild': RebuildCommand(api).perform(args[1:], opts.force) elif cmd == 'repair': RepairCommand(api).perform(args[1:], opts.cleanup) elif cmd == 'setprio': PrioCommand(api).perform(args[1:]) elif cmd == 'supersede': SupersedeCommand(api).perform(args[1:]) elif cmd == 'unlock': lock.release(force=True)
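The config subcommand shown above consumes staging names first, then takes the next word as the key and joins any remaining words into the value; here is a rough, standalone sketch of that interpretation (function and parameter names are illustrative, not part of the plugin):

# Sketch of the `config` argument split used in do_staging(): staging names
# are collected first, the next word becomes the key, and any remaining
# words are joined (space-separated) into the value.
def parse_config_args(args, known_stagings):
    projects, key, value = set(), None, None
    for arg in args:
        if arg in known_stagings:
            projects.add(arg)
        elif key is None:
            key = arg
        elif value is None:
            value = arg
        else:
            value += ' ' + arg
    return projects, key, value

# parse_config_args(['A', 'repo_checker-binary-whitelist', 'foo', 'bar'], {'A', 'B'})
#   -> ({'A'}, 'repo_checker-binary-whitelist', 'foo bar')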
def do_update_and_solve(self, subcmd, opts): """${cmd_name}: update and solve for given scope ${cmd_usage} ${cmd_option_list} """ if opts.staging: match = re.match('(.*):Staging:(.*)', opts.staging) opts.scope = ['staging:' + match.group(2)] if opts.project: raise ValueError('--staging and --project conflict') opts.project = match.group(1) elif not opts.project: raise ValueError('project is required') elif not opts.scope: opts.scope = ['all'] apiurl = conf.config['apiurl'] Config(apiurl, opts.project) target_config = conf.config[opts.project] # Store target project as opts.project will contain subprojects. target_project = opts.project api = StagingAPI(apiurl, target_project) main_repo = target_config['main-repo'] if apiurl.find('suse.de') > 0: # used by product converter os.environ['OBS_NAME'] = 'build.suse.de' # special case for all if opts.scope == ['all']: opts.scope = target_config.get('pkglistgen-scopes', 'target').split(' ') self.error_occured = False def solve_project(project, scope): try: self.tool.reset() self.tool.dry_run = self.options.dry if self.tool.update_and_solve_target( api, target_project, target_config, main_repo, project=project, scope=scope, force=opts.force, no_checkout=opts.no_checkout, only_release_packages=opts.only_release_packages, stop_after_solve=opts.stop_after_solve): self.error_occured = True except Exception: # Print exception, but continue to prevent problems affecting one # project from killing the whole process. The downside is that a common # error will be duplicated for each project. Common exceptions could # be excluded if a set list is determined, but that is likely not # practical. traceback.print_exc() self.error_occured = True for scope in opts.scope: if scope.startswith('staging:'): letter = re.match('staging:(.*)', scope).group(1) solve_project(api.prj_from_short(letter.upper()), 'staging') elif scope == 'target': solve_project(target_project, scope) elif scope == 'rings': solve_project(api.rings[1], scope) elif scope == 'staging': letters = api.get_staging_projects_short() for letter in letters: solve_project(api.prj_from_short(letter), scope) else: raise ValueError('scope "{}" must be one of: {}'.format( scope, ', '.join(self.SCOPES))) return self.error_occured
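The scope handling above treats 'all' as shorthand that expands to the target project's pkglistgen-scopes setting; a small sketch of that expansion under the same assumptions (helper name is hypothetical):

# Sketch of the scope expansion in do_update_and_solve(): the special scope
# 'all' is replaced by the space-separated pkglistgen-scopes config value
# (defaulting to 'target'); any other scope list passes through unchanged.
def expand_scopes(scopes, target_config):
    if scopes == ['all']:
        return target_config.get('pkglistgen-scopes', 'target').split(' ')
    return scopes

# expand_scopes(['all'], {'pkglistgen-scopes': 'target rings staging'})
#   -> ['target', 'rings', 'staging']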
def check_pra(self, project, repository, arch): config = Config.get(self.apiurl, project) oldstate = None self.store_filename = 'rebuildpacs.{}-{}.yaml'.format( project, repository) if self.store_project and self.store_package: state_yaml = source_file_load(self.apiurl, self.store_project, self.store_package, self.store_filename) if state_yaml: oldstate = yaml.safe_load(state_yaml) oldstate = oldstate or {} oldstate.setdefault('check', {}) if not isinstance(oldstate['check'], dict): oldstate['check'] = {} oldstate.setdefault('leafs', {}) if not isinstance(oldstate['leafs'], dict): oldstate['leafs'] = {} repository_pairs = repository_path_expand(self.apiurl, project, repository) directories = [] for pair_project, pair_repository in repository_pairs: directories.append( mirror(self.apiurl, pair_project, pair_repository, arch)) parsed = dict() with tempfile.TemporaryDirectory(prefix='repochecker') as dir: pfile = os.path.join(dir, 'packages') SCRIPT_PATH = os.path.dirname(os.path.realpath(__file__)) script = os.path.join(SCRIPT_PATH, 'write_repo_susetags_file.pl') parts = ['perl', script, dir] + directories p = subprocess.run(parts) if p.returncode: # technically only 126, but there is no other value atm - # so if some other perl error happens, we don't continue raise CorruptRepos target_packages = [] with open(os.path.join(dir, 'catalog.yml')) as file: catalog = yaml.safe_load(file) target_packages = catalog.get(directories[0], []) parsed = parsed_installcheck(pfile, arch, target_packages, []) for package in parsed: parsed[package]['output'] = "\n".join( parsed[package]['output']) # let's risk an N*N algorithm in the hope that we have a limited N for package1 in parsed: output = parsed[package1]['output'] for package2 in parsed: if package1 == package2: continue output = output.replace(parsed[package2]['output'], 'FOLLOWUP(' + package2 + ')') parsed[package1]['output'] = output for package in parsed: parsed[package]['output'] = self._split_and_filter( parsed[package]['output']) url = makeurl(self.apiurl, ['build', project, '_result'], { 'repository': repository, 'arch': arch, 'code': 'succeeded' }) root = ET.parse(http_GET(url)).getroot() succeeding = list( map(lambda x: x.get('package'), root.findall('.//status'))) per_source = dict() for package, entry in parsed.items(): source = "{}/{}/{}/{}".format(project, repository, arch, entry['source']) per_source.setdefault(source, { 'output': [], 'builds': entry['source'] in succeeding }) per_source[source]['output'].extend(entry['output']) rebuilds = set() for source in sorted(per_source): if not len(per_source[source]['output']): continue self.logger.debug("{} builds: {}".format( source, per_source[source]['builds'])) self.logger.debug(" " + "\n ".join(per_source[source]['output'])) if not per_source[source]['builds']: # nothing we can do continue old_output = oldstate['check'].get(source, {}).get('problem', []) if sorted(old_output) == sorted(per_source[source]['output']): self.logger.debug("unchanged problem") continue self.logger.info("rebuild %s", source) rebuilds.add(os.path.basename(source)) for line in difflib.unified_diff(old_output, per_source[source]['output'], 'before', 'now'): self.logger.debug(line.strip()) oldstate['check'][source] = { 'problem': per_source[source]['output'], 'rebuild': str(datetime.datetime.now()) } for source in list(oldstate['check']): if not source.startswith('{}/{}/{}/'.format( project, repository, arch)): continue if os.path.basename(source) not in succeeding: continue if source not in per_source: 
self.logger.info("No known problem, erasing %s", source) del oldstate['check'][source] packages = config.get('rebuildpacs-leafs', '').split() if not self.rebuild: # ignore in this case packages = [] # first round: collect all infos from obs infos = dict() for package in packages: subpacks, build_deps = self.check_leaf_package( project, repository, arch, package) infos[package] = {'subpacks': subpacks, 'deps': build_deps} # calculate rebuild triggers rebuild_triggers = dict() for package1 in packages: for package2 in packages: if package1 == package2: continue for subpack in infos[package1]['subpacks']: if subpack in infos[package2]['deps']: rebuild_triggers.setdefault(package1, set()) rebuild_triggers[package1].add(package2) # ignore this dependency, we already trigger both of them del infos[package2]['deps'][subpack] # calculate build info hashes for package in packages: if package not in succeeding: self.logger.debug("Ignore %s for the moment, not succeeding", package) continue m = hashlib.sha256() for bdep in sorted(infos[package]['deps']): m.update( bytes(bdep + '-' + infos[package]['deps'][bdep], 'utf-8')) state_key = '{}/{}/{}/{}'.format(project, repository, arch, package) olddigest = oldstate['leafs'].get(state_key, {}).get('buildinfo') if olddigest == m.hexdigest(): continue self.logger.info("rebuild leaf package %s (%s vs %s)", package, olddigest, m.hexdigest()) rebuilds.add(package) oldstate['leafs'][state_key] = { 'buildinfo': m.hexdigest(), 'rebuild': str(datetime.datetime.now()) } if self.dryrun: if self.rebuild: self.logger.info("To rebuild: %s", ' '.join(rebuilds)) return if not self.rebuild or not len(rebuilds): self.logger.debug("Nothing to rebuild") # in case we do rebuild, wait for it to succeed before saving self.store_yaml(oldstate, project, repository, arch) return query = { 'cmd': 'rebuild', 'repository': repository, 'arch': arch, 'package': rebuilds } url = makeurl(self.apiurl, ['build', project]) headers = {'Content-Type': 'application/x-www-form-urlencoded'} http_request('POST', url, headers, data=urlencode(query, doseq=True)) self.store_yaml(oldstate, project, repository, arch)
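The leaf-package change detection above reduces a package's build dependencies to a stable digest; a self-contained sketch of that hashing step (the helper name is hypothetical):

import hashlib

# Sketch of the build-info digest from check_pra(): dependencies are hashed
# in sorted order as name-version pairs, so the digest only changes when a
# dependency or its version changes.
def buildinfo_digest(build_deps):
    m = hashlib.sha256()
    for bdep in sorted(build_deps):
        m.update(bytes(bdep + '-' + build_deps[bdep], 'utf-8'))
    return m.hexdigest()

# Stable regardless of dict ordering:
# buildinfo_digest({'glibc': '2.31', 'gcc': '10.2'}) == buildinfo_digest({'gcc': '10.2', 'glibc': '2.31'})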
if __name__ == '__main__': parser = argparse.ArgumentParser( description='Command to publish openQA status in Staging projects') parser.add_argument('-s', '--staging', type=str, default=None, help='staging project letter') parser.add_argument('-f', '--force', action='store_true', default=False, help='force the write of the comment') parser.add_argument('-p', '--project', type=str, default='Factory', help='openSUSE version to make the check (Factory, 13.2)') parser.add_argument('-d', '--debug', action='store_true', default=False, help='enable debug information') args = parser.parse_args() osc.conf.get_config() osc.conf.config['debug'] = args.debug if args.force: MARGIN_HOURS = 0 Config('openSUSE:%s' % args.project) api = StagingAPI(osc.conf.config['apiurl'], 'openSUSE:%s' % args.project) openQA = OpenQAReport(api) if args.staging: openQA.report(api.prj_from_letter(args.staging)) else: for staging in api.get_staging_projects(): if not staging.endswith(':DVD'): openQA.report(staging)
def staging_api(self, project): if project not in self.staging_apis: Config.get(self.apiurl, project) self.staging_apis[project] = StagingAPI(self.apiurl, project) return self.staging_apis[project]
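The helper above lazily creates and caches one StagingAPI per project, so Config.get() and the StagingAPI constructor run only once for each project; a minimal, self-contained sketch of that pattern (placeholder class, not the real StagingAPI):

# Self-contained sketch of the per-project cache used by staging_api().
class _PlaceholderStagingAPI:
    def __init__(self, apiurl, project):
        self.apiurl, self.project = apiurl, project

class StagingAPICache:
    def __init__(self, apiurl):
        self.apiurl = apiurl
        self.staging_apis = {}

    def staging_api(self, project):
        # Create on first use, then reuse the same instance.
        if project not in self.staging_apis:
            self.staging_apis[project] = _PlaceholderStagingAPI(self.apiurl, project)
        return self.staging_apis[project]

cache = StagingAPICache('https://api.opensuse.org')
assert cache.staging_api('openSUSE:Factory') is cache.staging_api('openSUSE:Factory')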
def load_config(self, project=PROJECT): self.config = Config(APIURL, project)
if __name__ == '__main__': parser = argparse.ArgumentParser( description='Command to publish openQA status in Staging projects') parser.add_argument('-s', '--staging', type=str, default=None, help='staging project letter') parser.add_argument('-f', '--force', action='store_true', default=False, help='force the write of the comment') parser.add_argument('-p', '--project', type=str, default='Factory', help='openSUSE version to make the check (Factory, 13.2)') parser.add_argument('-d', '--debug', action='store_true', default=False, help='enable debug information') args = parser.parse_args() osc.conf.get_config() osc.conf.config['debug'] = args.debug if args.force: MARGIN_HOURS = 0 Config(args.project) api = StagingAPI(osc.conf.config['apiurl'], args.project) openQA = OpenQAReport(api) if args.staging: openQA.report(api.prj_from_letter(args.staging)) else: for staging in api.get_staging_projects(include_dvd=False): openQA.report(staging)
default='openSUSE:Factory', help='project to check (ex. openSUSE:Factory, openSUSE:Leap:15.1)') parser.add_argument('-d', '--debug', action='store_true', default=False, help='enable debug information') parser.add_argument('-A', '--apiurl', metavar='URL', help='API URL') args = parser.parse_args() osc.conf.get_config(override_apiurl=args.apiurl) osc.conf.config['debug'] = args.debug apiurl = osc.conf.config['apiurl'] config = Config(apiurl, args.project) api = StagingAPI(apiurl, args.project) staging_report = InstallChecker(api, config) if args.debug: logging.basicConfig(level=logging.DEBUG) else: logging.basicConfig(level=logging.INFO) result = True if args.staging: result = staging_report.staging(api.prj_from_short(args.staging), force=True) else: for staging in api.get_staging_projects(): if api.is_adi_project(staging):
def do_update_and_solve(self, subcmd, opts): """${cmd_name}: update and solve for given scope ${cmd_usage} ${cmd_option_list} """ if opts.staging: match = re.match('(.*):Staging:(.*)', opts.staging) opts.scope = 'staging:' + match.group(2) if opts.project: raise ValueError('--staging and --project conflict') opts.project = match.group(1) elif not opts.project: raise ValueError('project is required') if not opts.scope: raise ValueError('--scope or --staging required') apiurl = conf.config['apiurl'] Config(apiurl, opts.project) target_config = conf.config[opts.project] # Store target project as opts.project will contain subprojects. target_project = opts.project api = StagingAPI(apiurl, target_project) main_repo = target_config['main-repo'] # used by product converter # these need to be kept in sync with the OBS config if apiurl.find('suse.de') > 0: os.environ['OBS_NAME'] = 'build.suse.de' if apiurl.find('opensuse.org') > 0: os.environ['OBS_NAME'] = 'build.opensuse.org' def solve_project(project, scope): try: self.tool.reset() self.tool.dry_run = self.options.dry return self.tool.update_and_solve_target( api, target_project, target_config, main_repo, project=project, scope=scope, force=opts.force, no_checkout=opts.no_checkout, only_release_packages=opts.only_release_packages, stop_after_solve=opts.stop_after_solve) except MismatchedRepoException: logging.error( "Failed to create weakremovers.inc due to mismatch in repos - project most likely started building again." ) # for stagings we have to be strict on the exit value if scope == 'staging': return 1 return 0 scope = opts.scope if scope.startswith('staging:'): letter = re.match('staging:(.*)', scope).group(1) return solve_project(api.prj_from_short(letter), 'staging') elif scope == 'target': return solve_project(target_project, scope) elif scope == 'ring1': return solve_project(api.rings[1], scope) else: raise ValueError('scope "{}" must be one of: {}'.format( scope, ', '.join(self.SCOPES)))
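The environment setup above maps the API URL onto the matching build service hostname for the product converter; a hedged sketch of that mapping (only the two hosts seen in the code are assumed, the helper name is illustrative):

import os

# Sketch of the OBS_NAME selection in do_update_and_solve(): pick the build
# service hostname from the API URL; unknown URLs leave the variable unset.
def obs_name_for(apiurl):
    if apiurl.find('suse.de') > 0:
        return 'build.suse.de'
    if apiurl.find('opensuse.org') > 0:
        return 'build.opensuse.org'
    return None

name = obs_name_for('https://api.opensuse.org')
if name:
    os.environ['OBS_NAME'] = name  # -> 'build.opensuse.org'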
def repository_check(self, repository_pairs, state_hash, simulate_merge, post_comments=False): comment = [] project, repository = repository_pairs[0] self.logger.info('checking {}/{}@{}[{}]'.format( project, repository, state_hash, len(repository_pairs))) published = repositories_published(self.apiurl, repository_pairs) if not self.force: if state_hash == self.repository_state_last( project, repository, not simulate_merge): self.logger.info('{} build unchanged'.format(project)) # TODO keep track of skipped count for cycle summary return None # For submit style requests, we want to process if the top layer is done, # but not mark the review as final until all layers are published. if published is not True and (not simulate_merge or published[0] == project): # Require all layers to be published except when the top layer # is published in a simulate merge (allows quicker feedback with # potentially incorrect results for staging). self.logger.info('{}/{} not published'.format( published[0], published[1])) return None # Drop non-published repository information and thus reduce to boolean. published = published is True if simulate_merge: # Restrict top layer archs to the whitelisted archs from merge layer. archs = set(target_archs(self.apiurl, project, repository)).intersection( set( self.target_archs( repository_pairs[1][0], repository_pairs[1][1]))) else: # Top of pseudometa file. comment.append(state_hash) archs = self.target_archs(project, repository) if post_comments: # Stores parsed install_check() results grouped by package. self.package_results = {} if not len(archs): self.logger.debug( '{} has no relevant architectures'.format(project)) return None result = True for arch in archs: directories = [] for pair_project, pair_repository in repository_pairs: directories.append( self.mirror(pair_project, pair_repository, arch)) if simulate_merge: ignore = self.simulated_merge_ignore(repository_pairs[0], repository_pairs[1], arch) whitelist = self.binary_whitelist(repository_pairs[0], repository_pairs[1], arch) results = { 'cycle': self.cycle_check(repository_pairs[0], repository_pairs[1], arch), 'install': self.install_check(repository_pairs[1], arch, directories, ignore, whitelist), } else: # Only products themselves will want no-filter or perhaps # projects working on cleaning up a product. no_filter = str2bool( Config.get(self.apiurl, project).get('repo_checker-no-filter')) results = { 'cycle': CheckResult(True, None), 'install': self.install_check(repository_pairs[0], arch, directories, parse=post_comments, no_filter=no_filter), } if not all(result.success for _, result in results.items()): # Not all checks passed, build comment. result = False self.result_comment(repository, arch, results, comment) if simulate_merge: info_extra = {'build': state_hash} if not result: # Some checks in group did not pass, post comment. # Avoid identical comments with different build hash during # target project build phase. Once published update regardless. self.comment_write(state='seen', result='failed', project=project, message='\n'.join(comment).strip(), identical=True, info_extra=info_extra, info_extra_identical=published, bot_name_suffix=repository) else: # Post passed comment only if previous failed comment. text = 'Previously reported problems have been resolved.' 
self.comment_write(state='done', result='passed', project=project, message=text, identical=True, only_replace=True, info_extra=info_extra, bot_name_suffix=repository) else: text = '\n'.join(comment).strip() if not self.dryrun: filename = self.project_pseudometa_file_name( project, repository) project_pseudometa_file_ensure( self.apiurl, project, filename, text + '\n', 'repo_checker project_only run') else: print(text) if post_comments: self.package_comments(project, repository) if result and not published: # Wait for the complete stack to build before positive result. self.logger.debug( 'demoting result from accept to ignore due to non-published layer' ) result = None return result
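repository_check() above combines the per-arch cycle and install checks by requiring every result to succeed; a small sketch of that aggregation, assuming CheckResult is a simple (success, comment) pair as suggested by CheckResult(True, None) and the result.success access in the code:

from collections import namedtuple

# Assumed shape of CheckResult, matching its use in repository_check().
CheckResult = namedtuple('CheckResult', ('success', 'comment'))

results = {
    'cycle': CheckResult(True, None),
    'install': CheckResult(False, 'package foo is uninstallable'),
}

# The arch only passes when every individual check succeeded.
all_passed = all(result.success for _, result in results.items())
# all_passed -> False, so a failure comment would be built for this arch.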
def do_staging(self, subcmd, opts, *args): """${cmd_name}: Commands to work with staging projects ${cmd_option_list} "accept" will accept all requests in openSUSE:Factory:Staging:<LETTER> (into Factory) "acheck" will check if it's safe to accept new staging projects As openSUSE:Factory is syncing the right package versions between /standard, /totest and /snapshot, it's important that the projects are clean prior to a checkin round. "check" will check if all packages are links without changes "cleanup_rings" will try to clean up rings content and print out problems "freeze" will freeze the sources of the project's links (without affecting the source packages) "frozenage" will show when the respective staging project was last frozen "list" will pick the requests not in rings "select" will add requests to the project "unselect" will remove requests from the project - pushing them back to the backlog Usage: osc staging accept [--force] [LETTER...] osc staging check [--old] REPO osc staging cleanup_rings osc staging freeze [--no-bootstrap] PROJECT... osc staging frozenage PROJECT... osc staging list [--supersede] osc staging select [--no-freeze] [--move [--from PROJECT]] LETTER REQUEST... osc staging unselect REQUEST... osc staging repair REQUEST... """ if opts.version: self._print_version() # verify the argument counts match the commands if len(args) == 0: raise oscerr.WrongArgs('No command given, see "osc help staging"!') cmd = args[0] if cmd in ('freeze', 'frozenage', 'repair'): min_args, max_args = 1, None elif cmd == 'check': min_args, max_args = 0, 2 elif cmd == 'select': min_args, max_args = 1, None if not opts.add: min_args = 2 elif cmd == 'unselect': min_args, max_args = 1, None elif cmd == 'adi': min_args, max_args = None, None elif cmd in ('list', 'accept'): min_args, max_args = 0, None elif cmd in ('cleanup_rings', 'acheck'): min_args, max_args = 0, 0 else: raise oscerr.WrongArgs('Unknown command: %s' % cmd) if len(args) - 1 < min_args: raise oscerr.WrongArgs('Too few arguments.') if max_args is not None and len(args) - 1 > max_args: raise oscerr.WrongArgs('Too many arguments.') # Init the OBS access and configuration opts.project = self._full_project_name(opts.project) opts.apiurl = self.get_api_url() opts.verbose = False Config(opts.project) with OBSLock(opts.apiurl, opts.project): api = StagingAPI(opts.apiurl, opts.project) # call the respective command and parse args by need if cmd == 'check': prj = args[1] if len(args) > 1 else None CheckCommand(api).perform(prj, opts.old) elif cmd == 'freeze': for prj in args[1:]: FreezeCommand(api).perform(api.prj_from_letter(prj), copy_bootstrap=opts.bootstrap) elif cmd == 'frozenage': for prj in args[1:]: print "%s last frozen %0.1f days ago" % (api.prj_from_letter( prj), api.days_since_last_freeze(api.prj_from_letter(prj))) elif cmd == 'acheck': # Is it safe to accept? 
Meaning: /totest contains what it should and is not dirty version_totest = api.get_binary_version(api.project, "openSUSE-release.rpm", repository="totest", arch="x86_64") if version_totest is None or opts.force: # SLE does not have a totest_version or openqa_version - ignore it version_openqa = version_totest totest_dirty = False else: version_openqa = api.load_file_content( "%s:Staging" % api.project, "dashboard", "version_totest") totest_dirty = api.is_repo_dirty(api.project, 'totest') if version_openqa == version_totest and not totest_dirty: cmd = AcceptCommand(api) for prj in args[1:]: if not cmd.perform(api.prj_from_letter(prj), opts.force): return if not opts.no_cleanup: if api.item_exists(api.prj_from_letter(prj)): cmd.cleanup(api.prj_from_letter(prj)) if api.item_exists("%s:DVD" % api.prj_from_letter(prj)): cmd.cleanup("%s:DVD" % api.prj_from_letter(prj)) if opts.project.startswith('openSUSE:'): cmd.accept_other_new() cmd.update_factory_version() if api.item_exists(api.crebuild): cmd.sync_buildfailures() else: print "Not safe to accept: /totest is not yet synced" elif cmd == 'unselect': UnselectCommand(api).perform(args[1:]) elif cmd == 'select': tprj = api.prj_from_letter(args[1]) if opts.add: api.mark_additional_packages(tprj, [opts.add]) else: SelectCommand(api, tprj).perform(args[2:], opts.move, opts.from_, opts.no_freeze) elif cmd == 'cleanup_rings': CleanupRings(api).perform() elif cmd == 'list': ListCommand(api).perform(args[1:], supersede=opts.supersede) elif cmd == 'adi': AdiCommand(api).perform(args[1:], move=opts.move, by_dp=opts.by_develproject, split=opts.split) elif cmd == 'repair': RepairCommand(api).perform(args[1:])