def do_origin(self, subcmd, opts, *args):
    """${cmd_name}: tools for working with origin information

    ${cmd_option_list}

    config: print expanded OSRT:OriginConfig
    cron: update the lookup for all projects with an OSRT:OriginConfig attribute
    history: list requests containing an origin annotation
    list: print all packages and their origin
    package: print the origin of package
    potentials: list potential origins of a package
    projects: list all projects with an OSRT:OriginConfig attribute
    report: print origin summary report
    update: handle package source changes as either delete or submit requests

    Usage:
        osc origin config [--origins-only]
        osc origin cron
        osc origin history [--format json|yaml] PACKAGE
        osc origin list [--force-refresh] [--format json|yaml]
        osc origin package [--debug] PACKAGE
        osc origin potentials [--format json|yaml] PACKAGE
        osc origin projects [--format json|yaml]
        osc origin report [--diff] [--force-refresh] [--mail]
        osc origin update [--listen] [--listen-seconds] [PACKAGE...]
    """

    if len(args) == 0:
        raise oscerr.WrongArgs('A command must be indicated.')
    command = args[0]
    if command not in ['config', 'cron', 'history', 'list', 'package', 'potentials',
                       'projects', 'report', 'update']:
        raise oscerr.WrongArgs('Unknown command: {}'.format(command))
    if command == 'package' and len(args) < 2:
        raise oscerr.WrongArgs('A package must be indicated.')

    level = logging.DEBUG if opts.debug else None
    logging.basicConfig(level=level, format='[%(levelname).1s] %(message)s')

    # Allow for determining project from osc store.
    if not opts.project and core.is_project_dir('.'):
        opts.project = core.store_read_project('.')

    Cache.init()
    apiurl = self.get_api_url()
    if command not in ['cron', 'projects', 'update']:
        if not opts.project:
            raise oscerr.WrongArgs('A project must be indicated.')
        config = config_load(apiurl, opts.project)
        if not config:
            raise oscerr.WrongArgs(
                'OSRT:OriginConfig attribute missing from {}'.format(opts.project))

    function = 'osrt_origin_{}'.format(command)
    globals()[function](apiurl, opts, *args[1:])


def do_staging(self, subcmd, opts, *args):
    """${cmd_name}: Commands to work with staging projects

    ${cmd_option_list}

    "accept" will accept all requests in $PROJECT:Staging:<LETTER> into $PROJECT
        If openSUSE:* project, requests marked ready from adi stagings will also
        be accepted.

    "acheck" will check if it is safe to accept new staging projects
        As $PROJECT is syncing the right package versions between /standard,
        /totest and /snapshot, it is important that the projects are clean
        prior to a checkin round.

    "adi" will list already staged requests, stage new requests, and supersede
        requests where applicable. New adi stagings will be created for new
        packages based on the grouping options used. The default grouping is by
        source project. When adi stagings are ready the request will be marked
        ready, unstaged, and the adi staging deleted.

    "check" will check if all packages are links without changes

    "check_duplicate_binaries" list binaries provided by multiple packages

    "config" will modify or view staging specific configuration

        Target project OSRT:Config attribute configuration applies to all
        stagings. Both configuration locations follow the .oscrc format
        (space separated list).

        config
            Print all staging configuration.
        config key
            Print the value of key for stagings.
        config key value...
            Set the value of key for stagings.
        config --clear
            Clear all staging configuration.
        config --clear key
            Clear (unset) a single key from staging configuration.
        config --append key value...
            Append value to existing value or set if no existing value.

        All of the above may be restricted to a set of stagings.

        The staging configuration is automatically cleared anytime staging
        pseudometa is cleared (accept, or unstage all requests).

        The keys that may be set in staging configuration are:

        - repo_checker-binary-whitelist[-arch]: appended to target project list
        - todo: text to be printed after staging is accepted

    "cleanup_rings" will try to cleanup rings content and print out problems

    "freeze" will freeze the sources of the project's links while not
        affecting the source packages

    "frozenage" will show when the respective staging project was last frozen

    "ignore" will ignore a request from "list" and "adi" commands until unignored

    "unignore" will remove requests from the ignore list
        If the --cleanup flag is included then all ignored requests that were
        changed from state new or review more than 3 days ago will be removed.

    "list" will list/supersede requests for ring packages or all if no rings.

    "lock" acquire a hold on the project in order to execute multiple commands
        and prevent others from interrupting. An example:

        lock -m "checkin round"

        list --supersede
        adi
        accept A B C D E

        unlock

        Each command will update the lock to keep it up-to-date.

    "repair" will attempt to repair the state of a request that has been
        corrupted.

        Use the --cleanup flag to include all untracked requests.

    "select" will add requests to the project
        Stagings are expected to be either in short-hand or the full project
        name. For example letter or named stagings can be specified simply as
        A, B, Gcc6, etc, while adi stagings can be specified as adi:1, adi:2,
        etc. Currently, adi stagings are not supported in proposal mode.

        Requests may either be the target package or the request ID.

        When using --filter-by or --group-by the xpath will be applied to the
        request node as returned by OBS. Use the following on a current request
        to see the XML structure.

        osc api /request/1337

        A number of additional values will supplement the normal request node.

        - ./action/target/@devel_project: the devel project for the package
        - ./action/target/@devel_project_super: super devel project if relevant
        - ./action/target/@ring: the ring to which the package belongs
        - ./@aged: either True or False based on splitter-request-age-threshold
        - ./@nonfree: set to nonfree if targeting nonfree sub project
        - ./@ignored: either False or the provided message

        Some useful examples:

        --filter-by './action/target[starts-with(@package, "yast-")]'
        --filter-by './action/target/[@devel_project="YaST:Head"]'
        --filter-by './action/target[starts-with(@ring, "1")]'
        --filter-by '@id!="1234567"'
        --filter-by 'contains(description, "#Portus")'

        --group-by='./action/target/@devel_project'
        --group-by='./action/target/@ring'

        Multiple filter-by or group-by options may be used at the same time.

        Note that when using proposal mode, multiple stagings to consider may
        be provided in addition to a list of requests by which to filter. A
        more complex example:

        select --group-by='./action/target/@devel_project' A B C 123 456 789

        This will separate the requests 123, 456, 789 by devel project and only
        consider stagings A, B, or C, if available, for placement.

        No arguments is also a valid choice and will propose all non-ignored
        requests into the first available staging. Note that bootstrapped
        stagings are only used when either required or no other stagings are
        available.

        Another useful example is placing all open requests into a specific
        letter staging with:

        select A

        Built in strategies may be specified as well. For example:

        select --strategy devel
        select --strategy quick
        select --strategy special
        select --strategy super

        The default is none and custom is used when any filter-by or group-by
        arguments are provided.

        To merge applicable requests into an existing staging.

        select --merge A

        To automatically try all available strategies.

        select --try-strategies

        These concepts can be combined and interactive mode allows the proposal
        to be modified before it is executed.

        Moving requests can be accomplished using the --move flag. For example,
        to move already staged pac1 and pac2 to staging B use the following.

        select --move B pac1 pac2

        The staging in which the requests are staged will automatically be
        determined and the requests will be removed from that staging and
        placed in the specified staging.

        Related to this, the --filter-from option may be used in conjunction
        with --move to only move requests already staged in a specific staging.
        This can be useful if a staging master is responsible for a specific
        set of packages and wants to move them into a different staging when
        they were already placed in a mixed staging. For example, if one had a
        file with a list of packages the following would move any of them found
        in staging A to staging B.

        select --move --filter-from A B $(< package.list)

    "unselect" will remove requests from the project - pushing them back to the
        backlog. If a message is included the requests will be ignored first.

        Use the --cleanup flag to include all obsolete requests.

    "unlock" will remove the staging lock in case it gets stuck or a manual hold
        If a command lock gets stuck while a hold is placed on a project the
        unlock command will need to be run twice since there are two layers of
        locks.

    "rebuild" will rebuild broken packages in the given stagings or all
        The rebuild command will only trigger builds for packages with less
        than 3 failures since the last success or if the build log indicates a
        stall.

        If the force option is included the rebuild checks will be ignored and
        all packages failing to build will be triggered.

    "setprio" will set priority of requests within stagings
        If no stagings are specified all stagings will be used.
        The default priority is important, but the possible values are:
        "critical", "important", "moderate" or "low".

    "supersede" will supersede requests where applicable.
        A request list can be used to limit what is superseded.

    Usage:
        osc staging accept [--force] [--no-cleanup] [LETTER...]
        osc staging acheck
        osc staging adi [--move] [--by-develproject] [--split] [REQUEST...]
        osc staging check [--old] [STAGING...]
        osc staging check_duplicate_binaries
        osc staging config [--append] [--clear] [STAGING...] [key] [value]
        osc staging cleanup_rings
        osc staging freeze [--no-bootstrap] STAGING...
        osc staging frozenage [STAGING...]
        osc staging ignore [-m MESSAGE] REQUEST...
        osc staging unignore [--cleanup] [REQUEST...|all]
        osc staging list [--supersede]
        osc staging lock [-m MESSAGE]
        osc staging select [--no-freeze] [--move [--filter-from STAGING]]
            [--add PACKAGE]
            STAGING REQUEST...
        osc staging select [--no-freeze] [--interactive|--non-interactive]
            [--filter-by...] [--group-by...]
            [--merge] [--try-strategies] [--strategy]
            [STAGING...] [REQUEST...]
        osc staging unselect [--cleanup] [-m MESSAGE] [REQUEST...]
        osc staging unlock
        osc staging rebuild [--force] [STAGING...]
        osc staging repair [--cleanup] [REQUEST...]
        osc staging setprio [STAGING...] [priority]
        osc staging supersede [REQUEST...]
    """
    if opts.version:
        self._print_version()

    # verify the argument counts match the commands
    if len(args) == 0:
        raise oscerr.WrongArgs('No command given, see "osc help staging"!')
    cmd = args[0]
    if cmd in (
        'accept',
        'adi',
        'check',
        'config',
        'frozenage',
        'unignore',
        'select',
        'unselect',
        'rebuild',
        'repair',
        'setprio',
        'supersede',
    ):
        min_args, max_args = 0, None
    elif cmd in (
        'freeze',
        'ignore',
    ):
        min_args, max_args = 1, None
    elif cmd in (
        'acheck',
        'check_duplicate_binaries',
        'cleanup_rings',
        'list',
        'lock',
        'unlock',
    ):
        min_args, max_args = 0, 0
    else:
        raise oscerr.WrongArgs('Unknown command: %s' % cmd)

    args = clean_args(args)
    if len(args) - 1 < min_args:
        raise oscerr.WrongArgs('Too few arguments.')
    if max_args is not None and len(args) - 1 > max_args:
        raise oscerr.WrongArgs('Too many arguments.')

    # Allow for determining project from osc store.
    if not opts.project:
        if core.is_project_dir('.'):
            opts.project = core.store_read_project('.')
        else:
            opts.project = 'Factory'

    # Cache the remote config fetch.
    Cache.init()

    # Init the OBS access and configuration
    opts.project = self._full_project_name(opts.project)
    opts.apiurl = self.get_api_url()
    opts.verbose = False
    Config(opts.apiurl, opts.project)

    colorama.init(autoreset=True,
                  strip=(opts.no_color or not bool(int(conf.config.get('staging.color', True)))))
    # Allow colors to be changed.
    for name in dir(Fore):
        if not name.startswith('_'):
            # .oscrc requires keys to be lower-case.
            value = conf.config.get('staging.color.' + name.lower())
            if value:
                setattr(Fore, name, ansi.code_to_chars(value))

    if opts.wipe_cache:
        Cache.delete_all()

    api = StagingAPI(opts.apiurl, opts.project)
    needed = lock_needed(cmd, opts)
    with OBSLock(opts.apiurl, opts.project, reason=cmd, needed=needed) as lock:

        # call the respective command and parse args by need
        if cmd == 'check':
            if len(args) == 1:
                CheckCommand(api).perform(None, opts.old)
            else:
                for prj in args[1:]:
                    CheckCommand(api).perform(prj, opts.old)
                    print()
        elif cmd == 'check_duplicate_binaries':
            CheckDuplicateBinariesCommand(api).perform(opts.save)
        elif cmd == 'config':
            projects = set()
            key = value = None
            stagings = api.get_staging_projects_short(None) + \
                       api.get_staging_projects()
            for arg in args[1:]:
                if arg in stagings:
                    projects.add(api.prj_from_short(arg))
                elif key is None:
                    key = arg
                elif value is None:
                    value = arg
                else:
                    value += ' ' + arg

            if not len(projects):
                projects = api.get_staging_projects()

            ConfigCommand(api).perform(projects, key, value, opts.append, opts.clear)
        elif cmd == 'freeze':
            for prj in args[1:]:
                prj = api.prj_from_short(prj)
                print(Fore.YELLOW + prj)
                FreezeCommand(api).perform(prj, copy_bootstrap=opts.bootstrap)
        elif cmd == 'frozenage':
            projects = api.get_staging_projects_short() if len(args) == 1 else args[1:]
            for prj in projects:
                prj = api.prj_from_letter(prj)
                print('{} last frozen {}{:.1f} days ago'.format(
                    Fore.YELLOW + prj + Fore.RESET,
                    Fore.GREEN if api.prj_frozen_enough(prj) else Fore.RED,
                    api.days_since_last_freeze(prj)))
        elif cmd == 'acheck':
            # Is it safe to accept? Meaning: /totest contains what it should and is not dirty
            version_totest = api.get_binary_version(api.project, "openSUSE-release.rpm",
                                                    repository="totest", arch="x86_64")
            if version_totest:
                version_openqa = api.pseudometa_file_load('version_totest')
                totest_dirty = api.is_repo_dirty(api.project, 'totest')
                print("version_openqa: %s / version_totest: %s / totest_dirty: %s\n" %
                      (version_openqa, version_totest, totest_dirty))
            else:
                print("acheck is unavailable in %s!\n" % (api.project))
        elif cmd == 'accept':
            # Is it safe to accept? Meaning: /totest contains what it should and is not dirty
            version_totest = api.get_binary_version(api.project, "openSUSE-release.rpm",
                                                    repository="totest", arch="x86_64")

            if version_totest is None or opts.force:
                # SLE does not have a totest_version or openqa_version - ignore it
                version_openqa = version_totest
                totest_dirty = False
            else:
                version_openqa = api.pseudometa_file_load('version_totest')
                totest_dirty = api.is_repo_dirty(api.project, 'totest')

            if version_openqa == version_totest and not totest_dirty:
                cmd = AcceptCommand(api)
                for prj in args[1:]:
                    if cmd.perform(api.prj_from_letter(prj), opts.force):
                        cmd.reset_rebuild_data(prj)
                    else:
                        return
                    if not opts.no_cleanup:
                        if api.item_exists(api.prj_from_letter(prj)):
                            cmd.cleanup(api.prj_from_letter(prj))
                cmd.accept_other_new()
                if opts.project.startswith('openSUSE:'):
                    cmd.update_factory_version()
                    if api.item_exists(api.crebuild):
                        cmd.sync_buildfailures()
            else:
                print("Not safe to accept: /totest is not yet synced")
        elif cmd == 'unselect':
            if opts.message:
                print('Ignoring requests first')
                IgnoreCommand(api).perform(args[1:], opts.message)
            UnselectCommand(api).perform(args[1:], opts.cleanup)
        elif cmd == 'select':
            # Include list of all stagings in short-hand and by full name.
            existing_stagings = api.get_staging_projects_short(None)
            existing_stagings += api.get_staging_projects()
            stagings = []
            requests = []
            for arg in args[1:]:
                # Since requests may be given by either request ID or package
                # name and stagings may include multi-letter special stagings
                # there is no easy way to distinguish between stagings and
                # requests in arguments. Therefore, check if argument is in the
                # list of short-hand and full project name stagings, otherwise
                # consider it a request. This also allows for special stagings
                # with the same name as package, but the staging will be assumed
                # first time around. The current practice seems to be to start a
                # special staging with a capital letter which makes them unique.
                # Lastly, adi stagings are consistently prefixed with adi: which
                # also makes it consistent to distinguish them from request IDs.
                if arg in existing_stagings and arg not in stagings:
                    stagings.append(api.extract_staging_short(arg))
                elif arg not in requests:
                    requests.append(arg)

            if len(stagings) != 1 or len(requests) == 0 or opts.filter_by or opts.group_by:
                if opts.move or opts.filter_from:
                    print('--move and --filter-from must be used with explicit staging and request list')
                    return

                open_requests = api.get_open_requests({'withhistory': 1})
                if len(open_requests) == 0:
                    print('No open requests to consider')
                    return

                splitter = RequestSplitter(api, open_requests, in_ring=True)

                considerable = splitter.stagings_load(stagings)
                if considerable == 0:
                    print('No considerable stagings on which to act')
                    return

                if opts.merge:
                    splitter.merge()
                if opts.try_strategies:
                    splitter.strategies_try()
                if len(requests) > 0:
                    splitter.strategy_do('requests', requests=requests)
                if opts.strategy:
                    splitter.strategy_do(opts.strategy)
                elif opts.filter_by or opts.group_by:
                    kwargs = {}
                    if opts.filter_by:
                        kwargs['filters'] = opts.filter_by
                    if opts.group_by:
                        kwargs['groups'] = opts.group_by
                    splitter.strategy_do('custom', **kwargs)
                else:
                    if opts.merge:
                        # Merge any none strategies before final none strategy.
                        splitter.merge(strategy_none=True)
                    splitter.strategy_do('none')
                    splitter.strategy_do_non_bootstrapped('none')

                proposal = splitter.proposal
                if len(proposal) == 0:
                    print('Empty proposal')
                    return

                if opts.interactive:
                    with tempfile.NamedTemporaryFile(suffix='.yml') as temp:
                        temp.write(yaml.safe_dump(splitter.proposal, default_flow_style=False) + '\n\n')

                        if len(splitter.requests):
                            temp.write('# remaining requests:\n')
                            for request in splitter.requests:
                                temp.write('# {}: {}\n'.format(
                                    request.get('id'),
                                    request.find('action/target').get('package')))
                            temp.write('\n')

                        temp.write('# move requests between stagings or comment/remove them\n')
                        temp.write('# change the target staging for a group\n')
                        temp.write('# remove the group, requests, staging, or strategy to skip\n')
                        temp.write('# stagings\n')
                        if opts.merge:
                            temp.write('# - mergeable: {}\n'
                                       .format(', '.join(sorted(splitter.stagings_mergeable +
                                                                splitter.stagings_mergeable_none))))
                        temp.write('# - considered: {}\n'
                                   .format(', '.join(sorted(splitter.stagings_considerable))))
                        temp.write('# - remaining: {}\n'
                                   .format(', '.join(sorted(splitter.stagings_available))))
                        temp.flush()

                        editor = os.getenv('EDITOR')
                        if not editor:
                            editor = 'xdg-open'
                        return_code = subprocess.call(editor.split(' ') + [temp.name])

                        proposal = yaml.safe_load(open(temp.name).read())

                        # Filter invalidated groups from proposal.
                        keys = ['group', 'requests', 'staging', 'strategy']
                        for group, info in sorted(proposal.items()):
                            for key in keys:
                                if not info.get(key):
                                    del proposal[group]
                                    break

                print(yaml.safe_dump(proposal, default_flow_style=False))

                print('Accept proposal? [y/n] (y): ', end='')
                if opts.non_interactive:
                    print('y')
                else:
                    response = raw_input().lower()
                    if response != '' and response != 'y':
                        print('Quit')
                        return

                for group, info in sorted(proposal.items()):
                    print('Staging {} in {}'.format(group, info['staging']))

                    # SelectCommand expects strings.
                    request_ids = map(str, info['requests'].keys())
                    target_project = api.prj_from_short(info['staging'])

                    if 'merge' not in info:
                        # Assume that the original splitter_info is desirable
                        # and that this staging is simply manual followup.
                        api.set_splitter_info_in_prj_pseudometa(target_project, info['group'], info['strategy'])

                    SelectCommand(api, target_project) \
                        .perform(request_ids, no_freeze=opts.no_freeze)
            else:
                target_project = api.prj_from_short(stagings[0])
                if opts.add:
                    api.mark_additional_packages(target_project, [opts.add])
                else:
                    SelectCommand(api, target_project) \
                        .perform(requests, opts.move,
                                 api.prj_from_short(opts.filter_from), opts.no_freeze)
        elif cmd == 'cleanup_rings':
            CleanupRings(api).perform()
        elif cmd == 'ignore':
            IgnoreCommand(api).perform(args[1:], opts.message)
        elif cmd == 'unignore':
            UnignoreCommand(api).perform(args[1:], opts.cleanup)
        elif cmd == 'list':
            ListCommand(api).perform(supersede=opts.supersede)
        elif cmd == 'lock':
            lock.hold(opts.message)
        elif cmd == 'adi':
            AdiCommand(api).perform(args[1:], move=opts.move,
                                    by_dp=opts.by_develproject, split=opts.split)
        elif cmd == 'rebuild':
            RebuildCommand(api).perform(args[1:], opts.force)
        elif cmd == 'repair':
            RepairCommand(api).perform(args[1:], opts.cleanup)
        elif cmd == 'setprio':
            stagings = []
            priority = None

            priorities = ['critical', 'important', 'moderate', 'low']
            for arg in args[1:]:
                if arg in priorities:
                    priority = arg
                else:
                    stagings.append(arg)

            PrioCommand(api).perform(stagings, priority)
        elif cmd == 'supersede':
            SupersedeCommand(api).perform(args[1:])
        elif cmd == 'unlock':
            lock.release(force=True)


def main(apiurl, opts, argv):

    repo = argv[0]
    arch = argv[1]
    build_descr = argv[2]
    xp = []
    build_root = None
    cache_dir = None
    build_uid = ''
    vm_type = config['build-type']

    build_descr = os.path.abspath(build_descr)
    build_type = os.path.splitext(build_descr)[1][1:]
    if build_type not in ['spec', 'dsc', 'kiwi']:
        raise oscerr.WrongArgs(
            'Unknown build type: \'%s\'. Build description should end in .spec, .dsc or .kiwi.' \
                % build_type)
    if not os.path.isfile(build_descr):
        raise oscerr.WrongArgs('Error: build description file named \'%s\' does not exist.' % build_descr)

    buildargs = []
    if not opts.userootforbuild:
        buildargs.append('--norootforbuild')
    if opts.clean:
        buildargs.append('--clean')
    if opts.noinit:
        buildargs.append('--noinit')
    if opts.nochecks:
        buildargs.append('--no-checks')
    if not opts.no_changelog:
        buildargs.append('--changelog')
    if opts.root:
        build_root = opts.root
    if opts.jobs:
        buildargs.append('--jobs=%s' % opts.jobs)
    elif config['build-jobs'] > 1:
        buildargs.append('--jobs=%s' % config['build-jobs'])
    if opts.icecream or config['icecream'] != '0':
        if opts.icecream:
            num = opts.icecream
        else:
            num = config['icecream']

        if int(num) > 0:
            buildargs.append('--icecream=%s' % num)
            xp.append('icecream')
            xp.append('gcc-c++')
    if opts.ccache:
        buildargs.append('--ccache')
        xp.append('ccache')
    if opts.linksources:
        buildargs.append('--linksources')
    if opts.baselibs:
        buildargs.append('--baselibs')
    if opts.debuginfo:
        buildargs.append('--debug')
    if opts._with:
        for o in opts._with:
            buildargs.append('--with=%s' % o)
    if opts.without:
        for o in opts.without:
            buildargs.append('--without=%s' % o)
    if opts.define:
        for o in opts.define:
            buildargs.append('--define=%s' % o)
    if config['build-uid']:
        build_uid = config['build-uid']
    if opts.build_uid:
        build_uid = opts.build_uid
    if build_uid:
        buildidre = re.compile('^[0-9]{1,5}:[0-9]{1,5}$')
        if build_uid == 'caller':
            buildargs.append('--uid=%s:%s' % (os.getuid(), os.getgid()))
        elif buildidre.match(build_uid):
            buildargs.append('--uid=%s' % build_uid)
        else:
            print >>sys.stderr, 'Error: build-uid arg must be 2 colon separated numerics: "uid:gid" or "caller"'
            return 1
    if opts.vm_type:
        vm_type = opts.vm_type

    if opts.alternative_project:
        prj = opts.alternative_project
        pac = '_repository'
    else:
        prj = store_read_project(os.curdir)
        if opts.local_package:
            pac = '_repository'
        else:
            pac = store_read_package(os.curdir)

    if opts.shell:
        buildargs.append("--shell")

    # make it possible to override configuration of the rc file
    for var in ['OSC_PACKAGECACHEDIR', 'OSC_SU_WRAPPER', 'OSC_BUILD_ROOT']:
        val = os.getenv(var)
        if val:
            if var.startswith('OSC_'):
                var = var[4:]
            var = var.lower().replace('_', '-')
            if config.has_key(var):
                print 'Overriding config value for %s=\'%s\' with \'%s\'' % (var, config[var], val)
            config[var] = val

    pacname = pac
    if pacname == '_repository':
        if not opts.local_package:
            try:
                pacname = store_read_package(os.curdir)
            except oscerr.NoWorkingCopy:
                opts.local_package = True
        if opts.local_package:
            pacname = os.path.splitext(build_descr)[0]
    apihost = urlparse.urlsplit(apiurl)[1]
    if not build_root:
        build_root = config['build-root'] % {'repo': repo, 'arch': arch,
                                             'project': prj, 'package': pacname, 'apihost': apihost}

    cache_dir = config['packagecachedir'] % {'apihost': apihost}

    extra_pkgs = []
    if not opts.extra_pkgs:
        extra_pkgs = config['extra-pkgs']
    elif opts.extra_pkgs != ['']:
        extra_pkgs = opts.extra_pkgs

    if xp:
        extra_pkgs += xp

    prefer_pkgs = {}
    build_descr_data = open(build_descr).read()

    # XXX: dirty hack but there's no api to provide custom defines
    if opts.without:
        s = ''
        for i in opts.without:
            s += "%%define _without_%s 1\n" % i
            s += "%%define _with_%s 0\n" % i
        build_descr_data = s + build_descr_data
    if opts._with:
        s = ''
        for i in opts._with:
            s += "%%define _without_%s 0\n" % i
            s += "%%define _with_%s 1\n" % i
        build_descr_data = s + build_descr_data
    if opts.define:
        s = ''
        for i in opts.define:
            s += "%%define %s\n" % i
        build_descr_data = s + build_descr_data

    if opts.prefer_pkgs:
        print 'Scanning the following dirs for local packages: %s' % ', '.join(opts.prefer_pkgs)
        prefer_pkgs, cpio = get_prefer_pkgs(opts.prefer_pkgs, arch, build_type)
        cpio.add(os.path.basename(build_descr), build_descr_data)
        build_descr_data = cpio.get()

    # special handling for overlay and rsync-src/dest
    specialcmdopts = []
    if opts.rsyncsrc or opts.rsyncdest:
        if not opts.rsyncsrc or not opts.rsyncdest:
            raise oscerr.WrongOptions('When using --rsync-{src,dest} both parameters have to be specified.')
        myrsyncsrc = os.path.abspath(os.path.expanduser(os.path.expandvars(opts.rsyncsrc)))
        if not os.path.isdir(myrsyncsrc):
            raise oscerr.WrongOptions('--rsync-src %s is no valid directory!' % opts.rsyncsrc)
        # can't check destination - its in the target chroot ;) - but we can check for sanity
        myrsyncdest = os.path.expandvars(opts.rsyncdest)
        if not os.path.isabs(myrsyncdest):
            raise oscerr.WrongOptions('--rsync-dest %s is no absolute path (starting with \'/\')!' % opts.rsyncdest)
        specialcmdopts = ['--rsync-src='+myrsyncsrc, '--rsync-dest='+myrsyncdest]
    if opts.overlay:
        myoverlay = os.path.abspath(os.path.expanduser(os.path.expandvars(opts.overlay)))
        if not os.path.isdir(myoverlay):
            raise oscerr.WrongOptions('--overlay %s is no valid directory!' % opts.overlay)
        specialcmdopts += ['--overlay='+myoverlay]

    bi_file = None
    bc_file = None
    bi_filename = '_buildinfo-%s-%s.xml' % (repo, arch)
    bc_filename = '_buildconfig-%s-%s' % (repo, arch)
    if is_package_dir('.') and os.access(osc.core.store, os.W_OK):
        bi_filename = os.path.join(os.getcwd(), osc.core.store, bi_filename)
        bc_filename = os.path.join(os.getcwd(), osc.core.store, bc_filename)
    elif not os.access('.', os.W_OK):
        bi_file = NamedTemporaryFile(prefix=bi_filename)
        bi_filename = bi_file.name
        bc_file = NamedTemporaryFile(prefix=bc_filename)
        bc_filename = bc_file.name
    else:
        bi_filename = os.path.abspath(bi_filename)
        bc_filename = os.path.abspath(bc_filename)

    try:
        if opts.noinit:
            if not os.path.isfile(bi_filename):
                raise oscerr.WrongOptions('--noinit is not possible, no local buildinfo file')
            print 'Use local \'%s\' file as buildinfo' % bi_filename
            if not os.path.isfile(bc_filename):
                raise oscerr.WrongOptions('--noinit is not possible, no local buildconfig file')
            print 'Use local \'%s\' file as buildconfig' % bc_filename
        elif opts.offline:
            if not os.path.isfile(bi_filename):
                raise oscerr.WrongOptions('--offline is not possible, no local buildinfo file')
            print 'Use local \'%s\' file as buildinfo' % bi_filename
            if not os.path.isfile(bc_filename):
                raise oscerr.WrongOptions('--offline is not possible, no local buildconfig file')
        else:
            print 'Getting buildinfo from server and store to %s' % bi_filename
            bi_text = ''.join(get_buildinfo(apiurl, prj, pac, repo, arch,
                                            specfile=build_descr_data,
                                            addlist=extra_pkgs))
            if not bi_file:
                bi_file = open(bi_filename, 'w')
            # maybe we should check for errors before saving the file
            bi_file.write(bi_text)
            bi_file.flush()
            print 'Getting buildconfig from server and store to %s' % bc_filename
            bc = get_buildconfig(apiurl, prj, repo)
            if not bc_file:
                bc_file = open(bc_filename, 'w')
            bc_file.write(bc)
            bc_file.flush()
    except urllib2.HTTPError, e:
        if e.code == 404:
            # check what caused the 404
            if meta_exists(metatype='prj', path_args=(quote_plus(prj), ),
                           template_args=None, create_new=False, apiurl=apiurl):
                pkg_meta_e = None
                try:
                    # take care, not to run into double trouble.
                    pkg_meta_e = meta_exists(metatype='pkg',
                                             path_args=(quote_plus(prj), quote_plus(pac)),
                                             template_args=None, create_new=False,
                                             apiurl=apiurl)
                except:
                    pass

                if pkg_meta_e:
                    print >>sys.stderr, 'ERROR: Either wrong repo/arch as parameter or a parse error of .spec/.dsc/.kiwi file due to syntax error'
                else:
                    print >>sys.stderr, 'The package \'%s\' does not exists - please ' \
                                        'rerun with \'--local-package\'' % pac
            else:
                print >>sys.stderr, 'The project \'%s\' does not exists - please ' \
                                    'rerun with \'--alternative-project <alternative_project>\'' % prj
            sys.exit(1)
        else:
            raise


def main(apiurl, opts, argv): repo = argv[0] arch = argv[1] build_descr = argv[2] xp = [] build_root = None cache_dir = None build_uid = '' vm_type = config['build-type'] build_descr = os.path.abspath(build_descr) build_type = os.path.splitext(build_descr)[1][1:] if os.path.basename(build_descr) == 'PKGBUILD': build_type = 'arch' if build_type not in ['spec', 'dsc', 'kiwi', 'arch', 'livebuild']: raise oscerr.WrongArgs( 'Unknown build type: \'%s\'. Build description should end in .spec, .dsc, .kiwi or .livebuild.' \ % build_type) if not os.path.isfile(build_descr): raise oscerr.WrongArgs( 'Error: build description file named \'%s\' does not exist.' % build_descr) buildargs = [] if not opts.userootforbuild: buildargs.append('--norootforbuild') if opts.clean: buildargs.append('--clean') if opts.noinit: buildargs.append('--noinit') if opts.nochecks: buildargs.append('--no-checks') if not opts.no_changelog: buildargs.append('--changelog') if opts.root: build_root = opts.root if opts.target: buildargs.append('--target=%s' % opts.target) if opts.threads: buildargs.append('--threads=%s' % opts.threads) if opts.jobs: buildargs.append('--jobs=%s' % opts.jobs) elif config['build-jobs'] > 1: buildargs.append('--jobs=%s' % config['build-jobs']) if opts.icecream or config['icecream'] != '0': if opts.icecream: num = opts.icecream else: num = config['icecream'] if int(num) > 0: buildargs.append('--icecream=%s' % num) xp.append('icecream') xp.append('gcc-c++') if opts.ccache: buildargs.append('--ccache') xp.append('ccache') if opts.linksources: buildargs.append('--linksources') if opts.baselibs: buildargs.append('--baselibs') if opts.debuginfo: buildargs.append('--debug') if opts._with: for o in opts._with: buildargs.append('--with=%s' % o) if opts.without: for o in opts.without: buildargs.append('--without=%s' % o) if opts.define: for o in opts.define: buildargs.append('--define=%s' % o) if config['build-uid']: build_uid = config['build-uid'] if opts.build_uid: build_uid = opts.build_uid if build_uid: buildidre = re.compile('^[0-9]{1,5}:[0-9]{1,5}$') if build_uid == 'caller': buildargs.append('--uid=%s:%s' % (os.getuid(), os.getgid())) elif buildidre.match(build_uid): buildargs.append('--uid=%s' % build_uid) else: print( 'Error: build-uid arg must be 2 colon separated numerics: "uid:gid" or "caller"', file=sys.stderr) return 1 if opts.vm_type: vm_type = opts.vm_type if opts.alternative_project: prj = opts.alternative_project pac = '_repository' else: prj = store_read_project(os.curdir) if opts.local_package: pac = '_repository' else: pac = store_read_package(os.curdir) if opts.shell: buildargs.append("--shell") orig_build_root = config['build-root'] # make it possible to override configuration of the rc file for var in ['OSC_PACKAGECACHEDIR', 'OSC_SU_WRAPPER', 'OSC_BUILD_ROOT']: val = os.getenv(var) if val: if var.startswith('OSC_'): var = var[4:] var = var.lower().replace('_', '-') if var in config: print('Overriding config value for %s=\'%s\' with \'%s\'' % (var, config[var], val)) config[var] = val pacname = pac if pacname == '_repository': if not opts.local_package: try: pacname = store_read_package(os.curdir) except oscerr.NoWorkingCopy: opts.local_package = True if opts.local_package: pacname = os.path.splitext(build_descr)[0] apihost = urlsplit(apiurl)[1] if not build_root: build_root = config['build-root'] if build_root == orig_build_root: # ENV var was not set build_root = config['api_host_options'][apiurl].get( 'build-root', build_root) try: build_root = build_root % { 'repo': repo, 'arch': 
arch, 'project': prj, 'package': pacname, 'apihost': apihost } except: pass cache_dir = config['packagecachedir'] % {'apihost': apihost} extra_pkgs = [] if not opts.extra_pkgs: extra_pkgs = config['extra-pkgs'] elif opts.extra_pkgs != ['']: extra_pkgs = opts.extra_pkgs if xp: extra_pkgs += xp prefer_pkgs = {} build_descr_data = open(build_descr).read() # XXX: dirty hack but there's no api to provide custom defines if opts.without: s = '' for i in opts.without: s += "%%define _without_%s 1\n" % i build_descr_data = s + build_descr_data if opts._with: s = '' for i in opts._with: s += "%%define _with_%s 1\n" % i build_descr_data = s + build_descr_data if opts.define: s = '' for i in opts.define: s += "%%define %s\n" % i build_descr_data = s + build_descr_data cpiodata = None buildenvfile = os.path.join(os.path.dirname(build_descr), "_buildenv." + repo + "." + arch) if not os.path.isfile(buildenvfile): buildenvfile = os.path.join(os.path.dirname(build_descr), "_buildenv") if not os.path.isfile(buildenvfile): buildenvfile = None if buildenvfile: print('Using buildenv file: %s' % os.path.basename(buildenvfile)) from .util import cpio if not cpiodata: cpiodata = cpio.CpioWrite() if opts.prefer_pkgs: print('Scanning the following dirs for local packages: %s' % ', '.join(opts.prefer_pkgs)) from .util import cpio if not cpiodata: cpiodata = cpio.CpioWrite() prefer_pkgs = get_prefer_pkgs(opts.prefer_pkgs, arch, build_type, cpiodata) if cpiodata: cpiodata.add(os.path.basename(build_descr), build_descr_data) # buildenv must come last for compatibility reasons... if buildenvfile: cpiodata.add("buildenv", open(buildenvfile).read()) build_descr_data = cpiodata.get() # special handling for overlay and rsync-src/dest specialcmdopts = [] if opts.rsyncsrc or opts.rsyncdest: if not opts.rsyncsrc or not opts.rsyncdest: raise oscerr.WrongOptions( 'When using --rsync-{src,dest} both parameters have to be specified.' ) myrsyncsrc = os.path.abspath( os.path.expanduser(os.path.expandvars(opts.rsyncsrc))) if not os.path.isdir(myrsyncsrc): raise oscerr.WrongOptions('--rsync-src %s is no valid directory!' % opts.rsyncsrc) # can't check destination - its in the target chroot ;) - but we can check for sanity myrsyncdest = os.path.expandvars(opts.rsyncdest) if not os.path.isabs(myrsyncdest): raise oscerr.WrongOptions( '--rsync-dest %s is no absolute path (starting with \'/\')!' % opts.rsyncdest) specialcmdopts = [ '--rsync-src=' + myrsyncsrc, '--rsync-dest=' + myrsyncdest ] if opts.overlay: myoverlay = os.path.abspath( os.path.expanduser(os.path.expandvars(opts.overlay))) if not os.path.isdir(myoverlay): raise oscerr.WrongOptions('--overlay %s is no valid directory!' 
% opts.overlay) specialcmdopts += ['--overlay=' + myoverlay] bi_file = None bc_file = None bi_filename = '_buildinfo-%s-%s.xml' % (repo, arch) bc_filename = '_buildconfig-%s-%s' % (repo, arch) if is_package_dir('.') and os.access(osc.core.store, os.W_OK): bi_filename = os.path.join(os.getcwd(), osc.core.store, bi_filename) bc_filename = os.path.join(os.getcwd(), osc.core.store, bc_filename) elif not os.access('.', os.W_OK): bi_file = NamedTemporaryFile(prefix=bi_filename) bi_filename = bi_file.name bc_file = NamedTemporaryFile(prefix=bc_filename) bc_filename = bc_file.name else: bi_filename = os.path.abspath(bi_filename) bc_filename = os.path.abspath(bc_filename) try: if opts.noinit: if not os.path.isfile(bi_filename): raise oscerr.WrongOptions( '--noinit is not possible, no local buildinfo file') print('Use local \'%s\' file as buildinfo' % bi_filename) if not os.path.isfile(bc_filename): raise oscerr.WrongOptions( '--noinit is not possible, no local buildconfig file') print('Use local \'%s\' file as buildconfig' % bc_filename) elif opts.offline: if not os.path.isfile(bi_filename): raise oscerr.WrongOptions( '--offline is not possible, no local buildinfo file') print('Use local \'%s\' file as buildinfo' % bi_filename) if not os.path.isfile(bc_filename): raise oscerr.WrongOptions( '--offline is not possible, no local buildconfig file') else: print('Getting buildinfo from server and store to %s' % bi_filename) bi_text = ''.join( get_buildinfo(apiurl, prj, pac, repo, arch, specfile=build_descr_data, addlist=extra_pkgs)) if not bi_file: bi_file = open(bi_filename, 'w') # maybe we should check for errors before saving the file bi_file.write(bi_text) bi_file.flush() print('Getting buildconfig from server and store to %s' % bc_filename) bc = get_buildconfig(apiurl, prj, repo) if not bc_file: bc_file = open(bc_filename, 'w') bc_file.write(bc) bc_file.flush() except HTTPError as e: if e.code == 404: # check what caused the 404 if meta_exists(metatype='prj', path_args=(quote_plus(prj), ), template_args=None, create_new=False, apiurl=apiurl): pkg_meta_e = None try: # take care, not to run into double trouble. pkg_meta_e = meta_exists(metatype='pkg', path_args=(quote_plus(prj), quote_plus(pac)), template_args=None, create_new=False, apiurl=apiurl) except: pass if pkg_meta_e: print( 'ERROR: Either wrong repo/arch as parameter or a parse error of .spec/.dsc/.kiwi file due to syntax error', file=sys.stderr) else: print('The package \'%s\' does not exists - please ' \ 'rerun with \'--local-package\'' % pac, file=sys.stderr) else: print('The project \'%s\' does not exists - please ' \ 'rerun with \'--alternative-project <alternative_project>\'' % prj, file=sys.stderr) sys.exit(1) else: raise bi = Buildinfo(bi_filename, apiurl, build_type, list(prefer_pkgs.keys())) if bi.debuginfo and not (opts.disable_debuginfo or '--debug' in buildargs): buildargs.append('--debug') if opts.release: bi.release = opts.release if bi.release: buildargs.append('--release=%s' % bi.release) # real arch of this machine # vs. # arch we are supposed to build for if bi.hostarch != None: if hostarch != bi.hostarch and not bi.hostarch in can_also_build.get( hostarch, []): print('Error: hostarch \'%s\' is required.' % (bi.hostarch), file=sys.stderr) return 1 elif hostarch != bi.buildarch: if not bi.buildarch in can_also_build.get(hostarch, []): # OBSOLETE: qemu_can_build should not be needed anymore since OBS 2.3 if vm_type != "emulator" and not bi.buildarch in qemu_can_build: print('Error: hostarch \'%s\' cannot build \'%s\'.' 
% (hostarch, bi.buildarch), file=sys.stderr) return 1 print( 'WARNING: It is guessed to build on hostarch \'%s\' for \'%s\' via QEMU.' % (hostarch, bi.buildarch), file=sys.stderr) rpmlist_prefers = [] if prefer_pkgs: print('Evaluating preferred packages') for name, path in prefer_pkgs.items(): if bi.has_dep(name): # We remove a preferred package from the buildinfo, so that the # fetcher doesn't take care about them. # Instead, we put it in a list which is appended to the rpmlist later. # At the same time, this will make sure that these packages are # not verified. bi.remove_dep(name) rpmlist_prefers.append((name, path)) print(' - %s (%s)' % (name, path)) print('Updating cache of required packages') urllist = [] if not opts.download_api_only: # transform 'url1, url2, url3' form into a list if 'urllist' in config: if isinstance(config['urllist'], str): re_clist = re.compile('[, ]+') urllist = [ i.strip() for i in re_clist.split(config['urllist'].strip()) ] else: urllist = config['urllist'] # OBS 1.5 and before has no downloadurl defined in buildinfo if bi.downloadurl: urllist.append( bi.downloadurl + '/%(extproject)s/%(extrepository)s/%(arch)s/%(filename)s') if opts.disable_cpio_bulk_download: urllist.append( '%(apiurl)s/build/%(project)s/%(repository)s/%(repoarch)s/%(repopackage)s/%(repofilename)s' ) fetcher = Fetcher(cache_dir, urllist=urllist, api_host_options=config['api_host_options'], offline=opts.noinit or opts.offline, http_debug=config['http_debug'], enable_cpio=not opts.disable_cpio_bulk_download, cookiejar=cookiejar) # implicitly trust the project we are building for check_trusted_projects(apiurl, [i for i in bi.projects.keys() if not i == prj]) # now update the package cache fetcher.run(bi) old_pkg_dir = None if opts.oldpackages: old_pkg_dir = opts.oldpackages if not old_pkg_dir.startswith('/') and not opts.offline: data = [prj, pacname, repo, arch] if old_pkg_dir == '_link': p = osc.core.findpacs(os.curdir)[0] if not p.islink(): raise oscerr.WrongOptions('package is not a link') data[0] = p.linkinfo.project data[1] = p.linkinfo.package repos = osc.core.get_repositories_of_project(apiurl, data[0]) # hack for links to e.g. Factory if not data[2] in repos and 'standard' in repos: data[2] = 'standard' elif old_pkg_dir != '' and old_pkg_dir != '_self': a = old_pkg_dir.split('/') for i in range(0, len(a)): data[i] = a[i] destdir = os.path.join(cache_dir, data[0], data[2], data[3]) old_pkg_dir = None try: print("Downloading previous build from %s ..." 
% '/'.join(data)) binaries = get_binarylist(apiurl, data[0], data[2], data[3], package=data[1], verbose=True) except Exception as e: print("Error: failed to get binaries: %s" % str(e)) binaries = [] if binaries: class mytmpdir: """ temporary directory that removes itself""" def __init__(self, *args, **kwargs): self.name = mkdtemp(*args, **kwargs) _rmtree = staticmethod(shutil.rmtree) def cleanup(self): self._rmtree(self.name) def __del__(self): self.cleanup() def __exit__(self): self.cleanup() def __str__(self): return self.name old_pkg_dir = mytmpdir(prefix='.build.oldpackages', dir=os.path.abspath(os.curdir)) if not os.path.exists(destdir): os.makedirs(destdir) for i in binaries: fname = os.path.join(destdir, i.name) os.symlink(fname, os.path.join(str(old_pkg_dir), i.name)) if os.path.exists(fname): st = os.stat(fname) if st.st_mtime == i.mtime and st.st_size == i.size: continue get_binary_file(apiurl, data[0], data[2], data[3], i.name, package=data[1], target_filename=fname, target_mtime=i.mtime, progress_meter=True) if old_pkg_dir != None: buildargs.append('--oldpackages=%s' % old_pkg_dir) # Make packages from buildinfo available as repos for kiwi if build_type == 'kiwi': if os.path.exists('repos'): shutil.rmtree('repos') os.mkdir('repos') for i in bi.deps: if not i.extproject: # remove bi.deps.remove(i) continue # project pdir = str(i.extproject).replace(':/', ':') # repo rdir = str(i.extrepository).replace(':/', ':') # arch adir = i.repoarch # project/repo prdir = "repos/" + pdir + "/" + rdir # project/repo/arch pradir = prdir + "/" + adir # source fullfilename sffn = i.fullfilename filename = sffn.split("/")[-1] # target fullfilename tffn = pradir + "/" + filename if not os.path.exists(os.path.join(pradir)): os.makedirs(os.path.join(pradir)) if not os.path.exists(tffn): print("Using package: " + sffn) if opts.linksources: os.link(sffn, tffn) else: os.symlink(sffn, tffn) if prefer_pkgs: for name, path in prefer_pkgs.items(): if name == filename: print("Using prefered package: " + path + "/" + filename) os.unlink(tffn) if opts.linksources: os.link(path + "/" + filename, tffn) else: os.symlink(path + "/" + filename, tffn) # Is a obsrepositories tag used? try: tree = ET.parse(build_descr) except: print('could not parse the kiwi file:', file=sys.stderr) print(open(build_descr).read(), file=sys.stderr) sys.exit(1) root = tree.getroot() # product for xml in root.findall('instsource'): if xml.find('instrepo').find('source').get( 'path') == 'obsrepositories:/': print( "obsrepositories:/ for product builds is not yet supported in osc!" 
) sys.exit(1) # appliance expand_obsrepos = None for xml in root.findall('repository'): if xml.find('source').get('path') == 'obsrepositories:/': expand_obsrepos = True if expand_obsrepos: buildargs.append('--kiwi-parameter') buildargs.append('--ignore-repos') for xml in root.findall('repository'): if xml.find('source').get('path') == 'obsrepositories:/': for path in bi.pathes: if not os.path.isdir("repos/" + path): continue buildargs.append('--kiwi-parameter') buildargs.append('--add-repo') buildargs.append('--kiwi-parameter') buildargs.append("repos/" + path) buildargs.append('--kiwi-parameter') buildargs.append('--add-repotype') buildargs.append('--kiwi-parameter') buildargs.append('rpm-md') if xml.get('priority'): buildargs.append('--kiwi-parameter') buildargs.append('--add-repoprio=' + xml.get('priority')) else: m = re.match(r"obs://[^/]+/([^/]+)/(\S+)", xml.find('source').get('path')) if not m: # short path without obs instance name m = re.match(r"obs://([^/]+)/(.+)", xml.find('source').get('path')) project = m.group(1).replace(":", ":/") repo = m.group(2) buildargs.append('--kiwi-parameter') buildargs.append('--add-repo') buildargs.append('--kiwi-parameter') buildargs.append("repos/" + project + "/" + repo) buildargs.append('--kiwi-parameter') buildargs.append('--add-repotype') buildargs.append('--kiwi-parameter') buildargs.append('rpm-md') if xml.get('priority'): buildargs.append('--kiwi-parameter') buildargs.append('--add-repopriority=' + xml.get('priority')) if vm_type == "xen" or vm_type == "kvm" or vm_type == "lxc": print( 'Skipping verification of package signatures due to secure VM build' ) elif bi.pacsuffix == 'rpm': if opts.no_verify: print('Skipping verification of package signatures') else: print('Verifying integrity of cached packages') verify_pacs(bi) elif bi.pacsuffix == 'deb': if opts.no_verify or opts.noinit: print('Skipping verification of package signatures') else: print( 'WARNING: deb packages get not verified, they can compromise your system !' ) else: print( 'WARNING: unknown packages get not verified, they can compromise your system !' 
) for i in bi.deps: if i.hdrmd5: from .util import packagequery hdrmd5 = packagequery.PackageQuery.queryhdrmd5(i.fullfilename) if not hdrmd5: print("Error: cannot get hdrmd5 for %s" % i.fullfilename) sys.exit(1) if hdrmd5 != i.hdrmd5: print("Error: hdrmd5 mismatch for %s: %s != %s" % (i.fullfilename, hdrmd5, i.hdrmd5)) sys.exit(1) print('Writing build configuration') if build_type == 'kiwi': rpmlist = [ '%s %s\n' % (i.name, i.fullfilename) for i in bi.deps if not i.noinstall ] else: rpmlist = ['%s %s\n' % (i.name, i.fullfilename) for i in bi.deps] rpmlist += ['%s %s\n' % (i[0], i[1]) for i in rpmlist_prefers] rpmlist.append('preinstall: ' + ' '.join(bi.preinstall_list) + '\n') rpmlist.append('vminstall: ' + ' '.join(bi.vminstall_list) + '\n') rpmlist.append('runscripts: ' + ' '.join(bi.runscripts_list) + '\n') if build_type != 'kiwi' and bi.noinstall_list: rpmlist.append('noinstall: ' + ' '.join(bi.noinstall_list) + '\n') if build_type != 'kiwi' and bi.installonly_list: rpmlist.append('installonly: ' + ' '.join(bi.installonly_list) + '\n') rpmlist_file = NamedTemporaryFile(prefix='rpmlist.') rpmlist_filename = rpmlist_file.name rpmlist_file.writelines(rpmlist) rpmlist_file.flush() subst = {'repo': repo, 'arch': arch, 'project': prj, 'package': pacname} vm_options = [] # XXX check if build-device present my_build_device = '' if config['build-device']: my_build_device = config['build-device'] % subst else: # obs worker uses /root here but that collides with the # /root directory if the build root was used without vm # before my_build_device = build_root + '/img' need_root = True if vm_type: if config['build-swap']: my_build_swap = config['build-swap'] % subst else: my_build_swap = build_root + '/swap' vm_options = ['--vm-type=%s' % vm_type] if vm_type != 'lxc' and vm_type != 'emulator': vm_options += ['--vm-disk=' + my_build_device] vm_options += ['--vm-swap=' + my_build_swap] vm_options += ['--logfile=%s/.build.log' % build_root] if vm_type == 'kvm': if os.access(build_root, os.W_OK) and os.access( '/dev/kvm', os.W_OK): # so let's hope there's also an fstab entry need_root = False if config['build-kernel']: vm_options += ['--vm-kernel=' + config['build-kernel']] if config['build-initrd']: vm_options += ['--vm-initrd=' + config['build-initrd']] build_root += '/.mount' if config['build-memory']: vm_options += ['--memory=' + config['build-memory']] if config['build-vmdisk-rootsize']: vm_options += [ '--vmdisk-rootsize=' + config['build-vmdisk-rootsize'] ] if config['build-vmdisk-swapsize']: vm_options += [ '--vmdisk-swapsize=' + config['build-vmdisk-swapsize'] ] if config['build-vmdisk-filesystem']: vm_options += [ '--vmdisk-filesystem=' + config['build-vmdisk-filesystem'] ] if opts.preload: print("Preload done for selected repo/arch.") sys.exit(0) print('Running build') cmd = [ config['build-cmd'], '--root=' + build_root, '--rpmlist=' + rpmlist_filename, '--dist=' + bc_filename, '--arch=' + bi.buildarch ] cmd += specialcmdopts + vm_options + buildargs cmd += [build_descr] if need_root: sucmd = config['su-wrapper'].split() if sucmd[0] == 'su': if sucmd[-1] == '-c': sucmd.pop() cmd = sucmd + ['-s', cmd[0], 'root', '--'] + cmd[1:] else: cmd = sucmd + cmd # change personality, if needed if hostarch != bi.buildarch and bi.buildarch in change_personality: cmd = [change_personality[bi.buildarch]] + cmd try: rc = run_external(cmd[0], *cmd[1:]) if rc: print() print('The buildroot was:', build_root) sys.exit(rc) except KeyboardInterrupt as i: print("keyboard interrupt, killing build ...") 
cmd.append('--kill') run_external(cmd[0], *cmd[1:]) raise i pacdir = os.path.join(build_root, '.build.packages') if os.path.islink(pacdir): pacdir = os.readlink(pacdir) pacdir = os.path.join(build_root, pacdir) if os.path.exists(pacdir): (s_built, b_built) = get_built_files(pacdir, bi.buildtype) print() if s_built: print(s_built) print() print(b_built) if opts.keep_pkgs: for i in b_built.splitlines() + s_built.splitlines(): shutil.copy2(i, os.path.join(opts.keep_pkgs, os.path.basename(i))) if bi_file: bi_file.close() if bc_file: bc_file.close() rpmlist_file.close()
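# A minimal, illustrative sketch (not part of osc) of the su-wrapper splicing used
# above when the build script has to run as root. Only the splicing rule is taken
# from the code above; the wrapper strings and the example command are assumptions.
def _wrap_with_su(su_wrapper, cmd):
    """Prefix cmd so it runs as root, mirroring the 'need_root' branch above."""
    sucmd = su_wrapper.split()
    if sucmd[0] == 'su':
        # 'su -c' style: drop the trailing '-c' and rebuild as 'su -s CMD root -- ARGS...'
        if sucmd[-1] == '-c':
            sucmd.pop()
        return sucmd + ['-s', cmd[0], 'root', '--'] + cmd[1:]
    # any other wrapper (e.g. sudo) is simply prepended
    return sucmd + cmd

# _wrap_with_su('su -c', ['/usr/bin/build', '--clean'])
#   -> ['su', '-s', '/usr/bin/build', 'root', '--', '--clean']
# _wrap_with_su('sudo', ['/usr/bin/build', '--clean'])
#   -> ['sudo', '/usr/bin/build', '--clean']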
def main(apiurl, opts, argv): repo = argv[0] arch = argv[1] build_descr = argv[2] xp = [] build_root = None cache_dir = None build_uid = '' vm_type = config['build-type'] vm_telnet = None build_descr = os.path.abspath(build_descr) build_type = os.path.splitext(build_descr)[1][1:] if os.path.basename(build_descr) == 'PKGBUILD': build_type = 'arch' if os.path.basename(build_descr) == 'build.collax': build_type = 'collax' if build_type not in ['spec', 'dsc', 'kiwi', 'arch', 'collax', 'livebuild']: raise oscerr.WrongArgs( 'Unknown build type: \'%s\'. Build description should end in .spec, .dsc, .kiwi or .livebuild.' \ % build_type) if not os.path.isfile(build_descr): raise oscerr.WrongArgs('Error: build description file named \'%s\' does not exist.' % build_descr) buildargs = [] if not opts.userootforbuild: buildargs.append('--norootforbuild') if opts.clean: buildargs.append('--clean') if opts.noinit: buildargs.append('--noinit') if opts.nochecks: buildargs.append('--no-checks') if not opts.no_changelog: buildargs.append('--changelog') if opts.root: build_root = opts.root if opts.target: buildargs.append('--target=%s' % opts.target) if opts.threads: buildargs.append('--threads=%s' % opts.threads) if opts.jobs: buildargs.append('--jobs=%s' % opts.jobs) elif config['build-jobs'] > 1: buildargs.append('--jobs=%s' % config['build-jobs']) if opts.icecream or config['icecream'] != '0': if opts.icecream: num = opts.icecream else: num = config['icecream'] if int(num) > 0: buildargs.append('--icecream=%s' % num) xp.append('icecream') xp.append('gcc-c++') if opts.ccache: buildargs.append('--ccache') xp.append('ccache') if opts.linksources: buildargs.append('--linksources') if opts.baselibs: buildargs.append('--baselibs') if opts.debuginfo: buildargs.append('--debug') if opts._with: for o in opts._with: buildargs.append('--with=%s' % o) if opts.without: for o in opts.without: buildargs.append('--without=%s' % o) if opts.define: for o in opts.define: buildargs.append('--define=%s' % o) if config['build-uid']: build_uid = config['build-uid'] if opts.build_uid: build_uid = opts.build_uid if build_uid: buildidre = re.compile('^[0-9]{1,5}:[0-9]{1,5}$') if build_uid == 'caller': buildargs.append('--uid=%s:%s' % (os.getuid(), os.getgid())) elif buildidre.match(build_uid): buildargs.append('--uid=%s' % build_uid) else: print('Error: build-uid arg must be 2 colon separated numerics: "uid:gid" or "caller"', file=sys.stderr) return 1 if opts.vm_type: vm_type = opts.vm_type if opts.vm_telnet: vm_telnet = opts.vm_telnet if opts.alternative_project: prj = opts.alternative_project pac = '_repository' else: prj = store_read_project(os.curdir) if opts.local_package: pac = '_repository' else: pac = store_read_package(os.curdir) if opts.shell: buildargs.append("--shell") orig_build_root = config['build-root'] # make it possible to override configuration of the rc file for var in ['OSC_PACKAGECACHEDIR', 'OSC_SU_WRAPPER', 'OSC_BUILD_ROOT']: val = os.getenv(var) if val: if var.startswith('OSC_'): var = var[4:] var = var.lower().replace('_', '-') if var in config: print('Overriding config value for %s=\'%s\' with \'%s\'' % (var, config[var], val)) config[var] = val pacname = pac if pacname == '_repository': if not opts.local_package: try: pacname = store_read_package(os.curdir) except oscerr.NoWorkingCopy: opts.local_package = True if opts.local_package: pacname = os.path.splitext(build_descr)[0] apihost = urlsplit(apiurl)[1] if not build_root: build_root = config['build-root'] if build_root == orig_build_root: # ENV var was 
not set build_root = config['api_host_options'][apiurl].get('build-root', build_root) try: build_root = build_root % {'repo': repo, 'arch': arch, 'project': prj, 'package': pacname, 'apihost': apihost} except: pass cache_dir = config['packagecachedir'] % {'apihost': apihost} extra_pkgs = [] if not opts.extra_pkgs: extra_pkgs = config['extra-pkgs'] elif opts.extra_pkgs != ['']: extra_pkgs = opts.extra_pkgs if xp: extra_pkgs += xp prefer_pkgs = {} build_descr_data = open(build_descr).read() # XXX: dirty hack but there's no api to provide custom defines if opts.without: s = '' for i in opts.without: s += "%%define _without_%s 1\n" % i build_descr_data = s + build_descr_data if opts._with: s = '' for i in opts._with: s += "%%define _with_%s 1\n" % i build_descr_data = s + build_descr_data if opts.define: s = '' for i in opts.define: s += "%%define %s\n" % i build_descr_data = s + build_descr_data cpiodata = None servicefile = os.path.join(os.path.dirname(build_descr), "_service") if not os.path.isfile(servicefile): servicefile = os.path.join(os.path.dirname(build_descr), "_service") if not os.path.isfile(servicefile): servicefile = None else: print('Using local _service file') buildenvfile = os.path.join(os.path.dirname(build_descr), "_buildenv." + repo + "." + arch) if not os.path.isfile(buildenvfile): buildenvfile = os.path.join(os.path.dirname(build_descr), "_buildenv") if not os.path.isfile(buildenvfile): buildenvfile = None else: print('Using local buildenv file: %s' % os.path.basename(buildenvfile)) if buildenvfile or servicefile: from .util import cpio if not cpiodata: cpiodata = cpio.CpioWrite() if opts.prefer_pkgs: print('Scanning the following dirs for local packages: %s' % ', '.join(opts.prefer_pkgs)) from .util import cpio if not cpiodata: cpiodata = cpio.CpioWrite() prefer_pkgs = get_prefer_pkgs(opts.prefer_pkgs, arch, build_type, cpiodata) if cpiodata: cpiodata.add(os.path.basename(build_descr), build_descr_data) # buildenv must come last for compatibility reasons... if buildenvfile: cpiodata.add("buildenv", open(buildenvfile).read()) if servicefile: cpiodata.add("_service", open(servicefile).read()) build_descr_data = cpiodata.get() # special handling for overlay and rsync-src/dest specialcmdopts = [] if opts.rsyncsrc or opts.rsyncdest : if not opts.rsyncsrc or not opts.rsyncdest: raise oscerr.WrongOptions('When using --rsync-{src,dest} both parameters have to be specified.') myrsyncsrc = os.path.abspath(os.path.expanduser(os.path.expandvars(opts.rsyncsrc))) if not os.path.isdir(myrsyncsrc): raise oscerr.WrongOptions('--rsync-src %s is no valid directory!' % opts.rsyncsrc) # can't check destination - its in the target chroot ;) - but we can check for sanity myrsyncdest = os.path.expandvars(opts.rsyncdest) if not os.path.isabs(myrsyncdest): raise oscerr.WrongOptions('--rsync-dest %s is no absolute path (starting with \'/\')!' % opts.rsyncdest) specialcmdopts = ['--rsync-src='+myrsyncsrc, '--rsync-dest='+myrsyncdest] if opts.overlay: myoverlay = os.path.abspath(os.path.expanduser(os.path.expandvars(opts.overlay))) if not os.path.isdir(myoverlay): raise oscerr.WrongOptions('--overlay %s is no valid directory!' 
% opts.overlay) specialcmdopts += ['--overlay='+myoverlay] bi_file = None bc_file = None bi_filename = '_buildinfo-%s-%s.xml' % (repo, arch) bc_filename = '_buildconfig-%s-%s' % (repo, arch) if is_package_dir('.') and os.access(osc.core.store, os.W_OK): bi_filename = os.path.join(os.getcwd(), osc.core.store, bi_filename) bc_filename = os.path.join(os.getcwd(), osc.core.store, bc_filename) elif not os.access('.', os.W_OK): bi_file = NamedTemporaryFile(prefix=bi_filename) bi_filename = bi_file.name bc_file = NamedTemporaryFile(prefix=bc_filename) bc_filename = bc_file.name else: bi_filename = os.path.abspath(bi_filename) bc_filename = os.path.abspath(bc_filename) try: if opts.noinit: if not os.path.isfile(bi_filename): raise oscerr.WrongOptions('--noinit is not possible, no local buildinfo file') print('Use local \'%s\' file as buildinfo' % bi_filename) if not os.path.isfile(bc_filename): raise oscerr.WrongOptions('--noinit is not possible, no local buildconfig file') print('Use local \'%s\' file as buildconfig' % bc_filename) elif opts.offline: if not os.path.isfile(bi_filename): raise oscerr.WrongOptions('--offline is not possible, no local buildinfo file') print('Use local \'%s\' file as buildinfo' % bi_filename) if not os.path.isfile(bc_filename): raise oscerr.WrongOptions('--offline is not possible, no local buildconfig file') else: print('Getting buildinfo from server and store to %s' % bi_filename) bi_text = ''.join(get_buildinfo(apiurl, prj, pac, repo, arch, specfile=build_descr_data, addlist=extra_pkgs)) if not bi_file: bi_file = open(bi_filename, 'w') # maybe we should check for errors before saving the file bi_file.write(bi_text) bi_file.flush() print('Getting buildconfig from server and store to %s' % bc_filename) bc = get_buildconfig(apiurl, prj, repo) if not bc_file: bc_file = open(bc_filename, 'w') bc_file.write(bc) bc_file.flush() except HTTPError as e: if e.code == 404: # check what caused the 404 if meta_exists(metatype='prj', path_args=(quote_plus(prj), ), template_args=None, create_new=False, apiurl=apiurl): pkg_meta_e = None try: # take care, not to run into double trouble. pkg_meta_e = meta_exists(metatype='pkg', path_args=(quote_plus(prj), quote_plus(pac)), template_args=None, create_new=False, apiurl=apiurl) except: pass if pkg_meta_e: print('ERROR: Either wrong repo/arch as parameter or a parse error of .spec/.dsc/.kiwi file due to syntax error', file=sys.stderr) else: print('The package \'%s\' does not exist - please ' \ 'rerun with \'--local-package\'' % pac, file=sys.stderr) else: print('The project \'%s\' does not exist - please ' \ 'rerun with \'--alternative-project <alternative_project>\'' % prj, file=sys.stderr) sys.exit(1) else: raise bi = Buildinfo(bi_filename, apiurl, build_type, list(prefer_pkgs.keys())) if bi.debuginfo and not (opts.disable_debuginfo or '--debug' in buildargs): buildargs.append('--debug') if opts.release: bi.release = opts.release if bi.release: buildargs.append('--release=%s' % bi.release) # real arch of this machine # vs. # arch we are supposed to build for if bi.hostarch != None: if hostarch != bi.hostarch and not bi.hostarch in can_also_build.get(hostarch, []): print('Error: hostarch \'%s\' is required.' % (bi.hostarch), file=sys.stderr) return 1 elif hostarch != bi.buildarch: if not bi.buildarch in can_also_build.get(hostarch, []): # OBSOLETE: qemu_can_build should not be needed anymore since OBS 2.3 if vm_type != "emulator" and not bi.buildarch in qemu_can_build: print('Error: hostarch \'%s\' cannot build \'%s\'.' 
% (hostarch, bi.buildarch), file=sys.stderr) return 1 print('WARNING: It is guessed to build on hostarch \'%s\' for \'%s\' via QEMU.' % (hostarch, bi.buildarch), file=sys.stderr) rpmlist_prefers = [] if prefer_pkgs: print('Evaluating preferred packages') for name, path in prefer_pkgs.items(): if bi.has_dep(name): # We remove a preferred package from the buildinfo, so that the # fetcher doesn't take care about them. # Instead, we put it in a list which is appended to the rpmlist later. # At the same time, this will make sure that these packages are # not verified. bi.remove_dep(name) rpmlist_prefers.append((name, path)) print(' - %s (%s)' % (name, path)) print('Updating cache of required packages') urllist = [] if not opts.download_api_only: # transform 'url1, url2, url3' form into a list if 'urllist' in config: if isinstance(config['urllist'], str): re_clist = re.compile('[, ]+') urllist = [ i.strip() for i in re_clist.split(config['urllist'].strip()) ] else: urllist = config['urllist'] # OBS 1.5 and before has no downloadurl defined in buildinfo if bi.downloadurl: urllist.append(bi.downloadurl + '/%(extproject)s/%(extrepository)s/%(arch)s/%(filename)s') if opts.disable_cpio_bulk_download: urllist.append( '%(apiurl)s/build/%(project)s/%(repository)s/%(repoarch)s/%(repopackage)s/%(repofilename)s' ) fetcher = Fetcher(cache_dir, urllist = urllist, api_host_options = config['api_host_options'], offline = opts.noinit or opts.offline, http_debug = config['http_debug'], enable_cpio = not opts.disable_cpio_bulk_download, cookiejar=cookiejar) if not opts.trust_all_projects: # implicitly trust the project we are building for check_trusted_projects(apiurl, [ i for i in bi.projects.keys() if not i == prj ]) imagefile = '' imagesource = '' imagebins = [] if (not config['no_preinstallimage'] and not opts.nopreinstallimage and bi.preinstallimage and not opts.noinit and not opts.offline and (opts.clean or (not os.path.exists(build_root + "/installed-pkg") and not os.path.exists(build_root + "/.build/init_buildsystem.data")))): (imagefile, imagesource, imagebins) = get_preinstall_image(apiurl, arch, cache_dir, bi.preinstallimage) if imagefile: # remove binaries from build deps which are included in preinstall image for i in bi.deps: if i.name in imagebins: bi.remove_dep(i.name) # now update the package cache fetcher.run(bi) old_pkg_dir = None if opts.oldpackages: old_pkg_dir = opts.oldpackages if not old_pkg_dir.startswith('/') and not opts.offline: data = [ prj, pacname, repo, arch] if old_pkg_dir == '_link': p = osc.core.findpacs(os.curdir)[0] if not p.islink(): raise oscerr.WrongOptions('package is not a link') data[0] = p.linkinfo.project data[1] = p.linkinfo.package repos = osc.core.get_repositories_of_project(apiurl, data[0]) # hack for links to e.g. Factory if not data[2] in repos and 'standard' in repos: data[2] = 'standard' elif old_pkg_dir != '' and old_pkg_dir != '_self': a = old_pkg_dir.split('/') for i in range(0, len(a)): data[i] = a[i] destdir = os.path.join(cache_dir, data[0], data[2], data[3]) old_pkg_dir = None try: print("Downloading previous build from %s ..." 
% '/'.join(data)) binaries = get_binarylist(apiurl, data[0], data[2], data[3], package=data[1], verbose=True) except Exception as e: print("Error: failed to get binaries: %s" % str(e)) binaries = [] if binaries: class mytmpdir: """ temporary directory that removes itself""" def __init__(self, *args, **kwargs): self.name = mkdtemp(*args, **kwargs) _rmtree = staticmethod(shutil.rmtree) def cleanup(self): self._rmtree(self.name) def __del__(self): self.cleanup() def __exit__(self): self.cleanup() def __str__(self): return self.name old_pkg_dir = mytmpdir(prefix='.build.oldpackages', dir=os.path.abspath(os.curdir)) if not os.path.exists(destdir): os.makedirs(destdir) for i in binaries: fname = os.path.join(destdir, i.name) os.symlink(fname, os.path.join(str(old_pkg_dir), i.name)) if os.path.exists(fname): st = os.stat(fname) if st.st_mtime == i.mtime and st.st_size == i.size: continue get_binary_file(apiurl, data[0], data[2], data[3], i.name, package = data[1], target_filename = fname, target_mtime = i.mtime, progress_meter = True) if old_pkg_dir != None: buildargs.append('--oldpackages=%s' % old_pkg_dir) # Make packages from buildinfo available as repos for kiwi if build_type == 'kiwi': if os.path.exists('repos'): shutil.rmtree('repos') os.mkdir('repos') for i in bi.deps: if not i.extproject: # remove bi.deps.remove(i) continue # project pdir = str(i.extproject).replace(':/', ':') # repo rdir = str(i.extrepository).replace(':/', ':') # arch adir = i.repoarch # project/repo prdir = "repos/"+pdir+"/"+rdir # project/repo/arch pradir = prdir+"/"+adir # source fullfilename sffn = i.fullfilename filename = sffn.split("/")[-1] # target fullfilename tffn = pradir+"/"+filename if not os.path.exists(os.path.join(pradir)): os.makedirs(os.path.join(pradir)) if not os.path.exists(tffn): print("Using package: "+sffn) if opts.linksources: os.link(sffn, tffn) else: os.symlink(sffn, tffn) if prefer_pkgs: for name, path in prefer_pkgs.items(): if name == filename: print("Using prefered package: " + path + "/" + filename) os.unlink(tffn) if opts.linksources: os.link(path + "/" + filename, tffn) else: os.symlink(path + "/" + filename, tffn) # Is a obsrepositories tag used? 
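# For orientation: the parsing below reacts to <repository> elements in the kiwi
# description. An assumed minimal example (not taken from a real appliance):
#   <repository><source path="obsrepositories:/"/></repository>
#       -> expanded to every buildinfo path that is mirrored under repos/
#   <repository priority="1"><source path="obs://openSUSE:Factory/standard"/></repository>
#       -> a single project/repository pair
# Either form is translated into pairs of '--kiwi-parameter <arg>' options
# (--add-repo, --add-repotype rpm-md and, if a priority is given, a repo priority option).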
try: tree = ET.parse(build_descr) except: print('could not parse the kiwi file:', file=sys.stderr) print(open(build_descr).read(), file=sys.stderr) sys.exit(1) root = tree.getroot() # product for xml in root.findall('instsource'): if xml.find('instrepo').find('source').get('path') == 'obsrepositories:/': print("obsrepositories:/ for product builds is not yet supported in osc!") sys.exit(1) # appliance expand_obsrepos=None for xml in root.findall('repository'): if xml.find('source').get('path') == 'obsrepositories:/': expand_obsrepos=True if expand_obsrepos: buildargs.append('--kiwi-parameter') buildargs.append('--ignore-repos') for xml in root.findall('repository'): if xml.find('source').get('path') == 'obsrepositories:/': for path in bi.pathes: if not os.path.isdir("repos/"+path): continue buildargs.append('--kiwi-parameter') buildargs.append('--add-repo') buildargs.append('--kiwi-parameter') buildargs.append("repos/"+path) buildargs.append('--kiwi-parameter') buildargs.append('--add-repotype') buildargs.append('--kiwi-parameter') buildargs.append('rpm-md') if xml.get('priority'): buildargs.append('--kiwi-parameter') buildargs.append('--add-repoprio='+xml.get('priority')) else: m = re.match(r"obs://[^/]+/([^/]+)/(\S+)", xml.find('source').get('path')) if not m: # short path without obs instance name m = re.match(r"obs://([^/]+)/(.+)", xml.find('source').get('path')) project=m.group(1).replace(":", ":/") repo=m.group(2) buildargs.append('--kiwi-parameter') buildargs.append('--add-repo') buildargs.append('--kiwi-parameter') buildargs.append("repos/"+project+"/"+repo) buildargs.append('--kiwi-parameter') buildargs.append('--add-repotype') buildargs.append('--kiwi-parameter') buildargs.append('rpm-md') if xml.get('priority'): buildargs.append('--kiwi-parameter') buildargs.append('--add-repopriority='+xml.get('priority')) if vm_type == "xen" or vm_type == "kvm" or vm_type == "lxc": print('Skipping verification of package signatures due to secure VM build') elif bi.pacsuffix == 'rpm': if opts.no_verify: print('Skipping verification of package signatures') else: print('Verifying integrity of cached packages') verify_pacs(bi) elif bi.pacsuffix == 'deb': if opts.no_verify or opts.noinit: print('Skipping verification of package signatures') else: print('WARNING: deb packages get not verified, they can compromise your system !') else: print('WARNING: unknown packages get not verified, they can compromise your system !') for i in bi.deps: if i.hdrmd5: from .util import packagequery hdrmd5 = packagequery.PackageQuery.queryhdrmd5(i.fullfilename) if not hdrmd5: print("Error: cannot get hdrmd5 for %s" % i.fullfilename) sys.exit(1) if hdrmd5 != i.hdrmd5: print("Error: hdrmd5 mismatch for %s: %s != %s" % (i.fullfilename, hdrmd5, i.hdrmd5)) sys.exit(1) print('Writing build configuration') if build_type == 'kiwi': rpmlist = [ '%s %s\n' % (i.name, i.fullfilename) for i in bi.deps if not i.noinstall ] else: rpmlist = [ '%s %s\n' % (i.name, i.fullfilename) for i in bi.deps ] for i in imagebins: rpmlist.append('%s preinstallimage\n' % i) rpmlist += [ '%s %s\n' % (i[0], i[1]) for i in rpmlist_prefers ] if imagefile: rpmlist.append('preinstallimage: %s\n' % imagefile) if imagesource: rpmlist.append('preinstallimagesource: %s\n' % imagesource) rpmlist.append('preinstall: ' + ' '.join(bi.preinstall_list) + '\n') rpmlist.append('vminstall: ' + ' '.join(bi.vminstall_list) + '\n') rpmlist.append('runscripts: ' + ' '.join(bi.runscripts_list) + '\n') if build_type != 'kiwi' and bi.noinstall_list: 
rpmlist.append('noinstall: ' + ' '.join(bi.noinstall_list) + '\n') if build_type != 'kiwi' and bi.installonly_list: rpmlist.append('installonly: ' + ' '.join(bi.installonly_list) + '\n') rpmlist_file = NamedTemporaryFile(prefix='rpmlist.') rpmlist_filename = rpmlist_file.name rpmlist_file.writelines(rpmlist) rpmlist_file.flush() subst = { 'repo': repo, 'arch': arch, 'project' : prj, 'package' : pacname } vm_options = [] # XXX check if build-device present my_build_device = '' if config['build-device']: my_build_device = config['build-device'] % subst else: # obs worker uses /root here but that collides with the # /root directory if the build root was used without vm # before my_build_device = build_root + '/img' need_root = True if vm_type: if config['build-swap']: my_build_swap = config['build-swap'] % subst else: my_build_swap = build_root + '/swap' vm_options = [ '--vm-type=%s' % vm_type ] if vm_telnet: vm_options += [ '--vm-telnet=' + vm_telnet ] if config['build-memory']: vm_options += [ '--memory=' + config['build-memory'] ] if vm_type != 'lxc': vm_options += [ '--vm-disk=' + my_build_device ] vm_options += [ '--vm-swap=' + my_build_swap ] vm_options += [ '--logfile=%s/.build.log' % build_root ] if vm_type == 'kvm': if os.access(build_root, os.W_OK) and os.access('/dev/kvm', os.W_OK): # so let's hope there's also an fstab entry need_root = False if config['build-kernel']: vm_options += [ '--vm-kernel=' + config['build-kernel'] ] if config['build-initrd']: vm_options += [ '--vm-initrd=' + config['build-initrd'] ] build_root += '/.mount' if config['build-memory']: vm_options += [ '--memory=' + config['build-memory'] ] if config['build-vmdisk-rootsize']: vm_options += [ '--vmdisk-rootsize=' + config['build-vmdisk-rootsize'] ] if config['build-vmdisk-swapsize']: vm_options += [ '--vmdisk-swapsize=' + config['build-vmdisk-swapsize'] ] if config['build-vmdisk-filesystem']: vm_options += [ '--vmdisk-filesystem=' + config['build-vmdisk-filesystem'] ] if config['build-vm-user']: vm_options += [ '--vm-user=' + config['build-vm-user'] ] if opts.preload: print("Preload done for selected repo/arch.") sys.exit(0) print('Running build') cmd = [ config['build-cmd'], '--root='+build_root, '--rpmlist='+rpmlist_filename, '--dist='+bc_filename, '--arch='+bi.buildarch ] cmd += specialcmdopts + vm_options + buildargs cmd += [ build_descr ] if need_root: sucmd = config['su-wrapper'].split() if sucmd[0] == 'su': if sucmd[-1] == '-c': sucmd.pop() cmd = sucmd + ['-s', cmd[0], 'root', '--' ] + cmd[1:] else: cmd = sucmd + cmd # change personality, if needed if hostarch != bi.buildarch and bi.buildarch in change_personality: cmd = [ change_personality[bi.buildarch] ] + cmd try: rc = run_external(cmd[0], *cmd[1:]) if rc: print() print('The buildroot was:', build_root) sys.exit(rc) except KeyboardInterrupt as i: print("keyboard interrupt, killing build ...") cmd.append('--kill') run_external(cmd[0], *cmd[1:]) raise i pacdir = os.path.join(build_root, '.build.packages') if os.path.islink(pacdir): pacdir = os.readlink(pacdir) pacdir = os.path.join(build_root, pacdir) if os.path.exists(pacdir): (s_built, b_built) = get_built_files(pacdir, bi.buildtype) print() if s_built: print(s_built) print() print(b_built) if opts.keep_pkgs: for i in b_built.splitlines() + s_built.splitlines(): shutil.copy2(i, os.path.join(opts.keep_pkgs, os.path.basename(i))) if bi_file: bi_file.close() if bc_file: bc_file.close() rpmlist_file.close()
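# For reference, a hedged sketch of the rpmlist handed to the build script via
# --rpmlist above; the package names and cache paths below are invented:
#   aaa_base /var/tmp/osbuild-packagecache/.../aaa_base-84.87-1.1.x86_64.rpm
#   glibc /var/tmp/osbuild-packagecache/.../glibc-2.31-1.1.x86_64.rpm
#   preinstall: aaa_base filesystem glibc rpm
#   vminstall: util-linux perl
#   runscripts: aaa_base
# kiwi builds additionally get 'preinstallimage:'/'preinstallimagesource:' lines and
# per-binary '<name> preinstallimage' entries, as written above.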
def main(apiurl, opts, argv): repo = argv[0] arch = argv[1] build_descr = argv[2] xp = [] build_root = None cache_dir = None build_uid = '' vm_type = config['build-type'] build_descr = os.path.abspath(build_descr) build_type = os.path.splitext(build_descr)[1][1:] if build_type not in ['spec', 'dsc', 'kiwi']: raise oscerr.WrongArgs( 'Unknown build type: \'%s\'. Build description should end in .spec, .dsc or .kiwi.' \ % build_type) if not os.path.isfile(build_descr): raise oscerr.WrongArgs( 'Error: build description file named \'%s\' does not exist.' % build_descr) buildargs = [] if not opts.userootforbuild: buildargs.append('--norootforbuild') if opts.clean: buildargs.append('--clean') if opts.noinit: buildargs.append('--noinit') if opts.nochecks: buildargs.append('--no-checks') if not opts.no_changelog: buildargs.append('--changelog') if opts.root: build_root = opts.root if opts.jobs: buildargs.append('--jobs=%s' % opts.jobs) elif config['build-jobs'] > 1: buildargs.append('--jobs=%s' % config['build-jobs']) if opts.icecream or config['icecream'] != '0': if opts.icecream: num = opts.icecream else: num = config['icecream'] if int(num) > 0: buildargs.append('--icecream=%s' % num) xp.append('icecream') xp.append('gcc-c++') if opts.ccache: buildargs.append('--ccache') xp.append('ccache') if opts.linksources: buildargs.append('--linksources') if opts.baselibs: buildargs.append('--baselibs') if opts.debuginfo: buildargs.append('--debug') if opts._with: for o in opts._with: buildargs.append('--with=%s' % o) if opts.without: for o in opts.without: buildargs.append('--without=%s' % o) if opts.define: for o in opts.define: buildargs.append('--define=%s' % o) if config['build-uid']: build_uid = config['build-uid'] if opts.build_uid: build_uid = opts.build_uid if build_uid: buildidre = re.compile('^[0-9]{1,5}:[0-9]{1,5}$') if build_uid == 'caller': buildargs.append('--uid=%s:%s' % (os.getuid(), os.getgid())) elif buildidre.match(build_uid): buildargs.append('--uid=%s' % build_uid) else: print >> sys.stderr, 'Error: build-uid arg must be 2 colon separated numerics: "uid:gid" or "caller"' return 1 if opts.vm_type: vm_type = opts.vm_type if opts.alternative_project: prj = opts.alternative_project pac = '_repository' else: prj = store_read_project(os.curdir) if opts.local_package: pac = '_repository' else: pac = store_read_package(os.curdir) if opts.shell: buildargs.append("--shell") # make it possible to override configuration of the rc file for var in ['OSC_PACKAGECACHEDIR', 'OSC_SU_WRAPPER', 'OSC_BUILD_ROOT']: val = os.getenv(var) if val: if var.startswith('OSC_'): var = var[4:] var = var.lower().replace('_', '-') if config.has_key(var): print 'Overriding config value for %s=\'%s\' with \'%s\'' % ( var, config[var], val) config[var] = val pacname = pac if pacname == '_repository': if not opts.local_package: try: pacname = store_read_package(os.curdir) except oscerr.NoWorkingCopy: opts.local_package = True if opts.local_package: pacname = os.path.splitext(build_descr)[0] apihost = urlparse.urlsplit(apiurl)[1] if not build_root: build_root = config['build-root'] % { 'repo': repo, 'arch': arch, 'project': prj, 'package': pacname, 'apihost': apihost } cache_dir = config['packagecachedir'] % {'apihost': apihost} extra_pkgs = [] if not opts.extra_pkgs: extra_pkgs = config['extra-pkgs'] elif opts.extra_pkgs != ['']: extra_pkgs = opts.extra_pkgs if xp: extra_pkgs += xp prefer_pkgs = {} build_descr_data = open(build_descr).read() # XXX: dirty hack but there's no api to provide custom defines if opts.without: s 
= '' for i in opts.without: s += "%%define _without_%s 1\n" % i s += "%%define _with_%s 0\n" % i build_descr_data = s + build_descr_data if opts._with: s = '' for i in opts._with: s += "%%define _without_%s 0\n" % i s += "%%define _with_%s 1\n" % i build_descr_data = s + build_descr_data if opts.define: s = '' for i in opts.define: s += "%%define %s\n" % i build_descr_data = s + build_descr_data if opts.prefer_pkgs: print 'Scanning the following dirs for local packages: %s' % ', '.join( opts.prefer_pkgs) prefer_pkgs, cpio = get_prefer_pkgs(opts.prefer_pkgs, arch, build_type) cpio.add(os.path.basename(build_descr), build_descr_data) build_descr_data = cpio.get() # special handling for overlay and rsync-src/dest specialcmdopts = [] if opts.rsyncsrc or opts.rsyncdest: if not opts.rsyncsrc or not opts.rsyncdest: raise oscerr.WrongOptions( 'When using --rsync-{src,dest} both parameters have to be specified.' ) myrsyncsrc = os.path.abspath( os.path.expanduser(os.path.expandvars(opts.rsyncsrc))) if not os.path.isdir(myrsyncsrc): raise oscerr.WrongOptions('--rsync-src %s is no valid directory!' % opts.rsyncsrc) # can't check destination - its in the target chroot ;) - but we can check for sanity myrsyncdest = os.path.expandvars(opts.rsyncdest) if not os.path.isabs(myrsyncdest): raise oscerr.WrongOptions( '--rsync-dest %s is no absolute path (starting with \'/\')!' % opts.rsyncdest) specialcmdopts = [ '--rsync-src=' + myrsyncsrc, '--rsync-dest=' + myrsyncdest ] if opts.overlay: myoverlay = os.path.abspath( os.path.expanduser(os.path.expandvars(opts.overlay))) if not os.path.isdir(myoverlay): raise oscerr.WrongOptions('--overlay %s is no valid directory!' % opts.overlay) specialcmdopts += ['--overlay=' + myoverlay] bi_file = None bc_file = None bi_filename = '_buildinfo-%s-%s.xml' % (repo, arch) bc_filename = '_buildconfig-%s-%s' % (repo, arch) if is_package_dir('.') and os.access(osc.core.store, os.W_OK): bi_filename = os.path.join(os.getcwd(), osc.core.store, bi_filename) bc_filename = os.path.join(os.getcwd(), osc.core.store, bc_filename) elif not os.access('.', os.W_OK): bi_file = NamedTemporaryFile(prefix=bi_filename) bi_filename = bi_file.name bc_file = NamedTemporaryFile(prefix=bc_filename) bc_filename = bc_file.name else: bi_filename = os.path.abspath(bi_filename) bc_filename = os.path.abspath(bc_filename) try: if opts.noinit: if not os.path.isfile(bi_filename): raise oscerr.WrongOptions( '--noinit is not possible, no local buildinfo file') print 'Use local \'%s\' file as buildinfo' % bi_filename if not os.path.isfile(bc_filename): raise oscerr.WrongOptions( '--noinit is not possible, no local buildconfig file') print 'Use local \'%s\' file as buildconfig' % bc_filename elif opts.offline: if not os.path.isfile(bi_filename): raise oscerr.WrongOptions( '--offline is not possible, no local buildinfo file') print 'Use local \'%s\' file as buildinfo' % bi_filename if not os.path.isfile(bc_filename): raise oscerr.WrongOptions( '--offline is not possible, no local buildconfig file') else: print 'Getting buildinfo from server and store to %s' % bi_filename bi_text = ''.join( get_buildinfo(apiurl, prj, pac, repo, arch, specfile=build_descr_data, addlist=extra_pkgs)) if not bi_file: bi_file = open(bi_filename, 'w') # maybe we should check for errors before saving the file bi_file.write(bi_text) bi_file.flush() print 'Getting buildconfig from server and store to %s' % bc_filename bc = get_buildconfig(apiurl, prj, repo) if not bc_file: bc_file = open(bc_filename, 'w') bc_file.write(bc) bc_file.flush() 
except urllib2.HTTPError, e: if e.code == 404: # check what caused the 404 if meta_exists(metatype='prj', path_args=(quote_plus(prj), ), template_args=None, create_new=False, apiurl=apiurl): pkg_meta_e = None try: # take care, not to run into double trouble. pkg_meta_e = meta_exists(metatype='pkg', path_args=(quote_plus(prj), quote_plus(pac)), template_args=None, create_new=False, apiurl=apiurl) except: pass if pkg_meta_e: print >> sys.stderr, 'ERROR: Either wrong repo/arch as parameter or a parse error of .spec/.dsc/.kiwi file due to syntax error' else: print >>sys.stderr, 'The package \'%s\' does not exist - please ' \ 'rerun with \'--local-package\'' % pac else: print >>sys.stderr, 'The project \'%s\' does not exist - please ' \ 'rerun with \'--alternative-project <alternative_project>\'' % prj sys.exit(1) else: raise
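# The 404 handling above (in both variants of main) reduces to this decision tree:
#   project meta missing               -> suggest --alternative-project <project>
#   project exists, package missing    -> suggest --local-package
#   project and package both exist     -> the .spec/.dsc/.kiwi file itself failed to
#                                         parse, or repo/arch were wrong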
def do_staging(self, subcmd, opts, *args): """${cmd_name}: Commands to work with staging projects ${cmd_option_list} "accept" will accept all requests in $PROJECT:Staging:<LETTER> into $PROJECT If openSUSE:* project, requests marked ready from adi stagings will also be accepted. "acheck" will check if it is safe to accept new staging projects As $PROJECT is syncing the right package versions between /standard, /totest and /snapshot, it is important that the projects are clean prior to a checkin round. "adi" will list already staged requests, stage new requests, and supersede requests where applicable. New adi stagings will be created for new packages based on the grouping options used. The default grouping is by source project. When adi stagings are ready the request will be marked ready, unstaged, and the adi staging deleted. "check" will check if all packages are links without changes "check_duplicate_binaries" will list binaries provided by multiple packages "config" will modify or view staging-specific configuration Target project OSRT:Config attribute configuration applies to all stagings. Both configuration locations follow the .oscrc format (space separated list). config Print all staging configuration. config key Print the value of key for stagings. config key value... Set the value of key for stagings. config --clear Clear all staging configuration. config --clear key Clear (unset) a single key from staging configuration. config --append key value... Append value to existing value or set if no existing value. All of the above may be restricted to a set of stagings. The staging configuration is automatically cleared anytime staging pseudometa is cleared (accept, or unstage all requests). The keys that may be set in staging configuration are: - repo_checker-binary-whitelist[-arch]: appended to target project list - todo: text to be printed after staging is accepted "cleanup_rings" will try to clean up rings content and print out problems "freeze" will freeze the sources of the project's links while not affecting the source packages "frozenage" will show when the respective staging project was last frozen "ignore" will ignore a request from "list" and "adi" commands until unignored "unignore" will remove requests from the ignore list If the --cleanup flag is included then all ignored requests that were changed from state new or review more than 3 days ago will be removed. "list" will list/supersede requests for ring packages or all if no rings. "lock" will acquire a hold on the project in order to execute multiple commands and prevent others from interrupting. An example: lock -m "checkin round" list --supersede adi accept A B C D E unlock Each command will update the lock to keep it up-to-date. "repair" will attempt to repair the state of a request that has been corrupted. Use the --cleanup flag to include all untracked requests. "select" will add requests to the project Stagings are expected to be either in short-hand or the full project name. For example, letter or named stagings can be specified simply as A, B, Gcc6, etc, while adi stagings can be specified as adi:1, adi:2, etc. Currently, adi stagings are not supported in proposal mode. Requests may either be the target package or the request ID. When using --filter-by or --group-by the xpath will be applied to the request node as returned by OBS. Use the following on a current request to see the XML structure. osc api /request/1337 A number of additional values will supplement the normal request node.
- ./action/target/@devel_project: the devel project for the package - ./action/target/@devel_project_super: super devel project if relevant - ./action/target/@ring: the ring to which the package belongs - ./@aged: either True or False based on splitter-request-age-threshold - ./@nonfree: set to nonfree if targeting nonfree sub project - ./@ignored: either False or the provided message Some useful examples: --filter-by './action/target[starts-with(@package, "yast-")]' --filter-by './action/target/[@devel_project="YaST:Head"]' --filter-by './action/target[starts-with(@ring, "1")]' --filter-by '@id!="1234567"' --filter-by 'contains(description, "#Portus")' --group-by='./action/target/@devel_project' --group-by='./action/target/@ring' Multiple filter-by or group-by options may be used at the same time. Note that when using proposal mode, multiple stagings to consider may be provided in addition to a list of requests by which to filter. A more complex example: select --group-by='./action/target/@devel_project' A B C 123 456 789 This will separate the requests 123, 456, 789 by devel project and only consider stagings A, B, or C, if available, for placement. Passing no arguments is also a valid choice and will propose all non-ignored requests into the first available staging. Note that bootstrapped stagings are only used when either required or no other stagings are available. Another useful example is placing all open requests into a specific letter staging with: select A Built-in strategies may be specified as well. For example: select --strategy devel select --strategy quick select --strategy special select --strategy super The default is none, and custom is used when any filter-by or group-by arguments are provided. To merge applicable requests into an existing staging: select --merge A To automatically try all available strategies: select --try-strategies These concepts can be combined and interactive mode allows the proposal to be modified before it is executed. Moving requests can be accomplished using the --move flag. For example, to move already staged pac1 and pac2 to staging B use the following: select --move B pac1 pac2 The staging in which the requests are staged will automatically be determined and the requests will be removed from that staging and placed in the specified staging. Related to this, the --filter-from option may be used in conjunction with --move to only move requests already staged in a specific staging. This can be useful if a staging master is responsible for a specific set of packages and wants to move them into a different staging when they were already placed in a mixed staging. For example, if one had a file with a list of packages the following would move any of them found in staging A to staging B. select --move --filter-from A B $(< package.list) "unselect" will remove requests from the project - pushing them back to the backlog If a message is included the requests will be ignored first. Use the --cleanup flag to include all obsolete requests. "unlock" will remove the staging lock in case it gets stuck or a manual hold If a command lock gets stuck while a hold is placed on a project the unlock command will need to be run twice since there are two layers of locks. "rebuild" will rebuild broken packages in the given stagings or all The rebuild command will only trigger builds for packages with less than 3 failures since the last success or if the build log indicates a stall.
If the force option is included, the rebuild checks will be ignored and all packages failing to build will be triggered. "setprio" will set priority of requests within stagings If no stagings are specified, all stagings will be used. The default priority is "important"; the possible values are: "critical", "important", "moderate" or "low". "supersede" will supersede requests where applicable. A request list can be used to limit what is superseded. Usage: osc staging accept [--force] [--no-cleanup] [LETTER...] osc staging acheck osc staging adi [--move] [--by-develproject] [--split] [REQUEST...] osc staging check [--old] [STAGING...] osc staging check_duplicate_binaries osc staging config [--append] [--clear] [STAGING...] [key] [value] osc staging cleanup_rings osc staging freeze [--no-bootstrap] STAGING... osc staging frozenage [STAGING...] osc staging ignore [-m MESSAGE] REQUEST... osc staging unignore [--cleanup] [REQUEST...|all] osc staging list [--supersede] osc staging lock [-m MESSAGE] osc staging select [--no-freeze] [--move [--filter-from STAGING]] [--add PACKAGE] STAGING REQUEST... osc staging select [--no-freeze] [--interactive|--non-interactive] [--filter-by...] [--group-by...] [--merge] [--try-strategies] [--strategy] [STAGING...] [REQUEST...] osc staging unselect [--cleanup] [-m MESSAGE] [REQUEST...] osc staging unlock osc staging rebuild [--force] [STAGING...] osc staging repair [--cleanup] [REQUEST...] osc staging setprio [STAGING...] [priority] osc staging supersede [REQUEST...] """ if opts.version: self._print_version() # verify the argument counts match the commands if len(args) == 0: raise oscerr.WrongArgs('No command given, see "osc help staging"!') cmd = args[0] if cmd in ( 'accept', 'adi', 'check', 'config', 'frozenage', 'unignore', 'select', 'unselect', 'rebuild', 'repair', 'setprio', 'supersede', ): min_args, max_args = 0, None elif cmd in ( 'freeze', 'ignore', ): min_args, max_args = 1, None elif cmd in ( 'acheck', 'check_duplicate_binaries', 'cleanup_rings', 'list', 'lock', 'unlock', ): min_args, max_args = 0, 0 else: raise oscerr.WrongArgs('Unknown command: %s' % cmd) args = clean_args(args) if len(args) - 1 < min_args: raise oscerr.WrongArgs('Too few arguments.') if max_args is not None and len(args) - 1 > max_args: raise oscerr.WrongArgs('Too many arguments.') # Allow for determining project from osc store. if not opts.project: if core.is_project_dir('.'): opts.project = core.store_read_project('.') else: opts.project = 'Factory' # Cache the remote config fetch. Cache.init() # Init the OBS access and configuration opts.project = self._full_project_name(opts.project) opts.apiurl = self.get_api_url() opts.verbose = False Config(opts.apiurl, opts.project) colorama.init(autoreset=True, strip=(opts.no_color or not bool(int(conf.config.get('staging.color', True))))) # Allow colors to be changed. for name in dir(Fore): if not name.startswith('_'): # .oscrc requires keys to be lower-case. value = conf.config.get('staging.color.'
+ name.lower()) if value: setattr(Fore, name, ansi.code_to_chars(value)) if opts.wipe_cache: Cache.delete_all() api = StagingAPI(opts.apiurl, opts.project) needed = lock_needed(cmd, opts) with OBSLock(opts.apiurl, opts.project, reason=cmd, needed=needed) as lock: # call the respective command and parse args by need if cmd == 'check': if len(args) == 1: CheckCommand(api).perform(None, opts.old) else: for prj in args[1:]: CheckCommand(api).perform(prj, opts.old) print() elif cmd == 'check_duplicate_binaries': CheckDuplicateBinariesCommand(api).perform(opts.save) elif cmd == 'config': projects = set() key = value = None stagings = api.get_staging_projects_short(None) + \ api.get_staging_projects() for arg in args[1:]: if arg in stagings: projects.add(api.prj_from_short(arg)) elif key is None: key = arg elif value is None: value = arg else: value += ' ' + arg if not len(projects): projects = api.get_staging_projects() ConfigCommand(api).perform(projects, key, value, opts.append, opts.clear) elif cmd == 'freeze': for prj in args[1:]: prj = api.prj_from_short(prj) print(Fore.YELLOW + prj) FreezeCommand(api).perform(prj, copy_bootstrap=opts.bootstrap) elif cmd == 'frozenage': projects = api.get_staging_projects_short() if len(args) == 1 else args[1:] for prj in projects: prj = api.prj_from_letter(prj) print('{} last frozen {}{:.1f} days ago'.format( Fore.YELLOW + prj + Fore.RESET, Fore.GREEN if api.prj_frozen_enough(prj) else Fore.RED, api.days_since_last_freeze(prj))) elif cmd == 'acheck': # Is it safe to accept? Meaning: /totest contains what it should and is not dirty version_totest = api.get_binary_version(api.project, "openSUSE-release.rpm", repository="totest", arch="x86_64") if version_totest: version_openqa = api.pseudometa_file_load('version_totest') totest_dirty = api.is_repo_dirty(api.project, 'totest') print("version_openqa: %s / version_totest: %s / totest_dirty: %s\n" % (version_openqa, version_totest, totest_dirty)) else: print("acheck is unavailable in %s!\n" % (api.project)) elif cmd == 'accept': # Is it safe to accept? Meaning: /totest contains what it should and is not dirty version_totest = api.get_binary_version(api.project, "openSUSE-release.rpm", repository="totest", arch="x86_64") if version_totest is None or opts.force: # SLE does not have a totest_version or openqa_version - ignore it version_openqa = version_totest totest_dirty = False else: version_openqa = api.pseudometa_file_load('version_totest') totest_dirty = api.is_repo_dirty(api.project, 'totest') if version_openqa == version_totest and not totest_dirty: cmd = AcceptCommand(api) for prj in args[1:]: if cmd.perform(api.prj_from_letter(prj), opts.force): cmd.reset_rebuild_data(prj) else: return if not opts.no_cleanup: if api.item_exists(api.prj_from_letter(prj)): cmd.cleanup(api.prj_from_letter(prj)) cmd.accept_other_new() if opts.project.startswith('openSUSE:'): cmd.update_factory_version() if api.item_exists(api.crebuild): cmd.sync_buildfailures() else: print("Not safe to accept: /totest is not yet synced") elif cmd == 'unselect': if opts.message: print('Ignoring requests first') IgnoreCommand(api).perform(args[1:], opts.message) UnselectCommand(api).perform(args[1:], opts.cleanup) elif cmd == 'select': # Include list of all stagings in short-hand and by full name. 
existing_stagings = api.get_staging_projects_short(None) existing_stagings += api.get_staging_projects() stagings = [] requests = [] for arg in args[1:]: # Since requests may be given by either request ID or package # name and stagings may include multi-letter special stagings # there is no easy way to distinguish between stagings and # requests in arguments. Therefore, check if argument is in the # list of short-hand and full project name stagings, otherwise # consider it a request. This also allows for special stagings # with the same name as package, but the staging will be assumed # first time around. The current practice seems to be to start a # special staging with a capital letter which makes them unique. # lastly adi stagings are consistently prefix with adi: which # also makes it consistent to distinguish them from request IDs. if arg in existing_stagings and arg not in stagings: stagings.append(api.extract_staging_short(arg)) elif arg not in requests: requests.append(arg) if len(stagings) != 1 or len(requests) == 0 or opts.filter_by or opts.group_by: if opts.move or opts.filter_from: print('--move and --filter-from must be used with explicit staging and request list') return open_requests = api.get_open_requests({'withhistory': 1}, include_nonfree=False) if len(open_requests) == 0: print('No open requests to consider') return splitter = RequestSplitter(api, open_requests, in_ring=True) considerable = splitter.stagings_load(stagings) if considerable == 0: print('No considerable stagings on which to act') return if opts.merge: splitter.merge() if opts.try_strategies: splitter.strategies_try() if len(requests) > 0: splitter.strategy_do('requests', requests=requests) if opts.strategy: splitter.strategy_do(opts.strategy) elif opts.filter_by or opts.group_by: kwargs = {} if opts.filter_by: kwargs['filters'] = opts.filter_by if opts.group_by: kwargs['groups'] = opts.group_by splitter.strategy_do('custom', **kwargs) else: if opts.merge: # Merge any none strategies before final none strategy. splitter.merge(strategy_none=True) splitter.strategy_do('none') splitter.strategy_do_non_bootstrapped('none') proposal = splitter.proposal if len(proposal) == 0: print('Empty proposal') return if opts.interactive: with tempfile.NamedTemporaryFile(suffix='.yml') as temp: temp.write(yaml.safe_dump(splitter.proposal, default_flow_style=False) + '\n\n') if len(splitter.requests): temp.write('# remaining requests:\n') for request in splitter.requests: temp.write('# {}: {}\n'.format( request.get('id'), request.find('action/target').get('package'))) temp.write('\n') temp.write('# move requests between stagings or comment/remove them\n') temp.write('# change the target staging for a group\n') temp.write('# remove the group, requests, staging, or strategy to skip\n') temp.write('# stagings\n') if opts.merge: temp.write('# - mergeable: {}\n' .format(', '.join(sorted(splitter.stagings_mergeable + splitter.stagings_mergeable_none)))) temp.write('# - considered: {}\n' .format(', '.join(sorted(splitter.stagings_considerable)))) temp.write('# - remaining: {}\n' .format(', '.join(sorted(splitter.stagings_available)))) temp.flush() editor = os.getenv('EDITOR') if not editor: editor = 'xdg-open' return_code = subprocess.call(editor.split(' ') + [temp.name]) proposal = yaml.safe_load(open(temp.name).read()) # Filter invalidated groups from proposal. 
keys = ['group', 'requests', 'staging', 'strategy'] for group, info in sorted(proposal.items()): for key in keys: if not info.get(key): del proposal[group] break print(yaml.safe_dump(proposal, default_flow_style=False)) print('Accept proposal? [y/n] (y): ', end='') if opts.non_interactive: print('y') else: response = input().lower() if response != '' and response != 'y': print('Quit') return for group, info in sorted(proposal.items()): print('Staging {} in {}'.format(group, info['staging'])) # SelectCommand expects strings. request_ids = map(str, info['requests'].keys()) target_project = api.prj_from_short(info['staging']) if 'merge' not in info: # Assume that the original splitter_info is desireable # and that this staging is simply manual followup. api.set_splitter_info_in_prj_pseudometa(target_project, info['group'], info['strategy']) SelectCommand(api, target_project) \ .perform(request_ids, no_freeze=opts.no_freeze) else: target_project = api.prj_from_short(stagings[0]) if opts.add: api.mark_additional_packages(target_project, [opts.add]) else: filter_from = api.prj_from_short(opts.filter_from) if opts.filter_from else None SelectCommand(api, target_project) \ .perform(requests, opts.move, filter_from, opts.no_freeze) elif cmd == 'cleanup_rings': CleanupRings(api).perform() elif cmd == 'ignore': IgnoreCommand(api).perform(args[1:], opts.message) elif cmd == 'unignore': UnignoreCommand(api).perform(args[1:], opts.cleanup) elif cmd == 'list': ListCommand(api).perform(supersede=opts.supersede) elif cmd == 'lock': lock.hold(opts.message) elif cmd == 'adi': AdiCommand(api).perform(args[1:], move=opts.move, by_dp=opts.by_develproject, split=opts.split) elif cmd == 'rebuild': RebuildCommand(api).perform(args[1:], opts.force) elif cmd == 'repair': RepairCommand(api).perform(args[1:], opts.cleanup) elif cmd == 'setprio': stagings = [] priority = None priorities = ['critical', 'important', 'moderate', 'low'] for arg in args[1:]: if arg in priorities: priority = arg else: stagings.append(arg) PrioCommand(api).perform(stagings, priority) elif cmd == 'supersede': SupersedeCommand(api).perform(args[1:]) elif cmd == 'unlock': lock.release(force=True)
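# Rough shape of the proposal YAML edited during interactive 'select' above; the
# group name, staging letter, request numbers and package names are invented:
#   devel_project_YaST:
#     group: devel_project_YaST
#     staging: A
#     strategy: custom
#     requests:
#       123456: yast2-core
#       123457: yast2-network
# Deleting a group, or any of its group/requests/staging/strategy keys, drops it
# from the proposal before SelectCommand runs, as enforced by the filter loop above.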