def test_fails_krb_login_wo_ticket(shared_datadir):
    """
    GIVEN we have a profile that has an authtype of 'kerberos'
    WHEN we try to log in with a koji client that is not kinit'ed
    THEN we should get back an AuthError and not be logged in.
    """
    tw = KojiWrapperBase(profile='mykoji',
                         user_config=shared_datadir / 'mykoji.conf')
    logged_in = tw.login()
    assert tw.profile == 'mykoji'
    assert logged_in is False

def test_fails_login_w_bad_ssl(shared_datadir):
    """
    GIVEN we have a profile that connects via ssl
    WHEN we try to log in with a koji client that does not have the
         correct credentials
    THEN we should get back an AuthError and not be logged in.
    """
    tw = KojiWrapperBase(profile='ssl_koji',
                         user_config=shared_datadir / 'ssl_koji.conf')
    logged_in = tw.login()
    assert tw.profile == 'ssl_koji'
    assert logged_in is False

def test_logs_in_w_ssl(shared_datadir):
    """
    GIVEN we have a profile that connects via ssl
    WHEN we try to log in with a koji client that has the correct credentials
    THEN we should get back True and successfully log in.
    """
    tw = KojiWrapperBase(profile='ssl_koji',
                         user_config=shared_datadir / 'ssl_koji.conf')
    tw.session.ssl_login = MagicMock(return_value=True)
    logged_in = tw.login()
    assert tw.profile == 'ssl_koji'
    assert logged_in is True
    assert tw.session.ssl_login.called

def test_logs_in_w_kerberos(shared_datadir):
    """
    GIVEN we have a profile that has an authtype of 'kerberos'
    WHEN we try to log in with a koji client that is kinit'ed
    THEN we should get back True and successfully log in.
    """
    tw = KojiWrapperBase(profile='mykoji',
                         user_config=shared_datadir / 'mykoji.conf')
    tw.session.krb_login = MagicMock(return_value=True)
    logged_in = tw.login()
    assert tw.profile == 'mykoji'
    assert logged_in is True
    assert tw.session.krb_login.called

def tag_over(src_tag, compare_tag, dest_tag, missing, display):
    delta_info = delta(src_tag, compare_tag)

    # This could be done as a list comprehension, but it becomes less readable
    builds = []
    for comp in delta_info['downgrades']:
        builds.append(drop_epoch(delta_info['downgrades'][comp]['old']))

    if missing:
        for comp in delta_info['removed']:
            builds.append(drop_epoch(delta_info['removed'][comp]))

    # If we're tagging somewhere else, avoid duplicate tagging.
    # Search all builds, not just latest.
    if dest_tag != compare_tag:
        brew = KojiWrapperBase(profile='brew')
        t = KojiTag(session=brew, tag=dest_tag)
        nvrs = [b['nvr'] for b in t.builds()]
        already_tagged = set(builds) & set(nvrs)
        builds = list(set(builds) - already_tagged)

    # Simply print what we'd tag
    if display:
        for b in builds:
            print(b)
        return 0

    # Named tag_cmd so the local does not shadow this function's own name.
    tag_cmd = ['brew', 'tag-pkg', dest_tag]
    chunked_builds = chunky(builds, 10)
    for chunk in chunked_builds:
        cmd = copy.copy(tag_cmd)
        cmd.extend(chunk)
        subprocess.call(cmd)
    return 0

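# For context: a minimal sketch of what the `chunky` helper used above is
# assumed to do -- split a sequence into successive slices of at most
# `size` items so each `brew tag-pkg` command line stays short. This is an
# illustration, not necessarily the project's actual implementation:
#
#     def chunky(seq, size):
#         """Split seq into consecutive chunks of at most size items."""
#         return [seq[i:i + size] for i in range(0, len(seq), size)]
#
#     chunky(['a', 'b', 'c'], 2)  # -> [['a', 'b'], ['c']]
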
def test_config_throws_error_on_no_profile():
    """
    GIVEN we do NOT have the profile defined in the standard location
          koji looks
    WHEN we try to create a koji.ClientSession object
    THEN we should get back a koji.ConfigurationError
    """
    with pytest.raises(koji.ConfigurationError):
        KojiWrapperBase(profile='not_a_profile')

def test_config_throws_error_on_no_file():
    """
    GIVEN we do NOT have the expected file defined in the specified location
    WHEN we try to create a koji.ClientSession object
    THEN we should get back a koji.ConfigurationError
    """
    with pytest.raises(koji.ConfigurationError):
        KojiWrapperBase(profile='mykoji', user_config='/tmp/not_a_file.conf')

def test_parses_config(shared_datadir):
    """
    GIVEN we have a profile defined in a user-specified directory
    WHEN we create a koji.ClientSession object
    THEN we should successfully create a Client using this profile.
    """
    tw = KojiWrapperBase(profile='mykoji',
                         user_config=shared_datadir / 'mykoji.conf')
    assert tw.profile == 'mykoji'
    assert isinstance(tw.session, koji.ClientSession)

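# For reference, the profile files loaded by the tests above follow koji's
# standard ini-style configuration format: a section named after the
# profile, with connection settings as keys. A hypothetical mykoji.conf
# might look like the following (values are illustrative assumptions, not
# the actual fixture shipped in shared_datadir):
#
#     [mykoji]
#     server = https://koji.example.com/kojihub
#     authtype = kerberos
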
def release_set_as_nevr(release_name_or_id, koji_session, **kwargs):
    global __koji_session
    # NOTE: the koji_session positional argument is currently unused; the
    # session actually used comes from kwargs['session'] or the
    # module-level cache.
    if 'session' in kwargs:
        __koji_session = kwargs['session']
    if __koji_session is None:
        __koji_session = KojiWrapperBase(profile='brew')
    if __koji_session is None:
        raise Exception('Could not connect to koji')
    # NOTE: str.strip('et:') removes any of the characters 'e', 't', and
    # ':' from both ends of the string; presumably this is meant to drop a
    # leading 'et:' prefix, but it will also eat trailing 'e'/'t' letters.
    return get_build_for_release(release_name_or_id.strip('et:'),
                                 __koji_session)

def tag_to_latest_builds(tag, **kwargs):
    global __koji_session
    inherit = False
    if 'session' in kwargs:
        __koji_session = kwargs['session']
    if 'inherit' in kwargs:
        inherit = kwargs['inherit']

    if isinstance(tag, str):
        if __koji_session is None:
            __koji_session = KojiWrapperBase(profile='brew')
        if __koji_session is None:
            raise Exception('Could not connect to koji')
        koji_tag = KojiTag(tag, session=__koji_session)
    elif isinstance(tag, koji_wrapper.tag.KojiTag):
        koji_tag = tag
    else:
        raise ValueError('Expected KojiTag or str')

    if koji_tag.tagged_list is None:
        koji_tag.builds(inherit=inherit)

    return latest_tagged_as_nevr(koji_tag)

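# A hedged usage sketch for the helper above. The session= kwarg and the
# KojiTag reuse path are taken from the code itself, but the tag name is
# an illustrative assumption:
#
#     latest = tag_to_latest_builds('my-product-candidate', inherit=True)
#
#     # or reuse an already-populated KojiTag to avoid a second lookup:
#     kt = KojiTag('my-product-candidate',
#                  session=KojiWrapperBase(profile='brew'))
#     latest = tag_to_latest_builds(kt)
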
def tag_cleaner(args):
    print('attempting to do tag cleanup on {0}'.format(args.brew_tag))
    exclude_components = set(args.exclude.split(','))

    # Order matters: '-trunk-candidate' must be checked before '-candidate',
    # and '-trunk-override' before '-override'.
    base_tag = None
    for sub_tag in ['-trunk-candidate', '-trunk-override',
                    '-candidate', '-pending', '-override']:
        if sub_tag in args.brew_tag:
            base_tag = args.brew_tag[0:-len(sub_tag)]
            break

    if base_tag is None:
        raise Exception("brew tag must be either -candidate, -trunk-candidate,"
                        " -trunk-override, -pending, or -override otherwise "
                        "I don't know what to do")

    candidate_tag = args.brew_tag
    container_tag = base_tag + '-container-released'

    bw = KojiWrapperBase(profile='brew')
    if args.latest:
        released_builds = KojiTag(session=bw, tag=candidate_tag)
    else:
        released_builds = KojiTag(session=bw, tag=base_tag)
    staged_builds = KojiTag(session=bw, tag=candidate_tag)

    released_builds.builds(inherit=False, latest=args.latest)
    staged_builds.builds(inherit=False, latest=False)

    released_containers = None
    dc = []
    try:
        if not args.latest and container_tag:
            released_containers = KojiTag(session=bw, tag=container_tag)
            released_containers.builds(inherit=False)
            dc = released_containers.builds_by_attribute('name')
    except koji.GenericError:
        released_containers = None
        dc = []

    lc = released_builds.builds_by_attribute('name')
    rc = staged_builds.builds_by_attribute('name')

    common = sorted(list((set(lc) & set(rc)) - exclude_components))  # NOQA
    new_build_components = sorted(
        list((set(rc) - set(lc) - set(dc)) - exclude_components))  # NOQA
    tagged_only_components = sorted(
        list((set(lc) - set(rc)) - exclude_components))  # NOQA
    tagged_only_containers = sorted(
        list((set(dc) - set(rc)) - exclude_components))  # NOQA
    common_containers = sorted(
        list((set(dc) & set(rc)) - exclude_components))  # NOQA

    if args.debug:
        print({
            'tagged_only': tagged_only_components,
            'new_builds': new_build_components,
            'common': common,
            'common_containers': common_containers,
            'tagged_only_containers': tagged_only_containers
        })

    builds_to_untag = []
    for c in common:
        rel_build = latest_package(released_builds, c)
        can_builds = builds_package(staged_builds, c)
        if args.debug:
            print('released', rel_build)
            print('-candidate', can_builds)
        if (args.clean_all and rel_build in can_builds
                and rel_build == latest_package(staged_builds, c)):
            if args.debug:
                print("Latest build {0} is released".format(rel_build)
                      + ", untagging them all")
            for build in can_builds:
                builds_to_untag.append(build)
            continue
        for build in can_builds:
            if build == rel_build:
                if not args.latest:
                    if args.debug:
                        print("Latest build {0} is released".format(rel_build))
                    builds_to_untag.append(build)
                else:
                    if args.debug:
                        print("Preserving latest build {0}".format(rel_build))
                continue
            else:
                (ln, lv, lr, le, la) = splitFilename(rel_build)
                (rn, rv, rr, re, ra) = splitFilename(build)
                v = labelCompare((le, lv, lr), (re, rv, rr))
                if v > 0:
                    if args.debug:
                        print("released is newer, untagging {0}".format(build))
                    builds_to_untag.append(build)
                elif v < 0:
                    if args.debug:
                        print("Skipping {0} newer than released".format(build))
                else:
                    if args.debug:
                        print("released equal, untagging {0}".format(build))
                    if not args.latest:
                        builds_to_untag.append(build)

    for c in common_containers:
        if args.debug:
            print('Looking at container {0}'.format(c))
        rel_build = latest_package(released_containers, c)
        can_builds = builds_package(staged_builds, c)
        if args.debug:
            print('released', rel_build)
            print('-candidate', can_builds)
        if (args.clean_all and rel_build in can_builds
                and rel_build == latest_package(staged_builds, c)):
            if args.debug:
                print("Latest build {0} is released".format(rel_build)
                      + ", untagging them all")
            for build in can_builds:
                builds_to_untag.append(build)
            continue
        for build in can_builds:
            if build == rel_build:
                if args.debug:
                    print("Latest build {0} is released".format(rel_build))
                builds_to_untag.append(build)
                continue
            else:
                (ln, lv, lr, le, la) = splitFilename(rel_build)
                (rn, rv, rr, re, ra) = splitFilename(build)
                v = labelCompare((le, lv, lr), (re, rv, rr))
                if v > 0:
                    if args.debug:
                        print("released is newer, untagging {0}".format(build))
                    builds_to_untag.append(build)
                elif v < 0:
                    if args.debug:
                        print("Skipping {0} newer than released".format(build))
                else:
                    if args.debug:
                        print("released equal, untagging {0}".format(build))
                    builds_to_untag.append(build)

    # NOTE: jschlueter 2016-12-16
    # for RH-OSP we have several image builds that use
    # brew image-build-indirection to generate the released images
    # rhosp-director-images ==> director-utility,director-input,minimal-input
    # director-utility and *-input are used to generate overcloud-full
    # and ironic-python-agent images. Those images are then embedded
    # in rhosp-director-images.
    # This means we never release these auxiliary images and they
    # need to be cleaned out of the -candidate tag, but we would like to
    # leave tagged in -candidate any auxiliary images equal to or newer
    # than the last released overcloud-full package.
    #
    # Beginning with OSP 16.1, the multiarch name is included in the
    # image build name, e.g. overcloud-full-(x86_64|ppc64le) corresponds
    # respectively to director-input-(x86_64|ppc64le),
    # director-utility-(x86_64|ppc64le), minimal-input-(x86_64|ppc64le)
    #
    # add logic to handle auxiliary image builds
    # we want to keep corresponding -input and -utility images
    # matching build is:
    #   overcloud-full-(ARCH-)<VR>
    #   == director-input-(ARCH-)<VR>
    #   == director-utility-(ARCH-)<VR>
    #   == minimal-input-(ARCH-)<VR> (16.1+ only)
    latest_images = None

    # By default use the pre-16.1 naming convention
    oc_build_name = 'overcloud-full'
    aux_build_names = ['director-input', 'director-utility']

    # If the multiarch overcloud-full-x86_64 build is present, use
    # multiarch-named builds
    if 'overcloud-full-x86_64' in lc:
        oc_build_name = 'overcloud-full-x86_64'
        aux_build_names = [
            'director-input-x86_64', 'director-input-ppc64le',
            'director-utility-x86_64', 'director-utility-ppc64le',
            'minimal-input-x86_64', 'minimal-input-ppc64le'
        ]

    if [a for a in aux_build_names if a in rc] and oc_build_name in lc:
        latest_images = latest_package(released_builds, oc_build_name)

    if args.debug:
        if latest_images is not None:
            print('Attempting director-input/utility image cleanup')
        else:
            print('Skipping director-input/utility: no overcloud-full found')

    if latest_images is not None:
        VR = latest_images[len(oc_build_name):]
        keep = [abn + VR for abn in aux_build_names]
        print("Trying to clean up director-input and director-utility")
        print("Keeping {0} for Released {1}".format(keep, latest_images))
        for c in aux_build_names:
            for build in builds_package(staged_builds, c):
                if build in keep:
                    continue
                (ln, lv, lr, le, la) = splitFilename(c + VR)
                (rn, rv, rr, re, ra) = splitFilename(build)
                v = labelCompare((le, lv, lr), (re, rv, rr))
                if v > 0:
                    if args.debug:
                        print("released is newer, untagging")
                    builds_to_untag.append(build)

    if len(builds_to_untag) > 0:
        untag_it(candidate_tag, builds_to_untag,
                 dry_run=args.dry_run, verbose=args.debug)
    else:
        print("All Clean. nothing to do for {0}".format(candidate_tag))