def dump_yaml(foo, outfile, no_anchors=False):
    if no_anchors:
        # PyYAML path: NoAliasDumper strips anchors/aliases; hexint_presenter
        # presumably renders ints as hex (both defined elsewhere in the module)
        pyyaml.add_representer(int, hexint_presenter)
        pyyaml.dump(foo, outfile, Dumper=NoAliasDumper)
    else:
        # ruamel.yaml round-trip path: anchors/aliases and comments survive
        yaml = YAML(typ="rt")
        yaml.default_flow_style = False
        yaml.allow_unicode = True
        yaml.compact(seq_seq=False, seq_map=False)
        yaml.indent = 4
        yaml.block_seq_indent = 2
        yaml.dump(foo, outfile)
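# A minimal usage sketch for dump_yaml() above. It assumes the surrounding
# module already imports pyyaml (PyYAML), YAML (ruamel.yaml) and defines
# hexint_presenter/NoAliasDumper; the file names here are illustrative only.
data = {"name": "demo", "replicas": 3}

with open("example.yaml", "w") as outfile:
    dump_yaml(data, outfile)                   # ruamel round-trip path
with open("example_flat.yaml", "w") as outfile:
    dump_yaml(data, outfile, no_anchors=True)  # PyYAML path, aliases stripped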
def scan_kustomization_for_images(kust_dir):
    """Scan a kustomization folder and produce a list of images.

    Args:
        kust_dir (str): Path where the kustomize application resides.
    """
    yaml = YAML()
    yaml.block_seq_indent = 0

    # Load kustomization
    with open(path.join(kust_dir, "kustomization.yaml")) as f:
        try:
            kustomization = yaml.load(f)
        except Exception as e:
            log.error("Error loading kustomization in %s: %s", kust_dir, e)
            # bare raise preserves the original traceback
            raise

    # Get current image list from kustomization
    img_list = kustomization.get("images", [])

    # Get local resource files
    (_, _, filenames) = next(walk(kust_dir))
    filenames = [
        filename
        for filename in filenames
        if filename != "kustomization.yaml"
        and filename != "params.yaml"
        and filename.endswith(".yaml")
    ]

    for filename in filenames:
        with open(path.join(kust_dir, filename)) as f:
            resources = list(yaml.load_all(f))
        for r in resources:
            if not isinstance(r, Mapping):
                continue
            if r.get("kind", "").lower() in accepted_kinds:
                try:
                    containers = r["spec"]["template"]["spec"]["containers"]
                except KeyError:
                    continue
                for c in containers:
                    try:
                        img_str = c["image"]
                    except KeyError:
                        continue
                    new_img = image_from_string(img_str)
                    append_or_update(img_list, new_img)

    if img_list:
        kustomization["images"] = img_list
        with open(path.join(kust_dir, "kustomization.yaml"), "w") as f:
            yaml.dump(kustomization, f)
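# Hedged example for scan_kustomization_for_images(); the overlay path is
# illustrative only. The function rewrites kustomization.yaml in place, so
# point it at a scratch checkout when experimenting.
scan_kustomization_for_images("deploy/overlays/prod")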
def load_yaml(foo, no_anchors=False):
    if no_anchors:
        yaml = YAML(typ="safe")
    else:
        yaml = YAML(typ="rt")
    yaml.default_flow_style = False
    yaml.allow_unicode = True
    yaml.compact(seq_seq=False, seq_map=False)
    yaml.indent = 4
    yaml.block_seq_indent = 2
    try:
        with open(foo, "r") as file:
            return yaml.load(file)
    except ruamel.yaml.constructor.DuplicateKeyError as msg:
        logger = logging.getLogger(__name__)
        error = "\n".join(str(msg).split("\n")[2:-7])
        logger.error(error)
        raise SystemExit
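# Minimal sketch for load_yaml(), assuming a config.yaml exists on disk. A
# DuplicateKeyError in the file is logged and converted to SystemExit instead
# of one key silently winning.
config = load_yaml("config.yaml")                  # round-trip: comments kept
plain = load_yaml("config.yaml", no_anchors=True)  # safe load: plain dicts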
def create_compose_file(self):
    ds = {
        'version': '3',
        'networks': {
            'ssonet': {
                'ipam': {
                    'config': [{'subnet': '172.23.0.0/24'}]
                }
            }
        },
        'volumes': {
            'local_postgres_data': {},
            'local_postgres_data_backups': {},
            'local_zookeeper_data': {},
            'local_kafka_data': {}
        },
        'services': {
            'sso.local.redhat.com': {
                'container_name': 'sso.local.redhat.com',
                'image': 'quay.io/keycloak/keycloak:11.0.0',
                'environment': {
                    'DB_VENDOR': 'h2',
                    'PROXY_ADDRESS_FORWARDING': "true",
                    'KEYCLOAK_USER': '******',
                    'KEYCLOAK_PASSWORD': '******',
                },
                #'ports': ['8443:8443'],
                'expose': [8443],
                'networks': {
                    'ssonet': {'ipv4_address': '172.23.0.3'}
                }
            },
            'kcadmin': {
                'container_name': 'kcadmin',
                'image': 'python:3',
                'build': {
                    'context': f"{os.path.join(self.checkouts_root, 'keycloak_admin')}",
                },
                'volumes': [
                    f"./{os.path.join(self.checkouts_root, 'keycloak_admin')}:/app"
                ],
                'depends_on': ['sso.local.redhat.com'],
                #'command': '/bin/bash -c "cd /app && pip install -r requirements.txt && flask run --host=0.0.0.0 --port=80"'
                'command': '/bin/bash -c "cd /app && pip install -r requirements.txt && python -c \'from kchelper import init_realm; init_realm()\' && flask run --host=0.0.0.0 --port=80"',
                'networks': {
                    'ssonet': {'ipv4_address': '172.23.0.4'}
                }
            },
            'insights_proxy': {
                'container_name': 'insights_proxy',
                'image': 'redhatinsights/insights-proxy',
                #'ports': ['1337:1337'],
                'ports': ['8443:8443'],
                # NOTE: 'environment' appeared twice in the original literal
                # (a list form and a mapping form); Python keeps only the last
                # one, so the dead list form is preserved here as a comment.
                #'environment': ['PLATFORM=linux', 'CUSTOM_CONF=true'],
                'security_opt': ['label=disable'],
                'extra_hosts': ['prod.foo.redhat.com:127.0.0.1'],
                'environment': {'SPANDX_PORT': 8443},
                'volumes': [
                    f'./{os.path.join(self.checkouts_root, "www", "spandx.config.js")}:/config/spandx.config.js'
                ]
            },
            'webroot': {
                'container_name': 'webroot',
                'image': 'nginx',
                'volumes': [
                    f"./{os.path.join(self.checkouts_root, 'www')}:/usr/share/nginx/html",
                    f"./{os.path.join(self.checkouts_root, 'nginx.conf.d')}:/etc/nginx/conf.d"
                ],
                'command': ['nginx-debug', '-g', 'daemon off;']
            },
            'chrome': {
                'container_name': 'chrome',
                'image': 'nginx',
                'volumes': [
                    f"./{os.path.join(self.checkouts_root, 'insights-chrome')}:/usr/share/nginx/html"
                ],
                'command': ['nginx-debug', '-g', 'daemon off;']
            },
            'chrome_beta': {
                'container_name': 'chrome_beta',
                'image': 'nginx',
                'volumes': [
                    f"./{os.path.join(self.checkouts_root, 'insights-chrome')}:/usr/share/nginx/html"
                ],
                'command': ['nginx-debug', '-g', 'daemon off;']
            },
            'entitlements': {
                'container_name': 'entitlements',
                'image': 'python:3',
                'build': {
                    'context': f"{os.path.join(self.checkouts_root, 'entitlements')}",
                },
                'volumes': [
                    f"./{os.path.join(self.checkouts_root, 'entitlements')}:/app"
                ],
                'command': '/bin/bash -c "cd /app && pip install -r requirements.txt && python api.py"'
            },
            'rbac': {
                'container_name': 'rbac',
                'image': 'python:3',
                'build': {
                    'context': f"{os.path.join(self.checkouts_root, 'rbac')}",
                },
                'volumes': [f"./{os.path.join(self.checkouts_root, 'rbac')}:/app"],
                'command': '/bin/bash -c "cd /app && pip install -r requirements.txt && python api.py"'
            }
        }
    }

    ds['services'].update(self.get_tower_analytics_frontend_service())
    ds['services'].update(self.get_landing_services())
    #import epdb; epdb.st()

    # macs can't do static IPs
    if platform.system().lower() == 'darwin':
        ds.pop('networks', None)

        # Add squid for the mac users who can't directly connect to containers
        if not self.args.integration:
            squid_logs = os.path.join(self.checkouts_root, 'squid', 'logs')
            squid_conf = os.path.join(self.checkouts_root, 'squid', 'conf')
            if not os.path.exists(squid_logs):
                os.makedirs(squid_logs)
            ds['services']['squid'] = {
                'container_name': 'squid',
                'image': 'datadog/squid',
                'ports': ['3128:3128'],
                'volumes': [
                    f"./{squid_conf}:/etc/squid",
                    f"./{squid_logs}:/var/log/squid",
                ]
            }

        if True:
            ds['services']['kcadmin'].pop('networks', None)
            ds['services']['sso.local.redhat.com'].pop('networks', None)
            ds['services']['sso.local.redhat.com'].pop('depends_on', None)

            pf = copy.deepcopy(ds['services']['insights_proxy'])
            pf['container_name'] = 'prod.foo.redhat.com'
            ds['services'].pop('insights_proxy', None)
            ds['services']['prod.foo.redhat.com'] = pf

    # if static, chrome/landing/frontend should be compiled and put into wwwroot
    if self.args.static:
        if 'all' in self.args.static or 'chrome' in self.args.static:
            ds['services'].pop('chrome', None)
            ds['services'].pop('chrome_beta', None)
        if 'all' in self.args.static or 'landing' in self.args.static:
            ds['services'].pop('landing', None)
        if 'all' in self.args.static or 'tower-analytics-frontend' in self.args.static:
            ds['services'].pop('aafrontend', None)

        for fc in self.frontend_services:
            if 'all' in self.args.static or fc.www_app_name in self.args.static:
                for dp in fc.www_deploy_paths:
                    src = os.path.join(fc.srcpath, fc.distdir)
                    dst = f"/usr/share/nginx/html/{dp}"
                    volume = f"./{src}:{dst}"
                    ds['services']['webroot']['volumes'].append(volume)
        #import epdb; epdb.st()

    # build the backend?
    if self.args.backend_mock:
        aa_be_srcpath = os.path.join(self.checkouts_root, 'aa_backend_mock')
        bs = {
            'container_name': 'aabackend',
            'image': 'python:3',
            'build': {'context': f"./{aa_be_srcpath}"},
            'environment': {
                'API_SECURE': '1',
            },
            'volumes': [f"./{aa_be_srcpath}:/app"],
            'command': '/bin/bash -c "cd /app && pip install -r requirements.txt && python api.py"'
        }
        ds['services']['aabackend'] = bs
    elif self.args.backend_address:
        pass
    else:
        svcs = self.get_backend_compose_services()
        ds['services'].update(svcs)
    #import epdb; epdb.st()

    if self.args.integration:
        ds['services']['integration'] = self.get_integration_compose()

    yaml = YAML(typ='rt', pure=True)
    yaml.preserve_quotes = False
    yaml.indent = 4
    yaml.block_seq_indent = 4
    yaml.explicit_start = True
    yaml.width = 1000
    yaml.default_flow_style = False
    with open('genstack.yml', 'w') as f:
        yaml.dump(ds, f)

    # fix port quoting for sshd ...
    with open('genstack.yml', 'r') as f:
        fyaml = f.read()
    fyaml = fyaml.replace('2222:22', "'2222:22'")
    with open('genstack.yml', 'w') as f:
        f.write(fyaml)
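# Aside on the '2222:22' fix-up above: ruamel emits the string unquoted, and
# YAML 1.1 loaders (PyYAML, which docker-compose historically used) resolve
# colon-separated digit runs as base-60 integers, mangling the port mapping.
# A quick PyYAML demonstration of the pitfall (assumes PyYAML is installed,
# imported under the same alias the first snippet uses):
import yaml as pyyaml

assert pyyaml.safe_load("port: 2222:22") == {"port": 2222 * 60 + 22}
assert pyyaml.safe_load("port: '2222:22'") == {"port": "2222:22"}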
or conda-forge:

    conda install -c conda-forge ruamel.yaml
"""
from ruamel.yaml import YAML, __version__ as ryvers
from itertools import product
from distutils.version import StrictVersion

if StrictVersion(ryvers) < StrictVersion('0.15.0'):
    raise ImportError('ruamel.yaml must be at least version 0.15.0')

yaml = YAML()
yaml.default_flow_style = False
yaml.preserve_quotes = True
yaml.block_seq_indent = 2
yaml.indent = 4
yaml.width = 200

numpy_versions = ['1.12', '1.13']
pythons = {
    '2.7': numpy_versions,
    '3.5': numpy_versions,
    '3.6': numpy_versions,
}

travis_env = 'BUILD_PYTHON="{python}" BUILD_ARCH="{arch}" BUILD_NPY="{numpy}"'
travis_matrix = []
for arch, python in product(['x86', 'x64'], pythons.keys()):
    for numpy in pythons[python]:
        # The snippet was truncated here; the loop body presumably renders
        # the template into the matrix, roughly:
        travis_matrix.append(
            travis_env.format(python=python, arch=arch, numpy=numpy))
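# Illustrative render of the template above, using values from the matrices
# defined in this snippet (not taken from any real CI config):
print(travis_env.format(python='3.6', arch='x64', numpy='1.13'))
# -> BUILD_PYTHON="3.6" BUILD_ARCH="x64" BUILD_NPY="1.13"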
def update_os_release_file(**kwargs):
    """ Update in tree a release file with a given branch (code name)
    and version (release number) inside a new checkout of the
    openstack/releases repo in your workdir """
    LOGGER.info("Doing pre-flight checks")
    releases_repo_url = OPENSTACK_REPOS + '/releases.git'
    releases_folder = kwargs['workdir'] + '/releases'
    oa_folder = kwargs['workdir'] + '/openstack-ansible'
    click.confirm(("Are you sure your {} folder is properly "
                   "checked out at the right version?").format(oa_folder),
                  abort=True)

    # Args validation
    if kwargs['branch'] not in VALID_CODE_NAMES:
        raise SystemExit("Invalid branch name {}".format(kwargs['branch']))

    # Version validation
    if kwargs['version'] == "auto":
        fpth, version = get_oa_version(oa_folder)
        LOGGER.info("Version {} found in {}".format(version, fpth))
        if version == "master":
            raise SystemExit("You should not release from a moving target")
    else:
        version = kwargs['version']

    pre_release = version.endswith(PRE_RELEASE_PREFIXES)
    if not pre_release:
        # For extra safety, ensure it's semver.
        try:
            semver_res = semver.parse(version)
        except Exception as exc:
            raise SystemExit(exc)
        major_version = semver_res['major']
    else:
        major_version = int(version.split(".")[0])

    if major_version != VALID_CODE_NAMES[kwargs['branch']]:
        raise SystemExit("Not a valid number for this series")
    # Args validation done.

    yaml = YAML()
    oa = Repo(oa_folder)
    head_commit = oa.head.commit
    LOGGER.info("OpenStack-Ansible current SHA {}".format(head_commit))

    if os.path.lexists(releases_folder):
        click.confirm('Deleting ' + releases_folder + '. OK?', abort=True)
        shutil.rmtree(releases_folder)
    releases_repo = Repo.clone_from(url=releases_repo_url,
                                    to_path=releases_folder,
                                    branch="master")

    LOGGER.info("Reading ansible-role-requirements")
    arr, _, _ = load_yaml(kwargs['workdir'] + ARR_PATH)

    LOGGER.info("Reading releases deliverable for the given branch")
    deliverable_file_path = ('deliverables/' + kwargs['branch'] +
                             '/openstack-ansible.yaml')
    deliverable_file = releases_folder + "/" + deliverable_file_path
    deliverable, ind, bsi = load_yaml(deliverable_file)

    # if no releases yet (start of cycle), prepare releases, as a list
    if not deliverable.get('releases'):
        deliverable['releases'] = []

    # Ensure the new release is last
    deliverable['releases'].append({
        'version': "{}".format(version),
        'projects': []
    })

    # Now we can build in the order we want and still keep std dicts
    deliverable['releases'][-1]['projects'].append({
        'repo': 'openstack/openstack-ansible',
        'hash': "{}".format(head_commit)
    })

    # Select OpenStack Projects and rename them for releases.
    # Keep their SHA
    regex = re.compile('^' + OPENSTACK_REPOS + '/.*')
    for role in arr:
        if regex.match(role['src']):
            deliverable['releases'][-1]['projects'].append({
                'repo': urlparse(role['src']).path.lstrip('/'),
                'hash': role['version']
            })

    with open(deliverable_file, 'w') as df_h:
        yaml.explicit_start = True
        yaml.block_seq_indent = bsi
        yaml.indent = ind
        yaml.dump(deliverable, df_h)
    LOGGER.info("Patched!")

    if kwargs['commit']:
        message = """Release OpenStack-Ansible {}/{}
""".format(kwargs['branch'], version)
        releases_repo.index.add([deliverable_file_path])
        releases_repo.index.commit(message)
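# For orientation, the deliverable patched above ends up shaped roughly like
# this (values illustrative; the structure mirrors the appends in the code):
#
#   releases:
#     - version: <new version>
#       projects:
#         - repo: openstack/openstack-ansible
#           hash: <integrated repo head SHA>
#         - repo: openstack/openstack-ansible-<role>
#           hash: <role SHA from ansible-role-requirements>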
def bump_arr(**kwargs):
    """ Update Roles in Ansible Role Requirements for the branch,
    effectively freezing them.
    Fetches their release notes """

    # Discover branch currently tracking
    oa_folder = kwargs['workdir'] + '/openstack-ansible/'
    try:
        remote_branch = tracking_branch_name(oa_folder)
    except ValueError as verr:
        raise SystemExit(verr)

    # Load ARRrrrr (pirate mode)
    arr, ind, bsi = load_yaml(kwargs['workdir'] + ARR_PATH)

    # Cleanup before doing anything else
    click.confirm("Deleting all the role folders in workspace {}\n"
                  "Are you sure? ".format(kwargs['workdir']))

    # Clone only the OpenStack hosted roles
    regex = re.compile(OPENSTACK_REPOS + '/(.*)')
    for role in arr:
        LOGGER.info("Updating {} SHA".format(role['name']))
        role_path = kwargs['workdir'] + '/' + role['name']
        if regex.match(role['src']):
            if os.path.lexists(role_path):
                shutil.rmtree(role_path)
            # We need to clone instead of ls-remote-ing: this
            # way we can rsync the release notes
            role_repo = Repo.clone_from(
                url=role['src'],
                to_path=role_path,
                branch=remote_branch,
            )
            role['version'] = "{}".format(role_repo.head.commit)
            if kwargs['release_notes']:
                LOGGER.info("Copying role release notes...")
                release_notes_files = glob.glob(
                    "{}/releasenotes/notes/*.yaml".format(role_path))
                LOGGER.debug(release_notes_files)
                for filepath in release_notes_files:
                    subprocess.call([
                        "rsync", "-aq", filepath,
                        "{}/releasenotes/notes/".format(oa_folder)
                    ])
        elif kwargs['external_roles']:
            # For external roles, don't clone:
            # find the latest "matching" tag (patch release)
            # or the latest sha (master)
            role['version'] = find_latest_remote_ref(role['src'],
                                                     role['version'])

    with open(kwargs['workdir'] + ARR_PATH, 'w') as role_req_file:
        yaml = YAML()
        yaml.default_flow_style = False
        yaml.block_seq_indent = bsi
        yaml.indent = ind
        yaml.dump(arr, role_req_file)
    LOGGER.info("Ansible Role Requirements file patched!")

    msg = ("Here is a commit message you could use:\n"
           "Update all SHAs for {new_version}\n\n"
           "This patch updates all the roles to the latest available stable \n"
           "SHA's, copies the release notes from the updated roles into the \n"
           "integrated repo, updates all the OpenStack Service SHA's, and \n"
           "updates the appropriate python requirements pins. \n\n"
           "Depends-On: {release_changeid}").format(
               new_version=os.environ.get('new_version', '<NEW VERSION>'),
               release_changeid=os.environ.get('release_changeid', '<TODO>'),
           )
    click.echo(msg)
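# Hedged invocation sketch; the kwargs mirror the keys consumed above and the
# workdir path is illustrative only:
bump_arr(workdir='/tmp/workspace', release_notes=True, external_roles=False)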
def create_compose_file(self):
    '''
    networks:
        testnet:
            ipam:
                config:
                    - subnet: 172.23.0.0/24

    networks:
        testnet:
            ipv4_address: 172.23.0.3
    '''
    ds = {
        'version': '3',
        'networks': {
            'ssonet': {
                'ipam': {
                    'config': [{'subnet': '172.23.0.0/24'}]
                }
            }
        },
        'services': {
            'kcpostgres': {
                'container_name': 'kcpostgres',
                'image': 'postgres:12.2',
                'environment': {
                    'POSTGRES_DB': 'keycloak',
                    'POSTGRES_USER': '******',
                    'POSTGRES_PASSWORD': '******',
                },
                'networks': {
                    'ssonet': {'ipv4_address': '172.23.0.2'}
                }
            },
            'sso.local.redhat.com': {
                'container_name': 'sso.local.redhat.com',
                'image': 'quay.io/keycloak/keycloak:11.0.0',
                'environment': {
                    'DB_VENDOR': 'postgres',
                    'DB_ADDR': 'kcpostgres',
                    'DB_DATABASE': 'keycloak',
                    'DB_USER': '******',
                    'DB_PASSWORD': '******',
                    'PROXY_ADDRESS_FORWARDING': "true",
                    'KEYCLOAK_USER': '******',
                    'KEYCLOAK_PASSWORD': '******',
                },
                'ports': ['8443:8443'],
                'depends_on': ['kcpostgres'],
                'networks': {
                    'ssonet': {'ipv4_address': '172.23.0.3'}
                }
            },
            'kcadmin': {
                'container_name': 'kcadmin',
                'image': 'python:3',
                'build': {
                    'context': f"{os.path.join(self.checkouts_root, 'keycloak_admin')}",
                },
                'volumes': [
                    f"./{os.path.join(self.checkouts_root, 'keycloak_admin')}:/app"
                ],
                'depends_on': ['sso.local.redhat.com'],
                #'command': '/bin/bash -c "cd /app && pip install -r requirements.txt && flask run --host=0.0.0.0 --port=80"'
                'command': '/bin/bash -c "cd /app && pip install -r requirements.txt && python -c \'from kchelper import init_realm; init_realm()\' && flask run --host=0.0.0.0 --port=80"',
                'networks': {
                    'ssonet': {'ipv4_address': '172.23.0.4'}
                }
            },
            'insights_proxy': {
                'container_name': 'insights_proxy',
                'image': 'redhatinsights/insights-proxy',
                'ports': ['1337:1337'],
                'environment': ['PLATFORM=linux', 'CUSTOM_CONF=true'],
                'security_opt': ['label=disable'],
                'extra_hosts': ['prod.foo.redhat.com:127.0.0.1'],
                'volumes': [
                    f'./{os.path.join(self.checkouts_root, "www", "spandx.config.js")}:/config/spandx.config.js'
                ]
            },
            'webroot': {
                'container_name': 'webroot',
                'image': 'nginx',
                'volumes': [
                    f"./{os.path.join(self.checkouts_root, 'www')}:/usr/share/nginx/html"
                ],
                'command': ['nginx-debug', '-g', 'daemon off;']
            },
            'chrome': {
                'container_name': 'chrome',
                'image': 'nginx',
                'volumes': [
                    f"./{os.path.join(self.checkouts_root, 'insights-chrome')}:/usr/share/nginx/html"
                ],
                'command': ['nginx-debug', '-g', 'daemon off;']
            },
            'chrome_beta': {
                'container_name': 'chrome_beta',
                'image': 'nginx',
                'volumes': [
                    f"./{os.path.join(self.checkouts_root, 'insights-chrome')}:/usr/share/nginx/html"
                ],
                'command': ['nginx-debug', '-g', 'daemon off;']
            },
            'landing': {
                'container_name': 'landing',
                'image': 'nginx',
                'volumes': [
                    f"./{os.path.join(self.checkouts_root, 'landing-page-frontend', 'dist')}:/usr/share/nginx/html/apps/landing"
                ],
                'command': ['nginx-debug', '-g', 'daemon off;']
            },
            'landing_beta': {
                'container_name': 'landing_beta',
                'image': 'nginx',
                'volumes': [
                    f"./{os.path.join(self.checkouts_root, 'landing-page-frontend', 'dist')}:/usr/share/nginx/html/beta/apps/landing"
                ],
                'command': ['nginx-debug', '-g', 'daemon off;']
            },
            'entitlements': {
                'container_name': 'entitlements',
                'image': 'python:3',
                'build': {
                    'context': f"{os.path.join(self.checkouts_root, 'entitlements')}",
                },
                'volumes': [
                    f"./{os.path.join(self.checkouts_root, 'entitlements')}:/app"
                ],
                'command': '/bin/bash -c "cd /app && pip install -r requirements.txt && python api.py"'
            },
            'rbac': {
                'container_name': 'rbac',
                'image': 'python:3',
                'build': {
                    'context': f"{os.path.join(self.checkouts_root, 'rbac')}",
                },
                'volumes': [f"./{os.path.join(self.checkouts_root, 'rbac')}:/app"],
                'command': '/bin/bash -c "cd /app && pip install -r requirements.txt && python api.py"'
            }
        }
    }

    # add frontend if path or hash given
    if self.args.frontend_path or self.args.frontend_hash:
        if self.args.frontend_hash:
            raise Exception('frontend hash not yet implemented!')
        elif self.args.frontend_path:
            fs = {
                'container_name': 'aafrontend',
                'image': 'node:10.22.0',
                'user': '******',
                'ports': ['8002:8002'],
                'environment': {
                    'DEBUG': '*:*',
                },
                'command': '/bin/bash -c "cd /app && npm install && npm run start:container"',
                'volumes': [
                    f"{os.path.abspath(os.path.expanduser(self.args.frontend_path))}:/app"
                ]
            }
            ds['services']['frontend'] = fs
    else:
        # build the frontend?
        aa_fe_srcpath = os.path.join(self.checkouts_root, 'tower-analytics-frontend')
        fs = {
            'container_name': 'aafrontend',
            'image': 'node:10.22.0',
            'user': '******',
            'ports': ['8002:8002'],
            'environment': {
                'DEBUG': '*:*',
            },
            'command': '/bin/bash -c "cd /app && npm install && npm run start:container"',
            'volumes': [f"./{aa_fe_srcpath}:/app"]
        }
        ds['services']['aafrontend'] = fs

    # build the backend?
    if self.args.backend_mock:
        aa_be_srcpath = os.path.join(self.checkouts_root, 'aa_backend_mock')
        bs = {
            'container_name': 'aabackend',
            'image': 'python:3',
            'build': {'context': f"./{aa_be_srcpath}"},
            'environment': {
                'API_SECURE': '1',
            },
            'volumes': [f"./{aa_be_srcpath}:/app"],
            'command': '/bin/bash -c "cd /app && pip install -r requirements.txt && python api.py"'
        }
        ds['services']['aabackend'] = bs
    else:
        raise Exception('real backend not yet implemented!')

    '''
    kctuple = '%s:%s' % ('sso.local.redhat.com', self.keycloak_ip)
    for k, v in ds['services'].items():
        #'extra_hosts': ['prod.foo.redhat.com:127.0.0.1'],
        if 'extra_hosts' not in v:
            ds['services'][k]['extra_hosts'] = []
        if kctuple not in ds['services'][k]['extra_hosts']:
            ds['services'][k]['extra_hosts'].append(kctuple)
    '''

    yaml = YAML(typ='rt', pure=True)
    yaml.preserve_quotes = True
    yaml.indent = 4
    yaml.block_seq_indent = 4
    yaml.explicit_start = True
    yaml.width = 1000
    yaml.default_flow_style = False
    #pprint(ds)
    with open('genstack.yml', 'w') as f:
        yaml.dump(ds, f)
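# Once genstack.yml is written, the stack can be brought up with the standard
# compose CLI, e.g.:
#   docker-compose -f genstack.yml up --build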