def run(
    dry_run,
    thread_pool_size=10,
    internal=None,
    use_jump_host=True,
    vault_output_path="",
    defer=None,
):
    namespaces = canonicalize_namespaces(queries.get_serviceaccount_tokens())
    ri, oc_map = ob.fetch_current_state(
        namespaces=namespaces,
        thread_pool_size=thread_pool_size,
        integration=QONTRACT_INTEGRATION,
        integration_version=QONTRACT_INTEGRATION_VERSION,
        override_managed_types=["Secret"],
        internal=internal,
        use_jump_host=use_jump_host,
    )
    defer(oc_map.cleanup)
    fetch_desired_state(namespaces, ri, oc_map)
    ob.realize_data(dry_run, oc_map, ri, thread_pool_size)

    if not dry_run and vault_output_path:
        write_outputs_to_vault(vault_output_path, ri)

    if ri.has_error_registered():
        sys.exit(1)
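# The functions in this collection that take a `defer=None` argument rely on a
# decorator that injects a callable for registering cleanup actions (e.g.
# `defer(oc_map.cleanup)`) and runs them when the function returns. The actual
# decorator is not part of this collection; the following is only a minimal
# sketch, assuming it simply collects callbacks and runs them in LIFO order.
from functools import wraps


def defer(func):
    """Inject a `defer` kwarg used to register cleanup callbacks (sketch)."""
    @wraps(func)
    def wrapper(*args, **kwargs):
        callbacks = []
        try:
            return func(*args, defer=callbacks.append, **kwargs)
        finally:
            # run deferred cleanups in reverse registration order,
            # similar to a context-manager exit stack
            for callback in reversed(callbacks):
                callback()
    return wrapper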
def run(dry_run, io_dir='throughput/', print_only=False,
        config_name=None, job_name=None, instance_name=None, defer=None):
    if not print_only and config_name is not None:
        raise Exception("--config-name must be used with --print-only mode")

    jjb, additional_repo_urls = \
        init_jjb(instance_name, config_name, print_only)
    defer(lambda: jjb.cleanup())

    if print_only:
        jjb.print_jobs(job_name=job_name)
        if config_name is not None:
            jjb.generate(io_dir, 'printout')
        sys.exit(0)

    accounts = queries.get_aws_accounts()
    state = State(integration=QONTRACT_INTEGRATION,
                  accounts=accounts,
                  settings=jjb.settings)

    if dry_run:
        validate_repos_and_admins(jjb, additional_repo_urls)
        jjb.generate(io_dir, 'desired')
        jjb.overwrite_configs(state)
        jjb.generate(io_dir, 'current')
        jjb.print_diffs(io_dir, instance_name)
    else:
        jjb.update()
        configs = jjb.get_configs()
        for name, desired_config in configs.items():
            state.add(name, value=desired_config, force=True)
def run(thread_pool_size=10, defer=None):
    oc_map = tb.get_oc_map(QONTRACT_E2E_TEST)
    defer(lambda: oc_map.cleanup())
    pattern = tb.get_namespaces_pattern()
    threaded.run(test_cluster, oc_map.clusters(), thread_pool_size,
                 oc_map=oc_map, pattern=pattern)
def gpg_key_valid(public_gpg_key, defer=None):
    stripped_public_gpg_key = public_gpg_key.rstrip()
    if ' ' in stripped_public_gpg_key:
        msg = 'key has spaces in it'
        return False, msg

    equal_sign_count = public_gpg_key.count('=')
    if not stripped_public_gpg_key.endswith('=' * equal_sign_count):
        msg = 'equal signs should only appear at the end of the key'
        return False, msg

    try:
        public_gpg_key_dec = base64.b64decode(public_gpg_key)
    except Exception:
        msg = 'could not perform base64 decode of key'
        return False, msg

    gnupg_home_dir = tempfile.mkdtemp()
    defer(lambda: shutil.rmtree(gnupg_home_dir))
    proc = Popen(['gpg', '--homedir', gnupg_home_dir],
                 stdin=PIPE, stdout=PIPE, stderr=STDOUT)
    out = proc.communicate(public_gpg_key_dec)
    if proc.returncode != 0:
        return False, out

    keys = out[0].decode('utf-8').split('\n')
    key_types = [k.split(' ')[0] for k in keys if k]
    ok = all(elem in key_types for elem in ['pub', 'sub'])
    if not ok:
        msg = 'key must contain both pub and sub entries'
        return False, msg

    return True, ''
def gpg_encrypt(content, recepient, public_gpg_key, defer=None):
    public_gpg_key_dec = base64.b64decode(public_gpg_key)
    gnupg_home_dir = tempfile.mkdtemp()
    defer(lambda: shutil.rmtree(gnupg_home_dir))

    # import public gpg key
    proc = Popen(['gpg', '--homedir', gnupg_home_dir, '--import'],
                 stdin=PIPE, stdout=PIPE, stderr=STDOUT)
    out = proc.communicate(public_gpg_key_dec)
    if proc.returncode != 0:
        return None

    # encrypt content
    proc = Popen(['gpg', '--homedir', gnupg_home_dir,
                  '--trust-model', 'always',
                  '--encrypt', '--armor', '-r', recepient],
                 stdin=PIPE, stdout=PIPE, stderr=STDOUT)
    out = proc.communicate(content.encode())
    if proc.returncode != 0:
        return None

    return out[0].decode('utf-8')
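# Hypothetical usage of the two GPG helpers above, assuming both are wrapped by
# the defer decorator so the `defer` argument is injected automatically. The
# helper name, the recipient email and the key/secret values are placeholders.
def encrypt_for_user(public_gpg_key, user_email, secret_text):
    ok, msg = gpg_key_valid(public_gpg_key)
    if not ok:
        raise ValueError(f'invalid GPG key: {msg}')
    encrypted = gpg_encrypt(secret_text, user_email, public_gpg_key)
    if encrypted is None:
        raise RuntimeError('GPG encryption failed')
    return encrypted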
def run(dry_run, thread_pool_size=10, internal=None,
        use_jump_host=True, defer=None):
    namespaces = [
        namespace_info
        for namespace_info in queries.get_namespaces()
        if namespace_info.get("managedRoles")
        and is_in_shard(f"{namespace_info['cluster']['name']}/" +
                        f"{namespace_info['name']}")
    ]
    ri, oc_map = ob.fetch_current_state(
        namespaces=namespaces,
        thread_pool_size=thread_pool_size,
        integration=QONTRACT_INTEGRATION,
        integration_version=QONTRACT_INTEGRATION_VERSION,
        override_managed_types=["RoleBinding.authorization.openshift.io"],
        internal=internal,
        use_jump_host=use_jump_host,
    )
    defer(oc_map.cleanup)
    fetch_desired_state(ri, oc_map)
    ob.realize_data(dry_run, oc_map, ri, thread_pool_size)

    if ri.has_error_registered():
        sys.exit(1)
def run(dry_run: bool, thread_pool_size=10,
        internal: Optional[bool] = None, use_jump_host=True, defer=None):
    all_namespaces = queries.get_namespaces(minimal=True)
    shard_namespaces, duplicates = get_shard_namespaces(all_namespaces)

    desired_state = get_desired_state(shard_namespaces)

    settings = queries.get_app_interface_settings()
    oc_map = OC_Map(namespaces=shard_namespaces,
                    integration=QONTRACT_INTEGRATION,
                    settings=settings, internal=internal,
                    use_jump_host=use_jump_host,
                    thread_pool_size=thread_pool_size,
                    init_projects=True)
    defer(oc_map.cleanup)

    results = threaded.run(manage_namespaces, desired_state,
                           thread_pool_size, return_exceptions=True,
                           dry_run=dry_run, oc_map=oc_map)

    err = check_results(desired_state, results)
    if err or duplicates:
        sys.exit(ExitCodes.ERROR)
def get_feature_toggle_state(integration_name, defer=None):
    api_url = os.environ.get("UNLEASH_API_URL")
    client_access_token = os.environ.get("UNLEASH_CLIENT_ACCESS_TOKEN")
    if not (api_url and client_access_token):
        return True

    # create temporary cache dir
    cache_dir = tempfile.mkdtemp()
    defer(lambda: shutil.rmtree(cache_dir))

    # hide INFO logging from UnleashClient
    with log_lock:
        logger = logging.getLogger()
        default_logging = logger.level
        logger.setLevel(logging.ERROR)

        # create Unleash client
        headers = {"Authorization": f"Bearer {client_access_token}"}
        client = UnleashClient(
            url=api_url,
            app_name="qontract-reconcile",
            custom_headers=headers,
            cache_directory=cache_dir,
        )
        client.initialize_client()

        # get feature toggle state
        state = client.is_enabled(
            integration_name, fallback_function=get_feature_toggle_default
        )
        client.destroy()
        logger.setLevel(default_logging)
        return state
def get_unleash_strategies(api_url, token, strategy_names, defer=None):
    # create strategy mapping
    unleash_strategies = {name: strategies.Strategy
                          for name in strategy_names}

    # create temporary cache dir
    cache_dir = tempfile.mkdtemp()
    defer(lambda: shutil.rmtree(cache_dir))

    # hide INFO logging from UnleashClient
    with log_lock:
        logger = logging.getLogger()
        default_logging = logger.level
        logger.setLevel(logging.ERROR)

        # create Unleash client
        headers = {"Authorization": f"Bearer {token}"}
        client = UnleashClient(
            url=api_url,
            app_name="qontract-reconcile",
            custom_headers=headers,
            cache_directory=cache_dir,
            custom_strategies=unleash_strategies,
        )
        client.initialize_client()

        strats = {name: toggle.strategies
                  for name, toggle in client.features.items()}
        client.destroy()
        logger.setLevel(default_logging)
        return strats
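# Hypothetical guard built on get_feature_toggle_state above: skip an integration
# run when its toggle is disabled in Unleash. The helper name and `run_func` are
# placeholders, not part of the original code; it assumes the function above is
# wrapped by the defer decorator.
def run_if_enabled(integration_name, run_func, *args, **kwargs):
    if not get_feature_toggle_state(integration_name):
        logging.info(f'{integration_name} is disabled via Unleash, skipping')
        return None
    return run_func(*args, **kwargs)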
def run(
    dry_run,
    thread_pool_size=10,
    internal=None,
    use_jump_host=True,
    take_over=True,
    defer=None,
):
    namespaces = [
        namespace_info
        for namespace_info in queries.get_namespaces()
        if namespace_info.get("limitRanges")
    ]
    namespaces = construct_resources(namespaces)

    if not namespaces:
        logging.debug("No LimitRanges definition found in app-interface!")
        sys.exit(0)

    ri, oc_map = ob.fetch_current_state(
        namespaces=namespaces,
        thread_pool_size=thread_pool_size,
        integration=QONTRACT_INTEGRATION,
        integration_version=QONTRACT_INTEGRATION_VERSION,
        override_managed_types=["LimitRange"],
        internal=internal,
        use_jump_host=use_jump_host,
    )
    defer(oc_map.cleanup)

    add_desired_state(namespaces, ri, oc_map)
    ob.realize_data(dry_run, oc_map, ri, thread_pool_size, take_over=take_over)

    if ri.has_error_registered():
        sys.exit(1)
def run(
    dry_run: bool,
    thread_pool_size: int = 10,
    internal: Optional[bool] = None,
    use_jump_host=True,
    defer=None,
):
    gabi_instances = queries.get_gabi_instances()
    if not gabi_instances:
        logging.debug("No gabi instances found in app-interface")
        sys.exit(ExitCodes.SUCCESS)

    gabi_namespaces = [i["namespace"]
                       for g in gabi_instances
                       for i in g["instances"]]

    ri, oc_map = ob.fetch_current_state(
        namespaces=gabi_namespaces,
        thread_pool_size=thread_pool_size,
        integration=QONTRACT_INTEGRATION,
        integration_version=QONTRACT_INTEGRATION_VERSION,
        override_managed_types=["ConfigMap"],
        internal=internal,
        use_jump_host=use_jump_host,
    )
    defer(oc_map.cleanup)
    fetch_desired_state(gabi_instances, ri)
    ob.realize_data(dry_run, oc_map, ri, thread_pool_size)

    if ri.has_error_registered():
        sys.exit(1)
def run(dry_run, thread_pool_size=10, internal=None,
        use_jump_host=True, defer=None):
    performance_parameters = queries.get_performance_parameters()
    observability_namespaces = [
        pp['namespace']['cluster']['observabilityNamespace']
        for pp in performance_parameters
        if pp['namespace']['cluster']['observabilityNamespace'] is not None
    ]

    if not observability_namespaces:
        logging.error('No observability namespaces found')
        sys.exit(1)

    ri, oc_map = ob.fetch_current_state(
        namespaces=observability_namespaces,
        thread_pool_size=thread_pool_size,
        integration=QONTRACT_INTEGRATION,
        integration_version=QONTRACT_INTEGRATION_VERSION,
        override_managed_types=['PrometheusRule'],
        internal=internal,
        use_jump_host=use_jump_host)
    defer(lambda: oc_map.cleanup())

    fetch_desired_state(performance_parameters, ri)
    ob.realize_data(dry_run, oc_map, ri)

    if ri.has_error_registered():
        sys.exit(1)
def run(dry_run: bool, thread_pool_size: int, internal: bool,
        use_jump_host: bool, defer=None) -> None:
    # prepare
    desired_endpoints = get_endpoints()
    namespaces = {
        p.blackboxExporter.namespace.get("name"): p.blackboxExporter.namespace
        for p in desired_endpoints
        if p.blackboxExporter
    }

    if namespaces:
        ri, oc_map = ob.fetch_current_state(
            namespaces.values(),
            thread_pool_size=thread_pool_size,
            internal=internal,
            use_jump_host=use_jump_host,
            integration=QONTRACT_INTEGRATION,
            integration_version=QONTRACT_INTEGRATION_VERSION,
            override_managed_types=["Probe"])
        defer(oc_map.cleanup)

        # reconcile
        for provider, endpoints in desired_endpoints.items():
            fill_desired_state(provider, endpoints, ri)
        ob.realize_data(dry_run, oc_map, ri, thread_pool_size,
                        recycle_pods=False)

        if ri.has_error_registered():
            sys.exit(1)
def run(dry_run, thread_pool_size=10, internal=None,
        use_jump_host=True, defer=None):
    oc_map, current_state, ocm_clusters = \
        fetch_current_state(thread_pool_size, internal, use_jump_host)
    defer(lambda: oc_map.cleanup())
    desired_state = fetch_desired_state(oc_map)

    # we only manage dedicated-admins via OCM
    current_state = [s for s in current_state
                     if not (s['cluster'] in ocm_clusters
                             and s['group'] == 'dedicated-admins')]
    desired_state = [s for s in desired_state
                     if not (s['cluster'] in ocm_clusters
                             and s['group'] == 'dedicated-admins')]

    diffs = calculate_diff(current_state, desired_state)
    validate_diffs(diffs)
    diffs.sort(key=sort_diffs)

    for diff in diffs:
        logging.info(list(diff.values()))

        if not dry_run:
            act(diff, oc_map)
def run(dry_run, thread_pool_size=10, internal=None,
        use_jump_host=True, defer=None):
    settings = queries.get_app_interface_settings()
    accounts = queries.get_state_aws_accounts()
    clusters = [c for c in queries.get_clusters(minimal=True)
                if c.get("ocm")]
    oc_map = OC_Map(
        clusters=clusters,
        integration=QONTRACT_INTEGRATION,
        settings=settings,
        internal=internal,
        use_jump_host=use_jump_host,
        thread_pool_size=thread_pool_size,
    )
    defer(oc_map.cleanup)
    state = State(integration=QONTRACT_INTEGRATION,
                  accounts=accounts,
                  settings=settings)
    if not dry_run:
        slack = slackapi_from_queries(QONTRACT_INTEGRATION)

    now = datetime.utcnow()
    for cluster in oc_map.clusters(include_errors=True):
        oc = oc_map.get(cluster)
        if not oc:
            logging.log(level=oc.log_level, msg=oc.message)
            continue
        upgrade_config = oc.get(
            namespace="openshift-managed-upgrade-operator",
            kind="UpgradeConfig",
            allow_not_found=True,
        )["items"]
        if not upgrade_config:
            logging.debug(f"[{cluster}] UpgradeConfig not found.")
            continue
        [upgrade_config] = upgrade_config

        upgrade_spec = upgrade_config["spec"]
        upgrade_at = upgrade_spec["upgradeAt"]
        version = upgrade_spec["desired"]["version"]
        upgrade_at_obj = datetime.strptime(upgrade_at, "%Y-%m-%dT%H:%M:%SZ")
        state_key = f"{cluster}-{upgrade_at}"
        # if this is the first iteration in which 'now' had passed
        # the upgrade at date time, we send a notification
        if upgrade_at_obj < now:
            if state.exists(state_key):
                # already notified
                continue
            logging.info(["cluster_upgrade", cluster])
            if not dry_run:
                state.add(state_key)
                usergroup = f"{cluster}-cluster"
                usergroup_id = slack.get_usergroup_id(usergroup)
                slack.chat_post_message(
                    f"Heads up <!subteam^{usergroup_id}>! "
                    f"cluster `{cluster}` is currently "
                    f"being upgraded to version `{version}`")
def run_test(test_yaml_spec, rule_files, defer=None):
    '''Run promtool test rules

    params:

    test_yaml_spec: test yaml spec dict

    rule_files: dict indexed by rule path containing rule files yaml dicts
    '''
    temp_rule_files = {}
    try:
        for rule_file, yaml_spec in rule_files.items():
            with tempfile.NamedTemporaryFile(delete=False) as fp:
                fp.write(yaml.dump(yaml_spec).encode())
                temp_rule_files[rule_file] = fp.name
    except Exception as e:
        return CommandExecutionResult(
            False, f'Error building temp rule files: {e}')

    # build a test yaml spec that references the temp rule files created above
    new_rule_files = []
    for rule_file in test_yaml_spec['rule_files']:
        if rule_file not in temp_rule_files:
            return CommandExecutionResult(
                False, f'{rule_file} not in rule_files dict')
        new_rule_files.append(temp_rule_files[rule_file])

    temp_test_yaml_spec = copy.deepcopy(test_yaml_spec)
    temp_test_yaml_spec['rule_files'] = new_rule_files

    defer(lambda: _cleanup(temp_rule_files.values()))

    return _run_yaml_spec_cmd(cmd=['promtool', 'test', 'rules'],
                              yaml_spec=temp_test_yaml_spec)
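# Hypothetical inputs for run_test above, assuming it is wrapped by the defer
# decorator: the test spec references rule files by path, and `rule_files` maps
# those paths to already-parsed rule YAML dicts. The file name and the empty
# groups/tests are placeholders.
test_yaml_spec = {
    'rule_files': ['rules.yaml'],
    'tests': [],  # promtool unit test cases would go here
}
rule_files = {
    'rules.yaml': {'groups': []},  # parsed contents of the rule file
}
result = run_test(test_yaml_spec, rule_files)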
def scan_history(repo_url, existing_keys, defer=None):
    # pylint: disable=consider-using-with
    logging.info('scanning {}'.format(repo_url))
    if requests.get(repo_url).status_code == 404:
        logging.info('not found {}'.format(repo_url))
        return []

    wd = tempfile.mkdtemp()
    defer(lambda: cleanup(wd))

    git.clone(repo_url, wd)
    DEVNULL = open(os.devnull, 'w')
    proc = Popen(['git', 'secrets', '--register-aws'],
                 cwd=wd, stdout=DEVNULL)
    proc.communicate()
    proc = Popen(['git', 'secrets', '--scan-history'],
                 cwd=wd, stdout=PIPE, stderr=PIPE)
    _, err = proc.communicate()
    if proc.returncode == 0:
        return []

    logging.info('found suspects in {}'.format(repo_url))
    suspected_files = get_suspected_files(err.decode('utf-8'))
    leaked_keys = get_leaked_keys(wd, suspected_files, existing_keys)
    if leaked_keys:
        logging.info('found suspected leaked keys: {}'.format(leaked_keys))

    return leaked_keys
def run(dry_run, thread_pool_size=10, internal=None,
        use_jump_host=True, defer=None):
    gqlapi = gql.get_api()
    namespaces = []
    for namespace_info in gqlapi.query(NAMESPACES_QUERY)['namespaces']:
        if not namespace_info.get('networkPoliciesAllow'):
            continue
        shard_key = (f"{namespace_info['cluster']['name']}/"
                     f"{namespace_info['name']}")
        if not is_in_shard(shard_key):
            continue
        namespaces.append(namespace_info)

    ri, oc_map = ob.fetch_current_state(
        namespaces=namespaces,
        thread_pool_size=thread_pool_size,
        integration=QONTRACT_INTEGRATION,
        integration_version=QONTRACT_INTEGRATION_VERSION,
        override_managed_types=['NetworkPolicy'],
        internal=internal,
        use_jump_host=use_jump_host)
    defer(lambda: oc_map.cleanup())
    fetch_desired_state(namespaces, ri, oc_map)
    ob.realize_data(dry_run, oc_map, ri)

    if ri.has_error_registered():
        sys.exit(1)
def run(dry_run, thread_pool_size=10, internal=None,
        use_jump_host=True, take_over=True, defer=None):
    namespaces = [
        namespace_info
        for namespace_info in queries.get_namespaces()
        if namespace_info.get('quota')
    ]

    if not namespaces:
        logging.debug("No ResourceQuota definition found in app-interface!")
        sys.exit(0)

    ri, oc_map = ob.fetch_current_state(
        namespaces=namespaces,
        thread_pool_size=thread_pool_size,
        integration=QONTRACT_INTEGRATION,
        integration_version=QONTRACT_INTEGRATION_VERSION,
        override_managed_types=['ResourceQuota'],
        internal=internal,
        use_jump_host=use_jump_host)
    defer(lambda: oc_map.cleanup())
    fetch_desired_state(namespaces, ri, oc_map)
    ob.realize_data(dry_run, oc_map, ri)

    if ri.has_error_registered():
        sys.exit(1)
def generate_object(jsonnet_string, defer=None):
    try:
        fd, path = tempfile.mkstemp()
        defer(lambda: cleanup(path))
        os.write(fd, jsonnet_string.encode())
        os.close(fd)
    except Exception as e:
        raise JsonnetError(f'Error building jsonnet file: {e}')

    try:
        jsonnet_bundler_dir = os.environ['JSONNET_VENDOR_DIR']
    except KeyError:
        raise JsonnetError('JSONNET_VENDOR_DIR not set')

    cmd = ['jsonnet', '-J', jsonnet_bundler_dir, path]
    status = run(cmd, stdout=PIPE, stderr=PIPE)

    if status.returncode != 0:
        message = 'Error building json doc'
        if status.stderr:
            message += ": " + status.stderr.decode()
        raise JsonnetError(message)

    return json.loads(status.stdout)
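# Hypothetical call to generate_object above, assuming it is wrapped by the defer
# decorator, JSONNET_VENDOR_DIR is set, and the `jsonnet` binary is on PATH.
doc = generate_object('{ answer: 40 + 2 }')
# doc == {'answer': 42}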
def run(dry_run, thread_pool_size=10, internal=None,
        use_jump_host=True, providers=[], cluster_name=None,
        namespace_name=None, init_api_resources=False, defer=None):
    gqlapi = gql.get_api()
    namespaces = [
        namespace_info
        for namespace_info in gqlapi.query(NAMESPACES_QUERY)['namespaces']
        if is_in_shard(f"{namespace_info['cluster']['name']}/" +
                       f"{namespace_info['name']}")
    ]
    namespaces = \
        filter_namespaces_by_cluster_and_namespace(
            namespaces,
            cluster_name,
            namespace_name
        )
    namespaces = canonicalize_namespaces(namespaces, providers)
    oc_map, ri = \
        fetch_data(namespaces, thread_pool_size, internal, use_jump_host,
                   init_api_resources=init_api_resources)
    defer(lambda: oc_map.cleanup())

    ob.realize_data(dry_run, oc_map, ri)

    if ri.has_error_registered():
        sys.exit(1)

    return ri
def run(thread_pool_size=10, defer=None):
    oc_map = tb.get_oc_map(QONTRACT_E2E_TEST)
    defer(oc_map.cleanup)
    ns_under_test = tb.get_test_namespace_name()
    threaded.run(test_cluster, oc_map.clusters(), thread_pool_size,
                 oc_map=oc_map, ns_under_test=ns_under_test)
def run(dry_run, thread_pool_size=10, internal=None,
        use_jump_host=True, defer=None):
    settings = queries.get_app_interface_settings()
    accounts = queries.get_aws_accounts()
    clusters = [c for c in queries.get_clusters(minimal=True)
                if c.get('ocm')]
    oc_map = OC_Map(clusters=clusters,
                    integration=QONTRACT_INTEGRATION,
                    settings=settings,
                    internal=internal,
                    use_jump_host=use_jump_host,
                    thread_pool_size=thread_pool_size)
    defer(oc_map.cleanup)
    state = State(integration=QONTRACT_INTEGRATION,
                  accounts=accounts,
                  settings=settings)
    if not dry_run:
        slack = init_slack_workspace(QONTRACT_INTEGRATION)

    now = datetime.utcnow()
    for cluster in oc_map.clusters(include_errors=True):
        oc = oc_map.get(cluster)
        if not oc:
            logging.log(level=oc.log_level, msg=oc.message)
            continue
        upgrade_config = oc.get(
            namespace='openshift-managed-upgrade-operator',
            kind='UpgradeConfig',
            allow_not_found=True)['items']
        if not upgrade_config:
            logging.debug(f'[{cluster}] UpgradeConfig not found.')
            continue
        [upgrade_config] = upgrade_config

        upgrade_spec = upgrade_config['spec']
        upgrade_at = upgrade_spec['upgradeAt']
        version = upgrade_spec['desired']['version']
        upgrade_at_obj = datetime.strptime(upgrade_at, '%Y-%m-%dT%H:%M:%SZ')
        state_key = f'{cluster}-{upgrade_at}'
        # if this is the first iteration in which 'now' had passed
        # the upgrade at date time, we send a notification
        if upgrade_at_obj < now:
            if state.exists(state_key):
                # already notified
                continue
            logging.info(['cluster_upgrade', cluster])
            if not dry_run:
                state.add(state_key)
                usergroup = f'{cluster}-cluster'
                usergroup_id = slack.get_usergroup_id(usergroup)
                slack.chat_post_message(
                    f'Heads up <!subteam^{usergroup_id}>! '
                    f'cluster `{cluster}` is currently '
                    f'being upgraded to version `{version}`')
def run(dry_run, thread_pool_size=10,
        disable_service_account_keys=False, defer=None):
    accounts = queries.get_aws_accounts()
    settings = queries.get_app_interface_settings()
    aws = AWSApi(thread_pool_size, accounts, settings=settings)
    keys_to_delete = get_keys_to_delete(accounts)
    working_dirs = init_tf_working_dirs(accounts, thread_pool_size, settings)
    defer(lambda: cleanup(working_dirs))
    error = aws.delete_keys(dry_run, keys_to_delete, working_dirs,
                            disable_service_account_keys)
    if error:
        sys.exit(1)
def run(dry_run, print_only=False,
        enable_deletion=False, io_dir='throughput/',
        thread_pool_size=10, internal=None, use_jump_host=True,
        light=False, vault_output_path='',
        account_name=None, extra_labels=None, defer=None):
    ri, oc_map, tf, tf_namespaces = \
        setup(dry_run, print_only, thread_pool_size, internal,
              use_jump_host, account_name, extra_labels)

    if not dry_run:
        defer(lambda: oc_map.cleanup())

    if print_only:
        cleanup_and_exit()
    if tf is None:
        err = True
        cleanup_and_exit(tf, err)

    if not light:
        disabled_deletions_detected, err = tf.plan(enable_deletion)
        if err:
            cleanup_and_exit(tf, err)
        tf.dump_deleted_users(io_dir)
        if disabled_deletions_detected:
            cleanup_and_exit(tf, disabled_deletions_detected)

    if dry_run:
        cleanup_and_exit(tf)

    if not light:
        err = tf.apply()
        if err:
            cleanup_and_exit(tf, err)

    tf.populate_desired_state(ri, oc_map, tf_namespaces, account_name)

    actions = ob.realize_data(dry_run, oc_map, ri, caller=account_name)

    disable_keys(dry_run, thread_pool_size,
                 disable_service_account_keys=True,
                 account_name=account_name)

    if actions and vault_output_path:
        write_outputs_to_vault(vault_output_path, ri)

    if ri.has_error_registered():
        err = True
        cleanup_and_exit(tf, err)

    cleanup_and_exit(tf)
def run(dry_run: bool, thread_pool_size: int, internal: bool,
        use_jump_host: bool, defer=None) -> None:
    # verify blackbox-exporter modules
    settings = queries.get_app_interface_settings()
    allowed_modules = \
        set(settings["endpointMonitoringBlackboxExporterModules"])
    verification_errors = False
    if allowed_modules:
        for p in get_blackbox_providers():
            if p.blackboxExporter and \
                    p.blackboxExporter.module not in allowed_modules:
                LOG.error(
                    f"endpoint monitoring provider {p.name} uses "
                    f"blackbox-exporter module {p.blackboxExporter.module} "
                    f"which is not in the allow list {allowed_modules} of "
                    "app-interface-settings"
                )
                verification_errors = True
    if verification_errors:
        sys.exit(1)

    # prepare
    desired_endpoints = get_endpoints()
    namespaces = {
        p.blackboxExporter.namespace.get("name"): p.blackboxExporter.namespace
        for p in desired_endpoints
        if p.blackboxExporter
    }

    if namespaces:
        ri, oc_map = ob.fetch_current_state(
            namespaces.values(),
            thread_pool_size=thread_pool_size,
            internal=internal,
            use_jump_host=use_jump_host,
            integration=QONTRACT_INTEGRATION,
            integration_version=QONTRACT_INTEGRATION_VERSION,
            override_managed_types=["Probe"]
        )
        defer(oc_map.cleanup)

        # reconcile
        for provider, endpoints in desired_endpoints.items():
            fill_desired_state(provider, endpoints, ri)
        ob.realize_data(dry_run, oc_map, ri, thread_pool_size,
                        recycle_pods=False)

        if ri.has_error_registered():
            sys.exit(1)
def run(dry_run, thread_pool_size=10, internal=None,
        use_jump_host=True, defer=None):
    oc_map, current_state = \
        fetch_current_state(thread_pool_size, internal, use_jump_host)
    defer(lambda: oc_map.cleanup())
    desired_state = fetch_desired_state(oc_map)
    diffs = calculate_diff(current_state, desired_state)

    for diff in diffs:
        logging.info(list(diff.values()))

        if not dry_run:
            act(diff, oc_map)
def run(dry_run, thread_pool_size=10, internal=None,
        use_jump_host=True, defer=None):
    settings = queries.get_app_interface_settings()
    pipelines_providers = queries.get_pipelines_providers()
    tkn_namespaces = [pp['namespace'] for pp in pipelines_providers
                      if pp['provider'] == Providers.TEKTON]

    oc_map = OC_Map(namespaces=tkn_namespaces,
                    integration=QONTRACT_INTEGRATION,
                    settings=settings, internal=internal,
                    use_jump_host=use_jump_host,
                    thread_pool_size=thread_pool_size)
    defer(lambda: oc_map.cleanup())

    for pp in pipelines_providers:
        retention = pp.get('retention')
        if not retention:
            continue

        if pp['provider'] == Providers.TEKTON:
            ns_info = pp['namespace']
            namespace = ns_info['name']
            cluster = ns_info['cluster']['name']
            oc = oc_map.get(cluster)
            pipeline_runs = sorted(
                oc.get(namespace, 'PipelineRun')['items'],
                key=lambda k: k['metadata']['creationTimestamp']
            )

            retention_min = retention.get('minimum')
            if retention_min:
                pipeline_runs = pipeline_runs[retention_min:]

            retention_days = retention.get('days')
            for pr in pipeline_runs:
                name = pr['metadata']['name']
                if retention_days and \
                        within_retention_days(pr, retention_days):
                    continue

                logging.info(['delete_trigger', cluster, namespace,
                              'PipelineRun', name])
                if not dry_run:
                    oc.delete(namespace, 'PipelineRun', name)
def run(dry_run=False, print_only=False,
        enable_deletion=False, thread_pool_size=10, defer=None):
    settings = queries.get_app_interface_settings()
    zones = queries.get_dns_zones()

    participating_account_names = [z['account']['name'] for z in zones]
    participating_accounts = [a for a in queries.get_aws_accounts()
                              if a['name'] in participating_account_names]

    ts = Terrascript(QONTRACT_INTEGRATION,
                     "",
                     thread_pool_size,
                     participating_accounts,
                     settings=settings)

    desired_state = build_desired_state(zones)

    error = ts.populate_route53(desired_state)
    if error:
        sys.exit(ExitCodes.ERROR)
    working_dirs = ts.dump(print_only=print_only)

    if print_only:
        sys.exit(ExitCodes.SUCCESS)

    tf = Terraform(QONTRACT_INTEGRATION,
                   QONTRACT_INTEGRATION_VERSION,
                   "",
                   working_dirs,
                   thread_pool_size)

    if tf is None:
        sys.exit(ExitCodes.ERROR)

    defer(lambda: tf.cleanup())

    _, err = tf.plan(enable_deletion)
    if err:
        sys.exit(ExitCodes.ERROR)

    if dry_run:
        return

    err = tf.apply()
    if err:
        sys.exit(ExitCodes.ERROR)
def run(
    dry_run: bool,
    thread_pool_size: int = 10,
    internal: Optional[bool] = None,
    use_jump_host: bool = True,
    saas_file_name: Optional[str] = None,
    defer=None,
) -> None:
    tkn_providers = fetch_tkn_providers(saas_file_name)

    # TODO: This will need to be an error condition in the future
    if not tkn_providers:
        LOG.debug("No saas files found to be processed")
        sys.exit(0)

    # We need to start with the desired state to know the names of the
    # tekton objects that will be created in the providers' namespaces. We
    # need to make sure that this integration only manages its resources
    # and not the tekton resources already created via openshift-resources
    LOG.debug("Fetching desired resources")
    desired_resources = fetch_desired_resources(tkn_providers)

    tkn_namespaces = [tknp["namespace"] for tknp in tkn_providers.values()]
    LOG.debug("Fetching current resources")
    ri, oc_map = ob.fetch_current_state(
        namespaces=tkn_namespaces,
        integration=QONTRACT_INTEGRATION,
        integration_version=QONTRACT_INTEGRATION_VERSION,
        override_managed_types=["Pipeline", "Task"],
        internal=internal,
        use_jump_host=use_jump_host,
        thread_pool_size=thread_pool_size,
    )
    defer(oc_map.cleanup)

    LOG.debug("Adding desired resources to inventory")
    for desired_resource in desired_resources:
        ri.add_desired(**desired_resource)

    LOG.debug("Realizing data")
    ob.realize_data(dry_run, oc_map, ri, thread_pool_size)

    if ri.has_error_registered():
        sys.exit(ExitCodes.ERROR)

    sys.exit(0)