def run(dry_run, gitlab_project_id=None, thread_pool_size=10, enable_deletion=False, send_mails=False):
    """Delete app-interface users flagged by the GitHub company check.

    Looks up every user's GitHub company in parallel, decides who should be
    removed, and then either notifies them by mail, opens a deletion MR, or
    warns that deletion is disabled.

    :param dry_run: only log the users that would be deleted
    :param gitlab_project_id: target project for deletion MRs
    :param thread_pool_size: parallelism for the GitHub company lookups
    :param enable_deletion: when True, actually submit deletion MRs
    :param send_mails: when True, send a notification mail instead of an MR
    """
    settings = queries.get_app_interface_settings()
    users = queries.get_users()
    gh = init_github()
    company_results = threaded.run(get_user_company, users, thread_pool_size, github=gh)
    users_to_delete = get_users_to_delete(company_results)

    # Only open a gitlab client when an MR may actually be submitted.
    mr_cli = None
    if not dry_run and enable_deletion:
        mr_cli = mr_client_gateway.init(gitlab_project_id=gitlab_project_id)

    for user in users_to_delete:
        username = user['username']
        paths = user['paths']
        logging.info(['delete_user', username])
        if dry_run:
            continue
        if send_mails:
            send_email_notification(user, settings)
        elif enable_deletion:
            CreateDeleteUser(username, paths).submit(cli=mr_cli)
        else:
            logging.warning(
                '\'delete\' action is not enabled. '
                'Please run the integration manually '
                'with the \'--enable-deletion\' flag.')
def main(
    configfile, dry_run, log_level, gitlab_project_id, reports_path, thread_pool_size
):
    """Generate app-interface reports and (optionally) submit them via MR.

    Builds one report message per app, writes them under ``reports_path``
    when given, and — unless this is a dry run — opens an MR that publishes
    the reports and a notification e-mail.

    Fixes: ``config.init_from_toml(configfile)`` was called twice; the
    duplicate call is removed. The makedirs try/except dance is replaced
    with ``exist_ok=True``, which tolerates pre-existing directories the
    same way.
    """
    config.init_from_toml(configfile)
    init_log_level(log_level)
    gql.init_from_config()

    now = datetime.now()
    apps = get_apps_data(now, thread_pool_size=thread_pool_size)
    reports = [Report(app, now).to_message() for app in apps]

    for report in reports:
        logging.info(["create_report", report["file_path"]])
        if reports_path:
            report_file = os.path.join(reports_path, report["file_path"])
            # create parent directories as needed; ok if they already exist
            os.makedirs(os.path.dirname(report_file), exist_ok=True)
            with open(report_file, "w") as f:
                f.write(report["content"])

    if not dry_run:
        email_body = """\
Hello,

A new report by the App SRE team is now available at:
https://visual-app-interface.devshift.net/reports

You can use the Search bar to search by App.

You can also view reports per service here:
https://visual-app-interface.devshift.net/services

Having problems? Ping us on #sd-app-sre on Slack!

You are receiving this message because you are a member
of app-interface or subscribed to a mailing list specified
as owning a service being run by the App SRE team:
https://gitlab.cee.redhat.com/service/app-interface
"""
        mr_cli = mr_client_gateway.init(
            gitlab_project_id=gitlab_project_id, sqs_or_gitlab="gitlab"
        )
        mr = CreateAppInterfaceReporter(
            reports=reports,
            email_body=textwrap.dedent(email_body),
            reports_path=reports_path,
        )
        result = mr.submit(cli=mr_cli)
        logging.info(["created_mr", result.web_url])
def act(dry_run, gitlab_project_id, accounts, keys_to_delete):
    """Open MRs that delete the given AWS access keys from app-interface.

    :param dry_run: only log the keys that would be deleted
    :param gitlab_project_id: target project for the deletion MRs
    :param accounts: AWS account definitions (``name`` / ``path``)
    :param keys_to_delete: entries with ``account`` and ``key`` fields
    """
    # The gitlab client is only needed when MRs will really be submitted.
    mr_cli = None
    if not dry_run and keys_to_delete:
        mr_cli = mr_client_gateway.init(gitlab_project_id=gitlab_project_id)

    for entry in keys_to_delete:
        account = entry["account"]
        key = entry["key"]
        logging.info(["delete_aws_access_key", account, key])
        if dry_run:
            continue
        # resolve the account's datafile path; keys live under data/
        account_path = [a["path"] for a in accounts if a["name"] == account][0]
        mr = CreateDeleteAwsAccessKey(account, "data" + account_path, key)
        mr.submit(cli=mr_cli)
def act(dry_run, gitlab_project_id, accounts, keys_to_delete):
    """Submit one MR per AWS access key that must be removed.

    In dry-run mode the deletions are only logged; otherwise each key gets
    a CreateDeleteAwsAccessKey MR against the account's datafile.
    """
    if not dry_run and keys_to_delete:
        mr_cli = mr_client_gateway.init(gitlab_project_id=gitlab_project_id)

    for item in keys_to_delete:
        account, key = item['account'], item['key']
        logging.info(['delete_aws_access_key', account, key])
        if not dry_run:
            # first matching account wins, mirroring the account list order
            matches = [a['path'] for a in accounts if a['name'] == account]
            path = 'data' + matches[0]
            CreateDeleteAwsAccessKey(account, path, key).submit(cli=mr_cli)
def run(dry_run, gitlab_project_id=None, thread_pool_size=10):
    """Delete users whose spec marks them for removal.

    User specs are gathered in parallel; each spec is a
    ``(username, delete, paths)`` triple and only entries with a truthy
    ``delete`` flag result in a deletion MR.
    """
    users = init_users()
    user_specs = threaded.run(init_user_spec, users, thread_pool_size)
    users_to_delete = [
        (username, paths)
        for username, delete, paths in user_specs
        if delete
    ]

    mr_cli = None
    if not dry_run:
        mr_cli = mr_client_gateway.init(
            gitlab_project_id=gitlab_project_id, sqs_or_gitlab='gitlab')

    for username, paths in users_to_delete:
        logging.info(['delete_user', username])
        if dry_run:
            continue
        CreateDeleteUser(username, paths).submit(cli=mr_cli)
def run(dry_run, gitlab_project_id=None):
    """Delete app-interface users that no longer exist in LDAP."""
    app_interface_users = init_users()
    usernames = [u['username'] for u in app_interface_users]
    ldap_users = ldap_client.get_users(usernames)
    # anyone known to app-interface but absent from LDAP gets removed
    users_to_delete = [
        u for u in app_interface_users if u['username'] not in ldap_users
    ]

    mr_cli = None
    if not dry_run:
        mr_cli = mr_client_gateway.init(
            gitlab_project_id=gitlab_project_id, sqs_or_gitlab='gitlab')

    for user in users_to_delete:
        logging.info(['delete_user', user['username']])
        if dry_run:
            continue
        mr = CreateDeleteUser(user['username'], user['paths'])
        mr.submit(cli=mr_cli)
def run(dry_run, gitlab_project_id=None):
    """Delete app-interface users that no longer exist in LDAP.

    Uses an LdapClient context manager so the connection is released as
    soon as the membership snapshot has been taken.
    """
    users = init_users()
    with LdapClient.from_settings(queries.get_app_interface_settings()) as ldap_client:
        ldap_users = ldap_client.get_users([u["username"] for u in users])

    stale = [u for u in users if u["username"] not in ldap_users]

    mr_cli = None
    if not dry_run:
        mr_cli = mr_client_gateway.init(
            gitlab_project_id=gitlab_project_id, sqs_or_gitlab="gitlab"
        )

    for user in stale:
        logging.info(["delete_user", user["username"]])
        if dry_run:
            continue
        mr = CreateDeleteUser(user["username"], user["paths"])
        mr.submit(cli=mr_cli)
def run(dry_run, gitlab_project_id=None, thread_pool_size=10):
    """Reconcile OCM cluster specs with their app-interface definitions.

    Compares the desired state (app-interface) with the current/pending
    state reported by OCM. Fields that OCM owns (version after an upgrade,
    id, external_id, provision_shard_id) are collected into
    ``clusters_updates`` and written back to app-interface via a single MR;
    genuine spec differences are pushed to OCM; clusters unknown to OCM are
    created. Exits 1 if any validation error was recorded.
    """
    settings = queries.get_app_interface_settings()
    clusters = queries.get_clusters()
    # only clusters with an 'ocm' section are managed by this integration
    clusters = [c for c in clusters if c.get('ocm') is not None]
    ocm_map = OCMMap(clusters=clusters, integration=QONTRACT_INTEGRATION,
                     settings=settings, skip_provision_shards=False)
    current_state, pending_state = ocm_map.cluster_specs()
    desired_state = fetch_desired_state(clusters)

    if not dry_run:
        mr_cli = mr_client_gateway.init(gitlab_project_id=gitlab_project_id)

    error = False
    # cluster_name -> {field: value} to write back to app-interface
    clusters_updates = {}
    for cluster_name, desired_spec in desired_state.items():
        current_spec = current_state.get(cluster_name)
        if current_spec:
            clusters_updates[cluster_name] = {}
            cluster_path = 'data' + \
                [c['path'] for c in clusters
                 if c['name'] == cluster_name][0]

            # validate version
            # NOTE: pops mutate the specs so the later dict comparison
            # ignores version fields
            desired_spec['spec'].pop('initial_version')
            desired_version = desired_spec['spec'].pop('version')
            current_version = current_spec['spec'].pop('version')
            compare_result = 1  # default value in case version is empty
            if desired_version:
                compare_result = \
                    semver.compare(current_version, desired_version)
            if compare_result > 0:
                # current version is larger due to an upgrade.
                # submit MR to update cluster version
                logging.info(
                    '[%s] desired version %s is different ' +
                    'from current version %s. ' +
                    'version will be updated automatically in app-interface.',
                    cluster_name, desired_version, current_version)
                clusters_updates[cluster_name]['version'] = current_version
            elif compare_result < 0:
                # desired version ahead of current: not allowed here
                logging.error(
                    '[%s] desired version %s is different ' +
                    'from current version %s',
                    cluster_name, desired_version, current_version)
                error = True

            # backfill OCM-owned identifiers missing from app-interface
            if not desired_spec['spec'].get('id'):
                clusters_updates[cluster_name]['id'] = \
                    current_spec['spec']['id']

            if not desired_spec['spec'].get('external_id'):
                clusters_updates[cluster_name]['external_id'] = \
                    current_spec['spec']['external_id']

            desired_provision_shard_id = \
                desired_spec['spec'].get('provision_shard_id')
            current_provision_shard_id = \
                current_spec['spec']['provision_shard_id']
            if desired_provision_shard_id != current_provision_shard_id:
                clusters_updates[cluster_name]['provision_shard_id'] = \
                    current_provision_shard_id

            if clusters_updates[cluster_name]:
                clusters_updates[cluster_name]['path'] = cluster_path

            # exclude params we don't want to check in the specs
            for k in ['id', 'external_id', 'provision_shard_id']:
                current_spec['spec'].pop(k, None)
                desired_spec['spec'].pop(k, None)

            if current_spec != desired_spec:
                # check if cluster update is valid
                update_spec, err = get_cluster_update_spec(
                    cluster_name,
                    current_spec,
                    desired_spec,
                )
                if err:
                    error = True
                    continue
                # update cluster
                logging.debug(
                    '[%s] desired spec %s is different ' +
                    'from current spec %s',
                    cluster_name, desired_spec, current_spec)
                logging.info(['update_cluster', cluster_name])
                # TODO(mafriedm): check dry_run in OCM API patch
                if not dry_run:
                    ocm = ocm_map.get(cluster_name)
                    ocm.update_cluster(cluster_name, update_spec, dry_run)
        else:
            # create cluster
            # skip clusters that are already being provisioned
            if cluster_name in pending_state:
                continue
            logging.info(['create_cluster', cluster_name])
            ocm = ocm_map.get(cluster_name)
            ocm.create_cluster(cluster_name, desired_spec, dry_run)

    # emit one MR covering all collected app-interface updates
    create_update_mr = False
    for cluster_name, cluster_updates in clusters_updates.items():
        for k, v in cluster_updates.items():
            if k == 'path':
                continue
            logging.info(f"[{cluster_name}] desired key " +
                         f"{k} will be updated automatically " +
                         f"with value {v}.")
            create_update_mr = True
    if create_update_mr and not dry_run:
        mr = CreateClustersUpdates(clusters_updates)
        mr.submit(cli=mr_cli)

    if error:
        sys.exit(1)
def run(
    dry_run,
    thread_pool_size=10,
    io_dir="throughput/",
    saas_file_name=None,
    env_name=None,
    gitlab_project_id=None,
    defer=None,
):
    """Deploy saas-file targets to their clusters (openshift-saas-deploy).

    Fetches saas files (optionally narrowed to a single file/environment),
    realizes the desired state into the clusters, validates and optionally
    follows deployment logs, publishes promotion results, and sends slack
    notifications. Non-zero exit codes signal failure to the CI wrapper.

    :param defer: callback registration for cleanup/notification hooks,
        injected by the integration runner (presumably a defer decorator —
        TODO confirm against caller)
    """
    all_saas_files = queries.get_saas_files(v1=True, v2=True)
    saas_files = queries.get_saas_files(saas_file_name, env_name, v1=True, v2=True)
    app_interface_settings = queries.get_app_interface_settings()
    if not saas_files:
        logging.error("no saas files found")
        sys.exit(ExitCodes.ERROR)

    # notify different outputs (publish results, slack notifications)
    # we only do this if:
    # - this is not a dry run
    # - there is a single saas file deployed
    notify = not dry_run and len(saas_files) == 1
    if notify:
        saas_file = saas_files[0]
        slack_info = saas_file.get("slack")
        if slack_info:
            slack = slackapi_from_slack_workspace(
                slack_info,
                app_interface_settings,
                QONTRACT_INTEGRATION,
                init_usergroups=False,
            )
            # support built-in start and end slack notifications
            # only in v2 saas files
            if saas_file["apiVersion"] == "v2":
                ri = ResourceInventory()
                console_url = compose_console_url(saas_file, saas_file_name, env_name)
                # deployment result notification
                # (runs at teardown via defer, after ri has been populated)
                defer(
                    lambda: slack_notify(
                        saas_file_name,
                        env_name,
                        slack,
                        ri,
                        console_url,
                        in_progress=False,
                    )
                )
                # deployment start notification
                slack_notifications = slack_info.get("notifications")
                if slack_notifications and slack_notifications.get("start"):
                    slack_notify(
                        saas_file_name,
                        env_name,
                        slack,
                        ri,
                        console_url,
                        in_progress=True,
                    )
        else:
            slack = None

    instance = queries.get_gitlab_instance()
    # instance exists in v1 saas files only
    desired_jenkins_instances = [
        s["instance"]["name"] for s in saas_files if s.get("instance")
    ]
    jenkins_map = jenkins_base.get_jenkins_map(
        desired_instances=desired_jenkins_instances
    )
    settings = queries.get_app_interface_settings()
    accounts = queries.get_aws_accounts()
    try:
        gl = GitLabApi(instance, settings=settings)
    except Exception:
        # allow execution without access to gitlab
        # as long as there are no access attempts.
        gl = None

    saasherder = SaasHerder(
        saas_files,
        thread_pool_size=thread_pool_size,
        gitlab=gl,
        integration=QONTRACT_INTEGRATION,
        integration_version=QONTRACT_INTEGRATION_VERSION,
        settings=settings,
        jenkins_map=jenkins_map,
        accounts=accounts,
    )
    if len(saasherder.namespaces) == 0:
        logging.warning("no targets found")
        sys.exit(ExitCodes.SUCCESS)

    ri, oc_map = ob.fetch_current_state(
        namespaces=saasherder.namespaces,
        thread_pool_size=thread_pool_size,
        integration=QONTRACT_INTEGRATION,
        integration_version=QONTRACT_INTEGRATION_VERSION,
        init_api_resources=True,
        cluster_admin=saasherder.cluster_admin,
    )
    defer(oc_map.cleanup)
    saasherder.populate_desired_state(ri)

    # validate that this deployment is valid
    # based on promotion information in targets
    if not saasherder.validate_promotions():
        logging.error("invalid promotions")
        ri.register_error()
        sys.exit(ExitCodes.ERROR)

    # if saas_file_name is defined, the integration
    # is being called from multiple running instances
    actions = ob.realize_data(
        dry_run,
        oc_map,
        ri,
        thread_pool_size,
        caller=saas_file_name,
        wait_for_namespace=True,
        no_dry_run_skip_compare=(not saasherder.compare),
        take_over=saasherder.take_over,
    )

    if not dry_run:
        if saasherder.publish_job_logs:
            try:
                ob.follow_logs(oc_map, actions, io_dir)
            except Exception as e:
                logging.error(str(e))
                ri.register_error()
        try:
            ob.validate_data(oc_map, actions)
        except Exception as e:
            logging.error(str(e))
            ri.register_error()

    # publish results of this deployment
    # based on promotion information in targets
    success = not ri.has_error_registered()
    # only publish promotions for deployment jobs (a single saas file)
    if notify:
        # Auto-promote next stages only if there are changes in the
        # promoting stage. This prevents trigger promotions on job re-runs
        auto_promote = len(actions) > 0
        mr_cli = mr_client_gateway.init(gitlab_project_id=gitlab_project_id)
        saasherder.publish_promotions(success, all_saas_files, mr_cli, auto_promote)

    if not success:
        sys.exit(ExitCodes.ERROR)

    # send human readable notifications to slack
    # we only do this if:
    # - this is not a dry run
    # - there is a single saas file deployed
    # - output is 'events'
    # - no errors were registered
    if notify and slack and actions and slack_info.get("output") == "events":
        for action in actions:
            message = (
                f"[{action['cluster']}] "
                + f"{action['kind']} {action['name']} {action['action']}"
            )
            slack.chat_post_message(message)
def run(dry_run, gitlab_project_id=None, thread_pool_size=10):
    """Reconcile OCM cluster specs with app-interface (v2, spec/root split).

    Like the earlier OCM reconcile, but app-interface write-backs are
    split into 'spec' (cluster spec fields) and 'root' (top-level fields
    such as consoleUrl/serverUrl/elbFQDN) buckets, and the
    user-workload-monitoring attribute is also synchronized. Exits with
    ``int(error)`` so any validation failure yields exit code 1.
    """
    settings = queries.get_app_interface_settings()
    clusters = queries.get_clusters()
    # only clusters with an 'ocm' section are managed by this integration
    clusters = [c for c in clusters if c.get('ocm') is not None]
    ocm_map = ocmmod.OCMMap(
        clusters=clusters, integration=QONTRACT_INTEGRATION,
        settings=settings, init_provision_shards=True)
    current_state, pending_state = ocm_map.cluster_specs()
    desired_state = fetch_desired_state(clusters)

    if not dry_run:
        mr_cli = mr_client_gateway.init(gitlab_project_id=gitlab_project_id)

    error = False
    # cluster_name -> {'spec': {...}, 'root': {...}, 'path': str}
    clusters_updates = {}
    for cluster_name, desired_spec in desired_state.items():
        # Set the default network type
        if not desired_spec['network'].get('type'):
            desired_spec['network']['type'] = 'OpenShiftSDN'

        current_spec = current_state.get(cluster_name)
        if current_spec:
            clusters_updates[cluster_name] = {'spec': {}, 'root': {}}
            cluster_path = 'data' + \
                [c['path'] for c in clusters
                 if c['name'] == cluster_name][0]

            # validate version
            # NOTE: pops mutate the specs so later comparisons ignore
            # the version fields
            desired_spec['spec'].pop('initial_version')
            desired_version = desired_spec['spec'].pop('version')
            current_version = current_spec['spec'].pop('version')
            compare_result = 1  # default value in case version is empty
            if desired_version:
                compare_result = \
                    semver.compare(current_version, desired_version)
            if compare_result > 0:
                # current version is larger due to an upgrade.
                # submit MR to update cluster version
                logging.info(
                    '[%s] desired version %s is different ' +
                    'from current version %s. ' +
                    'version will be updated automatically in app-interface.',
                    cluster_name, desired_version, current_version)
                clusters_updates[cluster_name]['spec']['version'] = current_version  # noqa: E501
            elif compare_result < 0:
                logging.error(
                    f'[{cluster_name}] version {desired_version} ' +
                    f'is different from current version {current_version}. ' +
                    f'please correct version to be {current_version}, ' +
                    'as this field is only meant for tracking purposes. ' +
                    'upgrades are determined by ocm-upgrade-scheduler.')
                error = True

            # backfill OCM-owned fields missing from app-interface
            if not desired_spec['spec'].get('id'):
                clusters_updates[cluster_name]['spec']['id'] = \
                    current_spec['spec']['id']

            if not desired_spec['spec'].get('external_id'):
                clusters_updates[cluster_name]['spec']['external_id'] = \
                    current_spec['spec']['external_id']

            if not desired_spec.get('consoleUrl'):
                clusters_updates[cluster_name]['root']['consoleUrl'] = \
                    current_spec['console_url']

            if not desired_spec.get('serverUrl'):
                clusters_updates[cluster_name]['root']['serverUrl'] = \
                    current_spec['server_url']

            if not desired_spec.get('elbFQDN'):
                clusters_updates[cluster_name]['root']['elbFQDN'] = \
                    f"elb.apps.{cluster_name}.{current_spec['domain']}"

            desired_provision_shard_id = \
                desired_spec['spec'].get('provision_shard_id')
            current_provision_shard_id = \
                current_spec['spec']['provision_shard_id']
            if desired_provision_shard_id != current_provision_shard_id:
                clusters_updates[cluster_name]['spec']['provision_shard_id'] =\
                    current_provision_shard_id

            if clusters_updates[cluster_name]:
                clusters_updates[cluster_name]['path'] = cluster_path

            # exclude params we don't want to check in the specs
            for k in ['id', 'external_id', 'provision_shard_id']:
                current_spec['spec'].pop(k, None)
                desired_spec['spec'].pop(k, None)

            # sync user-workload-monitoring flag back to app-interface
            desired_uwm = desired_spec['spec'].get(ocmmod.DISABLE_UWM_ATTR)
            current_uwm = current_spec['spec'].get(ocmmod.DISABLE_UWM_ATTR)
            if desired_uwm is None and current_uwm is not None:
                clusters_updates[cluster_name]['spec'][ocmmod.DISABLE_UWM_ATTR] =\
                    current_uwm  # noqa: E501

            # check if cluster update, if any, is valid
            update_spec, err = get_cluster_update_spec(
                cluster_name,
                current_spec,
                desired_spec,
            )
            if err:
                logging.warning(f"Invalid changes to spec: {update_spec}")
                error = True
                continue
            # update cluster
            # TODO(mafriedm): check dry_run in OCM API patch
            if update_spec:
                logging.info(['update_cluster', cluster_name])
                logging.debug(
                    '[%s] desired spec %s is different ' +
                    'from current spec %s',
                    cluster_name, desired_spec, current_spec)
                if not dry_run:
                    ocm = ocm_map.get(cluster_name)
                    ocm.update_cluster(cluster_name, update_spec, dry_run)
        else:
            # create cluster
            # skip clusters that are already being provisioned
            if cluster_name in pending_state:
                continue
            logging.info(['create_cluster', cluster_name])
            ocm = ocm_map.get(cluster_name)
            ocm.create_cluster(cluster_name, desired_spec, dry_run)

    # emit one MR covering all collected app-interface updates
    create_update_mr = False
    for cluster_name, cluster_updates in clusters_updates.items():
        for k, v in cluster_updates['spec'].items():
            logging.info(
                f"[{cluster_name}] desired key in spec " +
                f"{k} will be updated automatically " +
                f"with value {v}."
            )
            create_update_mr = True
        for k, v in cluster_updates['root'].items():
            logging.info(
                f"[{cluster_name}] desired root key {k} will "
                f"be updated automatically with value {v}"
            )
            create_update_mr = True
    if create_update_mr and not dry_run:
        mr = cu.CreateClustersUpdates(clusters_updates)
        mr.submit(cli=mr_cli)

    sys.exit(int(error))
def run(dry_run, gitlab_project_id=None):
    """Build the OSD mirror list and submit it as an install-config MR.

    Collects ECR resources with mirrors from the osd-operators-ecr-mirrors
    namespace plus the two OCP release ECR repositories, resolves each ECR
    repository URI through AWS, and submits the resulting mirror mapping
    (install-config.yaml format) via a CSInstallConfig MR.
    """
    settings = queries.get_app_interface_settings()
    namespaces = queries.get_namespaces()

    # This is a list of app-interface ECR resources and their
    # mirrors
    osd_mirrors = []
    for namespace in namespaces:
        # We are only interested on the ECR resources from
        # this specific namespace
        if namespace['name'] != 'osd-operators-ecr-mirrors':
            continue
        if namespace['terraformResources'] is None:
            continue
        for tfr in namespace['terraformResources']:
            if tfr['provider'] != 'ecr':
                continue
            if tfr['mirror'] is None:
                continue
            osd_mirrors.append(tfr)

    # Now the tricky part. The "OCP Release ECR Mirror" is a stand-alone
    # object in app-interface. We have to process it so we get the
    # upstream and the mirror repositories
    instances = queries.get_ocp_release_ecr_mirror()
    for instance in instances:
        namespace = instance['ecrResourcesNamespace']
        ocp_release_identifier = instance['ocpReleaseEcrIdentifier']
        ocp_art_dev_identifier = instance['ocpArtDevEcrIdentifier']

        ocp_release_tf_info = get_ecr_tf_resource_info(namespace,
                                                       ocp_release_identifier)
        # We get an ECR resource from app-interface, but it has
        # no mirror property as the mirroring is done differently
        # there (see qontract-reconcile-ocp-release-ecr-mirror).
        # The quay repositories are not managed in app-interface, but
        # we know where they are by looking at the ClusterImageSets
        # in Hive.
        # Let's just manually inject the mirror information so we
        # process all the ECR resources the same way
        ocp_release_tf_info['mirror'] = {
            'url': 'quay.io/openshift-release-dev/ocp-release',
            'pullCredentials': None,
            'tags': None,
            'tagsExclude': None
        }
        osd_mirrors.append(ocp_release_tf_info)

        ocp_art_dev_tf_info = get_ecr_tf_resource_info(namespace,
                                                       ocp_art_dev_identifier)
        ocp_art_dev_tf_info['mirror'] = {
            'url': 'quay.io/openshift-release-dev/ocp-v4.0-art-dev',
            'pullCredentials': None,
            'tags': None,
            'tagsExclude': None
        }
        osd_mirrors.append(ocp_art_dev_tf_info)

    # Initializing the AWS Client for all the accounts
    # with ECR resources of interest
    accounts = []
    for tfr in osd_mirrors:
        account = get_aws_account_info(tfr['account'])
        if account not in accounts:
            accounts.append(account)
    aws_cli = AWSApi(thread_pool_size=1,
                     accounts=accounts,
                     settings=settings,
                     init_ecr_auth_tokens=True)
    aws_cli.map_ecr_resources()

    # Building up the mirrors information in the
    # install-config.yaml compatible format
    mirrors_info = []
    for tfr in osd_mirrors:
        image_url = get_image_uri(aws_cli=aws_cli,
                                  account=tfr['account'],
                                  repository=tfr['identifier'])
        mirrors_info.append({
            'source': tfr['mirror']['url'],
            'mirrors': [
                image_url,
            ]
        })

    if not dry_run:
        # Creating the MR to app-interface
        mr_cli = mr_client_gateway.init(gitlab_project_id=gitlab_project_id)
        mr = CSInstallConfig(mirrors_info=mirrors_info)
        mr.submit(cli=mr_cli)
def run(dry_run, thread_pool_size=10, io_dir='throughput/',
        saas_file_name=None, env_name=None, gitlab_project_id=None,
        defer=None):
    """Deploy saas-file targets to their clusters (v1 variant).

    Earlier revision of openshift-saas-deploy: realizes desired state,
    validates results, publishes promotions, and posts slack event
    notifications when a single saas file was deployed.

    :param defer: cleanup-hook registration injected by the runner
        (presumably a defer decorator — TODO confirm against caller)
    """
    all_saas_files = queries.get_saas_files()
    saas_files = queries.get_saas_files(saas_file_name, env_name)
    if not saas_files:
        logging.error('no saas files found')
        sys.exit(ExitCodes.ERROR)

    instance = queries.get_gitlab_instance()
    desired_jenkins_instances = [s['instance']['name'] for s in saas_files]
    jenkins_map = jenkins_base.get_jenkins_map(
        desired_instances=desired_jenkins_instances)
    settings = queries.get_app_interface_settings()
    accounts = queries.get_aws_accounts()
    try:
        gl = GitLabApi(instance, settings=settings)
    except Exception:
        # allow execution without access to gitlab
        # as long as there are no access attempts.
        gl = None

    saasherder = SaasHerder(
        saas_files,
        thread_pool_size=thread_pool_size,
        gitlab=gl,
        integration=QONTRACT_INTEGRATION,
        integration_version=QONTRACT_INTEGRATION_VERSION,
        settings=settings,
        jenkins_map=jenkins_map,
        accounts=accounts)
    if len(saasherder.namespaces) == 0:
        logging.warning('no targets found')
        sys.exit(ExitCodes.SUCCESS)

    ri, oc_map = ob.fetch_current_state(
        namespaces=saasherder.namespaces,
        thread_pool_size=thread_pool_size,
        integration=QONTRACT_INTEGRATION,
        integration_version=QONTRACT_INTEGRATION_VERSION,
        init_api_resources=True)
    defer(lambda: oc_map.cleanup())
    saasherder.populate_desired_state(ri)

    # validate that this deployment is valid
    # based on promotion information in targets
    if not saasherder.validate_promotions():
        logging.error('invalid promotions')
        sys.exit(ExitCodes.ERROR)

    # if saas_file_name is defined, the integration
    # is being called from multiple running instances
    actions = ob.realize_data(dry_run, oc_map, ri,
                              caller=saas_file_name,
                              wait_for_namespace=True,
                              no_dry_run_skip_compare=(not saasherder.compare),
                              take_over=saasherder.take_over)

    if not dry_run:
        if saasherder.publish_job_logs:
            try:
                ob.follow_logs(oc_map, actions, io_dir)
            except Exception as e:
                logging.error(str(e))
                ri.register_error()
        try:
            ob.validate_data(oc_map, actions)
        except Exception as e:
            logging.error(str(e))
            ri.register_error()

    # publish results of this deployment
    # based on promotion information in targets
    success = not ri.has_error_registered()
    # only publish promotions for deployment jobs (a single saas file)
    if not dry_run and len(saasherder.saas_files) == 1:
        mr_cli = mr_client_gateway.init(gitlab_project_id=gitlab_project_id)
        saasherder.publish_promotions(success, all_saas_files, mr_cli)

    if not success:
        sys.exit(ExitCodes.ERROR)

    # send human readable notifications to slack
    # we only do this if:
    # - this is not a dry run
    # - there is a single saas file deployed
    # - output is 'events'
    # - no errors were registered
    if not dry_run and len(saasherder.saas_files) == 1:
        saas_file = saasherder.saas_files[0]
        slack_info = saas_file.get('slack')
        if slack_info and actions and slack_info.get('output') == 'events':
            slack = init_slack(slack_info,
                               QONTRACT_INTEGRATION,
                               init_usergroups=False)
            for action in actions:
                message = \
                    f"[{action['cluster']}] " + \
                    f"{action['kind']} {action['name']} {action['action']}"
                slack.chat_post_message(message)
def run(dry_run, gitlab_project_id=None, thread_pool_size=10):
    """Reconcile OCM cluster specs with app-interface (black-formatted v3).

    Same reconciliation flow as the v2 variant: compares desired vs current
    OCM state, collects app-interface write-backs into 'spec'/'root'
    buckets, pushes valid spec changes to OCM, creates missing clusters,
    and submits a single CreateClustersUpdates MR. Exits ``int(error)``.
    """
    settings = queries.get_app_interface_settings()
    clusters = queries.get_clusters()
    # only clusters with an 'ocm' section are managed by this integration
    clusters = [c for c in clusters if c.get("ocm") is not None]
    ocm_map = ocmmod.OCMMap(
        clusters=clusters,
        integration=QONTRACT_INTEGRATION,
        settings=settings,
        init_provision_shards=True,
    )
    current_state, pending_state = ocm_map.cluster_specs()
    desired_state = fetch_desired_state(clusters)

    if not dry_run:
        mr_cli = mr_client_gateway.init(gitlab_project_id=gitlab_project_id)

    error = False
    # cluster_name -> {'spec': {...}, 'root': {...}, 'path': str}
    clusters_updates = {}
    for cluster_name, desired_spec in desired_state.items():
        # Set the default network type
        if not desired_spec["network"].get("type"):
            desired_spec["network"]["type"] = "OpenShiftSDN"

        current_spec = current_state.get(cluster_name)
        if current_spec:
            clusters_updates[cluster_name] = {"spec": {}, "root": {}}
            cluster_path = (
                "data"
                + [c["path"] for c in clusters if c["name"] == cluster_name][0])

            # validate version
            # NOTE: pops mutate the specs so later comparisons ignore
            # the version fields
            desired_spec["spec"].pop("initial_version")
            desired_version = desired_spec["spec"].pop("version")
            current_version = current_spec["spec"].pop("version")
            compare_result = 1  # default value in case version is empty
            if desired_version:
                compare_result = semver.compare(current_version, desired_version)
            if compare_result > 0:
                # current version is larger due to an upgrade.
                # submit MR to update cluster version
                logging.info(
                    "[%s] desired version %s is different "
                    + "from current version %s. "
                    + "version will be updated automatically in app-interface.",
                    cluster_name,
                    desired_version,
                    current_version,
                )
                clusters_updates[cluster_name]["spec"][
                    "version"] = current_version  # noqa: E501
            elif compare_result < 0:
                logging.error(
                    f"[{cluster_name}] version {desired_version} "
                    + f"is different from current version {current_version}. "
                    + f"please correct version to be {current_version}, "
                    + "as this field is only meant for tracking purposes. "
                    + "upgrades are determined by ocm-upgrade-scheduler.")
                error = True

            # backfill OCM-owned fields missing from app-interface
            if not desired_spec["spec"].get("id"):
                clusters_updates[cluster_name]["spec"]["id"] = current_spec[
                    "spec"]["id"]

            if not desired_spec["spec"].get("external_id"):
                clusters_updates[cluster_name]["spec"][
                    "external_id"] = current_spec["spec"]["external_id"]

            if not desired_spec.get("consoleUrl"):
                clusters_updates[cluster_name]["root"][
                    "consoleUrl"] = current_spec["console_url"]

            if not desired_spec.get("serverUrl"):
                clusters_updates[cluster_name]["root"][
                    "serverUrl"] = current_spec["server_url"]

            if not desired_spec.get("elbFQDN"):
                clusters_updates[cluster_name]["root"][
                    "elbFQDN"] = f"elb.apps.{cluster_name}.{current_spec['domain']}"

            desired_provision_shard_id = desired_spec["spec"].get(
                "provision_shard_id")
            current_provision_shard_id = current_spec["spec"][
                "provision_shard_id"]
            if desired_provision_shard_id != current_provision_shard_id:
                clusters_updates[cluster_name]["spec"][
                    "provision_shard_id"] = current_provision_shard_id

            if clusters_updates[cluster_name]:
                clusters_updates[cluster_name]["path"] = cluster_path

            # exclude params we don't want to check in the specs
            for k in ["id", "external_id", "provision_shard_id"]:
                current_spec["spec"].pop(k, None)
                desired_spec["spec"].pop(k, None)

            # sync user-workload-monitoring flag back to app-interface
            desired_uwm = desired_spec["spec"].get(ocmmod.DISABLE_UWM_ATTR)
            current_uwm = current_spec["spec"].get(ocmmod.DISABLE_UWM_ATTR)
            if desired_uwm is None and current_uwm is not None:
                clusters_updates[cluster_name]["spec"][
                    ocmmod.DISABLE_UWM_ATTR] = current_uwm  # noqa: E501

            # check if cluster update, if any, is valid
            update_spec, err = get_cluster_update_spec(
                cluster_name,
                current_spec,
                desired_spec,
            )
            if err:
                logging.warning(f"Invalid changes to spec: {update_spec}")
                error = True
                continue
            # update cluster
            # TODO(mafriedm): check dry_run in OCM API patch
            if update_spec:
                logging.info(["update_cluster", cluster_name])
                logging.debug(
                    "[%s] desired spec %s is different "
                    + "from current spec %s",
                    cluster_name,
                    desired_spec,
                    current_spec,
                )
                if not dry_run:
                    ocm = ocm_map.get(cluster_name)
                    ocm.update_cluster(cluster_name, update_spec, dry_run)
        else:
            # create cluster
            # skip clusters that are already being provisioned
            if cluster_name in pending_state:
                continue
            logging.info(["create_cluster", cluster_name])
            ocm = ocm_map.get(cluster_name)
            ocm.create_cluster(cluster_name, desired_spec, dry_run)

    # emit one MR covering all collected app-interface updates
    create_update_mr = False
    for cluster_name, cluster_updates in clusters_updates.items():
        for k, v in cluster_updates["spec"].items():
            logging.info(f"[{cluster_name}] desired key in spec " +
                         f"{k} will be updated automatically " +
                         f"with value {v}.")
            create_update_mr = True
        for k, v in cluster_updates["root"].items():
            logging.info(f"[{cluster_name}] desired root key {k} will "
                         f"be updated automatically with value {v}")
            create_update_mr = True
    if create_update_mr and not dry_run:
        mr = cu.CreateClustersUpdates(clusters_updates)
        mr.submit(cli=mr_cli)

    sys.exit(int(error))