def run(
    dry_run,
    thread_pool_size=10,
    io_dir="throughput/",
    saas_file_name=None,
    env_name=None,
    gitlab_project_id=None,
    defer=None,
):
    """Deploy the desired state of the selected saas files to their clusters.

    Fetches saas files (optionally narrowed to a single file/environment),
    computes the desired state with SaasHerder, realizes it against the
    current cluster state, and — for single-saas-file deployment jobs —
    publishes promotion results and sends slack notifications.

    :param dry_run: when True, only compute and compare; no writes, no
        notifications, no promotion publishing
    :param thread_pool_size: worker threads for state fetching/realization
    :param io_dir: directory used when following job logs
    :param saas_file_name: restrict the run to a single saas file (deployment
        job mode); also used as the `caller` for realize_data
    :param env_name: restrict the run to a single environment
    :param gitlab_project_id: project used to open promotion MRs
    :param defer: cleanup registrar supplied by the @defer decorator
    """
    all_saas_files = queries.get_saas_files(v1=True, v2=True)
    saas_files = queries.get_saas_files(saas_file_name, env_name, v1=True, v2=True)
    # fetch once and reuse below (previously queried twice)
    settings = queries.get_app_interface_settings()
    if not saas_files:
        logging.error("no saas files found")
        sys.exit(ExitCodes.ERROR)

    # notify different outputs (publish results, slack notifications)
    # we only do this if:
    # - this is not a dry run
    # - there is a single saas file deployed
    notify = not dry_run and len(saas_files) == 1
    if notify:
        saas_file = saas_files[0]
        slack_info = saas_file.get("slack")
        if slack_info:
            slack = slackapi_from_slack_workspace(
                slack_info,
                settings,
                QONTRACT_INTEGRATION,
                init_usergroups=False,
            )
            # support built-in start and end slack notifications
            # only in v2 saas files
            if saas_file["apiVersion"] == "v2":
                ri = ResourceInventory()
                console_url = compose_console_url(saas_file, saas_file_name, env_name)
                # deployment result notification.
                # NOTE: the lambda deliberately closes over the *variable*
                # `ri`, which is rebound by fetch_current_state below, so the
                # deferred notification reports the final inventory state.
                defer(
                    lambda: slack_notify(
                        saas_file_name,
                        env_name,
                        slack,
                        ri,
                        console_url,
                        in_progress=False,
                    )
                )
                # deployment start notification
                slack_notifications = slack_info.get("notifications")
                if slack_notifications and slack_notifications.get("start"):
                    slack_notify(
                        saas_file_name,
                        env_name,
                        slack,
                        ri,
                        console_url,
                        in_progress=True,
                    )
        else:
            slack = None

    instance = queries.get_gitlab_instance()
    # instance exists in v1 saas files only
    desired_jenkins_instances = [
        s["instance"]["name"] for s in saas_files if s.get("instance")
    ]
    jenkins_map = jenkins_base.get_jenkins_map(
        desired_instances=desired_jenkins_instances
    )
    accounts = queries.get_aws_accounts()
    try:
        gl = GitLabApi(instance, settings=settings)
    except Exception:
        # allow execution without access to gitlab
        # as long as there are no access attempts.
        gl = None

    saasherder = SaasHerder(
        saas_files,
        thread_pool_size=thread_pool_size,
        gitlab=gl,
        integration=QONTRACT_INTEGRATION,
        integration_version=QONTRACT_INTEGRATION_VERSION,
        settings=settings,
        jenkins_map=jenkins_map,
        accounts=accounts,
    )
    if len(saasherder.namespaces) == 0:
        logging.warning("no targets found")
        sys.exit(ExitCodes.SUCCESS)

    ri, oc_map = ob.fetch_current_state(
        namespaces=saasherder.namespaces,
        thread_pool_size=thread_pool_size,
        integration=QONTRACT_INTEGRATION,
        integration_version=QONTRACT_INTEGRATION_VERSION,
        init_api_resources=True,
        cluster_admin=saasherder.cluster_admin,
    )
    defer(oc_map.cleanup)
    saasherder.populate_desired_state(ri)

    # validate that this deployment is valid
    # based on promotion information in targets
    if not saasherder.validate_promotions():
        logging.error("invalid promotions")
        ri.register_error()
        sys.exit(ExitCodes.ERROR)

    # if saas_file_name is defined, the integration
    # is being called from multiple running instances
    actions = ob.realize_data(
        dry_run,
        oc_map,
        ri,
        thread_pool_size,
        caller=saas_file_name,
        wait_for_namespace=True,
        no_dry_run_skip_compare=(not saasherder.compare),
        take_over=saasherder.take_over,
    )

    if not dry_run:
        if saasherder.publish_job_logs:
            try:
                ob.follow_logs(oc_map, actions, io_dir)
            except Exception as e:
                logging.error(str(e))
                ri.register_error()
        try:
            ob.validate_data(oc_map, actions)
        except Exception as e:
            logging.error(str(e))
            ri.register_error()

    # publish results of this deployment
    # based on promotion information in targets
    success = not ri.has_error_registered()
    # only publish promotions for deployment jobs (a single saas file)
    if notify:
        # Auto-promote next stages only if there are changes in the
        # promoting stage. This prevents trigger promotions on job re-runs
        auto_promote = len(actions) > 0
        mr_cli = mr_client_gateway.init(gitlab_project_id=gitlab_project_id)
        saasherder.publish_promotions(success, all_saas_files, mr_cli, auto_promote)

    if not success:
        sys.exit(ExitCodes.ERROR)

    # send human readable notifications to slack
    # we only do this if:
    # - this is not a dry run
    # - there is a single saas file deployed
    # - output is 'events'
    # - no errors were registered
    if notify and slack and actions and slack_info.get("output") == "events":
        for action in actions:
            message = (
                f"[{action['cluster']}] "
                + f"{action['kind']} {action['name']} {action['action']}"
            )
            slack.chat_post_message(message)
class TestConfigHashPromotionsValidation(TestCase):
    """ TestCase to test SaasHerder promotions validation. SaasHerder is
    initialized with ResourceInventory population. Like is done in
    openshift-saas-deploy"""

    # class-level fixtures shared by every test (set in setUpClass)
    cluster: str
    namespace: str
    fxt: Any
    template: Any

    @classmethod
    def setUpClass(cls):
        # fixture loader rooted at the 'saasherder' fixtures directory
        cls.fxt = Fixtures('saasherder')
        cls.cluster = "test-cluster"
        cls.template = cls.fxt.get_anymarkup('template_1.yml')

    def setUp(self) -> None:
        """Build a SaasHerder with external collaborators patched out and
        populate a ResourceInventory, mirroring openshift-saas-deploy."""
        self.all_saas_files = \
            [self.fxt.get_anymarkup('saas.gql.yml')]

        # patch State so no real state backend (S3) is touched
        self.state_patcher = \
            patch("reconcile.utils.saasherder.State", autospec=True)
        self.state_mock = self.state_patcher.start().return_value

        # avoid real github/image-registry access during SaasHerder init
        self.ig_patcher = \
            patch.object(SaasHerder, "_initiate_github", autospec=True)
        self.ig_patcher.start()
        self.image_auth_patcher = \
            patch.object(SaasHerder, "_initiate_image_auth")
        self.image_auth_patcher.start()

        # serve the fixture template instead of fetching file contents
        self.gfc_patcher = \
            patch.object(SaasHerder, "_get_file_contents", autospec=True)
        gfc_mock = self.gfc_patcher.start()

        self.saas_file = \
            self.fxt.get_anymarkup('saas.gql.yml')
        # ApiVersion is set in the saas gql query method in queries module
        self.saas_file["apiVersion"] = "v2"

        gfc_mock.return_value = (self.template, "url", "ahash")

        self.deploy_current_state_fxt = \
            self.fxt.get_anymarkup('saas_deploy.state.json')

        self.post_deploy_current_state_fxt = \
            self.fxt.get_anymarkup('saas_post_deploy.state.json')

        self.saasherder = SaasHerder(
            [self.saas_file],
            thread_pool_size=1,
            gitlab=None,
            integration='',
            integration_version='',
            accounts={"name": "test-account"},  # Initiates State in SaasHerder
            settings={"hashLength": 24})

        # IMPORTANT: Populating desired state modify self.saas_files within
        # saasherder object.
        self.ri = ResourceInventory()
        for ns in ["test-ns-publisher", "test-ns-subscriber"]:
            for kind in ["Service", "Deployment"]:
                self.ri.initialize_resource_type(self.cluster, ns, kind)

        self.saasherder.populate_desired_state(self.ri)
        if self.ri.has_error_registered():
            raise Exception("Errors registered in Resourceinventory")

    def tearDown(self):
        """Stop every patcher started in setUp."""
        self.state_patcher.stop()
        self.ig_patcher.stop()
        # fix: image_auth_patcher was started in setUp but never stopped,
        # leaking the patch of SaasHerder._initiate_image_auth across tests
        self.image_auth_patcher.stop()
        self.gfc_patcher.stop()

    def test_config_hash_is_filled(self):
        """ Ensures the get_config_diff_saas_file fills the promotion data
            on the publisher target. This data is used in publish_promotions
            method to add the hash to subscribed targets.
            IMPORTANT: This is not the promotion_data within promotion. This
            fields are set by _process_template method in saasherder
        """
        job_spec = \
            self.saasherder.get_configs_diff_saas_file(self.saas_file)[0]
        promotion = job_spec["target_config"]["promotion"]
        self.assertIsNotNone(promotion[TARGET_CONFIG_HASH])

    def test_promotion_state_config_hash_match_validates(self):
        """ A promotion is valid if the pusblisher state got from the state
            is equal to the one set in the subscriber target promotion data.
            This is the happy path, publisher job state target config hash is
            the same set in the subscriber job
        """
        configs = \
            self.saasherder.get_saas_targets_config(self.saas_file)
        tcs = list(configs.values())
        publisher_config_hash = tcs[0]['promotion'][TARGET_CONFIG_HASH]

        publisher_state = {
            "success": True,
            "saas_file": self.saas_file["name"],
            TARGET_CONFIG_HASH: publisher_config_hash
        }
        self.state_mock.get.return_value = publisher_state
        result = self.saasherder.validate_promotions(self.all_saas_files)
        self.assertTrue(result)

    def test_promotion_state_config_hash_not_match_no_validates(self):
        """ Promotion is not valid if the parent target config hash set in
            promotion data is not the same set in the publisher job state.
            This could happen if a new publisher job has before the
            subscriber job
        """
        publisher_state = {
            "success": True,
            "saas_file": self.saas_file["name"],
            TARGET_CONFIG_HASH: "will_not_match"
        }
        self.state_mock.get.return_value = publisher_state
        result = self.saasherder.validate_promotions(self.all_saas_files)
        self.assertFalse(result)

    def test_promotion_without_state_config_hash_validates(self):
        """ Existent states won't have promotion data. If there is an ongoing
            promotion, this ensures it will happen.
        """
        promotion_result = {
            "success": True,
        }
        self.state_mock.get.return_value = promotion_result
        result = self.saasherder.validate_promotions(self.all_saas_files)
        self.assertTrue(result)
def run(dry_run, thread_pool_size=10, io_dir='throughput/',
        saas_file_name=None, env_name=None, gitlab_project_id=None,
        defer=None):
    """Realize the desired state of the selected saas files.

    Loads saas files (optionally filtered by name/environment), builds the
    desired state via SaasHerder, applies it to the clusters, then publishes
    promotion results and emits slack event messages for single-saas-file
    deployment jobs.
    """
    all_saas_files = queries.get_saas_files()
    saas_files = queries.get_saas_files(saas_file_name, env_name)
    if not saas_files:
        logging.error('no saas files found')
        sys.exit(ExitCodes.ERROR)

    gitlab_instance = queries.get_gitlab_instance()
    wanted_instances = [item['instance']['name'] for item in saas_files]
    jenkins_map = jenkins_base.get_jenkins_map(
        desired_instances=wanted_instances)
    settings = queries.get_app_interface_settings()
    accounts = queries.get_aws_accounts()
    try:
        gitlab_api = GitLabApi(gitlab_instance, settings=settings)
    except Exception:
        # allow execution without access to gitlab
        # as long as there are no access attempts.
        gitlab_api = None

    saasherder = SaasHerder(
        saas_files,
        thread_pool_size=thread_pool_size,
        gitlab=gitlab_api,
        integration=QONTRACT_INTEGRATION,
        integration_version=QONTRACT_INTEGRATION_VERSION,
        settings=settings,
        jenkins_map=jenkins_map,
        accounts=accounts)
    if not saasherder.namespaces:
        logging.warning('no targets found')
        sys.exit(ExitCodes.SUCCESS)

    ri, oc_map = ob.fetch_current_state(
        namespaces=saasherder.namespaces,
        thread_pool_size=thread_pool_size,
        integration=QONTRACT_INTEGRATION,
        integration_version=QONTRACT_INTEGRATION_VERSION,
        init_api_resources=True)
    defer(oc_map.cleanup)
    saasherder.populate_desired_state(ri)

    # deployments are only valid if the promotion information
    # in the targets allows them
    if not saasherder.validate_promotions():
        logging.error('invalid promotions')
        sys.exit(ExitCodes.ERROR)

    # a defined saas_file_name means the integration is being
    # called from multiple running instances
    actions = ob.realize_data(
        dry_run, oc_map, ri,
        caller=saas_file_name,
        wait_for_namespace=True,
        no_dry_run_skip_compare=(not saasherder.compare),
        take_over=saasherder.take_over)

    if not dry_run:
        if saasherder.publish_job_logs:
            try:
                ob.follow_logs(oc_map, actions, io_dir)
            except Exception as e:
                logging.error(str(e))
                ri.register_error()
        try:
            ob.validate_data(oc_map, actions)
        except Exception as e:
            logging.error(str(e))
            ri.register_error()

    # publish results of this deployment
    # based on promotion information in targets
    success = not ri.has_error_registered()
    # promotions are only published for deployment jobs (single saas file)
    if not dry_run and len(saasherder.saas_files) == 1:
        mr_cli = mr_client_gateway.init(gitlab_project_id=gitlab_project_id)
        saasherder.publish_promotions(success, all_saas_files, mr_cli)

    if not success:
        sys.exit(ExitCodes.ERROR)

    # send human readable notifications to slack
    # we only do this if:
    # - this is not a dry run
    # - there is a single saas file deployed
    # - output is 'events'
    # - no errors were registered
    if not dry_run and len(saasherder.saas_files) == 1:
        saas_file = saasherder.saas_files[0]
        slack_info = saas_file.get('slack')
        if slack_info and actions and slack_info.get('output') == 'events':
            slack = init_slack(slack_info, QONTRACT_INTEGRATION,
                               init_usergroups=False)
            for action in actions:
                message = \
                    f"[{action['cluster']}] " + \
                    f"{action['kind']} {action['name']} {action['action']}"
                slack.chat_post_message(message)