def run(dry_run, io_dir='throughput/', print_only=False,
        config_name=None, job_name=None, instance_name=None, defer=None):
    if not print_only and config_name is not None:
        raise Exception("--config-name can only be used with --print-only mode")
    jjb, additional_repo_urls = \
        init_jjb(instance_name, config_name, print_only)
    defer(jjb.cleanup)

    if print_only:
        jjb.print_jobs(job_name=job_name)
        if config_name is not None:
            jjb.generate(io_dir, 'printout')
        sys.exit(0)

    accounts = queries.get_aws_accounts()
    state = State(integration=QONTRACT_INTEGRATION,
                  accounts=accounts,
                  settings=jjb.settings)

    if dry_run:
        validate_repos_and_admins(jjb, additional_repo_urls)
        jjb.generate(io_dir, 'desired')
        jjb.overwrite_configs(state)
        jjb.generate(io_dir, 'current')
        jjb.print_diffs(io_dir, instance_name)
    else:
        jjb.update()
        configs = jjb.get_configs()
        for name, desired_config in configs.items():
            state.add(name, value=desired_config, force=True)

def run(dry_run, thread_pool_size=10, internal=None, use_jump_host=True, defer=None):
    settings = queries.get_app_interface_settings()
    accounts = queries.get_state_aws_accounts()
    clusters = [c for c in queries.get_clusters(minimal=True) if c.get("ocm")]
    oc_map = OC_Map(
        clusters=clusters,
        integration=QONTRACT_INTEGRATION,
        settings=settings,
        internal=internal,
        use_jump_host=use_jump_host,
        thread_pool_size=thread_pool_size,
    )
    defer(oc_map.cleanup)
    state = State(
        integration=QONTRACT_INTEGRATION, accounts=accounts, settings=settings
    )

    if not dry_run:
        slack = slackapi_from_queries(QONTRACT_INTEGRATION)

    now = datetime.utcnow()
    for cluster in oc_map.clusters(include_errors=True):
        oc = oc_map.get(cluster)
        if not oc:
            logging.log(level=oc.log_level, msg=oc.message)
            continue
        upgrade_config = oc.get(
            namespace="openshift-managed-upgrade-operator",
            kind="UpgradeConfig",
            allow_not_found=True,
        )["items"]
        if not upgrade_config:
            logging.debug(f"[{cluster}] UpgradeConfig not found.")
            continue
        # exactly one UpgradeConfig is expected per cluster
        [upgrade_config] = upgrade_config

        upgrade_spec = upgrade_config["spec"]
        upgrade_at = upgrade_spec["upgradeAt"]
        version = upgrade_spec["desired"]["version"]
        upgrade_at_obj = datetime.strptime(upgrade_at, "%Y-%m-%dT%H:%M:%SZ")
        state_key = f"{cluster}-{upgrade_at}"
        # notify on the first iteration in which 'now' has passed
        # the upgradeAt timestamp
        if upgrade_at_obj < now:
            if state.exists(state_key):
                # already notified
                continue
            logging.info(["cluster_upgrade", cluster])
            if not dry_run:
                state.add(state_key)
                usergroup = f"{cluster}-cluster"
                usergroup_id = slack.get_usergroup_id(usergroup)
                slack.chat_post_message(
                    f"Heads up <!subteam^{usergroup_id}>! "
                    f"cluster `{cluster}` is currently "
                    f"being upgraded to version `{version}`"
                )

def reconcile(self, dry_run: bool, state: State):
    name_to_id_state = state.get_all("")
    page_provider = self.get_page_provider()

    # restore component ids from state
    for desired in self.components:
        desired.component_id = name_to_id_state.get(desired.name)

    # delete
    id_to_name_state = {v: k for k, v in name_to_id_state.items()}
    desired_component_names = [c.name for c in self.components]
    for current_id in page_provider.component_ids():
        # if the component is known to the state management but not to the
        # desired state, it was once managed by this integration but has
        # since been deleted from app-interface -> delete it from the page
        name_for_current_component = id_to_name_state.get(current_id)
        if (name_for_current_component and
                name_for_current_component not in desired_component_names):
            LOG.info(f"delete component {name_for_current_component} "
                     f"from page {self.name}")
            page_provider.delete_component(dry_run, current_id)
            if not dry_run:
                state.rm(name_for_current_component)

    # create and update
    for desired in self.components:
        component_id = page_provider.apply_component(dry_run, desired)
        if component_id and desired.component_id != component_id:
            self._bind_component(dry_run, desired, component_id, state)

def test_ls_when_integration_is_empty_string(accounts, s3_client, mocker):
    s3_client.create_bucket(Bucket='some-bucket')
    s3_client.put_object(Bucket='some-bucket',
                         Key='state/integration-name-1/some-file-1',
                         Body='test')
    s3_client.put_object(Bucket='some-bucket',
                         Key='state/integration-name-2/some-file-2',
                         Body='test')
    s3_client.put_object(Bucket='some-bucket',
                         Key='state/integration-name-3/nested/some-file-2',
                         Body='test')

    mock_aws_api = mocker.patch('reconcile.utils.state.AWSApi', autospec=True)
    mock_aws_api.return_value \
        .get_session.return_value \
        .client.return_value = s3_client

    state = State('', accounts)

    keys = state.ls()

    expected = [
        '/integration-name-1/some-file-1',
        '/integration-name-2/some-file-2',
        '/integration-name-3/nested/some-file-2',
    ]

    assert keys == expected

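# The State tests in this collection rely on 'accounts' and 's3_client'
# fixtures that are not shown here. A minimal sketch of what they could look
# like, assuming moto < 5 is available ('mock_aws' replaces 'mock_s3' in
# moto 5); the account shape is a guess, and how State discovers the
# 'some-bucket' name is also not shown:
import boto3
import pytest
from moto import mock_s3


@pytest.fixture
def accounts():
    # hypothetical minimal account record
    return [{'name': 'some-account', 'resourcesDefaultRegion': 'us-east-1'}]


@pytest.fixture
def s3_client():
    with mock_s3():
        yield boto3.client('s3', region_name='us-east-1')
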
def run(dry_run):
    settings = queries.get_app_interface_settings()
    accounts = queries.get_aws_accounts()
    users = queries.get_users()
    state = State(
        integration=QONTRACT_INTEGRATION,
        accounts=accounts,
        settings=settings
    )
    smtp_client = SmtpClient(settings=settings)
    mails = smtp_client.get_mails(
        criteria='SUBJECT "Sentry Access Request"',
        folder='[Gmail]/Sent Mail'
    )
    user_names = get_sentry_users_from_mails(mails)
    if not dry_run:
        slack = init_slack_workspace(QONTRACT_INTEGRATION,
                                     init_usergroups=False)
    for user_name in user_names:
        guesses = guess_user(user_name, users)
        if not guesses:
            logging.debug(f'no users guessed for {user_name}')
            continue
        slack_username = \
            guesses[0].get('slack_username') or guesses[0]['org_username']
        if state.exists(slack_username):
            continue
        logging.info(['help_user', slack_username])
        if not dry_run:
            state.add(slack_username)
            slack.chat_post_message(
                f'yo <@{slack_username}>! it appears that you have '
                'requested access to a project in Sentry. '
                'access is managed automatically via app-interface. '
                'check out https://url.corp.redhat.com/sentry-help'
            )

def realize(
    inventory: LabelInventory,
    state: State,
    oc_map: OC_Map,
    dry_run: bool,
    thread_pool_size: int,
) -> None:
    """Apply the changes in the state store and on the namespaces"""
    for cluster, namespace, types in inventory:
        if inventory.errors(cluster, namespace):
            continue
        upd_managed = types.get(UPDATED_MANAGED, [])
        if upd_managed:
            key = state_key(cluster, namespace)
            _LOG.debug(f"Updating state store: {key}: {upd_managed}")
            if not dry_run:
                state.add(key, upd_managed, force=True)

    # potential exceptions will get raised up
    threaded.run(
        label,
        inventory,
        thread_pool_size,
        oc_map=oc_map,
        dry_run=dry_run,
        inventory=inventory,
    )

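# state_key() is used by realize() above and get_managed() further below but
# is not defined in this collection. Its call sites only require a stable
# per-namespace key under the integration's state prefix; a minimal sketch,
# where the exact key format is an assumption:
def state_key(cluster: str, ns_name: str) -> str:
    # compared against state.ls() output, so it must stay stable across runs
    return f"{cluster}/{ns_name}"
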
def test_ls_when_there_are_more_than_1000_keys(accounts, s3_client, mocker):
    s3_client.create_bucket(Bucket='some-bucket')

    expected = []
    # put more than 1000 keys, since S3 lists at most 1000 per page
    for i in range(0, 1010):
        key = f'/some-file-{i}'
        expected.append(key)
        s3_client.put_object(Bucket='some-bucket',
                             Key=f'state/integration{key}',
                             Body=f'{i}')

    # S3 returns keys sorted lexicographically
    expected.sort()

    mock_aws_api = mocker.patch('reconcile.utils.state.AWSApi', autospec=True)
    mock_aws_api.return_value \
        .get_session.return_value \
        .client.return_value = s3_client

    state = State('integration', accounts)

    keys = state.ls()

    assert keys == expected

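# The test above exists because S3's list_objects_v2 returns at most 1000 keys
# per call, so State.ls() has to paginate. A minimal sketch of such a listing
# with a boto3 paginator, using the 'state/<integration>' key layout from the
# tests (an illustration, not the real implementation):
def list_all_keys(client, bucket, prefix):
    """Return all keys under prefix, with the prefix stripped."""
    paginator = client.get_paginator('list_objects_v2')
    keys = []
    for page in paginator.paginate(Bucket=bucket, Prefix=prefix):
        # 'Contents' is absent from pages without results
        keys.extend(obj['Key'] for obj in page.get('Contents', []))
    # e.g. 'state/integration/some-file-1' -> '/some-file-1'
    return [key[len(prefix):] for key in keys]
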
def run(dry_run):
    accounts = queries.get_state_aws_accounts(reset_passwords=True)
    settings = queries.get_app_interface_settings()
    state = State(integration=QONTRACT_INTEGRATION,
                  accounts=accounts,
                  settings=settings)
    for a in accounts:
        aws_api = None
        account_name = a['name']
        reset_passwords = a.get('resetPasswords')
        if not reset_passwords:
            continue
        for r in reset_passwords:
            user_name = r['user']['org_username']
            request_id = r['requestId']
            state_key = f"{account_name}/{user_name}/{request_id}"
            if state.exists(state_key):
                continue
            logging.info(['reset_password', account_name, user_name])
            if dry_run:
                continue

            if aws_api is None:
                aws_api = AWSApi(1, [a], settings=settings)
            aws_api.reset_password(account_name, user_name)
            aws_api.reset_mfa(account_name, user_name)
            state.add(state_key)

def test_ls_when_integration_is_empty_string(accounts, s3_client, mocker):
    s3_client.create_bucket(Bucket="some-bucket")
    s3_client.put_object(
        Bucket="some-bucket", Key="state/integration-name-1/some-file-1", Body="test"
    )
    s3_client.put_object(
        Bucket="some-bucket", Key="state/integration-name-2/some-file-2", Body="test"
    )
    s3_client.put_object(
        Bucket="some-bucket",
        Key="state/integration-name-3/nested/some-file-2",
        Body="test",
    )
    mock_aws_api = mocker.patch("reconcile.utils.state.AWSApi", autospec=True)
    mock_aws_api.return_value.get_session.return_value.client.return_value = s3_client
    state = State("", accounts)

    keys = state.ls()

    expected = [
        "/integration-name-1/some-file-1",
        "/integration-name-2/some-file-2",
        "/integration-name-3/nested/some-file-2",
    ]
    assert keys == expected

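# Note the layout these ls() tests pin down: objects live at
# "state/<integration>/<key>". State(integration).ls() evidently strips the
# "state/<integration>" prefix (just "state" when integration is the empty
# string) while keeping the leading "/", which is why the integration names
# survive as the first path component in the expected keys above.
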
def _bind_component(self, dry_run: bool, component: StatusComponent,
                    component_id: str, state: State) -> None:
    LOG.info(f"bind component {component.name} to ID {component_id} "
             f"on page {self.name}")
    if not dry_run:
        state.add(component.name, component_id, force=True)
        component.component_id = component_id

def get_version_history(dry_run, upgrade_policies, ocm_map):
    """Get a summary of the versions history per OCM instance

    Args:
        dry_run (bool): when True, do not save the updated history
            to the remote state
        upgrade_policies (list): query results of clusters upgrade policies
        ocm_map (OCMMap): OCM clients per OCM instance

    Returns:
        dict: version history per OCM instance
    """
    settings = queries.get_app_interface_settings()
    accounts = queries.get_state_aws_accounts()
    state = State(
        integration=QONTRACT_INTEGRATION, accounts=accounts, settings=settings
    )
    results = {}
    # we keep a remote state per OCM instance
    for ocm_name in ocm_map.instances():
        history = state.get(ocm_name, {})
        update_history(history, upgrade_policies)
        results[ocm_name] = history
        if not dry_run:
            state.add(ocm_name, history, force=True)

    return results

def test_exists_for_missing_key(accounts, s3_client, mocker):
    s3_client.create_bucket(Bucket="some-bucket")
    mock_aws_api = mocker.patch("reconcile.utils.state.AWSApi", autospec=True)
    mock_aws_api.return_value.get_session.return_value.client.return_value = s3_client
    state = State("integration-name", accounts)

    assert not state.exists("some-key")

def run(dry_run, thread_pool_size=10, internal=None,
        use_jump_host=True, defer=None):
    settings = queries.get_app_interface_settings()
    accounts = queries.get_aws_accounts()
    clusters = [c for c in queries.get_clusters(minimal=True) if c.get('ocm')]
    oc_map = OC_Map(clusters=clusters,
                    integration=QONTRACT_INTEGRATION,
                    settings=settings,
                    internal=internal,
                    use_jump_host=use_jump_host,
                    thread_pool_size=thread_pool_size)
    defer(oc_map.cleanup)
    state = State(integration=QONTRACT_INTEGRATION,
                  accounts=accounts,
                  settings=settings)

    if not dry_run:
        slack = init_slack_workspace(QONTRACT_INTEGRATION)

    now = datetime.utcnow()
    for cluster in oc_map.clusters(include_errors=True):
        oc = oc_map.get(cluster)
        if not oc:
            logging.log(level=oc.log_level, msg=oc.message)
            continue
        upgrade_config = oc.get(
            namespace='openshift-managed-upgrade-operator',
            kind='UpgradeConfig',
            allow_not_found=True)['items']
        if not upgrade_config:
            logging.debug(f'[{cluster}] UpgradeConfig not found.')
            continue
        # exactly one UpgradeConfig is expected per cluster
        [upgrade_config] = upgrade_config

        upgrade_spec = upgrade_config['spec']
        upgrade_at = upgrade_spec['upgradeAt']
        version = upgrade_spec['desired']['version']
        upgrade_at_obj = datetime.strptime(upgrade_at, '%Y-%m-%dT%H:%M:%SZ')
        state_key = f'{cluster}-{upgrade_at}'
        # notify on the first iteration in which 'now' has passed
        # the upgradeAt timestamp
        if upgrade_at_obj < now:
            if state.exists(state_key):
                # already notified
                continue
            logging.info(['cluster_upgrade', cluster])
            if not dry_run:
                state.add(state_key)
                usergroup = f'{cluster}-cluster'
                usergroup_id = slack.get_usergroup_id(usergroup)
                slack.chat_post_message(
                    f'Heads up <!subteam^{usergroup_id}>! '
                    f'cluster `{cluster}` is currently '
                    f'being upgraded to version `{version}`')

def test_exists_for_forbidden(accounts, s3_client, mocker):
    forbidden_error = ClientError({"Error": {"Code": "403"}}, None)
    mock_aws_api = mocker.patch("reconcile.utils.state.AWSApi", autospec=True)
    mock_aws_api.return_value.get_session.return_value.client.return_value \
        .head_object.side_effect = forbidden_error

    state = State("integration-name", accounts)

    with pytest.raises(StateInaccessibleException, match=r".*403.*"):
        state.exists("some-key")

def test_ls_when_state_is_empty(accounts, s3_client, mocker):
    s3_client.create_bucket(Bucket="some-bucket")
    mock_aws_api = mocker.patch("reconcile.utils.state.AWSApi", autospec=True)
    mock_aws_api.return_value.get_session.return_value.client.return_value = s3_client
    state = State("integration-name", accounts)

    keys = state.ls()

    assert keys == []

def ls(ctx, integration):
    settings = queries.get_app_interface_settings()
    accounts = queries.get_aws_accounts()
    state = State(integration, accounts, settings=settings)
    keys = state.ls()
    # if 'integration' is defined, the 0th token is empty
    table_content = [
        {'integration': k.split('/')[0] or integration,
         'key': '/'.join(k.split('/')[1:])}
        for k in keys]
    print_output('table', table_content, ['integration', 'key'])

def ls(ctx, integration):
    settings = queries.get_app_interface_settings()
    accounts = queries.get_state_aws_accounts()
    state = State(integration, accounts, settings=settings)
    keys = state.ls()
    # if 'integration' is not defined, token 1 is the integration name
    # and the key starts at token 2
    key_index = 1 if integration else 2
    table_content = [
        {'integration': integration or k.split('/')[1],
         'key': '/'.join(k.split('/')[key_index:])}
        for k in keys]
    print_output({'output': 'table', 'sort': False},
                 table_content, ['integration', 'key'])

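# The two ls() commands above parse the same State.ls() output differently,
# which suggests the key layout changed between them: the older variant
# (first) expected '<integration>/<key>' when no integration was given, while
# the newer one expects '/<integration>/<key>' with the leading slash kept,
# matching the ls() tests in this collection. Hence key_index: token 1 holds
# the integration name and the key starts at token 2.
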
def run(dry_run):
    settings = queries.get_app_interface_settings()
    accounts = queries.get_aws_accounts()
    smtp_client = SmtpClient(settings=settings)
    state = State(integration=QONTRACT_INTEGRATION,
                  accounts=accounts,
                  settings=settings)
    credentials_requests = queries.get_credentials_requests()

    # validate that no 2 requests have the same name
    credentials_requests_names = \
        {r['name'] for r in credentials_requests}
    if len(credentials_requests) != len(credentials_requests_names):
        logging.error('request names must be unique.')
        sys.exit(1)

    error = False

    credentials_requests_to_send = \
        [r for r in credentials_requests if not state.exists(r['name'])]
    for credentials_request_to_send in credentials_requests_to_send:
        user = credentials_request_to_send['user']
        org_username = user['org_username']
        public_gpg_key = user.get('public_gpg_key')
        credentials_name = credentials_request_to_send['credentials']
        if not public_gpg_key:
            error = True
            logging.error(
                f"user {org_username} does not have a public gpg key")
            continue
        logging.info(['send_credentials', org_username, credentials_name])

        if not dry_run:
            request_name = credentials_request_to_send['name']
            names = [org_username]
            subject = request_name
            encrypted_credentials = get_encrypted_credentials(
                credentials_name, user, settings, smtp_client)
            if not encrypted_credentials:
                error = True
                logging.error(
                    f"could not get encrypted credentials {credentials_name}")
                continue
            body = MESSAGE_TEMPLATE.format(
                request_name, credentials_name, encrypted_credentials)
            smtp_client.send_mail(names, subject, body)
            state.add(request_name)

    if error:
        sys.exit(1)

def test_exists_for_existing_key(accounts, s3_client, mocker):
    key = "some-key"
    s3_client.create_bucket(Bucket="some-bucket")
    s3_client.put_object(
        Bucket="some-bucket", Key=f"state/integration-name/{key}", Body="test"
    )
    mock_aws_api = mocker.patch("reconcile.utils.state.AWSApi", autospec=True)
    mock_aws_api.return_value.get_session.return_value.client.return_value = s3_client
    state = State("integration-name", accounts)

    assert state.exists(key)

def run(dry_run):
    settings = queries.get_app_interface_settings()
    accounts = queries.get_state_aws_accounts()
    smtp_client = SmtpClient(settings=settings)
    state = State(
        integration=QONTRACT_INTEGRATION, accounts=accounts, settings=settings
    )
    credentials_requests = queries.get_credentials_requests()

    # validate that no 2 requests have the same name
    credentials_requests_names = {r["name"] for r in credentials_requests}
    if len(credentials_requests) != len(credentials_requests_names):
        logging.error("request names must be unique.")
        sys.exit(1)

    error = False

    credentials_requests_to_send = [
        r for r in credentials_requests if not state.exists(r["name"])
    ]
    for credentials_request_to_send in credentials_requests_to_send:
        try:
            user = credentials_request_to_send["user"]
            credentials_name = credentials_request_to_send["credentials"]
            org_username = user["org_username"]
            logging.info(["send_credentials", org_username, credentials_name])
            request_name = credentials_request_to_send["name"]
            names = [org_username]
            subject = request_name
            encrypted_credentials = get_encrypted_credentials(
                credentials_name, user, settings
            )
            if not dry_run:
                body = MESSAGE_TEMPLATE.format(
                    request_name, credentials_name, encrypted_credentials
                )
                smtp_client.send_mail(names, subject, body)
                state.add(request_name)
        except KeyError:
            logging.exception(
                f"Bad user details for {org_username} - {credentials_name}"
            )
            error = True
        except CalledProcessError as e:
            logging.exception(
                f"Failed to handle GPG key for {org_username} "
                f"({credentials_name}): {e.stdout}"
            )
            error = True

    if error:
        sys.exit(1)

def run(dry_run):
    jira_boards = [j for j in queries.get_jira_boards() if j.get('slack')]
    accounts = queries.get_state_aws_accounts()
    settings = queries.get_app_interface_settings()
    state = State(
        integration=QONTRACT_INTEGRATION,
        accounts=accounts,
        settings=settings
    )
    for index, jira_board in enumerate(jira_boards):
        if not is_in_shard_round_robin(jira_board['name'], index):
            continue
        jira, current_state = fetch_current_state(jira_board, settings)
        if not current_state:
            logging.warning(
                'not acting on empty Jira boards. '
                'please create a ticket to get started.'
            )
            continue
        previous_state = fetch_previous_state(state, jira.project)
        if previous_state:
            diffs = calculate_diff(jira.server, current_state, previous_state)
            act(dry_run, jira_board, diffs)
        if not dry_run:
            write_state(state, jira.project, current_state)

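# fetch_previous_state() and write_state() are not shown here; from their call
# sites they look like thin wrappers over the state store, keyed by Jira
# project. A plausible sketch (the names appear in the source, the bodies are
# assumptions):
def fetch_previous_state(state, project):
    return state.get(project, None)


def write_state(state, project, board_state):
    state.add(project, value=board_state, force=True)
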
def test_exists_for_missing_bucket(accounts, s3_client, mocker):
    # don't create a bucket, unlike in all the other tests
    mock_aws_api = mocker.patch("reconcile.utils.state.AWSApi", autospec=True)
    mock_aws_api.return_value.get_session.return_value.client.return_value = s3_client

    with pytest.raises(StateInaccessibleException, match=r".*404.*"):
        State("integration-name", accounts)

def test_exists_for_existing_key(accounts, s3_client, mocker):
    key = 'some-key'
    s3_client.create_bucket(Bucket='some-bucket')
    s3_client.put_object(Bucket='some-bucket',
                         Key=f'state/integration-name/{key}',
                         Body='test')

    mock_aws_api = mocker.patch('reconcile.utils.state.AWSApi', autospec=True)
    mock_aws_api.return_value \
        .get_session.return_value \
        .client.return_value = s3_client

    state = State('integration-name', accounts)

    assert state.exists(key)

def get_managed(inventory: LabelInventory, state: State) -> None:
    """
    Fill the label inventory with the list of currently managed labels
    for each cluster & namespace. This information is retrieved from the
    state store provided in input
    """
    keys = state.ls()
    # we could run threaded here: probably faster, but more parallel requests
    for cluster, ns_name, types in inventory:
        if types.get(DESIRED) is None:
            continue
        key = state_key(cluster, ns_name)
        if f"/{key}" not in keys:
            continue
        managed = state.get(key, [])
        inventory.set(cluster, ns_name, MANAGED, managed)

def update_component_status(self, dry_run: bool, component_name: str,
                            component_status: str, state: State) -> None:
    component_id = state.get(component_name)
    if component_id:
        page_provider = self.get_page_provider()
        page_provider.update_component_status(dry_run, component_id,
                                              component_status)
    else:
        raise ValueError(f"component {component_name} unknown")

def run(dry_run, io_dir='throughput/', defer=None):
    jjb, additional_repo_urls = init_jjb()
    defer(jjb.cleanup)
    accounts = queries.get_aws_accounts()
    state = State(integration=QONTRACT_INTEGRATION,
                  accounts=accounts,
                  settings=jjb.settings)

    if dry_run:
        validate_repos_and_admins(jjb, additional_repo_urls)
        jjb.generate(io_dir, 'desired')
        jjb.overwrite_configs(state)
        jjb.generate(io_dir, 'current')
        jjb.print_diffs(io_dir)
    else:
        jjb.update()
        configs = jjb.get_configs()
        for name, desired_config in configs.items():
            state.add(name, value=desired_config, force=True)

def test_ls_returns_correct_file(accounts, s3_client, mocker):
    s3_client.create_bucket(Bucket="some-bucket")
    s3_client.put_object(
        Bucket="some-bucket", Key="state/integration-name/some-file-1", Body="test"
    )
    # create some-file-2 to verify that integrations with similar
    # names do not get mixed up
    s3_client.put_object(
        Bucket="some-bucket", Key="state/integration-name-2/some-file-2", Body="test"
    )
    mock_aws_api = mocker.patch("reconcile.utils.state.AWSApi", autospec=True)
    mock_aws_api.return_value.get_session.return_value.client.return_value = s3_client
    state = State("integration-name", accounts)

    keys = state.ls()

    expected = ["/some-file-1"]
    assert keys == expected

def test_ls_returns_correct_file(accounts, s3_client, mocker):
    s3_client.create_bucket(Bucket='some-bucket')
    s3_client.put_object(Bucket='some-bucket',
                         Key='state/integration-name/some-file-1',
                         Body='test')
    # create some-file-2 to verify that integrations with similar
    # names do not get mixed up
    s3_client.put_object(Bucket='some-bucket',
                         Key='state/integration-name-2/some-file-2',
                         Body='test')

    mock_aws_api = mocker.patch('reconcile.utils.state.AWSApi', autospec=True)
    mock_aws_api.return_value \
        .get_session.return_value \
        .client.return_value = s3_client

    state = State('integration-name', accounts)

    keys = state.ls()

    expected = ['/some-file-1']

    assert keys == expected

def run(dry_run):
    settings = queries.get_app_interface_settings()
    accounts = queries.get_aws_accounts()
    state = State(integration=QONTRACT_INTEGRATION,
                  accounts=accounts,
                  settings=settings)
    emails = queries.get_app_interface_emails()
    smtp_client = SmtpClient(settings=settings)

    # validate that no 2 emails have the same name
    email_names = {e['name'] for e in emails}
    if len(emails) != len(email_names):
        logging.error('email names must be unique.')
        sys.exit(1)

    emails_to_send = [e for e in emails if not state.exists(e['name'])]
    for email in emails_to_send:
        logging.info(['send_email', email['name'], email['subject']])

        if not dry_run:
            names = collect_to(email['to'])
            subject = email['subject']
            body = email['body']
            smtp_client.send_mail(names, subject, body)
            state.add(email['name'])

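# Most run() functions in this collection share the same idempotency recipe:
# skip items whose key is already in the state store, act, then record the
# key so reruns stay quiet. Distilled into a generic sketch (all names here
# are illustrative, not from the source):
def process_once(state, items, key_fn, action, dry_run):
    for item in items:
        key = key_fn(item)
        if state.exists(key):
            # already handled by a previous run
            continue
        if not dry_run:
            action(item)
            state.add(key)
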
def run(
    dry_run: bool,
    thread_pool_size: int = 10,
    internal: Optional[bool] = None,
    use_jump_host: bool = True,
    defer=None,
    raise_errors=False,
):
    _LOG.debug("Collecting GQL data ...")
    namespaces = get_gql_namespaces_in_shard()

    inventory = LabelInventory()

    _LOG.debug("Initializing OC_Map ...")
    oc_map = get_oc_map(namespaces, internal, use_jump_host, thread_pool_size)
    defer(oc_map.cleanup)

    _LOG.debug("Collecting desired state ...")
    get_desired(inventory, oc_map, namespaces)

    settings = queries.get_app_interface_settings()
    accounts = queries.get_state_aws_accounts()
    state = State(
        integration=QONTRACT_INTEGRATION, accounts=accounts, settings=settings
    )
    _LOG.debug("Collecting managed state ...")
    get_managed(inventory, state)

    _LOG.debug("Collecting current state ...")
    get_current(inventory, oc_map, thread_pool_size)

    inventory.reconcile()

    realize(inventory, state, oc_map, dry_run, thread_pool_size)

    if inventory.has_any_error():
        error_messages = []
        for cluster, namespace, errs in inventory.iter_errors():
            for err in errs:
                msg = f"{cluster}/{namespace}: {err}"
                _LOG.error(msg)
                error_messages.append(msg)
        if raise_errors:
            raise NamespaceLabelError("\n".join(error_messages))
        sys.exit(1)

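# Every snippet in this collection drives the same small state-store surface.
# A typing.Protocol capturing the methods used above, with signatures inferred
# from the call sites rather than taken from reconcile.utils.state.State:
from typing import Any, Dict, List, Protocol


class StateStore(Protocol):
    def exists(self, key: str) -> bool: ...
    def ls(self) -> List[str]: ...
    def get(self, key: str, *default: Any) -> Any: ...
    def get_all(self, path: str) -> Dict[str, Any]: ...
    def add(self, key: str, value: Any = None, force: bool = False) -> None: ...
    def rm(self, key: str) -> None: ...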