Example No. 1
def get_version_history(dry_run, upgrade_policies, ocm_map):
    """Get a summary of versions history per OCM instance

    Args:
        dry_run (bool): if False, save updated history to remote state
        upgrade_policies (list): query results of clusters upgrade policies
        ocm_map (OCMMap): OCM clients per OCM instance

    Returns:
        dict: version history per OCM instance
    """
    settings = queries.get_app_interface_settings()
    accounts = queries.get_state_aws_accounts()
    state = State(
        integration=QONTRACT_INTEGRATION,
        accounts=accounts,
        settings=settings
    )
    results = {}
    # we keep a remote state per OCM instance
    for ocm_name in ocm_map.instances():
        history = state.get(ocm_name, {})
        update_history(history, upgrade_policies)
        results[ocm_name] = history
        if not dry_run:
            state.add(ocm_name, history, force=True)

    return results
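
This is a pattern that recurs throughout these examples: read a blob from the State store, mutate it in memory, and write it back only when not in dry-run mode. A minimal stand-in for the same read-modify-write shape, with a plain dict in place of the remote store and a caller-supplied callable in place of update_history (illustrative names only, not the project's API):

def collect_histories(dry_run, instances, remote, update):
    # remote: plain dict standing in for the per-integration State store
    # update: callable mutating one history blob (update_history stand-in)
    results = {}
    for name in instances:
        history = remote.get(name, {})
        update(history)
        results[name] = history
        if not dry_run:
            remote[name] = history  # mirrors state.add(name, history, force=True)
    return results

store = {}
collect_histories(False, ["ocm-prod"], store,
                  lambda h: h.setdefault("clusters", {}))
assert store == {"ocm-prod": {"clusters": {}}}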
Example No. 2
def run(dry_run,
        io_dir='throughput/',
        print_only=False,
        config_name=None,
        job_name=None,
        instance_name=None,
        defer=None):
    if not print_only and config_name is not None:
        raise Exception("--config-name must be used with --print-only mode")
    jjb, additional_repo_urls = \
        init_jjb(instance_name, config_name, print_only)
    defer(lambda: jjb.cleanup())

    if print_only:
        jjb.print_jobs(job_name=job_name)
        if config_name is not None:
            jjb.generate(io_dir, 'printout')
        sys.exit(0)

    accounts = queries.get_aws_accounts()
    state = State(integration=QONTRACT_INTEGRATION,
                  accounts=accounts,
                  settings=jjb.settings)

    if dry_run:
        validate_repos_and_admins(jjb, additional_repo_urls)
        jjb.generate(io_dir, 'desired')
        jjb.overwrite_configs(state)
        jjb.generate(io_dir, 'current')
        jjb.print_diffs(io_dir, instance_name)
    else:
        jjb.update()
        configs = jjb.get_configs()
        for name, desired_config in configs.items():
            state.add(name, value=desired_config, force=True)
Example No. 3
def run(dry_run):
    accounts = queries.get_state_aws_accounts(reset_passwords=True)
    settings = queries.get_app_interface_settings()
    state = State(integration=QONTRACT_INTEGRATION,
                  accounts=accounts,
                  settings=settings)

    for a in accounts:
        aws_api = None
        account_name = a['name']
        reset_passwords = a.get('resetPasswords')
        if not reset_passwords:
            continue
        for r in reset_passwords:
            user_name = r['user']['org_username']
            request_id = r['requestId']
            state_key = f"{account_name}/{user_name}/{request_id}"
            if state.exists(state_key):
                continue

            logging.info(['reset_password', account_name, user_name])
            if dry_run:
                continue

            if aws_api is None:
                aws_api = AWSApi(1, [a], settings=settings)

            aws_api.reset_password(account_name, user_name)
            aws_api.reset_mfa(account_name, user_name)
            state.add(state_key)
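
The reset-password loop is a run-at-most-once pattern: a composite key of account, user, and request id is checked with state.exists before acting and recorded with state.add afterwards, so reruns are no-ops while new request ids still trigger work. A self-contained sketch of that gate (the set-backed store and all names are illustrative):

class SeenKeys:
    # set-backed stand-in for the remote State store
    def __init__(self):
        self._seen = set()

    def exists(self, key):
        return key in self._seen

    def add(self, key):
        self._seen.add(key)

def process_once(state, account, user, request_id, action, dry_run=False):
    key = f"{account}/{user}/{request_id}"
    if state.exists(key):
        return False  # already handled in a previous run
    if dry_run:
        return True   # would have acted
    action()
    state.add(key)    # record completion so reruns are no-ops
    return True

state = SeenKeys()
assert process_once(state, "acct", "user", "r1", lambda: None) is True
assert process_once(state, "acct", "user", "r1", lambda: None) is False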
Example No. 4
def run(dry_run):
    settings = queries.get_app_interface_settings()
    accounts = queries.get_aws_accounts()
    users = queries.get_users()
    state = State(
        integration=QONTRACT_INTEGRATION,
        accounts=accounts,
        settings=settings
    )
    smtp_client = SmtpClient(settings=settings)
    mails = smtp_client.get_mails(
        criteria='SUBJECT "Sentry Access Request"',
        folder='[Gmail]/Sent Mail'
    )
    user_names = get_sentry_users_from_mails(mails)
    if not dry_run:
        slack = init_slack_workspace(QONTRACT_INTEGRATION,
                                     init_usergroups=False)
    for user_name in user_names:
        guesses = guess_user(user_name, users)
        if not guesses:
            logging.debug(f'no users guessed for {user_name}')
            continue
        slack_username = \
            guesses[0].get('slack_username') or guesses[0]['org_username']
        if state.exists(slack_username):
            continue
        logging.info(['help_user', slack_username])
        if not dry_run:
            state.add(slack_username)
            slack.chat_post_message(
                f'yo <@{slack_username}>! it appears that you have ' +
                'requested access to a project in Sentry. ' +
                'Access is managed automatically via app-interface. ' +
                'Check out https://url.corp.redhat.com/sentry-help')
Example No. 5
def realize(
    inventory: LabelInventory,
    state: State,
    oc_map: OC_Map,
    dry_run: bool,
    thread_pool_size: int,
) -> None:
    """
    Apply the changes in the state store and on the namespaces
    """
    for cluster, namespace, types in inventory:
        if inventory.errors(cluster, namespace):
            continue
        upd_managed = types.get(UPDATED_MANAGED, [])
        if upd_managed:
            key = state_key(cluster, namespace)
            _LOG.debug(f"Updating state store: {key}: {upd_managed}")
            if not dry_run:
                state.add(key, upd_managed, force=True)

    # Potential exceptions will get raised up
    threaded.run(
        label,
        inventory,
        thread_pool_size,
        oc_map=oc_map,
        dry_run=dry_run,
        inventory=inventory,
    )
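
threaded.run here fans label() out over the inventory with a thread pool and lets worker exceptions surface to the caller. A rough standard-library stand-in, assuming threaded.run is essentially a keyword-forwarding thread-pool map (an assumption for illustration, not sretoolbox's actual implementation):

from concurrent.futures import ThreadPoolExecutor
from functools import partial

def run(func, iterable, thread_pool_size, **kwargs):
    # map func over iterable in a pool, forwarding keyword arguments;
    # iterating the results re-raises any exception from a worker
    with ThreadPoolExecutor(max_workers=thread_pool_size) as pool:
        return list(pool.map(partial(func, **kwargs), iterable))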
Example No. 6
def _bind_component(self, dry_run: bool, component: StatusComponent,
                    component_id: str, state: State) -> None:
    LOG.info(f"bind component {component.name} to ID {component_id} "
             f"on page {self.name}")
    if not dry_run:
        state.add(component.name, component_id, force=True)
        component.component_id = component_id
Example No. 7
def run(dry_run,
        thread_pool_size=10,
        internal=None,
        use_jump_host=True,
        defer=None):
    settings = queries.get_app_interface_settings()
    accounts = queries.get_state_aws_accounts()
    clusters = [c for c in queries.get_clusters(minimal=True) if c.get("ocm")]
    oc_map = OC_Map(
        clusters=clusters,
        integration=QONTRACT_INTEGRATION,
        settings=settings,
        internal=internal,
        use_jump_host=use_jump_host,
        thread_pool_size=thread_pool_size,
    )
    defer(oc_map.cleanup)
    state = State(integration=QONTRACT_INTEGRATION,
                  accounts=accounts,
                  settings=settings)

    if not dry_run:
        slack = slackapi_from_queries(QONTRACT_INTEGRATION)

    now = datetime.utcnow()
    for cluster in oc_map.clusters(include_errors=True):
        oc = oc_map.get(cluster)
        if not oc:
            logging.log(level=oc.log_level, msg=oc.message)
            continue
        upgrade_config = oc.get(
            namespace="openshift-managed-upgrade-operator",
            kind="UpgradeConfig",
            allow_not_found=True,
        )["items"]
        if not upgrade_config:
            logging.debug(f"[{cluster}] UpgradeConfig not found.")
            continue
        [upgrade_config] = upgrade_config

        upgrade_spec = upgrade_config["spec"]
        upgrade_at = upgrade_spec["upgradeAt"]
        version = upgrade_spec["desired"]["version"]
        upgrade_at_obj = datetime.strptime(upgrade_at, "%Y-%m-%dT%H:%M:%SZ")
        state_key = f"{cluster}-{upgrade_at}"
        # if this is the first iteration in which 'now' has passed
        # the upgradeAt datetime, we send a notification
        if upgrade_at_obj < now:
            if state.exists(state_key):
                # already notified
                continue
            logging.info(["cluster_upgrade", cluster])
            if not dry_run:
                state.add(state_key)
                usergroup = f"{cluster}-cluster"
                usergroup_id = slack.get_usergroup_id(usergroup)
                slack.chat_post_message(
                    f"Heads up <!subteam^{usergroup_id}>! " +
                    f"cluster `{cluster}` is currently " +
                    f"being upgraded to version `{version}`")
Example No. 8
def run(dry_run,
        thread_pool_size=10,
        internal=None,
        use_jump_host=True,
        defer=None):
    settings = queries.get_app_interface_settings()
    accounts = queries.get_aws_accounts()
    clusters = [c for c in queries.get_clusters(minimal=True) if c.get('ocm')]
    oc_map = OC_Map(clusters=clusters,
                    integration=QONTRACT_INTEGRATION,
                    settings=settings,
                    internal=internal,
                    use_jump_host=use_jump_host,
                    thread_pool_size=thread_pool_size)
    defer(oc_map.cleanup)
    state = State(integration=QONTRACT_INTEGRATION,
                  accounts=accounts,
                  settings=settings)

    if not dry_run:
        slack = init_slack_workspace(QONTRACT_INTEGRATION)

    now = datetime.utcnow()
    for cluster in oc_map.clusters(include_errors=True):
        oc = oc_map.get(cluster)
        if not oc:
            logging.log(level=oc.log_level, msg=oc.message)
            continue
        upgrade_config = oc.get(namespace='openshift-managed-upgrade-operator',
                                kind='UpgradeConfig',
                                allow_not_found=True)['items']
        if not upgrade_config:
            logging.debug(f'[{cluster}] UpgradeConfig not found.')
            continue
        [upgrade_config] = upgrade_config

        upgrade_spec = upgrade_config['spec']
        upgrade_at = upgrade_spec['upgradeAt']
        version = upgrade_spec['desired']['version']
        upgrade_at_obj = datetime.strptime(upgrade_at, '%Y-%m-%dT%H:%M:%SZ')
        state_key = f'{cluster}-{upgrade_at}'
        # if this is the first iteration in which 'now' has passed
        # the upgradeAt datetime, we send a notification
        if upgrade_at_obj < now:
            if state.exists(state_key):
                # already notified
                continue
            logging.info(['cluster_upgrade', cluster])
            if not dry_run:
                state.add(state_key)
                usergroup = f'{cluster}-cluster'
                usergroup_id = slack.get_usergroup_id(usergroup)
                slack.chat_post_message(
                    f'Heads up <!subteam^{usergroup_id}>! ' +
                    f'cluster `{cluster}` is currently ' +
                    f'being upgraded to version `{version}`')
Example No. 9
def run(dry_run):
    settings = queries.get_app_interface_settings()
    accounts = queries.get_aws_accounts()
    smtp_client = SmtpClient(settings=settings)
    state = State(integration=QONTRACT_INTEGRATION,
                  accounts=accounts,
                  settings=settings)
    credentials_requests = queries.get_credentials_requests()

    # validate no 2 requests have the same name
    credentials_requests_names = {r['name'] for r in credentials_requests}
    if len(credentials_requests) != len(credentials_requests_names):
        logging.error('request names must be unique.')
        sys.exit(1)

    error = False

    credentials_requests_to_send = \
        [r for r in credentials_requests if not state.exists(r['name'])]
    for credentials_request_to_send in credentials_requests_to_send:
        user = credentials_request_to_send['user']
        org_username = user['org_username']
        public_gpg_key = user.get('public_gpg_key')
        credentials_name = credentials_request_to_send['credentials']
        if not public_gpg_key:
            error = True
            logging.error(
                f"user {org_username} does not have a public gpg key")
            continue
        logging.info(['send_credentials', org_username, credentials_name])

        if not dry_run:
            request_name = credentials_request_to_send['name']
            names = [org_username]
            subject = request_name
            encrypted_credentials = get_encrypted_credentials(
                credentials_name, user, settings, smtp_client)
            if not encrypted_credentials:
                error = True
                logging.error(
                    f"could not get encrypted credentials {credentials_name}")
                continue
            body = MESSAGE_TEMPLATE.format(request_name, credentials_name,
                                           encrypted_credentials)
            smtp_client.send_mail(names, subject, body)
            state.add(request_name)

    if error:
        sys.exit(1)
Example No. 10
def run(dry_run):
    settings = queries.get_app_interface_settings()
    accounts = queries.get_state_aws_accounts()
    smtp_client = SmtpClient(settings=settings)
    state = State(integration=QONTRACT_INTEGRATION,
                  accounts=accounts,
                  settings=settings)
    credentials_requests = queries.get_credentials_requests()

    # validate no 2 requests have the same name
    credentials_requests_names = {r["name"] for r in credentials_requests}
    if len(credentials_requests) != len(credentials_requests_names):
        logging.error("request names must be unique.")
        sys.exit(1)

    error = False

    credentials_requests_to_send = [
        r for r in credentials_requests if not state.exists(r["name"])
    ]
    for credentials_request_to_send in credentials_requests_to_send:
        try:
            user = credentials_request_to_send["user"]
            credentials_name = credentials_request_to_send["credentials"]
            org_username = user["org_username"]
            logging.info(["send_credentials", org_username, credentials_name])

            request_name = credentials_request_to_send["name"]
            names = [org_username]
            subject = request_name
            encrypted_credentials = get_encrypted_credentials(
                credentials_name, user, settings)
            if not dry_run:
                body = MESSAGE_TEMPLATE.format(request_name, credentials_name,
                                               encrypted_credentials)
                smtp_client.send_mail(names, subject, body)
                state.add(request_name)
        except KeyError:
            logging.exception(
                f"Bad user details for {org_username} - {credentials_name}")
            error = True
        except CalledProcessError as e:
            logging.exception(f"Failed to handle GPG key for {org_username} "
                              f"({credentials_name}): {e.stdout}")
            error = True

    if error:
        sys.exit(1)
Example No. 11
def run(dry_run, io_dir='throughput/', defer=None):
    jjb, additional_repo_urls = init_jjb()
    defer(lambda: jjb.cleanup())

    accounts = queries.get_aws_accounts()
    state = State(integration=QONTRACT_INTEGRATION,
                  accounts=accounts,
                  settings=jjb.settings)

    if dry_run:
        validate_repos_and_admins(jjb, additional_repo_urls)
        jjb.generate(io_dir, 'desired')
        jjb.overwrite_configs(state)
        jjb.generate(io_dir, 'current')
        jjb.print_diffs(io_dir)
    else:
        jjb.update()
        configs = jjb.get_configs()
        for name, desired_config in configs.items():
            state.add(name, value=desired_config, force=True)
Example No. 12
def run(dry_run):
    settings = queries.get_app_interface_settings()
    accounts = queries.get_aws_accounts()
    state = State(integration=QONTRACT_INTEGRATION,
                  accounts=accounts,
                  settings=settings)
    emails = queries.get_app_interface_emails()
    smtp_client = SmtpClient(settings=settings)
    # validate no 2 emails have the same name
    email_names = {e['name'] for e in emails}
    if len(emails) != len(email_names):
        logging.error('email names must be unique.')
        sys.exit(1)

    emails_to_send = [e for e in emails if not state.exists(e['name'])]
    for email in emails_to_send:
        logging.info(['send_email', email['name'], email['subject']])

        if not dry_run:
            names = collect_to(email['to'])
            subject = email['subject']
            body = email['body']
            smtp_client.send_mail(names, subject, body)
            state.add(email['name'])
Example No. 13
class SaasHerder:
    """Wrapper around SaaS deployment actions."""
    def __init__(self,
                 saas_files,
                 thread_pool_size,
                 gitlab,
                 integration,
                 integration_version,
                 settings,
                 jenkins_map=None,
                 accounts=None,
                 validate=False):
        self.saas_files = saas_files
        if validate:
            self._validate_saas_files()
            if not self.valid:
                return
        self.thread_pool_size = thread_pool_size
        self.gitlab = gitlab
        self.integration = integration
        self.integration_version = integration_version
        self.settings = settings
        self.secret_reader = SecretReader(settings=settings)
        self.namespaces = self._collect_namespaces()
        self.jenkins_map = jenkins_map
        # each namespace is in fact a target, so we can use
        # their count to estimate the available thread pool size.
        divisor = len(self.namespaces) or 1
        self.available_thread_pool_size = \
            threaded.estimate_available_thread_pool_size(
                self.thread_pool_size,
                divisor)
        # if called with a single saas file, it may
        # specify that it manages resources exclusively.
        self.take_over = self._get_saas_file_attribute('takeover')
        self.compare = self._get_saas_file_attribute('compare')
        self.publish_job_logs = self._get_saas_file_attribute('publishJobLogs')
        if accounts:
            self._initiate_state(accounts)

    def _get_saas_file_attribute(self, attribute):
        return len(self.saas_files) == 1 and self.saas_files[0].get(attribute)

    def _validate_saas_files(self):
        self.valid = True
        saas_file_name_path_map = {}
        saas_file_promotion_publish_channels = []
        for saas_file in self.saas_files:
            saas_file_name = saas_file['name']
            saas_file_path = saas_file['path']
            saas_file_name_path_map.setdefault(saas_file_name, [])
            saas_file_name_path_map[saas_file_name].append(saas_file_path)

            saas_file_owners = [
                u['org_username'] for r in saas_file['roles']
                for u in r['users']
            ]
            if not saas_file_owners:
                msg = 'saas file {} has no owners: {}'
                logging.error(msg.format(saas_file_name, saas_file_path))
                self.valid = False

            for resource_template in saas_file['resourceTemplates']:
                resource_template_name = resource_template['name']
                for target in resource_template['targets']:
                    # promotion publish channels
                    promotion = target.get('promotion')
                    if promotion:
                        publish = promotion.get('publish')
                        if publish:
                            saas_file_promotion_publish_channels.extend(
                                publish)
                    # validate target parameters
                    target_parameters = target['parameters']
                    if not target_parameters:
                        continue
                    target_parameters = json.loads(target_parameters)
                    target_namespace = target['namespace']
                    namespace_name = target_namespace['name']
                    cluster_name = target_namespace['cluster']['name']
                    environment = target_namespace['environment']
                    environment_name = environment['name']
                    environment_parameters = environment['parameters']
                    if not environment_parameters:
                        continue
                    environment_parameters = \
                        json.loads(environment_parameters)
                    msg = \
                        f'[{saas_file_name}/{resource_template_name}] ' + \
                        'parameter found in target ' + \
                        f'{cluster_name}/{namespace_name} ' + \
                        f'should be reused from env {environment_name}'
                    for t_key, t_value in target_parameters.items():
                        if not isinstance(t_value, str):
                            continue
                        for e_key, e_value in environment_parameters.items():
                            if not isinstance(e_value, str):
                                continue
                            if '.' not in e_value:
                                continue
                            if e_value not in t_value:
                                continue
                            if t_key == e_key and t_value == e_value:
                                details = \
                                    f'consider removing {t_key}'
                            else:
                                replacement = t_value.replace(
                                    e_value, '${' + e_key + '}')
                                details = \
                                    f'target: \"{t_key}: {t_value}\". ' + \
                                    f'env: \"{e_key}: {e_value}\". ' + \
                                    f'consider \"{t_key}: {replacement}\"'
                            logging.warning(f'{msg}: {details}')

        # saas file name duplicates
        duplicates = {
            saas_file_name: saas_file_paths
            for saas_file_name, saas_file_paths in
            saas_file_name_path_map.items() if len(saas_file_paths) > 1
        }
        if duplicates:
            self.valid = False
            msg = 'saas file name {} is not unique: {}'
            for saas_file_name, saas_file_paths in duplicates.items():
                logging.error(msg.format(saas_file_name, saas_file_paths))

        # promotion publish channel duplicates
        duplicates = [
            p for p in saas_file_promotion_publish_channels
            if saas_file_promotion_publish_channels.count(p) > 1
        ]
        if duplicates:
            self.valid = False
            msg = 'saas file promotion publish channel is not unique: {}'
            for duplicate in duplicates:
                logging.error(msg.format(duplicate))

    def _collect_namespaces(self):
        # namespaces may appear more than once in the result
        namespaces = []
        for saas_file in self.saas_files:
            managed_resource_types = saas_file['managedResourceTypes']
            resource_templates = saas_file['resourceTemplates']
            for rt in resource_templates:
                targets = rt['targets']
                for target in targets:
                    namespace = target['namespace']
                    if target.get('disable'):
                        logging.debug(
                            f"[{saas_file['name']}/{rt['name']}] target " +
                            f"{namespace['cluster']['name']}/" +
                            f"{namespace['name']} is disabled.")
                        continue
                    # managedResourceTypes is defined per saas_file
                    # add it to each namespace in the current saas_file
                    namespace['managedResourceTypes'] = managed_resource_types
                    namespaces.append(namespace)
        return namespaces

    def _initiate_state(self, accounts):
        self.state = State(integration=self.integration,
                           accounts=accounts,
                           settings=self.settings)

    @staticmethod
    def _collect_parameters(container):
        parameters = container.get('parameters') or {}
        if isinstance(parameters, str):
            parameters = json.loads(parameters)
        # adjust Python's True/False
        for k, v in parameters.items():
            if v is True:
                parameters[k] = 'true'
            elif v is False:
                parameters[k] = 'false'
            elif isinstance(v, (dict, list, tuple)):
                parameters[k] = json.dumps(v)
        return parameters
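    # Illustrative behavior (assumed input, not a doctest from the source):
    #   _collect_parameters({'parameters': '{"A": true, "B": [1, 2]}'})
    #   returns {'A': 'true', 'B': '[1, 2]'}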

    @staticmethod
    def _get_file_contents_github(repo, path, commit_sha):
        try:
            f = repo.get_contents(path, commit_sha)
            return f.decoded_content
        except GithubException as e:
            # slightly copied with love from
            # https://github.com/PyGithub/PyGithub/issues/661
            errors = e.data['errors']
            # example errors dict that we are looking for
            # {
            #    'message': '<text>',
            #    'errors': [{
            #                  'resource': 'Blob',
            #                  'field': 'data',
            #                  'code': 'too_large'
            #               }],
            #    'documentation_url': '<url>'
            # }
            for error in errors:
                if error['code'] == 'too_large':
                    # get large files
                    tree = repo.get_git_tree(
                        commit_sha, recursive='/' in path).tree
                    for x in tree:
                        if x.path != path.lstrip('/'):
                            continue
                        blob = repo.get_git_blob(x.sha)
                        return base64.b64decode(blob.content).decode("utf8")

            raise e

    @retry()
    def _get_file_contents(self, options):
        url = options['url']
        path = options['path']
        ref = options['ref']
        github = options['github']
        html_url = f"{url}/blob/{ref}{path}"
        commit_sha = self._get_commit_sha(options)
        content = None
        if 'github' in url:
            repo_name = url.rstrip("/").replace('https://github.com/', '')
            repo = github.get_repo(repo_name)
            content = self._get_file_contents_github(repo, path, commit_sha)
        elif 'gitlab' in url:
            if not self.gitlab:
                raise Exception('gitlab is not initialized')
            project = self.gitlab.get_project(url)
            f = project.files.get(file_path=path.lstrip('/'), ref=commit_sha)
            content = f.decode()

        return yaml.safe_load(content), html_url, commit_sha

    @retry()
    def _get_directory_contents(self, options):
        url = options['url']
        path = options['path']
        ref = options['ref']
        github = options['github']
        html_url = f"{url}/tree/{ref}{path}"
        commit_sha = self._get_commit_sha(options)
        resources = []
        if 'github' in url:
            repo_name = url.rstrip("/").replace('https://github.com/', '')
            repo = github.get_repo(repo_name)
            for f in repo.get_contents(path, commit_sha):
                file_path = os.path.join(path, f.name)
                file_contents_decoded = \
                    self._get_file_contents_github(
                        repo, file_path, commit_sha)
                resource = yaml.safe_load(file_contents_decoded)
                resources.append(resource)
        elif 'gitlab' in url:
            if not self.gitlab:
                raise Exception('gitlab is not initialized')
            project = self.gitlab.get_project(url)
            for f in project.repository_tree(path=path.lstrip('/'),
                                             ref=commit_sha,
                                             all=True):
                file_contents = \
                    project.files.get(file_path=f['path'], ref=commit_sha)
                resource = yaml.safe_load(file_contents.decode())
                resources.append(resource)

        return resources, html_url, commit_sha

    @retry()
    def _get_commit_sha(self, options):
        url = options['url']
        ref = options['ref']
        github = options['github']
        hash_length = options.get('hash_length')
        commit_sha = ''
        if 'github' in url:
            repo_name = url.rstrip("/").replace('https://github.com/', '')
            repo = github.get_repo(repo_name)
            commit = repo.get_commit(sha=ref)
            commit_sha = commit.sha
        elif 'gitlab' in url:
            if not self.gitlab:
                raise Exception('gitlab is not initialized')
            project = self.gitlab.get_project(url)
            commits = project.commits.list(ref_name=ref)
            commit_sha = commits[0].id

        if hash_length:
            return commit_sha[:hash_length]

        return commit_sha

    @staticmethod
    def _get_cluster_and_namespace(target):
        cluster = target['namespace']['cluster']['name']
        namespace = target['namespace']['name']
        return cluster, namespace

    @staticmethod
    def _additional_resource_process(resources, html_url):
        for resource in resources:
            # add a definition annotation to each PrometheusRule rule
            if resource['kind'] == 'PrometheusRule':
                try:
                    groups = resource['spec']['groups']
                    for group in groups:
                        rules = group['rules']
                        for rule in rules:
                            annotations = rule.get('annotations')
                            if not annotations:
                                continue
                            rule['annotations']['html_url'] = html_url
                except Exception:
                    logging.warning('could not add html_url annotation to ' +
                                    resource['name'])

    @staticmethod
    def _parameter_value_needed(parameter_name, consolidated_parameters,
                                template):
        """Is a parameter named in the template but unspecified?

        NOTE: This is currently "parameter *named* and absent" -- i.e. we
        don't care about `required: true`. This is for backward compatibility.

        :param parameter_name: The name (key) of the parameter.
        :param consolidated_parameters: Dict of parameters already specified/
                calculated.
        :param template: The template file in dict form.
        :return bool: True if the named parameter is named in the template,
                but not already present in consolidated_parameters.
        """
        if parameter_name in consolidated_parameters:
            return False
        for template_parameter in template.get("parameters", {}):
            if template_parameter["name"] == parameter_name:
                return True
        return False
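    # Illustrative (assumed inputs): with a template whose parameters list
    # includes {'name': 'IMAGE_DIGEST'}, calling
    #   _parameter_value_needed('IMAGE_DIGEST', {'IMAGE_TAG': 'abc'}, template)
    # returns True, because the template names IMAGE_DIGEST but no value
    # for it has been specified yet.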

    def _process_template(self, options):
        saas_file_name = options['saas_file_name']
        resource_template_name = options['resource_template_name']
        image_auth = options['image_auth']
        url = options['url']
        path = options['path']
        provider = options['provider']
        target = options['target']
        github = options['github']
        target_ref = target['ref']
        target_promotion = target.get('promotion') or {}

        resources = None
        html_url = None
        commit_sha = None

        if provider == 'openshift-template':
            hash_length = options['hash_length']
            parameters = options['parameters']
            environment = target['namespace']['environment']
            environment_parameters = self._collect_parameters(environment)
            target_parameters = self._collect_parameters(target)

            consolidated_parameters = {}
            consolidated_parameters.update(environment_parameters)
            consolidated_parameters.update(parameters)
            consolidated_parameters.update(target_parameters)

            for replace_key, replace_value in consolidated_parameters.items():
                if not isinstance(replace_value, str):
                    continue
                replace_pattern = '${' + replace_key + '}'
                for k, v in consolidated_parameters.items():
                    if not isinstance(v, str):
                        continue
                    if replace_pattern in v:
                        consolidated_parameters[k] = \
                            v.replace(replace_pattern, replace_value)

            get_file_contents_options = {
                'url': url,
                'path': path,
                'ref': target_ref,
                'github': github
            }

            try:
                template, html_url, commit_sha = \
                    self._get_file_contents(get_file_contents_options)
            except Exception as e:
                logging.error(f"[{url}/{path}:{target_ref}] " +
                              f"error fetching template: {str(e)}")
                return None, None, None

            # add IMAGE_TAG only if it is unspecified
            image_tag = consolidated_parameters.get('IMAGE_TAG')
            if not image_tag:
                sha_substring = commit_sha[:hash_length]
                # IMAGE_TAG takes one of two forms:
                # - If saas file attribute 'use_channel_in_image_tag' is true,
                #   it is {CHANNEL}-{SHA}
                # - Otherwise it is just {SHA}
                if self._get_saas_file_attribute("use_channel_in_image_tag"):
                    try:
                        channel = consolidated_parameters["CHANNEL"]
                    except KeyError:
                        logging.error(
                            f"[{saas_file_name}/{resource_template_name}] " +
                            f"{html_url}: CHANNEL is required when " +
                            "'use_channel_in_image_tag' is true.")
                        return None, None, None
                    image_tag = f"{channel}-{sha_substring}"
                else:
                    image_tag = sha_substring
                consolidated_parameters['IMAGE_TAG'] = image_tag

            # This relies on IMAGE_TAG already being calculated.
            need_repo_digest = self._parameter_value_needed(
                "REPO_DIGEST", consolidated_parameters, template)
            need_image_digest = self._parameter_value_needed(
                "IMAGE_DIGEST", consolidated_parameters, template)
            if need_repo_digest or need_image_digest:
                try:
                    logging.debug("Generating REPO_DIGEST.")
                    registry_image = consolidated_parameters["REGISTRY_IMG"]
                except KeyError as e:
                    logging.error(
                        f"[{saas_file_name}/{resource_template_name}] " +
                        f"{html_url}: error generating REPO_DIGEST. " +
                        "Is REGISTRY_IMG missing? " + f"{str(e)}")
                    return None, None, None
                try:
                    image_uri = f"{registry_image}:{image_tag}"
                    img = Image(image_uri, **image_auth)
                    if need_repo_digest:
                        consolidated_parameters["REPO_DIGEST"] = img.url_digest
                    if need_image_digest:
                        consolidated_parameters["IMAGE_DIGEST"] = img.digest
                except (rqexc.ConnectionError, rqexc.HTTPError) as e:
                    logging.error(
                        f"[{saas_file_name}/{resource_template_name}] " +
                        f"{html_url}: error generating REPO_DIGEST for " +
                        f"{image_uri}: {str(e)}")
                    return None, None, None

            oc = OC('server', 'token', local=True)
            try:
                resources = oc.process(template, consolidated_parameters)
            except StatusCodeError as e:
                logging.error(
                    f"[{saas_file_name}/{resource_template_name}] " +
                    f"{html_url}: error processing template: {str(e)}")

        elif provider == 'directory':
            get_directory_contents_options = {
                'url': url,
                'path': path,
                'ref': target_ref,
                'github': github
            }
            try:
                resources, html_url, commit_sha = \
                    self._get_directory_contents(
                        get_directory_contents_options)
            except Exception as e:
                logging.error(f"[{url}/{path}:{target_ref}] " +
                              f"error fetching directory: {str(e)}")
                return None, None, None

        else:
            logging.error(f"[{saas_file_name}/{resource_template_name}] " +
                          f"unknown provider: {provider}")

        target_promotion['commit_sha'] = commit_sha
        return resources, html_url, target_promotion

    @staticmethod
    def _collect_images(resource):
        images = set()
        # resources with pod templates
        try:
            template = resource["spec"]["template"]
            for c in template["spec"]["containers"]:
                images.add(c["image"])
        except KeyError:
            pass
        # init containers
        try:
            template = resource["spec"]["template"]
            for c in template["spec"]["initContainers"]:
                images.add(c["image"])
        except KeyError:
            pass
        # CronJob
        try:
            template = resource["spec"]["jobTemplate"]["spec"]["template"]
            for c in template["spec"]["containers"]:
                images.add(c["image"])
        except KeyError:
            pass
        # CatalogSource templates
        try:
            images.add(resource["spec"]["image"])
        except KeyError:
            pass

        return images
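    # Illustrative: for a Deployment-shaped resource
    #   {'spec': {'template': {'spec': {'containers': [{'image': 'quay.io/a/b:v1'}]}}}}
    # this returns {'quay.io/a/b:v1'}; resources without any of the probed
    # paths contribute nothing, since each KeyError is swallowed.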

    @staticmethod
    def _check_image(image, image_patterns, image_auth, error_prefix):
        error = False
        if image_patterns and \
                not any(image.startswith(p) for p in image_patterns):
            error = True
            logging.error(
                f"{error_prefix} Image is not in imagePatterns: {image}")
        try:
            valid = Image(image, **image_auth)
            if not valid:
                error = True
                logging.error(f"{error_prefix} Image does not exist: {image}")
        except Exception as e:
            error = True
            logging.error(f"{error_prefix} Image is invalid: {image}. " +
                          f"details: {str(e)}")

        return error

    def _check_images(self, options):
        saas_file_name = options['saas_file_name']
        resource_template_name = options['resource_template_name']
        html_url = options['html_url']
        resources = options['resources']
        image_auth = options['image_auth']
        image_patterns = options['image_patterns']
        error_prefix = \
            f"[{saas_file_name}/{resource_template_name}] {html_url}:"

        images_list = threaded.run(self._collect_images, resources,
                                   self.available_thread_pool_size)
        images = {item for sublist in images_list for item in sublist}
        if not images:
            return False  # no errors
        errors = threaded.run(self._check_image,
                              images,
                              self.available_thread_pool_size,
                              image_patterns=image_patterns,
                              image_auth=image_auth,
                              error_prefix=error_prefix)
        error = any(errors)
        return error

    def _initiate_github(self, saas_file):
        auth = saas_file.get('authentication') or {}
        auth_code = auth.get('code') or {}
        if auth_code:
            token = self.secret_reader.read(auth_code)
        else:
            # use the app-sre token by default
            default_org_name = 'app-sre'
            config = get_config(desired_org_name=default_org_name)
            token = config['github'][default_org_name]['token']

        base_url = os.environ.get('GITHUB_API', 'https://api.github.com')
        # This is a threaded world. Let's define a big
        # connections pool to live in that world
        # (this avoids the warning "Connection pool is
        # full, discarding connection: api.github.com")
        pool_size = 100
        return Github(token, base_url=base_url, pool_size=pool_size)

    def _initiate_image_auth(self, saas_file):
        """
        This function initiates a dict required for image authentication.
        This dict will be used as kwargs for sretoolbox's Image.
        The image authentication secret specified in the saas file must
        contain the 'user' and 'token' keys, and may optionally contain
        a 'url' key specifying the image registry url, used to decide
        whether an image should be checked with these credentials.
        The function returns the keys extracted from the secret in the
        structure expected by sretoolbox's Image:
        'user' --> 'username'
        'token' --> 'password'
        'url' --> 'auth_server' (optional)
        """
        auth = saas_file.get('authentication')
        if not auth:
            return {}
        auth_image_secret = auth.get('image')
        if not auth_image_secret:
            return {}

        creds = self.secret_reader.read_all(auth_image_secret)
        required_keys = ['user', 'token']
        ok = all(k in creds.keys() for k in required_keys)
        if not ok:
            logging.warning(
                "the specified image authentication secret " +
                f"found in path {auth_image_secret['path']} " +
                f"does not contain all required keys: {required_keys}")
            return {}

        image_auth = {'username': creds['user'], 'password': creds['token']}
        url = creds.get('url')
        if url:
            image_auth['auth_server'] = url

        return image_auth
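    # Illustrative: a secret {'user': 'bot', 'token': 's3cr3t',
    # 'url': 'https://quay.io'} is returned as
    # {'username': 'bot', 'password': 's3cr3t', 'auth_server': 'https://quay.io'}.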

    def populate_desired_state(self, ri):
        results = threaded.run(self.init_populate_desired_state_specs,
                               self.saas_files, self.thread_pool_size)
        desired_state_specs = \
            [item for sublist in results for item in sublist]
        promotions = threaded.run(self.populate_desired_state_saas_file,
                                  desired_state_specs,
                                  self.thread_pool_size,
                                  ri=ri)
        self.promotions = promotions

    def init_populate_desired_state_specs(self, saas_file):
        specs = []
        saas_file_name = saas_file['name']
        github = self._initiate_github(saas_file)
        image_auth = self._initiate_image_auth(saas_file)
        instance_name = saas_file['instance']['name']
        managed_resource_types = saas_file['managedResourceTypes']
        image_patterns = saas_file['imagePatterns']
        resource_templates = saas_file['resourceTemplates']
        saas_file_parameters = self._collect_parameters(saas_file)
        # iterate over resource templates (multiple per saas_file)
        for rt in resource_templates:
            rt_name = rt['name']
            url = rt['url']
            path = rt['path']
            provider = rt.get('provider') or 'openshift-template'
            hash_length = rt.get('hash_length') or self.settings['hashLength']
            parameters = self._collect_parameters(rt)

            consolidated_parameters = {}
            consolidated_parameters.update(saas_file_parameters)
            consolidated_parameters.update(parameters)

            # iterate over targets (each target is a namespace)
            for target in rt['targets']:
                if target.get('disable'):
                    # a warning is logged during SaasHerder initiation
                    continue
                cluster, namespace = \
                    self._get_cluster_and_namespace(target)
                process_template_options = {
                    'saas_file_name': saas_file_name,
                    'resource_template_name': rt_name,
                    'image_auth': image_auth,
                    'url': url,
                    'path': path,
                    'provider': provider,
                    'hash_length': hash_length,
                    'target': target,
                    'parameters': consolidated_parameters,
                    'github': github
                }
                check_images_options_base = {
                    'saas_file_name': saas_file_name,
                    'resource_template_name': rt_name,
                    'image_auth': image_auth,
                    'image_patterns': image_patterns
                }
                spec = {
                    'saas_file_name': saas_file_name,
                    'cluster': cluster,
                    'namespace': namespace,
                    'managed_resource_types': managed_resource_types,
                    'process_template_options': process_template_options,
                    'check_images_options_base': check_images_options_base,
                    'instance_name': instance_name,
                    'upstream': target.get('upstream')
                }
                specs.append(spec)

        return specs

    def populate_desired_state_saas_file(self, spec, ri):
        saas_file_name = spec['saas_file_name']
        cluster = spec['cluster']
        namespace = spec['namespace']
        managed_resource_types = spec['managed_resource_types']
        process_template_options = spec['process_template_options']
        check_images_options_base = spec['check_images_options_base']
        instance_name = spec['instance_name']
        upstream = spec['upstream']

        resources, html_url, promotion = \
            self._process_template(process_template_options)
        if resources is None:
            ri.register_error()
            return
        # filter resources
        resources = [
            resource for resource in resources if isinstance(resource, dict)
            and resource['kind'] in managed_resource_types
        ]
        # additional processing of resources
        self._additional_resource_process(resources, html_url)
        # check images
        skip_check_images = upstream and self.jenkins_map and \
            self.jenkins_map[instance_name].is_job_running(upstream)
        if skip_check_images:
            logging.warning("skipping check_image since " +
                            f"upstream job {upstream} is running")
        else:
            check_images_options = {
                'html_url': html_url,
                'resources': resources
            }
            check_images_options.update(check_images_options_base)
            image_error = self._check_images(check_images_options)
            if image_error:
                ri.register_error()
                return
        # add desired resources
        for resource in resources:
            resource_kind = resource['kind']
            resource_name = resource['metadata']['name']
            oc_resource = OR(resource,
                             self.integration,
                             self.integration_version,
                             caller_name=saas_file_name,
                             error_details=html_url)
            ri.add_desired(cluster, namespace, resource_kind, resource_name,
                           oc_resource)

        return promotion

    def get_moving_commits_diff(self, dry_run):
        results = threaded.run(self.get_moving_commits_diff_saas_file,
                               self.saas_files,
                               self.thread_pool_size,
                               dry_run=dry_run)
        return [item for sublist in results for item in sublist]

    def get_moving_commits_diff_saas_file(self, saas_file, dry_run):
        saas_file_name = saas_file['name']
        instance_name = saas_file['instance']['name']
        github = self._initiate_github(saas_file)
        trigger_specs = []
        for rt in saas_file['resourceTemplates']:
            rt_name = rt['name']
            url = rt['url']
            for target in rt['targets']:
                # don't trigger if there is a linked upstream job
                if target.get('upstream'):
                    continue
                ref = target['ref']
                get_commit_sha_options = {
                    'url': url,
                    'ref': ref,
                    'github': github
                }
                desired_commit_sha = \
                    self._get_commit_sha(get_commit_sha_options)
                # don't trigger on refs which are commit shas
                if ref == desired_commit_sha:
                    continue
                namespace = target['namespace']
                cluster_name = namespace['cluster']['name']
                namespace_name = namespace['name']
                env_name = namespace['environment']['name']
                key = f"{saas_file_name}/{rt_name}/{cluster_name}/" + \
                    f"{namespace_name}/{env_name}/{ref}"
                current_commit_sha = self.state.get(key, None)
                # skip if there is no change in commit sha
                if current_commit_sha == desired_commit_sha:
                    continue
                # don't trigger if this is the first time
                # this target is being deployed.
                # that will be taken care of by
                # openshift-saas-deploy-trigger-configs
                if current_commit_sha is None:
                    # store the value to take over from now on
                    if not dry_run:
                        self.state.add(key, value=desired_commit_sha)
                    continue
                # we finally found something we want to trigger on!
                job_spec = {
                    'saas_file_name': saas_file_name,
                    'env_name': env_name,
                    'instance_name': instance_name,
                    'rt_name': rt_name,
                    'cluster_name': cluster_name,
                    'namespace_name': namespace_name,
                    'ref': ref,
                    'commit_sha': desired_commit_sha
                }
                trigger_specs.append(job_spec)

        return trigger_specs

    def update_moving_commit(self, job_spec):
        saas_file_name = job_spec['saas_file_name']
        env_name = job_spec['env_name']
        rt_name = job_spec['rt_name']
        cluster_name = job_spec['cluster_name']
        namespace_name = job_spec['namespace_name']
        ref = job_spec['ref']
        commit_sha = job_spec['commit_sha']
        key = f"{saas_file_name}/{rt_name}/{cluster_name}/" + \
            f"{namespace_name}/{env_name}/{ref}"
        self.state.add(key, value=commit_sha, force=True)

    def get_configs_diff(self):
        results = threaded.run(self.get_configs_diff_saas_file,
                               self.saas_files, self.thread_pool_size)
        return [item for sublist in results for item in sublist]

    def get_configs_diff_saas_file(self, saas_file):
        saas_file_name = saas_file['name']
        saas_file_parameters = saas_file.get('parameters')
        saas_file_managed_resource_types = saas_file['managedResourceTypes']
        instance_name = saas_file['instance']['name']
        trigger_specs = []
        for rt in saas_file['resourceTemplates']:
            rt_name = rt['name']
            url = rt['url']
            path = rt['path']
            rt_parameters = rt.get('parameters')
            for desired_target_config in rt['targets']:
                namespace = desired_target_config['namespace']
                cluster_name = namespace['cluster']['name']
                namespace_name = namespace['name']
                env_name = namespace['environment']['name']
                desired_target_config['namespace'] = \
                    self.sanitize_namespace(namespace)
                # add parent parameters to target config
                desired_target_config['saas_file_parameters'] = \
                    saas_file_parameters
                # add managed resource types to target config
                desired_target_config['saas_file_managed_resource_types'] = \
                    saas_file_managed_resource_types
                desired_target_config['url'] = url
                desired_target_config['path'] = path
                desired_target_config['rt_parameters'] = rt_parameters
                # get current target config from state
                key = f"{saas_file_name}/{rt_name}/{cluster_name}/" + \
                    f"{namespace_name}/{env_name}"
                current_target_config = self.state.get(key, None)
                # skip if there is no change in target configuration
                if current_target_config == desired_target_config:
                    continue
                job_spec = {
                    'saas_file_name': saas_file_name,
                    'env_name': env_name,
                    'instance_name': instance_name,
                    'rt_name': rt_name,
                    'cluster_name': cluster_name,
                    'namespace_name': namespace_name,
                    'target_config': desired_target_config
                }
                trigger_specs.append(job_spec)

        return trigger_specs

    @staticmethod
    def sanitize_namespace(namespace):
        """Only keep fields that should trigger a new job."""
        new_job_fields = {
            'namespace': ['name', 'cluster', 'app'],
            'cluster': ['name', 'serverUrl'],
            'app': ['name']
        }
        namespace = {
            k: v
            for k, v in namespace.items() if k in new_job_fields['namespace']
        }
        cluster = namespace['cluster']
        namespace['cluster'] = {
            k: v
            for k, v in cluster.items() if k in new_job_fields['cluster']
        }
        app = namespace['app']
        namespace['app'] = {
            k: v
            for k, v in app.items() if k in new_job_fields['app']
        }
        return namespace
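    # Illustrative: extra fields are dropped, so
    #   {'name': 'ns1', 'app': {'name': 'a1'},
    #    'cluster': {'name': 'c1', 'serverUrl': 'u', 'jumpHost': 'j'},
    #    'environment': {'name': 'e1'}}
    # becomes
    #   {'name': 'ns1', 'app': {'name': 'a1'},
    #    'cluster': {'name': 'c1', 'serverUrl': 'u'}}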

    def update_config(self, job_spec):
        saas_file_name = job_spec['saas_file_name']
        env_name = job_spec['env_name']
        rt_name = job_spec['rt_name']
        cluster_name = job_spec['cluster_name']
        namespace_name = job_spec['namespace_name']
        target_config = job_spec['target_config']
        key = f"{saas_file_name}/{rt_name}/{cluster_name}/" + \
            f"{namespace_name}/{env_name}"
        self.state.add(key, value=target_config, force=True)

    def validate_promotions(self):
        """
        If there were promotion sections in the participating saas files
        validate that the conditions are met. """
        for item in self.promotions:
            if item is None:
                continue
            # validate that the commit sha being promoted
            # was successfully published to the subscribed channel(s)
            commit_sha = item['commit_sha']
            subscribe = item.get('subscribe')
            if subscribe:
                for channel in subscribe:
                    state_key = f"promotions/{channel}/{commit_sha}"
                    value = self.state.get(state_key, None)
                    success = value and value.get('success')
                    if not success:
                        logging.error(
                            f'Commit {commit_sha} was not ' +
                            f'published with success to channel {channel}')
                        return False

        return True

    def publish_promotions(self, success, saas_files, mr_cli):
        """
        If there were promotion sections in the participating saas files
        publish the results for future promotion validations. """
        subscribe_saas_file_path_map = \
            self._get_subscribe_saas_file_path_map(saas_files, auto_only=True)
        trigger_promotion = False
        for item in self.promotions:
            commit_sha = item['commit_sha']
            publish = item.get('publish')
            if publish:
                all_subscribed_saas_file_paths = set()
                for channel in publish:
                    # publish to state to pass promotion gate
                    state_key = f"promotions/{channel}/{commit_sha}"
                    value = {'success': success}
                    self.state.add(state_key, value, force=True)
                    logging.info(
                        f'Commit {commit_sha} was published ' +
                        f'with success {success} to channel {channel}')
                    # collect data to trigger promotion
                    subscribed_saas_file_paths = \
                        subscribe_saas_file_path_map.get(channel)
                    if subscribed_saas_file_paths:
                        all_subscribed_saas_file_paths.update(
                            subscribed_saas_file_paths)
                item['saas_file_paths'] = list(all_subscribed_saas_file_paths)
                if all_subscribed_saas_file_paths:
                    trigger_promotion = True

        if trigger_promotion:
            mr = AutoPromoter(self.promotions)
            mr.submit(cli=mr_cli)

    @staticmethod
    def _get_subscribe_saas_file_path_map(saas_files, auto_only=False):
        """
        Returns a dict with subscribe channels as keys and a
        list of paths of saas files containing these channels.
        """
        subscribe_saas_file_path_map = {}
        for saas_file in saas_files:
            saas_file_path = 'data' + saas_file['path']
            for rt in saas_file['resourceTemplates']:
                for target in rt['targets']:
                    target_promotion = target.get('promotion')
                    if not target_promotion:
                        continue
                    target_auto = target_promotion.get('auto')
                    if auto_only and not target_auto:
                        continue
                    subscribe = target_promotion.get('subscribe')
                    if not subscribe:
                        continue
                    for channel in subscribe:
                        subscribe_saas_file_path_map.setdefault(channel, set())
                        subscribe_saas_file_path_map[channel].add(
                            saas_file_path)

        return subscribe_saas_file_path_map
Example No. 14
def set(ctx, integration, key, value):
    settings = queries.get_app_interface_settings()
    accounts = queries.get_aws_accounts()
    state = State(integration, accounts, settings=settings)
    state.add(key, value=value, force=True)
Example No. 15
def add(ctx, integration, key):
    settings = queries.get_app_interface_settings()
    accounts = queries.get_aws_accounts()
    state = State(integration, accounts, settings=settings)
    state.add(key)
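
Taken together, these call sites imply a small State surface: construction with integration, accounts, and settings, plus exists(key), get(key, default) and add(key, value=None, force=False), where add refuses to overwrite an existing key unless force is set. An in-memory model consistent with the usage above (a sketch of the inferred interface, not the real remote-backed implementation):

class InMemoryState:
    # dict-backed model of the State interface as used in these examples
    def __init__(self, integration, accounts=None, settings=None):
        self.integration = integration
        self._data = {}

    def exists(self, key):
        return key in self._data

    def get(self, key, *default):
        if key in self._data:
            return self._data[key]
        if default:
            return default[0]
        raise KeyError(key)

    def add(self, key, value=None, force=False):
        if self.exists(key) and not force:
            raise KeyError(f"[{self.integration}] key exists: {key}")
        self._data[key] = value

state = InMemoryState("example-integration")
state.add("k")                       # mark a key; the value is optional
state.add("k", value=1, force=True)  # overwriting requires force=True
assert state.get("missing", "fallback") == "fallback"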