Example No. 1
def get_desired_state(internal, use_jump_host, thread_pool_size):
    gqlapi = gql.get_api()
    all_namespaces = gqlapi.query(QUERY)['namespaces']

    namespaces = []
    for namespace in all_namespaces:
        shard_key = f'{namespace["cluster"]["name"]}/{namespace["name"]}'
        if is_in_shard(shard_key):
            namespaces.append(namespace)

    ri = ResourceInventory()
    settings = queries.get_app_interface_settings()
    oc_map = OC_Map(namespaces=namespaces,
                    integration=QONTRACT_INTEGRATION,
                    settings=settings,
                    internal=internal,
                    use_jump_host=use_jump_host,
                    thread_pool_size=thread_pool_size,
                    init_projects=True)
    ob.init_specs_to_fetch(ri,
                           oc_map,
                           namespaces=namespaces,
                           override_managed_types=['Namespace'])

    desired_state = []
    for cluster, namespace, _, _ in ri:
        if cluster not in oc_map.clusters():
            continue
        desired_state.append({"cluster": cluster, "namespace": namespace})

    return oc_map, desired_state
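
A note on the sharding above: `is_in_shard` decides whether this replica of the integration is responsible for a given `cluster/namespace` key, so several replicas can split the namespaces between them. A minimal sketch of such a check, assuming shard count and shard id come from hypothetical SHARDS/SHARD_ID environment variables (not necessarily the project's actual implementation):

import hashlib
import os

SHARDS = int(os.environ.get('SHARDS', '1'))
SHARD_ID = int(os.environ.get('SHARD_ID', '0'))


def is_in_shard(key):
    # use a stable digest so every replica maps the same key to the
    # same shard, independent of Python's per-process hash randomization
    digest = hashlib.blake2b(key.encode('utf-8'), digest_size=8).digest()
    return int.from_bytes(digest, 'big') % SHARDS == SHARD_ID
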
Example No. 2
def run(dry_run,
        thread_pool_size=10,
        internal=None,
        use_jump_host=True,
        defer=None):
    settings = queries.get_app_interface_settings()
    accounts = queries.get_aws_accounts()
    clusters = [c for c in queries.get_clusters(minimal=True) if c.get('ocm')]
    oc_map = OC_Map(clusters=clusters,
                    integration=QONTRACT_INTEGRATION,
                    settings=settings,
                    internal=internal,
                    use_jump_host=use_jump_host,
                    thread_pool_size=thread_pool_size)
    defer(lambda: oc_map.cleanup())
    state = State(integration=QONTRACT_INTEGRATION,
                  accounts=accounts,
                  settings=settings)

    if not dry_run:
        slack = init_slack_workspace(QONTRACT_INTEGRATION)

    now = datetime.utcnow()
    for cluster in oc_map.clusters():
        oc = oc_map.get(cluster)
        if not oc:
            logging.log(level=oc.log_level, msg=oc.message)
            continue
        upgrade_config = oc.get(namespace='openshift-managed-upgrade-operator',
                                kind='UpgradeConfig',
                                name='osd-upgrade-config',
                                allow_not_found=True)
        if not upgrade_config:
            logging.debug(f'[{cluster}] UpgradeConfig not found.')
            continue

        upgrade_spec = upgrade_config['spec']
        upgrade_at = upgrade_spec['upgradeAt']
        version = upgrade_spec['desired']['version']
        upgrade_at_obj = datetime.strptime(upgrade_at, '%Y-%m-%dT%H:%M:%SZ')
        state_key = f'{cluster}-{upgrade_at}'
        # if this is the first iteration in which 'now' had passed
        # the upgrade at date time, we send a notification
        if upgrade_at_obj < now:
            if state.exists(state_key):
                # already notified
                continue
            logging.info(['cluster_upgrade', cluster])
            if not dry_run:
                state.add(state_key)
                usergroup = f'{cluster}-cluster'
                usergroup_id = slack.get_usergroup_id(usergroup)
                slack.chat_post_message(
                    f'Heads up <!subteam^{usergroup_id}>! ' +
                    f'cluster `{cluster}` is currently ' +
                    f'being upgraded to version `{version}`')
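
The upgrade notification above is made idempotent by the persistent state key `f'{cluster}-{upgrade_at}'`: the first run after `upgradeAt` has passed records the key and posts to Slack, and every later run skips it. A self-contained sketch of the same notify-once pattern, with an in-memory set standing in for the real State store (illustration only):

from datetime import datetime

notified = set()  # stands in for the persistent State store


def maybe_notify(cluster, upgrade_at, version, now=None):
    # returns True only on the first call after the upgrade time has passed
    now = now or datetime.utcnow()
    upgrade_at_obj = datetime.strptime(upgrade_at, '%Y-%m-%dT%H:%M:%SZ')
    state_key = f'{cluster}-{upgrade_at}'
    if upgrade_at_obj >= now or state_key in notified:
        return False
    notified.add(state_key)
    print(f'cluster `{cluster}` is being upgraded to version `{version}`')
    return True
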
Example No. 3
def fetch_current_state(thread_pool_size, internal, use_jump_host):
    gqlapi = gql.get_api()
    clusters = gqlapi.query(CLUSTERS_QUERY)['clusters']
    clusters = [c for c in clusters if c.get('ocm') is None]
    settings = queries.get_app_interface_settings()
    oc_map = OC_Map(clusters=clusters, integration=QONTRACT_INTEGRATION,
                    settings=settings, internal=internal,
                    use_jump_host=use_jump_host)
    results = threaded.run(get_cluster_users, oc_map.clusters(),
                           thread_pool_size, oc_map=oc_map)
    current_state = [item for sublist in results for item in sublist]
    return oc_map, current_state
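
Examples No. 3 and No. 4 both fan a per-cluster function out over a thread pool and then flatten the per-cluster result lists into a single current_state. The threaded.run helper is internal to the project; a rough standard-library stand-in for the same fan-out-and-flatten pattern (an approximation, not the real helper's signature):

from concurrent.futures import ThreadPoolExecutor
from functools import partial


def threaded_run(func, iterable, thread_pool_size, **kwargs):
    # apply func to every item concurrently and collect the results
    with ThreadPoolExecutor(max_workers=thread_pool_size) as pool:
        return list(pool.map(partial(func, **kwargs), iterable))


def get_cluster_users(cluster, oc_map=None):
    # placeholder: the real function would query the cluster via oc_map
    return [{'cluster': cluster, 'user': 'example-user'}]


results = threaded_run(get_cluster_users, ['cluster-a', 'cluster-b'],
                       thread_pool_size=4, oc_map=None)
current_state = [item for sublist in results for item in sublist]
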
Example No. 4
def fetch_current_state(thread_pool_size, internal, use_jump_host):
    clusters = queries.get_clusters(minimal=True)
    settings = queries.get_app_interface_settings()
    oc_map = OC_Map(clusters=clusters,
                    integration=QONTRACT_INTEGRATION,
                    settings=settings,
                    internal=internal,
                    use_jump_host=use_jump_host,
                    thread_pool_size=thread_pool_size)
    results = threaded.run(get_cluster_users,
                           oc_map.clusters(),
                           thread_pool_size,
                           oc_map=oc_map)
    current_state = [item for sublist in results for item in sublist]
    return oc_map, current_state
Example No. 5
def fetch_current_state(namespaces=None,
                        clusters=None,
                        thread_pool_size=None,
                        integration=None,
                        integration_version=None,
                        override_managed_types=None,
                        internal=None,
                        use_jump_host=True,
                        init_api_resources=False):
    ri = ResourceInventory()
    settings = queries.get_app_interface_settings()
    oc_map = OC_Map(namespaces=namespaces,
                    clusters=clusters,
                    integration=integration,
                    settings=settings,
                    internal=internal,
                    use_jump_host=use_jump_host,
                    thread_pool_size=thread_pool_size,
                    init_api_resources=init_api_resources)
    state_specs = \
        init_specs_to_fetch(
            ri,
            oc_map,
            namespaces=namespaces,
            clusters=clusters,
            override_managed_types=override_managed_types
        )
    threaded.run(populate_current_state,
                 state_specs,
                 thread_pool_size,
                 ri=ri,
                 integration=integration,
                 integration_version=integration_version)

    return ri, oc_map
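
Several of these examples iterate the ResourceInventory as `for cluster, namespace, resource_type, data in ri`. A toy class that captures just that shape, a nested mapping keyed by cluster and namespace that yields 4-tuples, purely for illustration (not the project's real implementation):

class ToyResourceInventory:
    """Nested mapping: cluster -> namespace -> resource_type -> data."""

    def __init__(self):
        self._data = {}

    def initialize_resource_type(self, cluster, namespace, resource_type):
        self._data \
            .setdefault(cluster, {}) \
            .setdefault(namespace, {}) \
            .setdefault(resource_type, {'current': {}, 'desired': {}})

    def __iter__(self):
        for cluster, namespaces in self._data.items():
            for namespace, types in namespaces.items():
                for resource_type, data in types.items():
                    yield cluster, namespace, resource_type, data


ri = ToyResourceInventory()
ri.initialize_resource_type('cluster-a', 'team-ns', 'Namespace')
desired_state = [{'cluster': c, 'namespace': ns} for c, ns, _, _ in ri]
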
Example No. 6
    def run(self):
        clusters = queries.get_clusters()

        oc_map = OC_Map(clusters=clusters,
                        integration=QONTRACT_INTEGRATION,
                        settings=self.settings, use_jump_host=True,
                        thread_pool_size=self.thread_pool_size)

        manifests = threaded.run(func=self._get_imagemanifestvuln,
                                 iterable=oc_map.clusters(),
                                 thread_pool_size=self.thread_pool_size,
                                 oc_map=oc_map)

        threaded.run(func=self._post,
                     iterable=manifests,
                     thread_pool_size=self.thread_pool_size)
Example No. 7
def get_desired_state(internal, use_jump_host):
    gqlapi = gql.get_api()
    namespaces = gqlapi.query(QUERY)['namespaces']
    ri = ResourceInventory()
    settings = queries.get_app_interface_settings()
    oc_map = OC_Map(namespaces=namespaces,
                    integration=QONTRACT_INTEGRATION,
                    settings=settings,
                    internal=internal,
                    use_jump_host=use_jump_host)
    ob.init_specs_to_fetch(ri,
                           oc_map,
                           namespaces=namespaces,
                           override_managed_types=['Namespace'])
    desired_state = [{
        "cluster": cluster,
        "namespace": namespace
    } for cluster, namespace, _, _ in ri if cluster in oc_map.clusters()]

    return oc_map, desired_state
Example No. 8
def fetch_data(namespaces, thread_pool_size, internal, use_jump_host):
    ri = ResourceInventory()
    settings = queries.get_app_interface_settings()
    oc_map = OC_Map(namespaces=namespaces,
                    integration=QONTRACT_INTEGRATION,
                    settings=settings,
                    internal=internal,
                    use_jump_host=use_jump_host)
    state_specs = ob.init_specs_to_fetch(ri, oc_map, namespaces=namespaces)
    threaded.run(fetch_states, state_specs, thread_pool_size, ri=ri)

    return oc_map, ri
Example No. 9
def fetch_current_state(thread_pool_size, internal, use_jump_host):
    clusters = queries.get_clusters()
    clusters = [c for c in clusters if c.get('ocm') is None]
    current_state = []
    settings = queries.get_app_interface_settings()
    oc_map = OC_Map(clusters=clusters, integration=QONTRACT_INTEGRATION,
                    settings=settings, internal=internal,
                    use_jump_host=use_jump_host)

    groups_list = create_groups_list(clusters, oc_map)
    results = threaded.run(get_cluster_state, groups_list, thread_pool_size,
                           oc_map=oc_map)

    current_state = [item for sublist in results for item in sublist]
    return oc_map, current_state
Example No. 10
def fetch_current_state(namespaces, thread_pool_size, internal, use_jump_host):
    ri = ResourceInventory()
    settings = queries.get_app_interface_settings()
    oc_map = OC_Map(namespaces=namespaces, integration=QONTRACT_INTEGRATION,
                    settings=settings, internal=internal,
                    use_jump_host=use_jump_host)
    state_specs = \
        ob.init_specs_to_fetch(
            ri,
            oc_map,
            namespaces=namespaces,
            override_managed_types=['Secret']
        )
    threaded.run(populate_oc_resources, state_specs, thread_pool_size, ri=ri)

    return ri, oc_map
Example No. 11
def get_oc_map(test_name):
    gqlapi = gql.get_api()
    clusters = gqlapi.query(CLUSTERS_QUERY)['clusters']
    settings = queries.get_app_interface_settings()
    return OC_Map(clusters=clusters, e2e_test=test_name, settings=settings)
Example No. 12
def run(dry_run=False, enable_deletion=False):
    settings = queries.get_app_interface_settings()
    accounts = queries.get_aws_accounts()
    state = State(integration=QONTRACT_INTEGRATION,
                  accounts=accounts,
                  settings=settings)

    queries_list = collect_queries()
    remove_candidates = []
    for query in queries_list:
        query_name = query['name']

        # Checking the sql-query state:
        # - No state: up for execution.
        # - State is a timestamp: executed and up for removal
        #   after the JOB_TTL
        # - State is 'DONE': executed and removed.
        try:
            query_state = state[query_name]
            if query_state != 'DONE':
                remove_candidates.append({
                    'name': query_name,
                    'timestamp': query_state
                })
            continue
        except KeyError:
            pass

        job_yaml = process_template(query)
        job = yaml.safe_load(job_yaml)
        job_resource = OpenshiftResource(job, QONTRACT_INTEGRATION,
                                         QONTRACT_INTEGRATION_VERSION)

        oc_map = OC_Map(namespaces=[query['namespace']],
                        integration=QONTRACT_INTEGRATION,
                        settings=queries.get_app_interface_settings(),
                        internal=None)

        openshift_base.apply(dry_run=dry_run,
                             oc_map=oc_map,
                             cluster=query['cluster'],
                             namespace=query['namespace']['name'],
                             resource_type='job',
                             resource=job_resource)

        if not dry_run:
            state[query_name] = time.time()

    for candidate in remove_candidates:
        if time.time() < candidate['timestamp'] + JOB_TTL:
            continue

        try:
            query = collect_queries(query_name=candidate['name'])[0]
        except IndexError:
            raise RuntimeError(f'sql-query {candidate["name"]} not present '
                               f'in the app-interface while its Job is still '
                               f'not removed from the cluster. Manual clean '
                               f'up is needed.')

        oc_map = OC_Map(namespaces=[query['namespace']],
                        integration=QONTRACT_INTEGRATION,
                        settings=queries.get_app_interface_settings(),
                        internal=None)

        openshift_base.delete(dry_run=dry_run,
                              oc_map=oc_map,
                              cluster=query['cluster'],
                              namespace=query['namespace']['name'],
                              resource_type='job',
                              name=query['name'],
                              enable_deletion=enable_deletion)

        if not dry_run:
            state[candidate['name']] = 'DONE'
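
The comment block in Example No. 12 describes a three-stage lifecycle per query: no state means the Job should be created, a timestamp means it ran and becomes eligible for removal once JOB_TTL has elapsed, and 'DONE' means it was already cleaned up. A compact sketch of that state machine, with a plain dict standing in for the persistent State store (illustrative values only):

import time

JOB_TTL = 3600  # seconds, illustrative value
state = {}      # stands in for the persistent State store


def reconcile_query(query_name):
    if query_name not in state:
        # no state: create the Job and record when we did so
        state[query_name] = time.time()
        return 'created'
    if state[query_name] == 'DONE':
        return 'already cleaned up'
    if time.time() >= state[query_name] + JOB_TTL:
        # timestamp older than JOB_TTL: delete the Job, mark it done
        state[query_name] = 'DONE'
        return 'deleted'
    return 'waiting for TTL'
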
Example No. 13
    def __init__(self, dry_run, instance):
        self.dry_run = dry_run
        self.settings = queries.get_app_interface_settings()

        cluster_info = instance['hiveCluster']
        hive_cluster = instance['hiveCluster']['name']

        # Getting the OCM Client for the hive cluster
        ocm_map = OCMMap(clusters=[cluster_info],
                         integration=QONTRACT_INTEGRATION,
                         settings=self.settings)

        self.ocm_cli = ocm_map.get(hive_cluster)
        if not self.ocm_cli:
            raise OcpReleaseEcrMirrorError(f"Can't create ocm client for "
                                           f"cluster {hive_cluster}")

        # Getting the OC Client for the hive cluster
        oc_map = OC_Map(clusters=[cluster_info],
                        integration=QONTRACT_INTEGRATION,
                        settings=self.settings)
        self.oc_cli = oc_map.get(hive_cluster)
        if not self.oc_cli:
            raise OcpReleaseEcrMirrorError(f"Can't create oc client for "
                                           f"cluster {hive_cluster}")

        namespace = instance['ecrResourcesNamespace']
        ocp_release_identifier = instance['ocpReleaseEcrIdentifier']
        ocp_art_dev_identifier = instance['ocpArtDevEcrIdentifier']

        ocp_release_info = self._get_tf_resource_info(namespace,
                                                      ocp_release_identifier)
        if ocp_release_info is None:
            raise OcpReleaseEcrMirrorError(f"Could not find rds "
                                           f"identifier "
                                           f"{ocp_release_identifier} in "
                                           f"namespace {namespace['name']}")

        ocp_art_dev_info = self._get_tf_resource_info(namespace,
                                                      ocp_art_dev_identifier)
        if ocp_art_dev_info is None:
            raise OcpReleaseEcrMirrorError(f"Could not find rds identifier"
                                           f" {ocp_art_dev_identifier} in"
                                           f"namespace {namespace['name']}")

        # Getting the AWS Client for the accounts
        aws_accounts = [
            self._get_aws_account_info(account=ocp_release_info['account']),
            self._get_aws_account_info(account=ocp_art_dev_info['account'])
        ]
        self.aws_cli = AWSApi(thread_pool_size=1,
                              accounts=aws_accounts,
                              settings=self.settings,
                              init_ecr_auth_tokens=True)
        self.aws_cli.map_ecr_resources()

        self.ocp_release_ecr_uri = self._get_image_uri(
            account=ocp_release_info['account'],
            repository=ocp_release_identifier)
        if self.ocp_release_ecr_uri is None:
            raise OcpReleaseEcrMirrorError(f"Could not find the "
                                           f"ECR repository "
                                           f"{ocp_release_identifier}")

        self.ocp_art_dev_ecr_uri = self._get_image_uri(
            account=ocp_art_dev_info['account'],
            repository=ocp_art_dev_identifier)
        if self.ocp_art_dev_ecr_uri is None:
            raise OcpReleaseEcrMirrorError(f"Could not find the "
                                           f"ECR repository "
                                           f"{ocp_art_dev_identifier}")

        # Getting all the credentials
        quay_creds = self._get_quay_creds()
        ocp_release_creds = self._get_ecr_creds(
            account=ocp_release_info['account'],
            region=ocp_release_info['region'])
        ocp_art_dev_creds = self._get_ecr_creds(
            account=ocp_art_dev_info['account'],
            region=ocp_art_dev_info['region'])

        # Creating a single dictionary with all credentials to be used by the
        # "oc adm release mirror" command
        self.registry_creds = {
            'auths': {
                **quay_creds['auths'],
                **ocp_release_creds['auths'],
                **ocp_art_dev_creds['auths'],
            }
        }
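
The constructor above ends by merging three docker-config style credential dictionaries into a single 'auths' mapping for the mirroring command. The merge is plain dictionary unpacking; a minimal illustration with made-up registry hosts and tokens:

quay_creds = {'auths': {'quay.io': {'auth': 'cXVheS10b2tlbg=='}}}
ocp_release_creds = {'auths': {
    '111111111111.dkr.ecr.us-east-1.amazonaws.com': {'auth': 'ZWNyLXRva2Vu'}}}
ocp_art_dev_creds = {'auths': {
    '222222222222.dkr.ecr.us-east-1.amazonaws.com': {'auth': 'ZWNyLXRva2Vu'}}}

registry_creds = {
    'auths': {
        **quay_creds['auths'],
        **ocp_release_creds['auths'],
        **ocp_art_dev_creds['auths'],
    }
}
# later sources win on key collisions, so ordering matters if two of them
# ever carry credentials for the same registry host
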
Example No. 14
def fetch_desired_state(infer_clusters=True, defer=None):
    gqlapi = gql.get_api()
    state = AggregatedList()

    roles = gqlapi.query(ROLES_QUERY)['roles']
    for role in roles:
        permissions = list(filter(
            lambda p: p.get('service') in ['github-org', 'github-org-team'],
            role['permissions']
        ))

        if not permissions:
            continue

        members = []

        for user in role['users']:
            members.append(user['github_username'])

        for bot in role['bots']:
            if 'github_username' in bot:
                members.append(bot['github_username'])
        members = [m.lower() for m in members]

        for permission in permissions:
            if permission['service'] == 'github-org':
                state.add(permission, members)
            elif permission['service'] == 'github-org-team':
                state.add(permission, members)
                state.add({
                    'service': 'github-org',
                    'org': permission['org'],
                }, members)

    if not infer_clusters:
        return state

    clusters = gqlapi.query(CLUSTERS_QUERY)['clusters']
    settings = queries.get_app_interface_settings()
    oc_map = OC_Map(clusters=clusters, settings=settings)
    defer(lambda: oc_map.cleanup())
    openshift_users_desired_state = \
        openshift_users.fetch_desired_state(oc_map)
    for cluster in clusters:
        if not cluster['auth']:
            continue

        cluster_name = cluster['name']
        members = [ou['user'].lower()
                   for ou in openshift_users_desired_state
                   if ou['cluster'] == cluster_name]

        state.add({
            'service': 'github-org',
            'org': cluster['auth']['org'],
        }, members)
        if cluster['auth']['service'] == 'github-org-team':
            state.add({
                'service': 'github-org-team',
                'org': cluster['auth']['org'],
                'team': cluster['auth']['team'],
            }, members)

    return state
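
Example No. 14 repeatedly calls state.add(params, members) on an AggregatedList; the useful property is that members added under equal params are merged into one entry. A small stand-in that groups by a hashable view of the params dict (not the project's real class):

class ToyAggregatedList:
    def __init__(self):
        self._groups = {}

    def add(self, params, items):
        key = tuple(sorted(params.items()))
        self._groups.setdefault(key, set()).update(items)

    def dump(self):
        return [{'params': dict(key), 'items': sorted(values)}
                for key, values in self._groups.items()]


state = ToyAggregatedList()
state.add({'service': 'github-org', 'org': 'acme'}, ['alice'])
state.add({'service': 'github-org', 'org': 'acme'}, ['bob'])
# both users end up under the single github-org/acme entry
print(state.dump())
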
Example No. 15
def run(dry_run, enable_deletion=False):
    settings = queries.get_app_interface_settings()
    accounts = queries.get_aws_accounts()
    state = State(integration=QONTRACT_INTEGRATION,
                  accounts=accounts,
                  settings=settings)

    queries_list = collect_queries()
    remove_candidates = []
    for query in queries_list:
        query_name = query['name']

        # Checking the sql-query state:
        # - No state: up for execution.
        # - State is a timestamp: executed and up for removal
        #   after the JOB_TTL
        # - State is 'DONE': executed and removed.
        try:
            query_state = state[query_name]
            is_cronjob = query.get('schedule')
            if query_state != 'DONE' and not is_cronjob:
                remove_candidates.append({
                    'name': query_name,
                    'timestamp': query_state
                })
            continue
        except KeyError:
            pass

        image_repository = 'quay.io/app-sre'
        use_pull_secret = False
        sql_query_settings = settings.get('sqlQuery')
        if sql_query_settings:
            use_pull_secret = True
            image_repository = sql_query_settings['imageRepository']
            pull_secret = sql_query_settings['pullSecret']
            secret_resource = orb.fetch_provider_vault_secret(
                path=pull_secret['path'],
                version=pull_secret['version'],
                name=query_name,
                labels=pull_secret['labels'] or {},
                annotations=pull_secret['annotations'] or {},
                type=pull_secret['type'],
                integration=QONTRACT_INTEGRATION,
                integration_version=QONTRACT_INTEGRATION_VERSION)

        job_yaml = process_template(query,
                                    image_repository=image_repository,
                                    use_pull_secret=use_pull_secret)
        job = yaml.safe_load(job_yaml)
        job_resource = OpenshiftResource(job, QONTRACT_INTEGRATION,
                                         QONTRACT_INTEGRATION_VERSION)
        oc_map = OC_Map(namespaces=[query['namespace']],
                        integration=QONTRACT_INTEGRATION,
                        settings=queries.get_app_interface_settings(),
                        internal=None)

        if use_pull_secret:
            openshift_base.apply(dry_run=dry_run,
                                 oc_map=oc_map,
                                 cluster=query['cluster'],
                                 namespace=query['namespace']['name'],
                                 resource_type=secret_resource.kind,
                                 resource=secret_resource,
                                 wait_for_namespace=False)

        openshift_base.apply(dry_run=dry_run,
                             oc_map=oc_map,
                             cluster=query['cluster'],
                             namespace=query['namespace']['name'],
                             resource_type=job_resource.kind,
                             resource=job_resource,
                             wait_for_namespace=False)

        if not dry_run:
            state[query_name] = time.time()

    for candidate in remove_candidates:
        if time.time() < candidate['timestamp'] + JOB_TTL:
            continue

        try:
            query = collect_queries(query_name=candidate['name'])[0]
        except IndexError:
            raise RuntimeError(f'sql-query {candidate["name"]} not present '
                               f'in the app-interface while its Job is still '
                               f'not removed from the cluster. Manual clean '
                               f'up is needed.')

        oc_map = OC_Map(namespaces=[query['namespace']],
                        integration=QONTRACT_INTEGRATION,
                        settings=queries.get_app_interface_settings(),
                        internal=None)

        try:
            openshift_base.delete(dry_run=dry_run,
                                  oc_map=oc_map,
                                  cluster=query['cluster'],
                                  namespace=query['namespace']['name'],
                                  resource_type='job',
                                  name=query['name'],
                                  enable_deletion=enable_deletion)
        except StatusCodeError:
            LOG.exception("Error removing ['%s' '%s' 'job' '%s']",
                          query['cluster'], query['namespace']['name'],
                          query['name'])

        try:
            openshift_base.delete(dry_run=dry_run,
                                  oc_map=oc_map,
                                  cluster=query['cluster'],
                                  namespace=query['namespace']['name'],
                                  resource_type='Secret',
                                  name=query['name'],
                                  enable_deletion=enable_deletion)
        except StatusCodeError:
            LOG.exception("Error removing ['%s' '%s' 'Secret' '%s']",
                          query['cluster'], query['namespace']['name'],
                          query['name'])

        if not dry_run:
            state[candidate['name']] = 'DONE'
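
Example No. 15 builds each Job by rendering a YAML template (process_template) and parsing it with yaml.safe_load before wrapping it in an OpenshiftResource. A minimal sketch of that rendering step, using a hypothetical template and str.format (the real template and its fields are not shown in this example):

import yaml

JOB_TEMPLATE = """
apiVersion: batch/v1
kind: Job
metadata:
  name: {name}
spec:
  template:
    spec:
      restartPolicy: Never
      containers:
      - name: {name}
        image: {image_repository}/sql-query:latest
"""


def process_template(query, image_repository):
    # render the hypothetical template with the query's name
    return JOB_TEMPLATE.format(name=query['name'],
                               image_repository=image_repository)


job = yaml.safe_load(process_template({'name': 'example-query'},
                                      'quay.io/app-sre'))
print(job['kind'], job['metadata']['name'])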