Example #1
def run(dry_run=False, thread_pool_size=10):
    settings = queries.get_app_interface_settings()
    clusters = queries.get_clusters()
    clusters = [c for c in clusters if c.get('ocm') is not None]
    ocm_map = OCMMap(clusters=clusters,
                     integration=QONTRACT_INTEGRATION,
                     settings=settings)
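    # cluster_specs() appears to return the current spec of each cluster as
    # reported by OCM, keyed by cluster name.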
    current_state = ocm_map.cluster_specs()
    desired_state = {
        c['name']: {
            'spec': c['spec'],
            'network': c['network']
        }
        for c in clusters
    }

    error = False
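    # Flag (but do not fix) any cluster whose declared spec differs from
    # what OCM reports.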
    for k, desired_spec in desired_state.items():
        current_spec = current_state[k]
        if current_spec != desired_spec:
            logging.error(
                '[%s] desired spec %s is different from current spec %s', k,
                desired_spec, current_spec)
            error = True

    if error:
        sys.exit(1)
Example #2
def run(dry_run, thread_pool_size=10):
    settings = queries.get_app_interface_settings()
    clusters = queries.get_clusters()
    clusters = [c for c in clusters if c.get('ocm') is not None]
    ocm_map = OCMMap(clusters=clusters, integration=QONTRACT_INTEGRATION,
                     settings=settings)
    current_state, pending_state = ocm_map.cluster_specs()
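    # pending_state is assumed to hold clusters whose creation is still in
    # progress; clusters known to neither state are created at the bottom of
    # this function.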
    desired_state = {c['name']: {'spec': c['spec'], 'network': c['network']}
                     for c in clusters}

    error = False
    for cluster_name, desired_spec in desired_state.items():
        current_spec = current_state.get(cluster_name)
        if current_spec and current_spec != desired_spec:
            logging.error(
                '[%s] desired spec %s is different from current spec %s',
                cluster_name, desired_spec, current_spec)
            error = True

    if error:
        sys.exit(1)

    for cluster_name, desired_spec in desired_state.items():
        if cluster_name in current_state or cluster_name in pending_state:
            continue
        logging.info(['create_cluster', cluster_name])
        if not dry_run:
            ocm = ocm_map.get(cluster_name)
            ocm.create_cluster(cluster_name, desired_spec)
Example #3
def fetch_current_state(clusters, settings):
    current_state = []
    ocm_map = OCMMap(clusters=clusters, integration=QONTRACT_INTEGRATION,
                     settings=settings)

    for cluster_info in clusters:
        cluster = cluster_info['name']
        ocm = ocm_map.get(cluster)
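        # get_github_idp_teams() is assumed to return the GitHub identity
        # providers currently configured on the cluster.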
        idps = ocm.get_github_idp_teams(cluster)
        current_state.extend(idps)

    return ocm_map, current_state
Example #4
def fetch_desired_state(settings):
    desired_state = []
    clusters = [
        c for c in queries.get_clusters() if c.get('peering') is not None
    ]
    ocm_map = OCMMap(clusters=clusters,
                     integration=QONTRACT_INTEGRATION,
                     settings=settings)
    for cluster_info in clusters:
        cluster = cluster_info['name']
        ocm = ocm_map.get(cluster)
        peering_info = cluster_info['peering']
        # requester is the cluster's AWS account
        requester = {
            'vpc_id': peering_info['vpc_id'],
            'cidr_block': cluster_info['network']['vpc'],
            'region': cluster_info['spec']['region']
        }
        peer_connections = peering_info['connections']
        for peer_connection in peer_connections:
            connection_name = peer_connection['name']
            peer_vpc = peer_connection['vpc']
            # accepter is the peered AWS account
            accepter = {
                'vpc_id': peer_vpc['vpc_id'],
                'cidr_block': peer_vpc['cidr_block'],
                'region': peer_vpc['region']
            }
            account = peer_vpc['account']
            # assume_role is the role to assume to provision the
            # peering connection request, through the accepter AWS account.
            # this may change in the future -
            # in case we add support for peerings between clusters.
            account['assume_role'] = \
                ocm.get_aws_infrastructure_access_terraform_assume_role(
                    cluster,
                    peer_vpc['account']['uid'],
                    peer_vpc['account']['terraformUsername']
                )
            # assume_region is the region in which the requester resides
            account['assume_region'] = requester['region']
            item = {
                'connection_name': connection_name,
                'requester': requester,
                'accepter': accepter,
                'account': account
            }
            desired_state.append(item)

    return desired_state
Example #5
def cluster_upgrades(ctx, name):
    settings = queries.get_app_interface_settings()

    clusters = queries.get_clusters()

    clusters_ocm = [
        c for c in clusters
        if c.get('ocm') is not None and c.get('auth') is not None
    ]

    ocm_map = OCMMap(clusters=clusters_ocm, settings=settings)
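    # The OCM clients are only consulted for clusters with an automatic
    # upgrade policy, to look up the next scheduled upgrade run below.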

    clusters_data = []
    for c in clusters:
        if name and c['name'] != name:
            continue

        if not c.get('spec'):
            continue

        data = {
            'name': c['name'],
            'upgrade': c['spec']['upgrade'],
            'id': c['spec']['id'],
            'external_id': c['spec'].get('external_id'),
        }

        upgrade_policy = c['upgradePolicy']

        if upgrade_policy:
            data['upgradePolicy'] = upgrade_policy.get('schedule_type')

        if data.get('upgradePolicy') == 'automatic':
            data['schedule'] = c['upgradePolicy']['schedule']
            ocm = ocm_map.get(c['name'])
            if ocm:
                upgrade_policy = ocm.get_upgrade_policies(c['name'])
                next_run = upgrade_policy[0].get('next_run')
                if next_run:
                    data['next_run'] = next_run
        else:
            data['upgradePolicy'] = 'manual'

        clusters_data.append(data)

    clusters_data = sorted(clusters_data, key=lambda k: k['upgrade'])

    columns = ['name', 'upgrade', 'upgradePolicy', 'schedule', 'next_run']

    print_output(ctx.obj['output'], clusters_data, columns)
Example #6
def fetch_current_state(clusters):
    settings = queries.get_app_interface_settings()
    ocm_map = OCMMap(clusters=clusters, integration=QONTRACT_INTEGRATION,
                     settings=settings)

    current_state = []
    for cluster in clusters:
        cluster_name = cluster['name']
        ocm = ocm_map.get(cluster_name)
        machine_pools = ocm.get_machine_pools(cluster_name)
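        # Tag each machine pool with its cluster name so the flattened
        # current_state list keeps that association.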
        for machine_pool in machine_pools:
            machine_pool['cluster'] = cluster_name
            current_state.append(machine_pool)

    return ocm_map, current_state
Example #7
def fetch_current_state(clusters):
    settings = queries.get_app_interface_settings()
    ocm_map = OCMMap(clusters=clusters,
                     integration=QONTRACT_INTEGRATION,
                     settings=settings)

    current_state = []
    for cluster in clusters:
        cluster_name = cluster['name']
        ocm = ocm_map.get(cluster_name)
        upgrade_policies = \
            ocm.get_upgrade_policies(cluster_name, schedule_type='automatic')
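        # Only automatically scheduled upgrade policies are fetched; manually
        # triggered upgrades are out of scope here.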
        for upgrade_policy in upgrade_policies:
            upgrade_policy['cluster'] = cluster_name
            current_state.append(upgrade_policy)

    return ocm_map, current_state
Example #8
def fetch_current_state(clusters):
    settings = queries.get_app_interface_settings()
    ocm_map = OCMMap(clusters=clusters,
                     integration=QONTRACT_INTEGRATION,
                     settings=settings)

    current_state = []
    for cluster in clusters:
        cluster_name = cluster['name']
        ocm = ocm_map.get(cluster_name)
        addons = ocm.get_cluster_addons(cluster_name)
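        # addons may be empty for clusters without installed add-ons, hence
        # the check before iterating.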
        if addons:
            for addon in addons:
                addon['cluster'] = cluster_name
                current_state.append(addon)

    return ocm_map, current_state
Example #9
def fetch_current_state():
    current_state = []
    settings = queries.get_app_interface_settings()
    clusters = [c for c in queries.get_clusters()
                if c.get('ocm') is not None]
    ocm_map = OCMMap(clusters=clusters, integration=QONTRACT_INTEGRATION,
                     settings=settings)

    for cluster_info in clusters:
        cluster = cluster_info['name']
        ocm = ocm_map.get(cluster)
        role_grants = ocm.get_aws_infrastructure_access_role_grants(cluster)
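        # Each grant is assumed to come back as a (user_arn, access_level)
        # pair.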
        for user_arn, access_level in role_grants:
            item = {
                'cluster': cluster,
                'user_arn': user_arn,
                'access_level': access_level
            }
            current_state.append(item)

    return ocm_map, current_state
Example #10
def fetch_current_state(clusters):
    settings = queries.get_app_interface_settings()
    ocm_map = OCMMap(clusters=clusters,
                     integration=QONTRACT_INTEGRATION,
                     settings=settings)

    current_state = []
    for cluster in clusters:
        cluster_name = cluster['name']
        ocm = ocm_map.get(cluster_name)
        labels = ocm.get_external_configuration_labels(cluster_name)
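        # Labels come back as a plain key/value mapping; each pair becomes
        # one state item tied to its cluster.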
        for key, value in labels.items():
            item = {
                'label': {
                    'key': key,
                    'value': value
                },
                'cluster': cluster_name
            }
            current_state.append(item)

    return ocm_map, current_state
Example #11
def fetch_current_state(thread_pool_size):
    clusters = queries.get_clusters()
    clusters = [c for c in clusters if c.get('ocm') is not None]
    current_state = []
    settings = queries.get_app_interface_settings()
    ocm_map = OCMMap(clusters=clusters,
                     integration=QONTRACT_INTEGRATION,
                     settings=settings)
    groups_list = openshift_groups.create_groups_list(clusters, oc_map=ocm_map)
    results = threaded.run(get_cluster_state,
                           groups_list,
                           thread_pool_size,
                           ocm_map=ocm_map)
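    # threaded.run returns one result list per group; flatten them into a
    # single current_state list.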

    current_state = [item for sublist in results for item in sublist]
    return ocm_map, current_state
Example #12
def run(dry_run,
        print_only=False,
        enable_deletion=False,
        thread_pool_size=10,
        defer=None):
    settings = queries.get_app_interface_settings()
    clusters = [
        c for c in queries.get_clusters() if c.get('peering') is not None
    ]
    ocm_map = OCMMap(clusters=clusters,
                     integration=QONTRACT_INTEGRATION,
                     settings=settings)

    # Fetch desired state for cluster-to-vpc(account) VPCs
    desired_state_vpc, err = \
        build_desired_state_vpc(clusters, ocm_map, settings)
    if err:
        sys.exit(1)

    # Fetch desired state for cluster-to-cluster VPCs
    desired_state_cluster, err = \
        build_desired_state_cluster(clusters, ocm_map, settings)
    if err:
        sys.exit(1)

    desired_state = desired_state_vpc + desired_state_cluster

    # check there are no repeated vpc connection names
    connection_names = [c['connection_name'] for c in desired_state]
    if len(set(connection_names)) != len(connection_names):
        logging.error("duplicate vpc connection names found")
        sys.exit(1)

    participating_accounts = \
        [item['requester']['account'] for item in desired_state]
    participating_accounts += \
        [item['accepter']['account'] for item in desired_state]
    participating_account_names = \
        [a['name'] for a in participating_accounts]
    accounts = [
        a for a in queries.get_aws_accounts()
        if a['name'] in participating_account_names
    ]
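    # Only AWS accounts that actually take part in a peering are handed to
    # Terraform.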

    ts = Terrascript(QONTRACT_INTEGRATION,
                     "",
                     thread_pool_size,
                     accounts,
                     settings=settings)
    ts.populate_additional_providers(participating_accounts)
    ts.populate_vpc_peerings(desired_state)
    working_dirs = ts.dump(print_only=print_only)

    if print_only:
        sys.exit()

    tf = Terraform(QONTRACT_INTEGRATION, QONTRACT_INTEGRATION_VERSION, "",
                   working_dirs, thread_pool_size)

    if tf is None:
        sys.exit(1)

    defer(lambda: tf.cleanup())

    deletions_detected, err = tf.plan(enable_deletion)
    if err:
        sys.exit(1)
    if deletions_detected and not enable_deletion:
        sys.exit(1)

    if dry_run:
        return

    err = tf.apply()
    if err:
        sys.exit(1)
Example #13
    def __init__(self, dry_run, instance):
        self.dry_run = dry_run
        self.settings = queries.get_app_interface_settings()

        cluster_info = instance['hiveCluster']
        hive_cluster = instance['hiveCluster']['name']

        # Getting the OCM Client for the hive cluster
        ocm_map = OCMMap(clusters=[cluster_info],
                         integration=QONTRACT_INTEGRATION,
                         settings=self.settings)

        self.ocm_cli = ocm_map.get(hive_cluster)
        if not self.ocm_cli:
            raise OcpReleaseEcrMirrorError(f"Can't create ocm client for "
                                           f"cluster {hive_cluster}")

        # Getting the OC Client for the hive cluster
        oc_map = OC_Map(clusters=[cluster_info],
                        integration=QONTRACT_INTEGRATION,
                        settings=self.settings)
        self.oc_cli = oc_map.get(hive_cluster)
        if not self.oc_cli:
            raise OcpReleaseEcrMirrorError(f"Can't create oc client for "
                                           f"cluster {hive_cluster}")

        namespace = instance['ecrResourcesNamespace']
        ocp_release_identifier = instance['ocpReleaseEcrIdentifier']
        ocp_art_dev_identifier = instance['ocpArtDevEcrIdentifier']

        ocp_release_info = self._get_tf_resource_info(namespace,
                                                      ocp_release_identifier)
        if ocp_release_info is None:
            raise OcpReleaseEcrMirrorError(f"Could not find rds "
                                           f"identifier "
                                           f"{ocp_release_identifier} in "
                                           f"namespace {namespace['name']}")

        ocp_art_dev_info = self._get_tf_resource_info(namespace,
                                                      ocp_art_dev_identifier)
        if ocp_art_dev_info is None:
            raise OcpReleaseEcrMirrorError(f"Could not find rds identifier"
                                           f" {ocp_art_dev_identifier} in"
                                           f"namespace {namespace['name']}")

        # Getting the AWS Client for the accounts
        aws_accounts = [
            self._get_aws_account_info(account=ocp_release_info['account']),
            self._get_aws_account_info(account=ocp_art_dev_info['account'])
        ]
        self.aws_cli = AWSApi(thread_pool_size=1,
                              accounts=aws_accounts,
                              settings=self.settings,
                              init_ecr_auth_tokens=True)
        self.aws_cli.map_ecr_resources()
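        # map_ecr_resources() is assumed to index the ECR repositories per
        # account so the image URIs can be resolved below.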

        self.ocp_release_ecr_uri = self._get_image_uri(
            account=ocp_release_info['account'],
            repository=ocp_release_identifier)
        if self.ocp_release_ecr_uri is None:
            raise OcpReleaseEcrMirrorError(f"Could not find the "
                                           f"ECR repository "
                                           f"{ocp_release_identifier}")

        self.ocp_art_dev_ecr_uri = self._get_image_uri(
            account=ocp_art_dev_info['account'],
            repository=ocp_art_dev_identifier)
        if self.ocp_art_dev_ecr_uri is None:
            raise OcpReleaseEcrMirrorError(f"Could not find the "
                                           f"ECR repository "
                                           f"{ocp_art_dev_identifier}")

        # Getting all the credentials
        quay_creds = self._get_quay_creds()
        ocp_release_creds = self._get_ecr_creds(
            account=ocp_release_info['account'],
            region=ocp_release_info['region'])
        ocp_art_dev_creds = self._get_ecr_creds(
            account=ocp_art_dev_info['account'],
            region=ocp_art_dev_info['region'])

        # Creating a single dictionary with all credentials to be used by the
        # "oc adm release mirror" command
        self.registry_creds = {
            'auths': {
                **quay_creds['auths'],
                **ocp_release_creds['auths'],
                **ocp_art_dev_creds['auths'],
            }
        }
Example #14
def run(dry_run, gitlab_project_id=None, thread_pool_size=10):
    settings = queries.get_app_interface_settings()
    clusters = queries.get_clusters()
    clusters = [c for c in clusters if c.get('ocm') is not None]
    ocm_map = OCMMap(clusters=clusters, integration=QONTRACT_INTEGRATION,
                     settings=settings)
    current_state, pending_state = ocm_map.cluster_specs()
    desired_state = fetch_desired_state(clusters)

    if not dry_run:
        mr_cli = mr_client_gateway.init(gitlab_project_id=gitlab_project_id)

    error = False
    clusters_updates = {}
    for cluster_name, desired_spec in desired_state.items():
        current_spec = current_state.get(cluster_name)
        if current_spec:
            clusters_updates[cluster_name] = {}
            cluster_path = 'data' + \
                [c['path'] for c in clusters
                 if c['name'] == cluster_name][0]
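            # cluster_path points at the cluster's file in app-interface and
            # is stored so the update MR below knows which file to modify.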

            # validate version
            desired_spec['spec'].pop('initial_version')
            desired_version = desired_spec['spec'].pop('version')
            current_version = current_spec['spec'].pop('version')
            compare_result = 1  # default value in case version is empty
            if desired_version:
                compare_result = \
                    semver.compare(current_version, desired_version)
            if compare_result > 0:
                # current version is larger due to an upgrade.
                # submit MR to update cluster version
                logging.info(
                    '[%s] desired version %s is different ' +
                    'from current version %s. ' +
                    'version will be updated automatically in app-interface.',
                    cluster_name, desired_version, current_version)
                clusters_updates[cluster_name]['version'] = current_version
            elif compare_result < 0:
                logging.error(
                    '[%s] desired version %s is different ' +
                    'from current version %s',
                    cluster_name, desired_version, current_version)
                error = True

            if not desired_spec['spec'].get('id'):
                clusters_updates[cluster_name]['id'] = \
                    current_spec['spec']['id']

            if not desired_spec['spec'].get('external_id'):
                clusters_updates[cluster_name]['external_id'] = \
                    current_spec['spec']['external_id']

            desired_provision_shard_id = \
                desired_spec['spec'].get('provision_shard_id')
            current_provision_shard_id = \
                current_spec['spec']['provision_shard_id']
            if desired_provision_shard_id != current_provision_shard_id:
                clusters_updates[cluster_name]['provision_shard_id'] = \
                    current_provision_shard_id

            if clusters_updates[cluster_name]:
                clusters_updates[cluster_name]['path'] = cluster_path

            # exclude params we don't want to check in the specs
            for k in ['id', 'external_id', 'provision_shard_id']:
                current_spec['spec'].pop(k, None)
                desired_spec['spec'].pop(k, None)

            # validate specs
            if current_spec != desired_spec:
                logging.error(
                    '[%s] desired spec %s is different ' +
                    'from current spec %s',
                    cluster_name, desired_spec, current_spec)
                error = True
        else:
            # create cluster
            if cluster_name in pending_state:
                continue
            logging.info(['create_cluster', cluster_name])
            ocm = ocm_map.get(cluster_name)
            ocm.create_cluster(cluster_name, desired_spec, dry_run)

    create_update_mr = False
    for cluster_name, cluster_updates in clusters_updates.items():
        for k, v in cluster_updates.items():
            if k == 'path':
                continue
            logging.info(
                f"[{cluster_name}] desired key " +
                f"{k} will be updated automatically " +
                f"with value {v}."
            )
            create_update_mr = True
    if create_update_mr and not dry_run:
        mr = CreateClustersUpdates(clusters_updates)
        mr.submit(cli=mr_cli)

    if error:
        sys.exit(1)
Example #15
def run(dry_run, thread_pool_size=10,
        internal=None, use_jump_host=True, defer=None):
    kafka_clusters = queries.get_kafka_clusters()
    if not kafka_clusters:
        logging.debug("No Kafka clusters found in app-interface")
        sys.exit(ExitCodes.SUCCESS)

    settings = queries.get_app_interface_settings()
    ocm_map = OCMMap(clusters=kafka_clusters,
                     integration=QONTRACT_INTEGRATION,
                     settings=settings)
    namespaces = []
    for kafka_cluster in kafka_clusters:
        namespaces.extend(kafka_cluster['namespaces'])
    ri, oc_map = ob.fetch_current_state(
        namespaces=namespaces,
        thread_pool_size=thread_pool_size,
        integration=QONTRACT_INTEGRATION,
        integration_version=QONTRACT_INTEGRATION_VERSION,
        override_managed_types=['Secret'],
        internal=internal,
        use_jump_host=use_jump_host)
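    # ri is the resource inventory used to reconcile the Secrets created
    # further below; oc_map provides OC clients for those namespaces.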
    defer(lambda: oc_map.cleanup())

    current_state = ocm_map.kafka_cluster_specs()
    desired_state = fetch_desired_state(kafka_clusters)

    error = False
    for kafka_cluster in kafka_clusters:
        kafka_cluster_name = kafka_cluster['name']
        desired_cluster = [c for c in desired_state
                           if kafka_cluster_name == c['name']][0]
        current_cluster = [c for c in current_state
                           if kafka_cluster_name == c['name']]
        # check if cluster exists. if not - create it
        if not current_cluster:
            logging.info(['create_cluster', kafka_cluster_name])
            if not dry_run:
                ocm = ocm_map.get(kafka_cluster_name)
                ocm.create_kafka_cluster(desired_cluster)
            continue
        # there should only be one cluster
        current_cluster = current_cluster[0]
        # check if desired cluster matches current cluster. if not - error
        if not all(k in current_cluster.keys()
                   for k in desired_cluster.keys()):
            logging.error(
                '[%s] desired spec %s is different ' +
                'from current spec %s',
                kafka_cluster_name, desired_cluster, current_cluster)
            error = True
            continue
        # check if cluster is ready. if not - wait
        if current_cluster['status'] != 'complete':
            continue
        # we have a ready cluster!
        # let's create a Secret in all referencing namespaces
        kafka_namespaces = kafka_cluster['namespaces']
        secret_fields = ['bootstrapServerHost']
        data = {k: v for k, v in current_cluster.items()
                if k in secret_fields}
        resource = construct_oc_resource(data)
        for namespace_info in kafka_namespaces:
            ri.add_desired(
                namespace_info['cluster']['name'],
                namespace_info['name'],
                resource.kind,
                resource.name,
                resource
            )

    ob.realize_data(dry_run, oc_map, ri)

    if error:
        sys.exit(ExitCodes.ERROR)