def run(dry_run, print_to_file=None,
        enable_deletion=False, thread_pool_size=10, defer=None):
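    # NOTE: this snippet assumes the usual qontract-reconcile module-level
    # imports (logging, sys, queries, ocm, aws_api, terrascript, terraform),
    # the QONTRACT_INTEGRATION* constants and the build_desired_state_*
    # helpers; the 'defer' callable is expected to be injected by a decorator.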
    settings = queries.get_app_interface_settings()
    clusters = [c for c in queries.get_clusters()
                if c.get('peering') is not None]
    with_ocm = any(c.get('ocm') for c in clusters)
    if with_ocm:
        ocm_map = ocm.OCMMap(clusters=clusters,
                             integration=QONTRACT_INTEGRATION,
                             settings=settings)
    else:
        # this is the case of an OCP cluster which is not provisioned
        # through OCM. it is expected that an 'assume_role' is provided
        # on the vpc peering definition in the cluster file.
        ocm_map = None

    accounts = queries.get_aws_accounts()
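    # a single-threaded AWS API client is enough here; user initialization
    # is not needed for peerings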
    awsapi = aws_api.AWSApi(1, accounts, settings=settings, init_users=False)

    desired_state = []
    errors = []
    # Fetch desired state for cluster-to-vpc(account) VPCs
    desired_state_vpc, err = \
        build_desired_state_vpc(clusters, ocm_map, awsapi)
    desired_state.extend(desired_state_vpc)
    errors.append(err)

    # Fetch desired state for cluster-to-account (vpc mesh) VPCs
    if ocm_map is not None:
        desired_state_vpc_mesh, err = \
            build_desired_state_vpc_mesh(clusters, ocm_map, awsapi)
        desired_state.extend(desired_state_vpc_mesh)
        errors.append(err)
    else:
        logging.debug('account-vpc-mesh is not yet supported without OCM')

    # Fetch desired state for cluster-to-cluster VPCs
    if ocm_map is not None:
        desired_state_cluster, err = \
            build_desired_state_all_clusters(clusters, ocm_map, awsapi)
        desired_state.extend(desired_state_cluster)
        errors.append(err)
    else:
        logging.debug('cluster-vpc is not yet supported without OCM')

    # check there are no repeated vpc connection names
    connection_names = [c['connection_name'] for c in desired_state]
    if len(set(connection_names)) != len(connection_names):
        logging.error("duplicate vpc connection names found")
        sys.exit(1)

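    # keep only the AWS accounts that take part in a peering,
    # either as requester or as accepter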
    participating_accounts = \
        [item['requester']['account'] for item in desired_state]
    participating_accounts += \
        [item['accepter']['account'] for item in desired_state]
    participating_account_names = \
        [a['name'] for a in participating_accounts]
    accounts = [a for a in accounts
                if a['name'] in participating_account_names]

    ts = terrascript.TerrascriptClient(
        QONTRACT_INTEGRATION,
        "",
        thread_pool_size,
        accounts,
        settings=settings)
    ts.populate_additional_providers(participating_accounts)
    ts.populate_vpc_peerings(desired_state)
    working_dirs = ts.dump(print_to_file=print_to_file)

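    # stop here if we only rendered the Terraform config to a file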
    if print_to_file:
        sys.exit(0 if dry_run else int(any(errors)))

    tf = terraform.TerraformClient(
        QONTRACT_INTEGRATION,
        QONTRACT_INTEGRATION_VERSION,
        "",
        accounts,
        working_dirs,
        thread_pool_size,
        awsapi)

    if tf is None or any(errors):
        sys.exit(1)

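    # make sure the Terraform working directories are cleaned up on exit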
    defer(tf.cleanup)

    disabled_deletions_detected, err = tf.plan(enable_deletion)
    errors.append(err)
    if disabled_deletions_detected:
        logging.error("Deletions detected when they are disabled")
        sys.exit(1)

    if dry_run:
        sys.exit(int(any(errors)))
    if any(errors):
        sys.exit(1)

    errors.append(tf.apply())
    sys.exit(int(any(errors)))
Example #2
def run(dry_run, gitlab_project_id=None, thread_pool_size=10):
    settings = queries.get_app_interface_settings()
    clusters = queries.get_clusters()
    clusters = [c for c in clusters if c.get("ocm") is not None]
    ocm_map = ocmmod.OCMMap(
        clusters=clusters,
        integration=QONTRACT_INTEGRATION,
        settings=settings,
        init_provision_shards=True,
    )
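    # current_state: cluster specs already available in OCM;
    # pending_state: clusters that are still being provisioned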
    current_state, pending_state = ocm_map.cluster_specs()
    desired_state = fetch_desired_state(clusters)

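    # an MR client is only needed when we may actually submit updates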
    if not dry_run:
        mr_cli = mr_client_gateway.init(gitlab_project_id=gitlab_project_id)

    error = False
    clusters_updates = {}
    for cluster_name, desired_spec in desired_state.items():
        # Set the default network type
        if not desired_spec["network"].get("type"):
            desired_spec["network"]["type"] = "OpenShiftSDN"

        current_spec = current_state.get(cluster_name)
        if current_spec:
            clusters_updates[cluster_name] = {"spec": {}, "root": {}}
            cluster_path = (
                "data" +
                [c["path"] for c in clusters if c["name"] == cluster_name][0])

            # validate version
            desired_spec["spec"].pop("initial_version")
            desired_version = desired_spec["spec"].pop("version")
            current_version = current_spec["spec"].pop("version")
            compare_result = 1  # default value in case version is empty
            if desired_version:
                compare_result = semver.compare(current_version,
                                                desired_version)
            if compare_result > 0:
                # current version is larger due to an upgrade.
                # submit MR to update cluster version
                logging.info(
                    "[%s] desired version %s is different " +
                    "from current version %s. " +
                    "version will be updated automatically in app-interface.",
                    cluster_name,
                    desired_version,
                    current_version,
                )
                clusters_updates[cluster_name]["spec"][
                    "version"] = current_version  # noqa: E501
            elif compare_result < 0:
                logging.error(
                    f"[{cluster_name}] version {desired_version} " +
                    f"is different from current version {current_version}. " +
                    f"please correct version to be {current_version}, " +
                    "as this field is only meant for tracking purposes. " +
                    "upgrades are determined by ocm-upgrade-scheduler.")
                error = True

            if not desired_spec["spec"].get("id"):
                clusters_updates[cluster_name]["spec"]["id"] = current_spec[
                    "spec"]["id"]

            if not desired_spec["spec"].get("external_id"):
                clusters_updates[cluster_name]["spec"][
                    "external_id"] = current_spec["spec"]["external_id"]

            if not desired_spec.get("consoleUrl"):
                clusters_updates[cluster_name]["root"][
                    "consoleUrl"] = current_spec["console_url"]

            if not desired_spec.get("serverUrl"):
                clusters_updates[cluster_name]["root"][
                    "serverUrl"] = current_spec["server_url"]

            if not desired_spec.get("elbFQDN"):
                clusters_updates[cluster_name]["root"][
                    "elbFQDN"] = f"elb.apps.{cluster_name}.{current_spec['domain']}"

            desired_provision_shard_id = desired_spec["spec"].get(
                "provision_shard_id")
            current_provision_shard_id = current_spec["spec"][
                "provision_shard_id"]
            if desired_provision_shard_id != current_provision_shard_id:
                clusters_updates[cluster_name]["spec"][
                    "provision_shard_id"] = current_provision_shard_id

            if clusters_updates[cluster_name]:
                clusters_updates[cluster_name]["path"] = cluster_path

            # exclude params we don't want to check in the specs
            for k in ["id", "external_id", "provision_shard_id"]:
                current_spec["spec"].pop(k, None)
                desired_spec["spec"].pop(k, None)

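            # propagate the user workload monitoring setting back to
            # app-interface when it is only set on the OCM side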
            desired_uwm = desired_spec["spec"].get(ocmmod.DISABLE_UWM_ATTR)
            current_uwm = current_spec["spec"].get(ocmmod.DISABLE_UWM_ATTR)

            if desired_uwm is None and current_uwm is not None:
                clusters_updates[cluster_name]["spec"][
                    ocmmod.DISABLE_UWM_ATTR] = current_uwm  # noqa: E501

            # check if cluster update, if any, is valid
            update_spec, err = get_cluster_update_spec(
                cluster_name,
                current_spec,
                desired_spec,
            )
            if err:
                logging.warning(f"Invalid changes to spec: {update_spec}")
                error = True
                continue
            # update cluster
            # TODO(mafriedm): check dry_run in OCM API patch
            if update_spec:
                logging.info(["update_cluster", cluster_name])
                logging.debug(
                    "[%s] desired spec %s is different " +
                    "from current spec %s",
                    cluster_name,
                    desired_spec,
                    current_spec,
                )
                if not dry_run:
                    ocm = ocm_map.get(cluster_name)
                    ocm.update_cluster(cluster_name, update_spec, dry_run)
        else:
            # create cluster
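            # skip clusters whose provisioning is still pending in OCM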
            if cluster_name in pending_state:
                continue
            logging.info(["create_cluster", cluster_name])
            ocm = ocm_map.get(cluster_name)
            ocm.create_cluster(cluster_name, desired_spec, dry_run)

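    # collect all automatic updates and submit them in a single MR
    # against app-interface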
    create_update_mr = False
    for cluster_name, cluster_updates in clusters_updates.items():
        for k, v in cluster_updates["spec"].items():
            logging.info(f"[{cluster_name}] desired key in spec " +
                         f"{k} will be updated automatically " +
                         f"with value {v}.")
            create_update_mr = True
        for k, v in cluster_updates["root"].items():
            logging.info(f"[{cluster_name}] desired root key {k} will "
                         f"be updated automatically with value {v}")
            create_update_mr = True
    if create_update_mr and not dry_run:
        mr = cu.CreateClustersUpdates(clusters_updates)
        mr.submit(cli=mr_cli)

    sys.exit(int(error))
Example #3
def run(dry_run, gitlab_project_id=None, thread_pool_size=10):
    settings = queries.get_app_interface_settings()
    clusters = queries.get_clusters()
    clusters = [c for c in clusters if c.get('ocm') is not None]
    ocm_map = ocmmod.OCMMap(
        clusters=clusters, integration=QONTRACT_INTEGRATION,
        settings=settings, init_provision_shards=True)
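    # current_state: cluster specs already available in OCM;
    # pending_state: clusters that are still being provisioned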
    current_state, pending_state = ocm_map.cluster_specs()
    desired_state = fetch_desired_state(clusters)

    if not dry_run:
        mr_cli = mr_client_gateway.init(gitlab_project_id=gitlab_project_id)

    error = False
    clusters_updates = {}
    for cluster_name, desired_spec in desired_state.items():
        # Set the default network type
        if not desired_spec['network'].get('type'):
            desired_spec['network']['type'] = 'OpenShiftSDN'

        current_spec = current_state.get(cluster_name)
        if current_spec:
            clusters_updates[cluster_name] = {'spec': {}, 'root': {}}
            cluster_path = 'data' + \
                [c['path'] for c in clusters
                 if c['name'] == cluster_name][0]

            # validate version
            desired_spec['spec'].pop('initial_version')
            desired_version = desired_spec['spec'].pop('version')
            current_version = current_spec['spec'].pop('version')
            compare_result = 1  # default value in case version is empty
            if desired_version:
                compare_result = \
                    semver.compare(current_version, desired_version)
            if compare_result > 0:
                # current version is larger due to an upgrade.
                # submit MR to update cluster version
                logging.info(
                    '[%s] desired version %s is different ' +
                    'from current version %s. ' +
                    'version will be updated automatically in app-interface.',
                    cluster_name, desired_version, current_version)
                clusters_updates[cluster_name]['spec']['version'] = current_version  # noqa: E501
            elif compare_result < 0:
                logging.error(
                    f'[{cluster_name}] version {desired_version} ' +
                    f'is different from current version {current_version}. ' +
                    f'please correct version to be {current_version}, ' +
                    'as this field is only meant for tracking purposes. ' +
                    'upgrades are determined by ocm-upgrade-scheduler.')
                error = True

            if not desired_spec['spec'].get('id'):
                clusters_updates[cluster_name]['spec']['id'] = \
                    current_spec['spec']['id']

            if not desired_spec['spec'].get('external_id'):
                clusters_updates[cluster_name]['spec']['external_id'] = \
                    current_spec['spec']['external_id']

            if not desired_spec.get('consoleUrl'):
                clusters_updates[cluster_name]['root']['consoleUrl'] = \
                    current_spec['console_url']

            if not desired_spec.get('serverUrl'):
                clusters_updates[cluster_name]['root']['serverUrl'] = \
                    current_spec['server_url']

            if not desired_spec.get('elbFQDN'):
                clusters_updates[cluster_name]['root']['elbFQDN'] = \
                    f"elb.apps.{cluster_name}.{current_spec['domain']}"

            desired_provision_shard_id = \
                desired_spec['spec'].get('provision_shard_id')
            current_provision_shard_id = \
                current_spec['spec']['provision_shard_id']
            if desired_provision_shard_id != current_provision_shard_id:
                clusters_updates[cluster_name]['spec']['provision_shard_id'] = \
                    current_provision_shard_id

            if clusters_updates[cluster_name]:
                clusters_updates[cluster_name]['path'] = cluster_path

            # exclude params we don't want to check in the specs
            for k in ['id', 'external_id', 'provision_shard_id']:
                current_spec['spec'].pop(k, None)
                desired_spec['spec'].pop(k, None)

            desired_uwm = desired_spec['spec'].get(ocmmod.DISABLE_UWM_ATTR)
            current_uwm = current_spec['spec'].get(ocmmod.DISABLE_UWM_ATTR)

            if desired_uwm is None and current_uwm is not None:
                clusters_updates[cluster_name]['spec'][ocmmod.DISABLE_UWM_ATTR] = \
                    current_uwm  # noqa: E501

            # check if cluster update, if any, is valid
            update_spec, err = get_cluster_update_spec(
                cluster_name,
                current_spec,
                desired_spec,
            )
            if err:
                logging.warning(f"Invalid changes to spec: {update_spec}")
                error = True
                continue
            # update cluster
            # TODO(mafriedm): check dry_run in OCM API patch
            if update_spec:
                logging.info(['update_cluster', cluster_name])
                logging.debug(
                    '[%s] desired spec %s is different ' +
                    'from current spec %s',
                    cluster_name, desired_spec, current_spec)
                if not dry_run:
                    ocm = ocm_map.get(cluster_name)
                    ocm.update_cluster(cluster_name, update_spec, dry_run)
        else:
            # create cluster
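            # skip clusters whose provisioning is still pending in OCM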
            if cluster_name in pending_state:
                continue
            logging.info(['create_cluster', cluster_name])
            ocm = ocm_map.get(cluster_name)
            ocm.create_cluster(cluster_name, desired_spec, dry_run)

    create_update_mr = False
    for cluster_name, cluster_updates in clusters_updates.items():
        for k, v in cluster_updates['spec'].items():
            logging.info(
                f"[{cluster_name}] desired key in spec " +
                f"{k} will be updated automatically " +
                f"with value {v}."
            )
            create_update_mr = True
        for k, v in cluster_updates['root'].items():
            logging.info(
                f"[{cluster_name}] desired root key {k} will "
                f"be updated automatically with value {v}"
            )
            create_update_mr = True
    if create_update_mr and not dry_run:
        mr = cu.CreateClustersUpdates(clusters_updates)
        mr.submit(cli=mr_cli)

    sys.exit(int(error))
Example #4
def run(dry_run,
        print_only=False,
        enable_deletion=False,
        thread_pool_size=10,
        defer=None):
    settings = queries.get_app_interface_settings()
    clusters = [
        c for c in queries.get_clusters() if c.get('peering') is not None
    ]
    ocm_map = ocm.OCMMap(clusters=clusters,
                         integration=QONTRACT_INTEGRATION,
                         settings=settings)

    accounts = queries.get_aws_accounts()
    awsapi = aws_api.AWSApi(1, accounts, settings=settings, init_users=False)

    errors = []
    # Fetch desired state for cluster-to-vpc(account) VPCs
    desired_state_vpc, err = \
        build_desired_state_vpc(clusters, ocm_map, awsapi)
    errors.append(err)

    # Fetch desired state for cluster-to-account (vpc mesh) VPCs
    desired_state_vpc_mesh, err = \
        build_desired_state_vpc_mesh(clusters, ocm_map, awsapi)
    errors.append(err)

    # Fetch desired state for cluster-to-cluster VPCs
    desired_state_cluster, err = \
        build_desired_state_all_clusters(clusters, ocm_map, awsapi)
    errors.append(err)

    desired_state = \
        desired_state_vpc + \
        desired_state_vpc_mesh + \
        desired_state_cluster

    # check there are no repeated vpc connection names
    connection_names = [c['connection_name'] for c in desired_state]
    if len(set(connection_names)) != len(connection_names):
        logging.error("duplicate vpc connection names found")
        sys.exit(1)

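    # keep only the AWS accounts that take part in a peering,
    # either as requester or as accepter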
    participating_accounts = \
        [item['requester']['account'] for item in desired_state]
    participating_accounts += \
        [item['accepter']['account'] for item in desired_state]
    participating_account_names = \
        [a['name'] for a in participating_accounts]
    accounts = [
        a for a in accounts if a['name'] in participating_account_names
    ]

    ts = terrascript.TerrascriptClient(QONTRACT_INTEGRATION,
                                       "",
                                       thread_pool_size,
                                       accounts,
                                       settings=settings)
    ts.populate_additional_providers(participating_accounts)
    ts.populate_vpc_peerings(desired_state)
    working_dirs = ts.dump(print_only=print_only)

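    # stop here if we only printed the Terraform configuration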
    if print_only:
        sys.exit(0 if dry_run else int(any(errors)))

    tf = terraform.TerraformClient(QONTRACT_INTEGRATION,
                                   QONTRACT_INTEGRATION_VERSION, "", accounts,
                                   working_dirs, thread_pool_size)

    if tf is None or any(errors):
        sys.exit(1)

    defer(tf.cleanup)

    disabled_deletions_detected, err = tf.plan(enable_deletion)
    errors.append(err)
    if disabled_deletions_detected:
        logging.error("Deletions detected when they are disabled")
        sys.exit(1)

    if dry_run:
        sys.exit(int(any(errors)))
    if any(errors):
        sys.exit(1)

    errors.append(tf.apply())
    sys.exit(int(any(errors)))