Example #1
def run(dry_run=False,
        thread_pool_size=10,
        internal=None,
        use_jump_host=True,
        take_over=True,
        defer=None):
    namespaces = [
        namespace_info for namespace_info in queries.get_namespaces()
        if namespace_info.get('limitRanges')
    ]

    namespaces = construct_resources(namespaces)

    if not namespaces:
        logging.debug("No LimitRanges definition found in app-interface!")
        sys.exit(0)

    ri, oc_map = ob.fetch_current_state(
        namespaces=namespaces,
        thread_pool_size=thread_pool_size,
        integration=QONTRACT_INTEGRATION,
        integration_version=QONTRACT_INTEGRATION_VERSION,
        override_managed_types=['LimitRange'],
        internal=internal,
        use_jump_host=use_jump_host)
    defer(lambda: oc_map.cleanup())
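    # `defer` is assumed to be injected by the integration's defer decorator;
    # registering oc_map.cleanup() here makes sure the OpenShift client
    # connections are torn down once run() finishes.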

    add_desired_state(namespaces, ri, oc_map)
    ob.realize_data(dry_run,
                    oc_map,
                    ri,
                    enable_deletion=True,
                    take_over=take_over)
Example #2
def run(dry_run,
        thread_pool_size=10,
        internal=None,
        use_jump_host=True,
        providers=[],
        cluster_name=None,
        namespace_name=None,
        init_api_resources=False,
        defer=None):
    gqlapi = gql.get_api()
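    # is_in_shard (assumed to be the standard sharding helper) keeps only
    # namespaces whose "<cluster>/<namespace>" key falls into this replica's
    # shard, so multiple instances of the integration can split the workload.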
    namespaces = [
        namespace_info
        for namespace_info in gqlapi.query(NAMESPACES_QUERY)['namespaces']
        if is_in_shard(f"{namespace_info['cluster']['name']}/" +
                       f"{namespace_info['name']}")
    ]
    namespaces = \
        filter_namespaces_by_cluster_and_namespace(
            namespaces,
            cluster_name,
            namespace_name
        )
    namespaces = canonicalize_namespaces(namespaces, providers)
    oc_map, ri = \
        fetch_data(namespaces, thread_pool_size, internal, use_jump_host,
                   init_api_resources=init_api_resources)
    defer(lambda: oc_map.cleanup())

    ob.realize_data(dry_run, oc_map, ri)

    if ri.has_error_registered():
        sys.exit(1)

    return ri
Example #3
def run(
    dry_run: bool,
    thread_pool_size: int = 10,
    internal: Optional[bool] = None,
    use_jump_host=True,
    defer=None,
):
    gabi_instances = queries.get_gabi_instances()
    if not gabi_instances:
        logging.debug("No gabi instances found in app-interface")
        sys.exit(ExitCodes.SUCCESS)

    gabi_namespaces = [i["namespace"] for g in gabi_instances for i in g["instances"]]

    ri, oc_map = ob.fetch_current_state(
        namespaces=gabi_namespaces,
        thread_pool_size=thread_pool_size,
        integration=QONTRACT_INTEGRATION,
        integration_version=QONTRACT_INTEGRATION_VERSION,
        override_managed_types=["ConfigMap"],
        internal=internal,
        use_jump_host=use_jump_host,
    )
    defer(oc_map.cleanup)
    fetch_desired_state(gabi_instances, ri)
    ob.realize_data(dry_run, oc_map, ri, thread_pool_size)

    if ri.has_error_registered():
        sys.exit(1)
Example #4
def run(dry_run,
        thread_pool_size=10,
        internal=None,
        use_jump_host=True,
        defer=None):
    namespaces = [
        namespace_info for namespace_info in queries.get_namespaces()
        if namespace_info.get("managedRoles")
        and is_in_shard(f"{namespace_info['cluster']['name']}/" +
                        f"{namespace_info['name']}")
    ]
    ri, oc_map = ob.fetch_current_state(
        namespaces=namespaces,
        thread_pool_size=thread_pool_size,
        integration=QONTRACT_INTEGRATION,
        integration_version=QONTRACT_INTEGRATION_VERSION,
        override_managed_types=["RoleBinding.authorization.openshift.io"],
        internal=internal,
        use_jump_host=use_jump_host,
    )
    defer(oc_map.cleanup)
    fetch_desired_state(ri, oc_map)
    ob.realize_data(dry_run, oc_map, ri, thread_pool_size)

    if ri.has_error_registered():
        sys.exit(1)
Example #5
def run(dry_run,
        thread_pool_size=10,
        internal=None,
        use_jump_host=True,
        take_over=True,
        defer=None):

    namespaces = [
        namespace_info for namespace_info in queries.get_namespaces()
        if namespace_info.get('quota')
    ]

    if not namespaces:
        logging.debug("No ResourceQuota definition found in app-interface!")
        sys.exit(0)

    ri, oc_map = ob.fetch_current_state(
        namespaces=namespaces,
        thread_pool_size=thread_pool_size,
        integration=QONTRACT_INTEGRATION,
        integration_version=QONTRACT_INTEGRATION_VERSION,
        override_managed_types=['ResourceQuota'],
        internal=internal,
        use_jump_host=use_jump_host)
    defer(lambda: oc_map.cleanup())
    fetch_desired_state(namespaces, ri, oc_map)
    ob.realize_data(dry_run, oc_map, ri)

    if ri.has_error_registered():
        sys.exit(1)
Example #6
def run(dry_run=False, thread_pool_size=10, internal=None,
        use_jump_host=True, defer=None):
    performance_parameters = queries.get_performance_parameters()
    observability_namespaces = [
        pp['namespace']['cluster']['observabilityNamespace']
        for pp in performance_parameters
        if pp['namespace']['cluster']['observabilityNamespace'] is not None]
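    # PrometheusRule objects are reconciled in each cluster's dedicated
    # observability namespace rather than in the application namespaces
    # themselves.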

    if not observability_namespaces:
        logging.error('No observability namespaces found')
        sys.exit(1)

    ri, oc_map = ob.fetch_current_state(
        namespaces=observability_namespaces,
        thread_pool_size=thread_pool_size,
        integration=QONTRACT_INTEGRATION,
        integration_version=QONTRACT_INTEGRATION_VERSION,
        override_managed_types=['PrometheusRule'],
        internal=internal,
        use_jump_host=use_jump_host)
    defer(lambda: oc_map.cleanup())
    fetch_desired_state(performance_parameters, ri)
    ob.realize_data(dry_run, oc_map, ri)

    if ri.has_error_registered():
        sys.exit(1)
Example #7
def run(dry_run,
        thread_pool_size=10,
        internal=None,
        use_jump_host=True,
        defer=None):

    gqlapi = gql.get_api()

    namespaces = []
    for namespace_info in gqlapi.query(NAMESPACES_QUERY)['namespaces']:
        if not namespace_info.get('networkPoliciesAllow'):
            continue

        shard_key = (f"{namespace_info['cluster']['name']}/"
                     f"{namespace_info['name']}")

        if not is_in_shard(shard_key):
            continue

        namespaces.append(namespace_info)

    ri, oc_map = ob.fetch_current_state(
        namespaces=namespaces,
        thread_pool_size=thread_pool_size,
        integration=QONTRACT_INTEGRATION,
        integration_version=QONTRACT_INTEGRATION_VERSION,
        override_managed_types=['NetworkPolicy'],
        internal=internal,
        use_jump_host=use_jump_host)
    defer(lambda: oc_map.cleanup())
    fetch_desired_state(namespaces, ri, oc_map)
    ob.realize_data(dry_run, oc_map, ri)

    if ri.has_error_registered():
        sys.exit(1)
Example #8
def run(dry_run: bool,
        thread_pool_size: int,
        internal: bool,
        use_jump_host: bool,
        defer=None) -> None:
    # prepare
    desired_endpoints = get_endpoints()
    namespaces = {
        p.blackboxExporter.namespace.get("name"): p.blackboxExporter.namespace
        for p in desired_endpoints if p.blackboxExporter
    }
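    # keying the dict by namespace name de-duplicates namespaces shared by
    # several blackbox-exporter providers.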

    if namespaces:
        ri, oc_map = ob.fetch_current_state(
            namespaces.values(),
            thread_pool_size=thread_pool_size,
            internal=internal,
            use_jump_host=use_jump_host,
            integration=QONTRACT_INTEGRATION,
            integration_version=QONTRACT_INTEGRATION_VERSION,
            override_managed_types=["Probe"])
        defer(oc_map.cleanup)

        # reconcile
        for provider, endpoints in desired_endpoints.items():
            fill_desired_state(provider, endpoints, ri)
        ob.realize_data(dry_run,
                        oc_map,
                        ri,
                        thread_pool_size,
                        recycle_pods=False)

        if ri.has_error_registered():
            sys.exit(1)
Example #9
def run(dry_run=False,
        thread_pool_size=10,
        internal=None,
        use_jump_host=True,
        defer=None):

    try:
        gqlapi = gql.get_api()
        namespaces = [
            namespace_info
            for namespace_info in gqlapi.query(NAMESPACES_QUERY)['namespaces']
            if namespace_info.get('networkPoliciesAllow')
        ]
        ri, oc_map = ob.fetch_current_state(
            namespaces=namespaces,
            thread_pool_size=thread_pool_size,
            integration=QONTRACT_INTEGRATION,
            integration_version=QONTRACT_INTEGRATION_VERSION,
            override_managed_types=['NetworkPolicy'],
            internal=internal,
            use_jump_host=use_jump_host)
        defer(lambda: oc_map.cleanup())
        fetch_desired_state(namespaces, ri, oc_map)
        ob.realize_data(dry_run, oc_map, ri)

    except Exception as e:
        msg = 'There was a problem running openshift network policies reconcile.'
        msg += ' Exception: {}'
        msg = msg.format(str(e))
        logging.error(msg)
        sys.exit(1)
Example #10
def run(
    dry_run,
    thread_pool_size=10,
    internal=None,
    use_jump_host=True,
    take_over=True,
    defer=None,
):
    namespaces = [
        namespace_info for namespace_info in queries.get_namespaces()
        if namespace_info.get("limitRanges")
    ]

    namespaces = construct_resources(namespaces)

    if not namespaces:
        logging.debug("No LimitRanges definition found in app-interface!")
        sys.exit(0)

    ri, oc_map = ob.fetch_current_state(
        namespaces=namespaces,
        thread_pool_size=thread_pool_size,
        integration=QONTRACT_INTEGRATION,
        integration_version=QONTRACT_INTEGRATION_VERSION,
        override_managed_types=["LimitRange"],
        internal=internal,
        use_jump_host=use_jump_host,
    )
    defer(oc_map.cleanup)

    add_desired_state(namespaces, ri, oc_map)
    ob.realize_data(dry_run, oc_map, ri, thread_pool_size, take_over=take_over)

    if ri.has_error_registered():
        sys.exit(1)
Example #11
def run(
    dry_run,
    thread_pool_size=10,
    internal=None,
    use_jump_host=True,
    vault_output_path="",
    defer=None,
):
    namespaces = canonicalize_namespaces(queries.get_serviceaccount_tokens())
    ri, oc_map = ob.fetch_current_state(
        namespaces=namespaces,
        thread_pool_size=thread_pool_size,
        integration=QONTRACT_INTEGRATION,
        integration_version=QONTRACT_INTEGRATION_VERSION,
        override_managed_types=["Secret"],
        internal=internal,
        use_jump_host=use_jump_host,
    )
    defer(oc_map.cleanup)
    fetch_desired_state(namespaces, ri, oc_map)
    ob.realize_data(dry_run, oc_map, ri, thread_pool_size)
    if not dry_run and vault_output_path:
        write_outputs_to_vault(vault_output_path, ri)

    if ri.has_error_registered():
        sys.exit(1)
Example #12
def run(dry_run=False,
        thread_pool_size=10,
        internal=None,
        use_jump_host=True,
        vault_output_path='',
        defer=None):
    namespaces = [
        namespace_info for namespace_info in queries.get_namespaces()
        if namespace_info.get('openshiftServiceAccountTokens')
    ]
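    # Note: the loop below appends to `namespaces` while iterating over it;
    # the appended target namespaces are not expected to carry
    # 'openshiftServiceAccountTokens', so the guard skips them on later
    # iterations instead of expanding them again.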
    for namespace_info in namespaces:
        if not namespace_info.get('openshiftServiceAccountTokens'):
            continue
        for sat in namespace_info['openshiftServiceAccountTokens']:
            namespaces.append(sat['namespace'])

    ri, oc_map = ob.fetch_current_state(
        namespaces=namespaces,
        thread_pool_size=thread_pool_size,
        integration=QONTRACT_INTEGRATION,
        integration_version=QONTRACT_INTEGRATION_VERSION,
        override_managed_types=['Secret'],
        internal=internal,
        use_jump_host=use_jump_host)
    defer(lambda: oc_map.cleanup())
    fetch_desired_state(namespaces, ri, oc_map)
    ob.realize_data(dry_run, oc_map, ri)
    if not dry_run and vault_output_path:
        write_outputs_to_vault(vault_output_path, ri)

    if ri.has_error_registered():
        sys.exit(1)
Example #13
def run(dry_run, print_only=False,
        enable_deletion=False, io_dir='throughput/',
        thread_pool_size=10, internal=None, use_jump_host=True,
        light=False, vault_output_path='',
        account_name=None, defer=None):

    ri, oc_map, tf = \
        setup(dry_run, print_only, thread_pool_size, internal,
              use_jump_host, account_name)

    if not dry_run:
        defer(lambda: oc_map.cleanup())

    if print_only:
        cleanup_and_exit()
    if tf is None:
        err = True
        cleanup_and_exit(tf, err)

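    # "light" runs skip the terraform plan/apply phases below and only
    # reconcile the OpenShift resources derived from the terraform outputs.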
    if not light:
        deletions_detected, err = tf.plan(enable_deletion)
        if err:
            cleanup_and_exit(tf, err)
        if deletions_detected:
            if enable_deletion:
                tf.dump_deleted_users(io_dir)
            else:
                cleanup_and_exit(tf, deletions_detected)

    if dry_run:
        cleanup_and_exit(tf)

    if not light:
        err = tf.apply()
        if err:
            cleanup_and_exit(tf, err)

    # Temporarily skip applying the secret when running tf-r for a single
    # account locally. The integration running on the cluster will manage
    # the secret after any manual run. This will be refactored together with
    # the caller as part of the operator implementation.
    if account_name:
        cleanup_and_exit(tf)

    tf.populate_desired_state(ri, oc_map)

    ob.realize_data(dry_run, oc_map, ri)

    disable_keys(dry_run, thread_pool_size,
                 disable_service_account_keys=True)

    if vault_output_path:
        write_outputs_to_vault(vault_output_path, ri)

    if ri.has_error_registered():
        sys.exit(1)

    cleanup_and_exit(tf)
Example #14
def run(dry_run=False, print_only=False,
        enable_deletion=False, io_dir='throughput/',
        thread_pool_size=10, internal=None, use_jump_host=True,
        light=False, vault_output_path='', defer=None):

    try:
        ri, oc_map, tf = \
            setup(print_only, thread_pool_size, internal, use_jump_host)

        defer(lambda: oc_map.cleanup())

        if print_only:
            cleanup_and_exit()
        if tf is None:
            err = True
            cleanup_and_exit(tf, err)

        if not light:
            deletions_detected, err = tf.plan(enable_deletion)
            if err:
                cleanup_and_exit(tf, err)
            if deletions_detected:
                if enable_deletion:
                    tf.dump_deleted_users(io_dir)
                else:
                    cleanup_and_exit(tf, deletions_detected)

        if dry_run:
            cleanup_and_exit(tf)

        if not light:
            err = tf.apply()
            if err:
                cleanup_and_exit(tf, err)

        tf.populate_desired_state(ri, oc_map)

        ob.realize_data(dry_run, oc_map, ri)

        disable_keys(dry_run, thread_pool_size,
                     disable_service_account_keys=True)

        if vault_output_path:
            write_outputs_to_vault(vault_output_path, ri)

        if ri.has_error_registered():
            sys.exit(1)

    except Exception as e:
        msg = 'There was a problem running terraform resource reconcile.'
        msg += ' Exception: {}'
        msg = msg.format(str(e))
        logging.error(msg)
        sys.exit(1)

    cleanup_and_exit(tf)
Example #15
def run(dry_run,
        thread_pool_size=10,
        saas_file_name=None,
        env_name=None,
        defer=None):
    saas_files = queries.get_saas_files(saas_file_name, env_name)
    if not saas_files:
        logging.error('no saas files found')
        sys.exit(1)

    instance = queries.get_gitlab_instance()
    desired_jenkins_instances = [s['instance']['name'] for s in saas_files]
    jenkins_map = jenkins_base.get_jenkins_map(
        desired_instances=desired_jenkins_instances)
    settings = queries.get_app_interface_settings()
    try:
        gl = GitLabApi(instance, settings=settings)
    except Exception:
        # allow execution without access to gitlab
        # as long as there are no access attempts.
        gl = None

    saasherder = SaasHerder(saas_files,
                            thread_pool_size=thread_pool_size,
                            gitlab=gl,
                            integration=QONTRACT_INTEGRATION,
                            integration_version=QONTRACT_INTEGRATION_VERSION,
                            settings=settings,
                            jenkins_map=jenkins_map)
    if not saasherder.valid:
        sys.exit(1)
    if len(saasherder.namespaces) == 0:
        logging.warning('no targets found')
        sys.exit(0)

    ri, oc_map = ob.fetch_current_state(
        namespaces=saasherder.namespaces,
        thread_pool_size=thread_pool_size,
        integration=QONTRACT_INTEGRATION,
        integration_version=QONTRACT_INTEGRATION_VERSION)
    defer(lambda: oc_map.cleanup())
    saasherder.populate_desired_state(ri)
    # if saas_file_name is defined, the integration
    # is being called from multiple running instances
    ob.realize_data(dry_run,
                    oc_map,
                    ri,
                    caller=saas_file_name,
                    wait_for_namespace=True,
                    no_dry_run_skip_compare=True,
                    take_over=saasherder.take_over)

    if ri.has_error_registered():
        sys.exit(1)
Example #16
def run(dry_run: bool, thread_pool_size: int, internal: bool,
        use_jump_host: bool, defer=None) -> None:
    # verify blackbox-exporter modules
    settings = queries.get_app_interface_settings()
    allowed_modules = \
        set(settings["endpointMonitoringBlackboxExporterModules"])
    verification_errors = False
    if allowed_modules:
        for p in get_blackbox_providers():
            if p.blackboxExporter and \
                 p.blackboxExporter.module not in allowed_modules:
                LOG.error(
                    f"endpoint monitoring provider {p.name} uses "
                    f"blackbox-exporter module {p.blackboxExporter.module} "
                    f"which is not in the allow list {allowed_modules} of "
                    "app-interface-settings"
                )
                verification_errors = True
    if verification_errors:
        sys.exit(1)

    # prepare
    desired_endpoints = get_endpoints()
    namespaces = {
        p.blackboxExporter.namespace.get("name"):
        p.blackboxExporter.namespace
        for p in desired_endpoints
        if p.blackboxExporter
    }

    if namespaces:
        ri, oc_map = ob.fetch_current_state(
            namespaces.values(),
            thread_pool_size=thread_pool_size,
            internal=internal,
            use_jump_host=use_jump_host,
            integration=QONTRACT_INTEGRATION,
            integration_version=QONTRACT_INTEGRATION_VERSION,
            override_managed_types=["Probe"]
        )
        defer(oc_map.cleanup)

        # reconcile
        for provider, endpoints in desired_endpoints.items():
            fill_desired_state(provider, endpoints, ri)
        ob.realize_data(dry_run, oc_map, ri, thread_pool_size,
                        recycle_pods=False)

        if ri.has_error_registered():
            sys.exit(1)
Example #17
def run(dry_run, print_only=False,
        enable_deletion=False, io_dir='throughput/',
        thread_pool_size=10, internal=None, use_jump_host=True,
        light=False, vault_output_path='', defer=None):

    ri, oc_map, tf = \
        setup(print_only, thread_pool_size, internal, use_jump_host)

    defer(lambda: oc_map.cleanup())

    if print_only:
        cleanup_and_exit()
    if tf is None:
        err = True
        cleanup_and_exit(tf, err)

    if not light:
        deletions_detected, err = tf.plan(enable_deletion)
        if err:
            cleanup_and_exit(tf, err)
        if deletions_detected:
            if enable_deletion:
                tf.dump_deleted_users(io_dir)
            else:
                cleanup_and_exit(tf, deletions_detected)

    if dry_run:
        cleanup_and_exit(tf)

    if not light:
        err = tf.apply()
        if err:
            cleanup_and_exit(tf, err)

    tf.populate_desired_state(ri, oc_map)

    ob.realize_data(dry_run, oc_map, ri)

    disable_keys(dry_run, thread_pool_size,
                 disable_service_account_keys=True)

    if vault_output_path:
        write_outputs_to_vault(vault_output_path, ri)

    if ri.has_error_registered():
        sys.exit(1)

    cleanup_and_exit(tf)
Example #18
def run(
    dry_run: bool,
    thread_pool_size: int = 10,
    internal: Optional[bool] = None,
    use_jump_host: bool = True,
    saas_file_name: Optional[str] = None,
) -> None:

    tkn_providers = fetch_tkn_providers(saas_file_name)

    # TODO: This will need to be an error condition in the future
    if not tkn_providers:
        LOG.debug("No saas files found to be processed")
        sys.exit(0)

    # We need to start with the desired state to know the names of the
    # tekton objects that will be created in the providers' namespaces. We
    # need to make sure that this integration only manages its resources
    # and not the tekton resources already created via openshift-resources
    LOG.debug("Fetching desired resources")
    desired_resources = fetch_desired_resources(tkn_providers)

    tkn_namespaces = [tknp["namespace"] for tknp in tkn_providers.values()]
    LOG.debug("Fetching current resources")
    ri, oc_map = ob.fetch_current_state(
        namespaces=tkn_namespaces,
        integration=QONTRACT_INTEGRATION,
        integration_version=QONTRACT_INTEGRATION_VERSION,
        override_managed_types=["Pipeline", "Task"],
        internal=internal,
        use_jump_host=use_jump_host,
        thread_pool_size=thread_pool_size,
    )
    defer(oc_map.cleanup)

    LOG.debug("Adding desired resources to inventory")
    for desired_resource in desired_resources:
        ri.add_desired(**desired_resource)

    LOG.debug("Realizing data")
    ob.realize_data(dry_run, oc_map, ri, thread_pool_size)

    if ri.has_error_registered():
        sys.exit(ExitCodes.ERROR)

    sys.exit(0)
Example #19
def run(dry_run, thread_pool_size=10, internal=None,
        use_jump_host=True, defer=None):
    clusters = [cluster_info for cluster_info
                in queries.get_clusters()
                if cluster_info.get('managedClusterRoles')]
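    # unlike the namespace-scoped integrations, this one reconciles
    # cluster-scoped ClusterRoleBindings, so fetch_current_state receives
    # clusters instead of namespaces.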
    ri, oc_map = ob.fetch_current_state(
        clusters=clusters,
        thread_pool_size=thread_pool_size,
        integration=QONTRACT_INTEGRATION,
        integration_version=QONTRACT_INTEGRATION_VERSION,
        override_managed_types=['ClusterRoleBinding'],
        internal=internal,
        use_jump_host=use_jump_host)
    defer(lambda: oc_map.cleanup())
    fetch_desired_state(ri, oc_map)
    ob.realize_data(dry_run, oc_map, ri)

    if ri.has_error_registered():
        sys.exit(1)
Example #20
def run(dry_run,
        thread_pool_size=10,
        internal=None,
        use_jump_host=True,
        providers=[],
        defer=None):
    gqlapi = gql.get_api()
    namespaces = gqlapi.query(NAMESPACES_QUERY)['namespaces']
    namespaces = conicalize_namespaces(namespaces, providers)
    oc_map, ri = \
        fetch_data(namespaces, thread_pool_size, internal, use_jump_host)
    defer(lambda: oc_map.cleanup())

    ob.realize_data(dry_run, oc_map, ri)

    if ri.has_error_registered():
        sys.exit(1)

    return ri
Example #21
def run(dry_run=False,
        thread_pool_size=10,
        internal=None,
        use_jump_host=True,
        defer=None):
    namespaces = [
        namespace_info for namespace_info in queries.get_namespaces()
        if namespace_info.get('managedRoles')
    ]
    ri, oc_map = ob.fetch_current_state(
        namespaces=namespaces,
        thread_pool_size=thread_pool_size,
        integration=QONTRACT_INTEGRATION,
        integration_version=QONTRACT_INTEGRATION_VERSION,
        override_managed_types=['RoleBinding'],
        internal=internal,
        use_jump_host=use_jump_host)
    defer(lambda: oc_map.cleanup())
    fetch_desired_state(ri, oc_map)
    ob.realize_data(dry_run, oc_map, ri)
Example #22
def run(dry_run=False,
        thread_pool_size=10,
        saas_file_name=None,
        env_name=None,
        defer=None):
    saas_files = queries.get_saas_files(saas_file_name, env_name)
    if not saas_files:
        logging.error('no saas files found')
        sys.exit(1)

    instance = queries.get_gitlab_instance()
    settings = queries.get_app_interface_settings()
    try:
        gl = GitLabApi(instance, settings=settings)
    except Exception:
        # allow execution without access to gitlab
        # as long as there are no access attempts.
        gl = None

    saasherder = SaasHerder(saas_files,
                            thread_pool_size=thread_pool_size,
                            gitlab=gl,
                            integration=QONTRACT_INTEGRATION,
                            integration_version=QONTRACT_INTEGRATION_VERSION,
                            settings=settings)
    if not saasherder.valid:
        sys.exit(1)

    ri, oc_map = ob.fetch_current_state(
        namespaces=saasherder.namespaces,
        thread_pool_size=thread_pool_size,
        integration=QONTRACT_INTEGRATION,
        integration_version=QONTRACT_INTEGRATION_VERSION)
    defer(lambda: oc_map.cleanup())
    saasherder.populate_desired_state(ri)
    # if saas_file_name is defined, the integration
    # is being called from multiple running instances
    ob.realize_data(dry_run, oc_map, ri, caller=saas_file_name)

    if ri.has_error_registered():
        sys.exit(1)
Example #23
def run(dry_run=False,
        thread_pool_size=10,
        internal=None,
        use_jump_host=True,
        defer=None):
    gqlapi = gql.get_api()
    namespaces = gqlapi.query(NAMESPACES_QUERY)['namespaces']
    oc_map, ri = \
        fetch_data(namespaces, thread_pool_size, internal, use_jump_host)
    defer(lambda: oc_map.cleanup())

    # check for unused resources types
    # listed under `managedResourceTypes`
    # only applicable for openshift-resources
    ob.check_unused_resource_types(ri)

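    # only allow deletions when no errors were registered while building the
    # inventory; a failed fetch could otherwise make desired resources appear
    # to be deleted.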
    enable_deletion = False if ri.has_error_registered() else True
    ob.realize_data(dry_run, oc_map, ri, enable_deletion=enable_deletion)

    if ri.has_error_registered():
        sys.exit(1)
Example #24
def run(dry_run, thread_pool_size=10, internal=None,
        use_jump_host=True, defer=None):

    try:
        namespaces = [
            namespace_info for namespace_info
            in queries.get_namespaces()
            if namespace_info.get('openshiftAcme')
            ]

        namespaces = construct_resources(namespaces)

        ri, oc_map = ob.fetch_current_state(
            namespaces=namespaces,
            thread_pool_size=thread_pool_size,
            integration=QONTRACT_INTEGRATION,
            integration_version=QONTRACT_INTEGRATION_VERSION,
            override_managed_types=[
                'Deployment',
                'Role',
                'RoleBinding',
                'ServiceAccount',
                'Secret'],
            internal=internal,
            use_jump_host=use_jump_host)
        add_desired_state(namespaces, ri, oc_map)

        defer(lambda: oc_map.cleanup())

        ob.realize_data(dry_run, oc_map, ri)

        if ri.has_error_registered():
            sys.exit(1)

    except Exception as e:
        msg = 'There was a problem running openshift acme reconcile.'
        msg += ' Exception: {}'
        msg = msg.format(str(e))
        logging.error(msg)
        sys.exit(1)
Example #25
def run(dry_run=False, thread_pool_size=10, defer=None):
    instance = queries.get_gitlab_instance()
    settings = queries.get_app_interface_settings()
    aws_accounts = queries.get_aws_accounts()
    gl = GitLabApi(instance, settings=settings)

    saas_files = queries.get_saas_files()
    saasherder = SaasHerder(saas_files,
                            gitlab=gl,
                            integration=QONTRACT_INTEGRATION,
                            integration_version=QONTRACT_INTEGRATION_VERSION,
                            settings=settings)
    ri, oc_map = ob.fetch_current_state(
        namespaces=saasherder.namespaces,
        thread_pool_size=thread_pool_size,
        integration=QONTRACT_INTEGRATION,
        integration_version=QONTRACT_INTEGRATION_VERSION)
    defer(lambda: oc_map.cleanup())
    saasherder.populate_desired_state(ri)
    enable_deletion = False if ri.has_error_registered() else True
    ob.realize_data(dry_run, oc_map, ri, enable_deletion=enable_deletion)
    saasherder.slack_notify(dry_run, aws_accounts, ri)
Example #26
def run(dry_run, print_only=False,
        enable_deletion=False, io_dir='throughput/',
        thread_pool_size=10, internal=None, use_jump_host=True,
        light=False, vault_output_path='',
        account_name=None, extra_labels=None, defer=None):

    ri, oc_map, tf, tf_namespaces = \
        setup(dry_run, print_only, thread_pool_size, internal,
              use_jump_host, account_name, extra_labels)

    if not dry_run:
        defer(lambda: oc_map.cleanup())

    if print_only:
        cleanup_and_exit()
    if tf is None:
        err = True
        cleanup_and_exit(tf, err)

    if not light:
        disabled_deletions_detected, err = tf.plan(enable_deletion)
        if err:
            cleanup_and_exit(tf, err)
        tf.dump_deleted_users(io_dir)
        if disabled_deletions_detected:
            cleanup_and_exit(tf, disabled_deletions_detected)

    if dry_run:
        cleanup_and_exit(tf)

    if not light:
        err = tf.apply()
        if err:
            cleanup_and_exit(tf, err)

    tf.populate_desired_state(ri, oc_map, tf_namespaces, account_name)

    actions = ob.realize_data(dry_run, oc_map, ri, caller=account_name)

    disable_keys(dry_run, thread_pool_size,
                 disable_service_account_keys=True,
                 account_name=account_name)

    if actions and vault_output_path:
        write_outputs_to_vault(vault_output_path, ri)

    if ri.has_error_registered():
        err = True
        cleanup_and_exit(tf, err)

    cleanup_and_exit(tf)
Example #27
def run(dry_run=False, thread_pool_size=10, internal=None,
        use_jump_host=True, take_over=True, defer=None):
    try:
        namespaces = [namespace_info for namespace_info
                      in queries.get_namespaces()
                      if namespace_info.get('quota')]
        ri, oc_map = ob.fetch_current_state(
            namespaces=namespaces,
            thread_pool_size=thread_pool_size,
            integration=QONTRACT_INTEGRATION,
            integration_version=QONTRACT_INTEGRATION_VERSION,
            override_managed_types=['ResourceQuota'],
            internal=internal,
            use_jump_host=use_jump_host)
        defer(lambda: oc_map.cleanup())
        fetch_desired_state(namespaces, ri, oc_map)
        ob.realize_data(dry_run, oc_map, ri)

        if ri.has_error_registered():
            sys.exit(1)

    except Exception as e:
        logging.error(f"Error during execution. Exception: {str(e)}")
        sys.exit(1)
Example #28
def run(dry_run,
        print_only=False,
        enable_deletion=False,
        io_dir='throughput/',
        thread_pool_size=10,
        internal=None,
        use_jump_host=True,
        light=False,
        vault_output_path='',
        account_name=None,
        defer=None):

    ri, oc_map, tf, tf_namespaces = \
        setup(dry_run, print_only, thread_pool_size, internal,
              use_jump_host, account_name)

    if not dry_run:
        defer(lambda: oc_map.cleanup())

    if print_only:
        cleanup_and_exit()
    if tf is None:
        err = True
        cleanup_and_exit(tf, err)

    if not light:
        disabled_deletions_detected, err = tf.plan(enable_deletion)
        if err:
            cleanup_and_exit(tf, err)
        tf.dump_deleted_users(io_dir)
        if disabled_deletions_detected:
            cleanup_and_exit(tf, disabled_deletions_detected)

    if dry_run:
        cleanup_and_exit(tf)

    if not light:
        err = tf.apply()
        if err:
            cleanup_and_exit(tf, err)

    # Temporarily skip applying the secret when running tf-r for a single
    # account locally. The integration running on the cluster will manage
    # the secret after any manual run. This will be refactored together with
    # the caller as part of the operator implementation.
    if account_name:
        cleanup_and_exit(tf)

    tf.populate_desired_state(ri, oc_map, tf_namespaces)

    # temporarily not allowing resources to be deleted
    # or for pods to be recycled
    # this should be removed after we gained confidence
    # following the terraform 0.13 upgrade
    actions = ob.realize_data(dry_run, oc_map, ri)

    disable_keys(dry_run, thread_pool_size, disable_service_account_keys=True)

    if actions and vault_output_path:
        write_outputs_to_vault(vault_output_path, ri)

    if ri.has_error_registered():
        err = True
        cleanup_and_exit(tf, err)

    cleanup_and_exit(tf)
Example #29
def run(dry_run,
        thread_pool_size=10,
        internal=None,
        use_jump_host=True,
        vault_throughput_path=None,
        defer=None):
    if not vault_throughput_path:
        logging.error('must supply vault throughput path')
        sys.exit(ExitCodes.ERROR)

    kafka_clusters = queries.get_kafka_clusters()
    if not kafka_clusters:
        logging.debug("No Kafka clusters found in app-interface")
        sys.exit(ExitCodes.SUCCESS)

    settings = queries.get_app_interface_settings()
    ocm_map = OCMMap(clusters=kafka_clusters,
                     integration=QONTRACT_INTEGRATION,
                     settings=settings)
    namespaces = []
    for kafka_cluster in kafka_clusters:
        namespaces.extend(kafka_cluster['namespaces'])
    ri, oc_map = ob.fetch_current_state(
        namespaces=namespaces,
        thread_pool_size=thread_pool_size,
        integration=QONTRACT_INTEGRATION,
        integration_version=QONTRACT_INTEGRATION_VERSION,
        override_managed_types=['Secret'],
        internal=internal,
        use_jump_host=use_jump_host)
    defer(oc_map.cleanup)

    current_state = ocm_map.kafka_cluster_specs()
    desired_state = fetch_desired_state(kafka_clusters)
    kafka_service_accounts = ocm_map.kafka_service_account_specs()

    for kafka_cluster in kafka_clusters:
        kafka_cluster_name = kafka_cluster['name']
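        # desired_state is built from the app-interface definitions, so
        # exactly one desired spec per cluster name is expected here.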
        desired_cluster = [
            c for c in desired_state if kafka_cluster_name == c['name']
        ][0]
        current_cluster = [
            c for c in current_state if kafka_cluster_name == c['name']
        ]
        # check if cluster exists. if not - create it
        if not current_cluster:
            logging.info(['create_cluster', kafka_cluster_name])
            if not dry_run:
                ocm = ocm_map.get(kafka_cluster_name)
                ocm.create_kafka_cluster(desired_cluster)
            continue
        # there should only be one cluster
        current_cluster = current_cluster[0]
        # check if desired cluster matches current cluster. if not - error
        if not all(k in current_cluster.keys()
                   for k in desired_cluster.keys()):
            logging.error(
                '[%s] desired spec %s is different ' + 'from current spec %s',
                kafka_cluster_name, desired_cluster, current_cluster)
            ri.register_error()
            continue
        # check if cluster is ready. if not - wait
        status = current_cluster['status']
        if status != STATUS_READY:
            # check if cluster is failed
            if status == STATUS_FAILED:
                failed_reason = current_cluster['failed_reason']
                logging.error(
                    f'[{kafka_cluster_name}] cluster status is {status}. '
                    f'reason: {failed_reason}')
                ri.register_error()
            else:
                logging.warning(
                    f'[{kafka_cluster_name}] cluster status is {status}')
            continue
        # we have a ready cluster!
        # get a service account for the cluster
        kafka_service_account = get_kafa_service_account(
            kafka_service_accounts,
            kafka_cluster_name,
            vault_throughput_path,
            dry_run,
            ocm_map,
        )
        # let's create a Secret in all referencing namespaces
        kafka_namespaces = kafka_cluster['namespaces']
        secret_fields = ['bootstrap_server_host']
        data = {k: v for k, v in current_cluster.items() if k in secret_fields}
        data.update(kafka_service_account)
        resource = construct_oc_resource(data)
        for namespace_info in kafka_namespaces:
            ri.add_desired(namespace_info['cluster']['name'],
                           namespace_info['name'], resource.kind,
                           resource.name, resource)
        if not dry_run:
            write_output_to_vault(vault_throughput_path, kafka_cluster_name,
                                  resource.body['data'])

    ob.realize_data(dry_run, oc_map, ri, thread_pool_size)

    if ri.has_error_registered():
        sys.exit(ExitCodes.ERROR)
Example #30
def run(
    dry_run,
    thread_pool_size=10,
    io_dir="throughput/",
    saas_file_name=None,
    env_name=None,
    gitlab_project_id=None,
    defer=None,
):
    all_saas_files = queries.get_saas_files(v1=True, v2=True)
    saas_files = queries.get_saas_files(saas_file_name, env_name, v1=True, v2=True)
    app_interface_settings = queries.get_app_interface_settings()
    if not saas_files:
        logging.error("no saas files found")
        sys.exit(ExitCodes.ERROR)

    # notify different outputs (publish results, slack notifications)
    # we only do this if:
    # - this is not a dry run
    # - there is a single saas file deployed
    notify = not dry_run and len(saas_files) == 1
    if notify:
        saas_file = saas_files[0]
        slack_info = saas_file.get("slack")
        if slack_info:
            slack = slackapi_from_slack_workspace(
                slack_info,
                app_interface_settings,
                QONTRACT_INTEGRATION,
                init_usergroups=False,
            )
            # support built-in start and end slack notifications
            # only in v2 saas files
            if saas_file["apiVersion"] == "v2":
                ri = ResourceInventory()
                console_url = compose_console_url(saas_file, saas_file_name, env_name)
                # deployment result notification
                defer(
                    lambda: slack_notify(
                        saas_file_name,
                        env_name,
                        slack,
                        ri,
                        console_url,
                        in_progress=False,
                    )
                )
                # deployment start notification
                slack_notifications = slack_info.get("notifications")
                if slack_notifications and slack_notifications.get("start"):
                    slack_notify(
                        saas_file_name,
                        env_name,
                        slack,
                        ri,
                        console_url,
                        in_progress=True,
                    )
        else:
            slack = None

    instance = queries.get_gitlab_instance()
    # instance exists in v1 saas files only
    desired_jenkins_instances = [
        s["instance"]["name"] for s in saas_files if s.get("instance")
    ]
    jenkins_map = jenkins_base.get_jenkins_map(
        desired_instances=desired_jenkins_instances
    )
    settings = queries.get_app_interface_settings()
    accounts = queries.get_aws_accounts()
    try:
        gl = GitLabApi(instance, settings=settings)
    except Exception:
        # allow execution without access to gitlab
        # as long as there are no access attempts.
        gl = None

    saasherder = SaasHerder(
        saas_files,
        thread_pool_size=thread_pool_size,
        gitlab=gl,
        integration=QONTRACT_INTEGRATION,
        integration_version=QONTRACT_INTEGRATION_VERSION,
        settings=settings,
        jenkins_map=jenkins_map,
        accounts=accounts,
    )
    if len(saasherder.namespaces) == 0:
        logging.warning("no targets found")
        sys.exit(ExitCodes.SUCCESS)

    ri, oc_map = ob.fetch_current_state(
        namespaces=saasherder.namespaces,
        thread_pool_size=thread_pool_size,
        integration=QONTRACT_INTEGRATION,
        integration_version=QONTRACT_INTEGRATION_VERSION,
        init_api_resources=True,
        cluster_admin=saasherder.cluster_admin,
    )
    defer(oc_map.cleanup)
    saasherder.populate_desired_state(ri)

    # validate that this deployment is valid
    # based on promotion information in targets
    if not saasherder.validate_promotions():
        logging.error("invalid promotions")
        ri.register_error()
        sys.exit(ExitCodes.ERROR)

    # if saas_file_name is defined, the integration
    # is being called from multiple running instances
    actions = ob.realize_data(
        dry_run,
        oc_map,
        ri,
        thread_pool_size,
        caller=saas_file_name,
        wait_for_namespace=True,
        no_dry_run_skip_compare=(not saasherder.compare),
        take_over=saasherder.take_over,
    )

    if not dry_run:
        if saasherder.publish_job_logs:
            try:
                ob.follow_logs(oc_map, actions, io_dir)
            except Exception as e:
                logging.error(str(e))
                ri.register_error()
        try:
            ob.validate_data(oc_map, actions)
        except Exception as e:
            logging.error(str(e))
            ri.register_error()

    # publish results of this deployment
    # based on promotion information in targets
    success = not ri.has_error_registered()
    # only publish promotions for deployment jobs (a single saas file)
    if notify:
        # Auto-promote next stages only if there are changes in the
        # promoting stage. This prevents trigger promotions on job re-runs
        auto_promote = len(actions) > 0
        mr_cli = mr_client_gateway.init(gitlab_project_id=gitlab_project_id)
        saasherder.publish_promotions(success, all_saas_files, mr_cli, auto_promote)

    if not success:
        sys.exit(ExitCodes.ERROR)

    # send human readable notifications to slack
    # we only do this if:
    # - this is not a dry run
    # - there is a single saas file deployed
    # - output is 'events'
    # - no errors were registered
    if notify and slack and actions and slack_info.get("output") == "events":
        for action in actions:
            message = (
                f"[{action['cluster']}] "
                + f"{action['kind']} {action['name']} {action['action']}"
            )
            slack.chat_post_message(message)