def run(dry_run,
        thread_pool_size=10,
        internal=None,
        use_jump_host=True,
        defer=None):
    """Reconcile RoleBindings in namespaces with managed roles.

    Fetches the current cluster state for the namespaces in this shard,
    computes the desired state and realizes the difference.
    Exits with status 1 if any error was registered in the inventory.
    """
    managed_namespaces = []
    for ns in queries.get_namespaces():
        # only namespaces that opted into role management
        if not ns.get("managedRoles"):
            continue
        # only namespaces that belong to this shard
        if not is_in_shard(f"{ns['cluster']['name']}/{ns['name']}"):
            continue
        managed_namespaces.append(ns)

    ri, oc_map = ob.fetch_current_state(
        namespaces=managed_namespaces,
        thread_pool_size=thread_pool_size,
        integration=QONTRACT_INTEGRATION,
        integration_version=QONTRACT_INTEGRATION_VERSION,
        override_managed_types=["RoleBinding.authorization.openshift.io"],
        internal=internal,
        use_jump_host=use_jump_host,
    )
    defer(oc_map.cleanup)
    fetch_desired_state(ri, oc_map)
    ob.realize_data(dry_run, oc_map, ri, thread_pool_size)

    if ri.has_error_registered():
        sys.exit(1)
Ejemplo n.º 2
0
def run(dry_run,
        thread_pool_size=10,
        internal=None,
        use_jump_host=True,
        providers=None,
        cluster_name=None,
        namespace_name=None,
        init_api_resources=False,
        defer=None):
    """Reconcile namespace resources for the namespaces in this shard.

    Args:
        dry_run (bool): passed through to ob.realize_data
        thread_pool_size (int): thread pool size to use
        internal (bool): passed through to fetch_data
        use_jump_host (bool): passed through to fetch_data
        providers (list): passed to canonicalize_namespaces; defaults to []
        cluster_name (str): optional cluster filter
        namespace_name (str): optional namespace filter
        init_api_resources (bool): passed through to fetch_data
        defer: callback used to register oc_map cleanup

    Returns:
        ResourceInventory: the resource inventory after realization
    """
    # Fix: the original default `providers=[]` is a mutable default
    # argument shared across calls; use None as the sentinel and
    # normalize here, preserving the original semantics.
    if providers is None:
        providers = []
    gqlapi = gql.get_api()
    namespaces = [
        namespace_info
        for namespace_info in gqlapi.query(NAMESPACES_QUERY)['namespaces']
        if is_in_shard(f"{namespace_info['cluster']['name']}/"
                       f"{namespace_info['name']}")
    ]
    namespaces = \
        filter_namespaces_by_cluster_and_namespace(
            namespaces,
            cluster_name,
            namespace_name
        )
    namespaces = canonicalize_namespaces(namespaces, providers)
    oc_map, ri = \
        fetch_data(namespaces, thread_pool_size, internal, use_jump_host,
                   init_api_resources=init_api_resources)
    defer(lambda: oc_map.cleanup())

    ob.realize_data(dry_run, oc_map, ri)

    if ri.has_error_registered():
        sys.exit(1)

    return ri
Ejemplo n.º 3
0
def run(dry_run,
        thread_pool_size=10,
        internal=None,
        use_jump_host=True,
        defer=None):
    """Reconcile NetworkPolicy objects in namespaces that allow them.

    Only namespaces with a 'networkPoliciesAllow' entry that fall into
    this shard are considered. Exits with status 1 on registered errors.
    """
    gqlapi = gql.get_api()

    # keep only namespaces that allow network policies and are in shard
    namespaces = [
        ns for ns in gqlapi.query(NAMESPACES_QUERY)['namespaces']
        if ns.get('networkPoliciesAllow')
        and is_in_shard(f"{ns['cluster']['name']}/{ns['name']}")
    ]

    ri, oc_map = ob.fetch_current_state(
        namespaces=namespaces,
        thread_pool_size=thread_pool_size,
        integration=QONTRACT_INTEGRATION,
        integration_version=QONTRACT_INTEGRATION_VERSION,
        override_managed_types=['NetworkPolicy'],
        internal=internal,
        use_jump_host=use_jump_host)
    defer(oc_map.cleanup)
    fetch_desired_state(namespaces, ri, oc_map)
    ob.realize_data(dry_run, oc_map, ri)

    if ri.has_error_registered():
        sys.exit(1)
def get_desired_state(internal, use_jump_host, thread_pool_size):
    """Build the desired (cluster, namespace) state for this shard.

    Returns a tuple of (oc_map, desired_state) where desired_state is a
    list of {"cluster": ..., "namespace": ...} dicts for every namespace
    spec in the inventory whose cluster is reachable via oc_map.
    """
    gqlapi = gql.get_api()
    sharded_namespaces = [
        ns for ns in gqlapi.query(QUERY)['namespaces']
        if is_in_shard(f'{ns["cluster"]["name"]}/{ns["name"]}')
    ]

    ri = ResourceInventory()
    settings = queries.get_app_interface_settings()
    oc_map = OC_Map(namespaces=sharded_namespaces,
                    integration=QONTRACT_INTEGRATION,
                    settings=settings,
                    internal=internal,
                    use_jump_host=use_jump_host,
                    thread_pool_size=thread_pool_size,
                    init_projects=True)
    ob.init_specs_to_fetch(ri,
                           oc_map,
                           namespaces=sharded_namespaces,
                           override_managed_types=['Namespace'])

    # skip specs whose cluster has no working client in oc_map
    desired_state = [
        {"cluster": cluster, "namespace": namespace}
        for cluster, namespace, _, _ in ri
        if cluster in oc_map.clusters()
    ]

    return oc_map, desired_state
Ejemplo n.º 5
0
def test_is_in_shard_single_shard(monkeypatch):
    """With a single shard, every key must land in shard 0."""
    for env_var, env_val in (("SHARDS", "1"), ("SHARD_ID", "0")):
        monkeypatch.setenv(env_var, env_val)
    # SHARDS and SHARD_ID are read into module-level globals at import
    # time, so the module must be reloaded after patching the env.
    importlib.reload(sharding)

    assert sharding.is_in_shard(VALUE) is True
def get_gql_namespaces_in_shard() -> List[Any]:
    """
    Get all namespaces from qontract-server and filter those which are in
    our shard
    """
    in_shard = []
    for ns in queries.get_namespaces():
        # namespaces marked for deletion are skipped
        if ns.get('delete'):
            continue
        if is_in_shard(f"{ns['cluster']['name']}/{ns['name']}"):
            in_shard.append(ns)
    return in_shard
Ejemplo n.º 7
0
def fetch_current_state(thread_pool_size, internal, use_jump_host):
    """Collect the current group state for all clusters in this shard.

    Returns a tuple of (oc_map, current_state, ocm_clusters) where
    ocm_clusters is the subset of cluster names that have an 'ocm' entry.
    """
    sharded_clusters = [c for c in queries.get_clusters()
                        if is_in_shard(c['name'])]
    ocm_clusters = [c['name'] for c in sharded_clusters
                    if c.get('ocm') is not None]
    settings = queries.get_app_interface_settings()
    oc_map = OC_Map(clusters=sharded_clusters,
                    integration=QONTRACT_INTEGRATION,
                    settings=settings, internal=internal,
                    use_jump_host=use_jump_host,
                    thread_pool_size=thread_pool_size)

    groups_list = create_groups_list(sharded_clusters, oc_map)
    results = threaded.run(get_cluster_state, groups_list, thread_pool_size,
                           oc_map=oc_map)

    # each worker returns a list of state entries; flatten them
    current_state = list(itertools.chain.from_iterable(results))
    return oc_map, current_state, ocm_clusters
Ejemplo n.º 8
0
def get_shard_namespaces(
        namespaces: Iterable[Mapping[str, Any]]) \
            -> Tuple[List[Dict[str, str]], bool]:
    """Filter namespaces down to this shard, resolving duplicates.

    A namespace key is "<cluster>/<name>". When the same key appears more
    than once, all definitions must agree on the 'delete' flag; if they
    disagree, the namespace is dropped from the result and an error is
    logged.

    Args:
        namespaces: namespace mappings as returned by the query layer

    Returns:
        (filtered namespace list, error flag) — the flag is True when at
        least one duplicated key had conflicting delete flags.
    """
    # Structure holding duplicates by namespace key
    duplicates: Dict[str, List[Dict[str, str]]] = {}
    # namespace filtered list without duplicates (first occurrence wins)
    filtered_ns: Dict[str, Dict[str, Any]] = {}

    err = False
    for ns in namespaces:
        key = f'{ns["cluster"]["name"]}/{ns["name"]}'

        if is_in_shard(key):
            if key not in filtered_ns:
                filtered_ns[key] = cast(dict, ns)
            else:
                # Duplicated NS
                duplicates.setdefault(key, []).append(cast(dict, ns))

    for key, dupe_list in duplicates.items():
        dupe_list.append(filtered_ns[key])
        # Fix: the flags must come from this key's duplicate list
        # (dupe_list); the original read the stale loop variable
        # `dupe_list_by_key` left over from the collection pass above,
        # so conflicts were checked against the wrong namespace group.
        delete_flags = [ns["delete"] for ns in dupe_list]

        if len(set(delete_flags)) > 1:
            # Only some definitions in the list have the delete flag set;
            # this is an error — remove the namespace from the result.
            err = True
            del filtered_ns[key]
            logging.error(DUPLICATES_LOG_MSG.format(key=key))
        else:
            # All duplicate definitions agree on the delete option;
            # the action will be performed, just log the duplication.
            logging.debug(DUPLICATES_LOG_MSG.format(key=key))

    return list(filtered_ns.values()), err
def setup(thread_pool_size, internal, use_jump_host, integration,
          integration_version, v1, v2):
    """Build the resources required for triggering integrations.

    Args:
        thread_pool_size (int): thread pool size to use
        internal (bool): run for internal/external/all clusters
        use_jump_host (bool): use a jump host to reach the clusters
        integration (string): name of the calling integration
        integration_version (string): version of the calling integration
        v1 (bool): trigger for v1 SaaS files
        v2 (bool): trigger for v2 SaaS files

    Returns:
        saasherder (SaasHerder): a SaasHerder instance
        jenkins_map (dict): instance names with JenkinsApi instances
        oc_map (OC_Map): a dictionary of OC clients per cluster
        settings (dict): app-interface settings
        error (bool): True if one happened, False otherwise
    """
    saas_files = queries.get_saas_files(v1=v1, v2=v2)
    if not saas_files:
        logging.error("no saas files found")
        return None, None, None, None, True

    # only handle the SaaS files that belong to this shard
    saas_files = [sf for sf in saas_files if is_in_shard(sf["name"])]

    # prune disabled targets in place; iterate over copies since the
    # target lists are mutated while walking them
    for saas_file in saas_files[:]:
        for rt in saas_file["resourceTemplates"][:]:
            targets = rt["targets"]
            for target in targets[:]:
                if target["disable"]:
                    targets.remove(target)

    instance = queries.get_gitlab_instance()
    settings = queries.get_app_interface_settings()
    accounts = queries.get_state_aws_accounts()
    gl = GitLabApi(instance, settings=settings)
    jenkins_map = jenkins_base.get_jenkins_map()

    # namespaces that host tekton pipelines providers
    tkn_provider_namespaces = [
        pp["namespace"]
        for pp in queries.get_pipelines_providers()
        if pp["provider"] == "tekton"
    ]

    oc_map = OC_Map(
        namespaces=tkn_provider_namespaces,
        integration=integration,
        settings=settings,
        internal=internal,
        use_jump_host=use_jump_host,
        thread_pool_size=thread_pool_size,
    )

    saasherder = SaasHerder(
        saas_files,
        thread_pool_size=thread_pool_size,
        gitlab=gl,
        integration=integration,
        integration_version=integration_version,
        settings=settings,
        jenkins_map=jenkins_map,
        accounts=accounts,
    )

    return saasherder, jenkins_map, oc_map, settings, False
def fetch_desired_state(ri, oc_map):
    """Compute desired RoleBindings from app-interface roles.

    For each (non-expired) role, build RoleBinding resources for its
    users and bot service accounts in every namespace the role grants
    access to, and register them in the resource inventory *ri*.

    Args:
        ri: ResourceInventory to add desired resources to; may be None,
            in which case only the users list is built.
        oc_map: OC client map used to skip unreachable clusters; may be
            falsy to skip that check.

    Returns:
        list: {"cluster", "user"} dicts for every (permission, user)
        pair, consumed by the openshift-users and github integrations.
    """
    gqlapi = gql.get_api()
    # drop roles whose expiration date has passed
    roles = expiration.filter(gqlapi.query(ROLES_QUERY)["roles"])
    users_desired_state = []
    for role in roles:
        # one permission per access entry that has both a namespace and
        # a role, and whose namespace opted into managed roles
        permissions = [{
            "cluster": a["namespace"]["cluster"],
            "namespace": a["namespace"]["name"],
            "role": a["role"],
        } for a in role["access"] or []
                       if None not in [a["namespace"], a["role"]]
                       and a["namespace"].get("managedRoles")]
        if not permissions:
            continue

        service_accounts = [
            bot["openshift_serviceaccount"] for bot in role["bots"]
            if bot.get("openshift_serviceaccount")
        ]

        for permission in permissions:
            cluster_info = permission["cluster"]
            cluster = cluster_info["name"]
            namespace = permission["namespace"]
            if not is_in_shard(f"{cluster}/{namespace}"):
                continue
            # skip clusters without a working client
            if oc_map and not oc_map.get(cluster):
                continue
            # which user attribute identifies users on this cluster
            user_key = ob.determine_user_key_for_access(cluster_info)
            for user in role["users"]:
                # used by openshift-users and github integrations
                # this is just to simplify things a bit on their side
                users_desired_state.append({
                    "cluster": cluster,
                    "user": user[user_key]
                })
                if ri is None:
                    continue
                oc_resource, resource_name = construct_user_oc_resource(
                    permission["role"], user[user_key])
                try:
                    ri.add_desired(
                        cluster,
                        permission["namespace"],
                        "RoleBinding.authorization.openshift.io",
                        resource_name,
                        oc_resource,
                    )
                except ResourceKeyExistsError:
                    # a user may have a Role assigned to them
                    # from multiple app-interface roles
                    pass
            for sa in service_accounts:
                if ri is None:
                    continue
                # sa is "<namespace>/<name>"; note this rebinds the
                # loop-local `namespace` from the shard check above
                namespace, sa_name = sa.split("/")
                oc_resource, resource_name = construct_sa_oc_resource(
                    permission["role"], namespace, sa_name)
                try:
                    ri.add_desired(
                        cluster,
                        permission["namespace"],
                        "RoleBinding.authorization.openshift.io",
                        resource_name,
                        oc_resource,
                    )
                except ResourceKeyExistsError:
                    # a ServiceAccount may have a Role assigned to it
                    # from multiple app-interface roles
                    pass

    return users_desired_state
Ejemplo n.º 11
0
def test_is_in_shard_three_shards_fail(monkeypatch):
    """With three shards, VALUE must not land in shard 2."""
    for env_var, env_val in (("SHARDS", "3"), ("SHARD_ID", "2")):
        monkeypatch.setenv(env_var, env_val)
    # reload so the module-level SHARDS/SHARD_ID globals pick up the
    # patched environment
    importlib.reload(sharding)

    assert sharding.is_in_shard(VALUE) is False
Ejemplo n.º 12
0
def fetch_desired_state(ri, oc_map):
    """Compute desired RoleBindings from app-interface roles.

    For each role, build RoleBinding resources for its users (github
    usernames of users and bots) and bot service accounts in every
    namespace the role grants access to, registering them in *ri*.

    Args:
        ri: ResourceInventory to add desired resources to; may be None,
            in which case only the users list is built.
        oc_map: OC client map used to skip unreachable clusters; may be
            falsy to skip that check.

    Returns:
        list: {'cluster', 'user'} dicts for every (permission, user)
        pair, consumed by the openshift-users and github integrations.
    """
    gqlapi = gql.get_api()
    roles = gqlapi.query(ROLES_QUERY)['roles']
    users_desired_state = []
    for role in roles:
        # one permission per access entry that has both a namespace and
        # a role, and whose namespace opted into managed roles
        permissions = [{'cluster': a['namespace']['cluster']['name'],
                        'namespace': a['namespace']['name'],
                        'role': a['role']}
                       for a in role['access'] or []
                       if None not in [a['namespace'], a['role']]
                       and a['namespace'].get('managedRoles')]
        if not permissions:
            continue

        # users and bots are both identified by github_username here
        users = [user['github_username']
                 for user in role['users']]
        bot_users = [bot['github_username']
                     for bot in role['bots']
                     if bot.get('github_username')]
        users.extend(bot_users)
        service_accounts = [bot['openshift_serviceaccount']
                            for bot in role['bots']
                            if bot.get('openshift_serviceaccount')]

        for permission in permissions:
            cluster = permission['cluster']
            namespace = permission['namespace']
            if not is_in_shard(f"{cluster}/{namespace}"):
                continue
            # skip clusters without a working client
            if oc_map and not oc_map.get(cluster):
                continue
            for user in users:
                # used by openshift-users and github integrations
                # this is just to simplify things a bit on their side
                users_desired_state.append({
                    'cluster': cluster,
                    'user': user
                })
                if ri is None:
                    continue
                oc_resource, resource_name = \
                    construct_user_oc_resource(permission['role'], user)
                try:
                    ri.add_desired(
                        cluster,
                        permission['namespace'],
                        'RoleBinding.authorization.openshift.io',
                        resource_name,
                        oc_resource
                    )
                except ResourceKeyExistsError:
                    # a user may have a Role assigned to them
                    # from multiple app-interface roles
                    pass
            for sa in service_accounts:
                if ri is None:
                    continue
                # sa is "<namespace>/<name>"; note this rebinds the
                # loop-local `namespace` from the shard check above
                namespace, sa_name = sa.split('/')
                oc_resource, resource_name = \
                    construct_sa_oc_resource(
                        permission['role'], namespace, sa_name)
                try:
                    # NOTE(review): permission['cluster'] equals the
                    # `cluster` local used in the user branch above;
                    # same value, inconsistent spelling
                    ri.add_desired(
                        permission['cluster'],
                        permission['namespace'],
                        'RoleBinding.authorization.openshift.io',
                        resource_name,
                        oc_resource
                    )
                except ResourceKeyExistsError:
                    # a ServiceAccount may have a Role assigned to it
                    # from multiple app-interface roles
                    pass

    return users_desired_state