Example #1
    def run(self, cname=None):
        validation_list = threaded.run(func=self._get_validation_names,
                                       iterable=self._get_clusters(cname),
                                       thread_pool_size=self.thread_pool_size,
                                       filter='deployment_validation_operator')
        validation_names = {}
        if validation_list:
            validation_names = {
                v['cluster']: v['data']
                for v in validation_list if v
            }
        clusters = self._get_clusters(cname)
        self._get_token()
        for cluster in clusters:
            cluster_name = cluster['name']
            if cluster_name not in validation_names:
                LOG.debug('%s Skipping cluster: %s', self.logmarker,
                          cluster_name)
                continue
            LOG.debug('%s Processing cluster: %s', self.logmarker,
                      cluster_name)
            validations = threaded.run(func=self._get_deploymentvalidation,
                                       iterable=validation_names[cluster_name],
                                       thread_pool_size=self.thread_pool_size,
                                       clusterinfo=cluster)
            threaded.run(func=self._post,
                         iterable=validations,
                         thread_pool_size=self.thread_pool_size)
        self._close_token()
Example #2
def fetch_current_state(dry_run, namespaces, thread_pool_size, internal,
                        use_jump_host, account_name):
    ri = ResourceInventory()
    if dry_run:
        return ri, None
    settings = queries.get_app_interface_settings()
    oc_map = OC_Map(namespaces=namespaces,
                    integration=QONTRACT_INTEGRATION,
                    settings=settings,
                    internal=internal,
                    use_jump_host=use_jump_host,
                    thread_pool_size=thread_pool_size)
    state_specs = \
        ob.init_specs_to_fetch(
            ri,
            oc_map,
            namespaces=namespaces,
            override_managed_types=['Secret']
        )
    threaded.run(populate_oc_resources,
                 state_specs,
                 thread_pool_size,
                 ri=ri,
                 account_name=account_name)

    return ri, oc_map
Example #3
def fetch_data(
    namespaces,
    thread_pool_size,
    internal,
    use_jump_host,
    init_api_resources=False,
    overrides=None,
):
    ri = ResourceInventory()
    settings = queries.get_app_interface_settings()
    logging.debug(f"Overriding keys {overrides}")
    oc_map = OC_Map(
        namespaces=namespaces,
        integration=QONTRACT_INTEGRATION,
        settings=settings,
        internal=internal,
        use_jump_host=use_jump_host,
        thread_pool_size=thread_pool_size,
        init_api_resources=init_api_resources,
    )
    state_specs = ob.init_specs_to_fetch(ri,
                                         oc_map,
                                         namespaces=namespaces,
                                         override_managed_types=overrides)
    threaded.run(fetch_states, state_specs, thread_pool_size, ri=ri)

    return oc_map, ri
Example #4
def realize(
    inventory: LabelInventory,
    state: State,
    oc_map: OC_Map,
    dry_run: bool,
    thread_pool_size: int,
) -> None:
    """
    Apply the changes in the state store and on the namespaces
    """
    for cluster, namespace, types in inventory:
        if inventory.errors(cluster, namespace):
            continue
        upd_managed = types.get(UPDATED_MANAGED, [])
        if upd_managed:
            key = state_key(cluster, namespace)
            _LOG.debug(f"Updating state store: {key}: {upd_managed}")
            if not dry_run:
                state.add(key, upd_managed, force=True)

    # Potential exceptions will get raised up
    threaded.run(
        label,
        inventory,
        thread_pool_size,
        oc_map=oc_map,
        dry_run=dry_run,
        inventory=inventory,
    )
Example #5
def run(dry_run, thread_pool_size=10):
    instance = queries.get_gitlab_instance()
    settings = queries.get_app_interface_settings()
    repos = queries.get_repos_gitlab_owner(server=instance['url'])
    threaded.run(act, repos, thread_pool_size,
                 dry_run=dry_run,
                 instance=instance,
                 settings=settings)
Example #6
def run(thread_pool_size=10, defer=None):
    oc_map = tb.get_oc_map(QONTRACT_E2E_TEST)
    defer(oc_map.cleanup)
    ns_under_test = tb.get_test_namespace_name()
    threaded.run(test_cluster,
                 oc_map.clusters(),
                 thread_pool_size,
                 oc_map=oc_map,
                 ns_under_test=ns_under_test)
Example #7
def run(thread_pool_size=10, defer=None):
    oc_map = tb.get_oc_map(QONTRACT_E2E_TEST)
    defer(oc_map.cleanup)
    pattern = tb.get_namespaces_pattern()
    threaded.run(test_cluster,
                 oc_map.clusters(),
                 thread_pool_size,
                 oc_map=oc_map,
                 pattern=pattern)
Example #8
    def run(self):
        slo_documents = queries.get_slo_documents()

        service_slos = threaded.run(func=self._get_service_slo,
                                    iterable=slo_documents,
                                    thread_pool_size=self.thread_pool_size)

        self._get_token()
        threaded.run(func=self._post,
                     iterable=service_slos,
                     thread_pool_size=self.thread_pool_size)
        self._close_token()
Example #9
def get_build_history_pool(jenkins_map, jobs, timestamp_limit,
                           thread_pool_size):
    history_to_get = []
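    # The loop target below rebinds the name 'jobs'; jobs.items() is evaluated
    # once before the first iteration, so the outer mapping is still fully walked.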
    for instance, jobs in jobs.items():
        jenkins = jenkins_map[instance]
        for job in jobs:
            job['jenkins'] = jenkins
            job['timestamp_limit'] = timestamp_limit
            history_to_get.append(job)

    result = run(func=get_build_history,
                 iterable=history_to_get,
                 thread_pool_size=thread_pool_size)

    history = {}
    for job in result:
        build_history = job.get('build_history')
        if not build_history:
            continue
        successes = [_ for _ in build_history if _ == 'SUCCESS']
        history[job['name']] = {
            "total": len(build_history),
            "success": len(successes)
        }
    return history
Example #10
def run(dry_run: bool,
        thread_pool_size=10,
        internal: Optional[bool] = None,
        use_jump_host=True,
        defer=None):

    all_namespaces = queries.get_namespaces(minimal=True)
    shard_namespaces, duplicates = get_shard_namespaces(all_namespaces)

    desired_state = get_desired_state(shard_namespaces)

    settings = queries.get_app_interface_settings()
    oc_map = OC_Map(namespaces=shard_namespaces,
                    integration=QONTRACT_INTEGRATION,
                    settings=settings,
                    internal=internal,
                    use_jump_host=use_jump_host,
                    thread_pool_size=thread_pool_size,
                    init_projects=True)

    defer(oc_map.cleanup)

    results = threaded.run(manage_namespaces,
                           desired_state,
                           thread_pool_size,
                           return_exceptions=True,
                           dry_run=dry_run,
                           oc_map=oc_map)

    err = check_results(desired_state, results)
    if err or duplicates:
        sys.exit(ExitCodes.ERROR)
Example #11
def run(dry_run, print_to_file=None,
        enable_deletion=False, io_dir='throughput/',
        thread_pool_size=10, internal=None, use_jump_host=True,
        light=False, vault_output_path='', extra_labels=None):
    account_names = [
        name for index, name in enumerate(sorted(get_accounts_names()))
        if is_in_shard_round_robin(name, index)
    ]

    if not account_names:
        logging.warning("No accounts in shards")
        return

    exit_codes = threaded.run(
        tfr_run_wrapper, account_names, thread_pool_size,
        dry_run=dry_run,
        print_to_file=print_to_file,
        enable_deletion=enable_deletion,
        io_dir=io_dir,
        internal_thread_pool_size=thread_pool_size,
        internal=internal,
        use_jump_host=use_jump_host,
        light=light,
        vault_output_path=vault_output_path,
        extra_labels=extra_labels,
    )

    if any(exit_codes):
        sys.exit(ExitCodes.ERROR)
Example #12
    def init_ecr_auth_tokens(self, accounts):
        accounts_with_ecr = [a for a in accounts if a.get('ecrs')]
        if not accounts_with_ecr:
            return

        auth_tokens = {}
        results = threaded.run(self.get_tf_secrets, accounts_with_ecr,
                               self.thread_pool_size)
        account_secrets = dict(results)
        for account in accounts_with_ecr:
            account_name = account['name']
            account_secret = account_secrets[account_name]
            access_key = account_secret['aws_access_key_id']
            secret_key = account_secret['aws_secret_access_key']

            ecrs = account['ecrs']
            for ecr in ecrs:
                region_name = ecr['region']
                session = Session(
                    aws_access_key_id=access_key,
                    aws_secret_access_key=secret_key,
                    region_name=region_name,
                )
                client = session.client('ecr')
                token = client.get_authorization_token()
                auth_tokens[f"{account_name}/{region_name}"] = token

        self.auth_tokens = auth_tokens
Example #13
def run(dry_run, gitlab_project_id=None, thread_pool_size=10):
    accounts = queries.get_aws_accounts()
    settings = queries.get_app_interface_settings()
    aws = AWSApi(thread_pool_size, accounts, settings=settings)
    existing_keys = aws.get_users_keys()
    existing_keys_list = [
        key for user_key in existing_keys.values()
        for keys in user_key.values() for key in keys
    ]
    logging.info("found {} existing keys".format(len(existing_keys_list)))

    app_int_github_repos = queries.get_repos(server="https://github.com")
    all_repos = get_all_repos_to_scan(app_int_github_repos)
    logging.info("about to scan {} repos".format(len(all_repos)))

    results = threaded.run(
        git_secrets.scan_history,
        all_repos,
        thread_pool_size,
        existing_keys=existing_keys_list,
    )
    all_leaked_keys = [key for keys in results for key in keys]

    deleted_keys = aws_sos.get_deleted_keys(accounts)
    keys_to_delete = [
        {
            "account": account,
            "key": key
        } for key in all_leaked_keys
        for account, user_keys in existing_keys.items()
        if key in [uk for uks in user_keys.values()
                   for uk in uks] and key not in deleted_keys[account]
    ]
    aws_sos.act(dry_run, gitlab_project_id, accounts, keys_to_delete)
Example #14
def realize_data(dry_run,
                 oc_map: OC_Map,
                 ri: ResourceInventory,
                 thread_pool_size,
                 take_over=False,
                 caller=None,
                 wait_for_namespace=False,
                 no_dry_run_skip_compare=False,
                 override_enable_deletion=None,
                 recycle_pods=True):
    """
    Realize the current state to the desired state.

    :param dry_run: run in dry-run mode
    :param oc_map: a dictionary containing oc client per cluster
    :param ri: a ResourceInventory containing current and desired states
    :param thread_pool_size: Thread pool size to use for parallelism
    :param take_over: manage resource types in a namespace exclusively
    :param caller: name of the calling entity.
                   enables multiple running instances of the same integration
                   to deploy to the same namespace
    :param wait_for_namespace: wait for namespace to exist before applying
    :param no_dry_run_skip_compare: when running without dry-run, skip compare
    :param override_enable_deletion: override calculated enable_deletion value
    :param recycle_pods: should pods be recycled if a dependency changed
    """
    args = locals()
    del args['thread_pool_size']
    results = threaded.run(_realize_resource_data, ri, thread_pool_size,
                           **args)
    return list(itertools.chain.from_iterable(results))
Example #15
def run(
    dry_run,
    gitlab_project_id=None,
    thread_pool_size=10,
    enable_deletion=False,
    send_mails=False,
):
    settings = queries.get_app_interface_settings()
    users = queries.get_users()
    g = init_github()

    results = threaded.run(get_user_company, users, thread_pool_size, github=g)

    users_to_delete = get_users_to_delete(results)

    if not dry_run and enable_deletion:
        mr_cli = mr_client_gateway.init(gitlab_project_id=gitlab_project_id)

    for user in users_to_delete:
        username = user["username"]
        paths = user["paths"]
        logging.info(["delete_user", username])

        if not dry_run:
            if send_mails:
                send_email_notification(user, settings)
            elif enable_deletion:
                mr = CreateDeleteUser(username, paths)
                mr.submit(cli=mr_cli)
            else:
                msg = ("'delete' action is not enabled. "
                       "Please run the integration manually "
                       "with the '--enable-deletion' flag.")
                logging.warning(msg)
Example #16
    def __init__(self,
                 clusters=None,
                 namespaces=None,
                 integration='',
                 e2e_test='',
                 settings=None,
                 internal=None,
                 use_jump_host=True,
                 thread_pool_size=1,
                 init_projects=False,
                 init_api_resources=False,
                 cluster_admin=False):
        self.oc_map = {}
        self.calling_integration = integration
        self.calling_e2e_test = e2e_test
        self.settings = settings
        self.internal = internal
        self.use_jump_host = use_jump_host
        self.thread_pool_size = thread_pool_size
        self.init_projects = init_projects
        self.init_api_resources = init_api_resources
        self._lock = Lock()
        self.jh_ports = {}

        if clusters and namespaces:
            raise KeyError('expected only one of clusters or namespaces.')
        elif clusters:
            threaded.run(self.init_oc_client,
                         clusters,
                         self.thread_pool_size,
                         cluster_admin=cluster_admin)
        elif namespaces:
            clusters = []
            cluster_names = []
            for ns_info in namespaces:
                cluster = ns_info['cluster']
                name = cluster['name']
                if name not in cluster_names:
                    cluster_names.append(name)
                    clusters.append(cluster)
            threaded.run(self.init_oc_client,
                         clusters,
                         self.thread_pool_size,
                         cluster_admin=cluster_admin)
        else:
            raise KeyError('expected one of clusters or namespaces.')
Example #17
    def init_specs(self):
        wd_specs = \
            [{'name': name, 'wd': wd}
             for name, wd in self.working_dirs.items()]
        results = threaded.run(self.terraform_init, wd_specs,
                               self.thread_pool_size)
        self.specs = \
            [{'name': name, 'tf': tf} for name, tf in results]
Example #18
def fetch_data(namespaces,
               thread_pool_size,
               internal,
               use_jump_host,
               init_api_resources=False):
    ri = ResourceInventory()
    settings = queries.get_app_interface_settings()
    oc_map = OC_Map(namespaces=namespaces,
                    integration=QONTRACT_INTEGRATION,
                    settings=settings,
                    internal=internal,
                    use_jump_host=use_jump_host,
                    thread_pool_size=thread_pool_size,
                    init_api_resources=init_api_resources)
    state_specs = ob.init_specs_to_fetch(ri, oc_map, namespaces=namespaces)
    threaded.run(fetch_states, state_specs, thread_pool_size, ri=ri)

    return oc_map, ri
Example #19
    def run(self):
        clusters = queries.get_clusters()

        oc_map = OC_Map(clusters=clusters,
                        integration=QONTRACT_INTEGRATION,
                        settings=self.settings,
                        use_jump_host=True,
                        thread_pool_size=self.thread_pool_size)

        manifests = threaded.run(func=self._get_imagemanifestvuln,
                                 iterable=oc_map.clusters(),
                                 thread_pool_size=self.thread_pool_size,
                                 oc_map=oc_map)

        self._get_token()
        threaded.run(func=self._post,
                     iterable=manifests,
                     thread_pool_size=self.thread_pool_size)
        self._close_token()
Example #20
    def apply(self):
        errors = False

        results = threaded.run(self.terraform_apply, self.specs,
                               self.thread_pool_size)

        for error in results:
            if error:
                errors = True
        return errors
Example #21
def fetch_current_state(
    namespaces=None,
    clusters=None,
    thread_pool_size=None,
    integration=None,
    integration_version=None,
    override_managed_types=None,
    internal=None,
    use_jump_host=True,
    init_api_resources=False,
    cluster_admin=False,
):
    ri = ResourceInventory()
    settings = queries.get_app_interface_settings()
    oc_map = OC_Map(
        namespaces=namespaces,
        clusters=clusters,
        integration=integration,
        settings=settings,
        internal=internal,
        use_jump_host=use_jump_host,
        thread_pool_size=thread_pool_size,
        init_api_resources=init_api_resources,
        cluster_admin=cluster_admin,
    )
    state_specs = init_specs_to_fetch(
        ri,
        oc_map,
        namespaces=namespaces,
        clusters=clusters,
        override_managed_types=override_managed_types,
    )
    threaded.run(
        populate_current_state,
        state_specs,
        thread_pool_size,
        ri=ri,
        integration=integration,
        integration_version=integration_version,
    )

    return ri, oc_map
Example #22
def validate_users_github(users, thread_pool_size):
    ok = True
    g = init_github()
    results = threaded.run(get_github_user, users, thread_pool_size, github=g)
    for org_username, gb_username, gh_login in results:
        if gb_username != gh_login:
            logging.error(
                "Github username is case sensitive in OSD. "
                f"User {org_username} github_username should be: {gh_login}.")
            ok = False

    return ok
Example #23
def validate_users_github(users, thread_pool_size):
    ok = True
    g = init_github()
    results = threaded.run(get_github_user, users, thread_pool_size, github=g)
    for org_username, gb_username, gh_login in results:
        if gb_username != gh_login:
            logging.error("Github username is case sensitive in OSD. "
                          f"User {org_username} is expecting to have "
                          f"the github username of {gh_login}, "
                          f"but the username specified in "
                          f"app-interface is {gb_username}")
            ok = False

    return ok
Example #24
def run(dry_run, thread_pool_size=10):
    namespaces = queries.get_namespaces()

    tfrs_to_mirror = []
    for namespace in namespaces:

        if namespace["terraformResources"] is None:
            continue

        for tfr in namespace["terraformResources"]:
            if tfr["provider"] != "ecr":
                continue

            if tfr["mirror"] is None:
                continue

            tfrs_to_mirror.append(tfr)

    work_list = threaded.run(EcrMirror,
                             tfrs_to_mirror,
                             thread_pool_size=thread_pool_size,
                             dry_run=dry_run)
    threaded.run(worker, work_list, thread_pool_size=thread_pool_size)
Example #25
    def plan(self, enable_deletion):
        errors = False
        disabled_deletions_detected = False
        results = threaded.run(self.terraform_plan, self.specs,
                               self.thread_pool_size,
                               enable_deletion=enable_deletion)

        self.deleted_users = []
        for disabled_deletion_detected, deleted_users, error in results:
            if error:
                errors = True
            if disabled_deletion_detected:
                disabled_deletions_detected = True
                self.deleted_users.extend(deleted_users)
        return disabled_deletions_detected, errors
Example #26
def fetch_current_state(thread_pool_size, internal, use_jump_host):
    clusters = queries.get_clusters(minimal=True)
    settings = queries.get_app_interface_settings()
    oc_map = OC_Map(clusters=clusters,
                    integration=QONTRACT_INTEGRATION,
                    settings=settings,
                    internal=internal,
                    use_jump_host=use_jump_host,
                    thread_pool_size=thread_pool_size)
    results = threaded.run(get_cluster_users,
                           oc_map.clusters(include_errors=True),
                           thread_pool_size,
                           oc_map=oc_map)
    current_state = list(itertools.chain.from_iterable(results))
    return oc_map, current_state
Example #27
def fetch_current_state(thread_pool_size, internal, use_jump_host):
    clusters = [c for c in queries.get_clusters() if is_in_shard(c['name'])]
    ocm_clusters = [c['name'] for c in clusters if c.get('ocm') is not None]
    current_state = []
    settings = queries.get_app_interface_settings()
    oc_map = OC_Map(clusters=clusters, integration=QONTRACT_INTEGRATION,
                    settings=settings, internal=internal,
                    use_jump_host=use_jump_host,
                    thread_pool_size=thread_pool_size)

    groups_list = create_groups_list(clusters, oc_map)
    results = threaded.run(get_cluster_state, groups_list, thread_pool_size,
                           oc_map=oc_map)

    current_state = list(itertools.chain.from_iterable(results))
    return oc_map, current_state, ocm_clusters
Example #28
    def init_sessions_and_resources(self, accounts: Iterable[Account]):
        results = threaded.run(self.get_tf_secrets, accounts,
                               self.thread_pool_size)
        self.sessions: Dict[str, Session] = {}
        self.resources: Dict[str, Any] = {}
        for account, secret in results:
            access_key = secret['aws_access_key_id']
            secret_key = secret['aws_secret_access_key']
            region_name = secret['region']
            session = Session(
                aws_access_key_id=access_key,
                aws_secret_access_key=secret_key,
                region_name=region_name,
            )
            self.sessions[account] = session
            self.resources[account] = {}
Example #29
def fetch_current_state(thread_pool_size):
    clusters = queries.get_clusters()
    clusters = [c for c in clusters if c.get("ocm") is not None]
    current_state = []
    settings = queries.get_app_interface_settings()
    ocm_map = OCMMap(clusters=clusters,
                     integration=QONTRACT_INTEGRATION,
                     settings=settings)
    groups_list = openshift_groups.create_groups_list(clusters, oc_map=ocm_map)
    results = threaded.run(get_cluster_state,
                           groups_list,
                           thread_pool_size,
                           ocm_map=ocm_map)

    current_state = list(itertools.chain.from_iterable(results))
    return ocm_map, current_state
Example #30
def run(dry_run, thread_pool_size=10):
    instance = queries.get_gitlab_instance()
    settings = queries.get_app_interface_settings()
    gl = GitLabApi(instance, settings=settings)
    repos = queries.get_repos(server=gl.server)
    app_sre = gl.get_app_sre_group_users()
    results = threaded.run(get_members_to_add,
                           repos,
                           thread_pool_size,
                           gl=gl,
                           app_sre=app_sre)

    members_to_add = list(itertools.chain.from_iterable(results))
    for m in members_to_add:
        logging.info(['add_maintainer', m["repo"], m["user"].username])
        if not dry_run:
            gl.add_project_member(m["repo"], m["user"])