def run(thread_pool_size=10, defer=None):
    """Run the namespaces e2e test against every cluster in the oc map.

    :param thread_pool_size: worker threads used for the per-cluster checks
    :param defer: callback registrar; used to schedule oc_map cleanup
    """
    oc_map = tb.get_oc_map(QONTRACT_E2E_TEST)
    defer(lambda: oc_map.cleanup())
    ns_pattern = tb.get_namespaces_pattern()
    threaded.run(
        test_cluster,
        oc_map.clusters(),
        thread_pool_size,
        oc_map=oc_map,
        pattern=ns_pattern,
    )
Exemplo n.º 2
0
    def __init__(self,
                 clusters=None,
                 namespaces=None,
                 integration='',
                 e2e_test='',
                 settings=None,
                 internal=None,
                 use_jump_host=True,
                 thread_pool_size=1,
                 init_projects=False,
                 init_api_resources=False):
        """Initialize OC clients for the given clusters or namespaces.

        Exactly one of `clusters` / `namespaces` must be provided;
        raises KeyError otherwise.
        """
        self.oc_map = {}
        self.calling_integration = integration
        self.calling_e2e_test = e2e_test
        self.settings = settings
        self.internal = internal
        self.use_jump_host = use_jump_host
        self.thread_pool_size = thread_pool_size
        self.init_projects = init_projects
        self.init_api_resources = init_api_resources
        self._lock = Lock()

        # validate up front: exactly one source of clusters
        if clusters and namespaces:
            raise KeyError('expected only one of clusters or namespaces.')
        if not clusters and not namespaces:
            raise KeyError('expected one of clusters or namespaces.')

        if namespaces:
            # derive the cluster list from the namespace definitions
            clusters = [ns_info['cluster'] for ns_info in namespaces]
        threaded.run(self.init_oc_client, clusters, self.thread_pool_size)
Exemplo n.º 3
0
def fetch_current_state(namespaces=None,
                        clusters=None,
                        thread_pool_size=None,
                        integration=None,
                        integration_version=None,
                        override_managed_types=None,
                        internal=None,
                        use_jump_host=True,
                        init_api_resources=False):
    """Build a ResourceInventory populated with the current cluster state.

    Returns a (ResourceInventory, OC_Map) tuple; the caller is
    responsible for oc_map cleanup.
    """
    ri = ResourceInventory()
    settings = queries.get_app_interface_settings()
    oc_map = OC_Map(namespaces=namespaces,
                    clusters=clusters,
                    integration=integration,
                    settings=settings,
                    internal=internal,
                    use_jump_host=use_jump_host,
                    thread_pool_size=thread_pool_size,
                    init_api_resources=init_api_resources)
    state_specs = init_specs_to_fetch(
        ri,
        oc_map,
        namespaces=namespaces,
        clusters=clusters,
        override_managed_types=override_managed_types,
    )
    threaded.run(populate_current_state,
                 state_specs,
                 thread_pool_size,
                 ri=ri,
                 integration=integration,
                 integration_version=integration_version)
    return ri, oc_map
Exemplo n.º 4
0
def run(thread_pool_size=10, defer=None):
    """E2E test entrypoint: exercise the test namespace on every cluster.

    :param thread_pool_size: worker threads used for the per-cluster checks
    :param defer: callback registrar; used to schedule oc_map cleanup
    """
    oc_map = tb.get_oc_map(QONTRACT_E2E_TEST)
    defer(lambda: oc_map.cleanup())
    test_ns = tb.get_test_namespace_name()
    threaded.run(test_cluster, oc_map.clusters(), thread_pool_size,
                 oc_map=oc_map, ns_under_test=test_ns)
Exemplo n.º 5
0
 def populate_desired_state(self, ri):
     """Populate `ri` with the desired state of all saas files.

     Stores the promotions returned by the per-spec population on
     self.promotions.
     """
     spec_lists = threaded.run(self.init_populate_desired_state_specs,
                               self.saas_files, self.thread_pool_size)
     # flatten the per-saas-file spec lists into one work list
     specs = [spec for spec_list in spec_lists for spec in spec_list]
     self.promotions = threaded.run(self.populate_desired_state_saas_file,
                                    specs,
                                    self.thread_pool_size,
                                    ri=ri)
Exemplo n.º 6
0
    def run(self):
        """Fetch all SLO documents, compute service SLOs and post them."""
        slo_documents = queries.get_slo_documents()

        service_slos = threaded.run(
            func=self._get_service_slo,
            iterable=slo_documents,
            thread_pool_size=self.thread_pool_size,
        )
        threaded.run(
            func=self._post,
            iterable=service_slos,
            thread_pool_size=self.thread_pool_size,
        )
Exemplo n.º 7
0
def run(dry_run, thread_pool_size=10):
    """Act on every gitlab-owned repo of the instance, in parallel."""
    instance = queries.get_gitlab_instance()
    settings = queries.get_app_interface_settings()
    repos = queries.get_repos_gitlab_owner(server=instance['url'])
    threaded.run(
        act,
        repos,
        thread_pool_size,
        dry_run=dry_run,
        instance=instance,
        settings=settings,
    )
Exemplo n.º 8
0
 def run(self, cname=None):
     """Run each deployment validation over the clusters and post results.

     :param cname: optional cluster name filter passed to _get_clusters
     """
     for validation in ('operator_replica', 'operator_request_limit'):
         LOG.debug('%s Processing validation: %s', self.logmarker,
                   validation)
         validations = threaded.run(func=self._get_deploymentvalidation,
                                    iterable=self._get_clusters(cname),
                                    thread_pool_size=self.thread_pool_size,
                                    validation=validation)
         threaded.run(func=self._post,
                      iterable=validations,
                      thread_pool_size=self.thread_pool_size)
Exemplo n.º 9
0
def run(dry_run, gitlab_project_id=None, thread_pool_size=10):
    """Scan repo git history for leaked AWS keys and act on the findings."""
    accounts = queries.get_aws_accounts()
    settings = queries.get_app_interface_settings()
    aws = AWSApi(thread_pool_size, accounts, settings=settings)
    existing_keys = aws.get_users_keys()
    # flatten account -> user -> keys into one list of key ids
    existing_keys_list = [
        key
        for user_key in existing_keys.values()
        for keys in user_key.values()
        for key in keys
    ]
    logging.info('found {} existing keys'.format(len(existing_keys_list)))

    app_int_github_repos = queries.get_repos(server='https://github.com')
    all_repos = get_all_repos_to_scan(app_int_github_repos)
    logging.info('about to scan {} repos'.format(len(all_repos)))

    results = threaded.run(git_secrets.scan_history,
                           all_repos,
                           thread_pool_size,
                           existing_keys=existing_keys_list)
    all_leaked_keys = [key for keys in results for key in keys]

    deleted_keys = aws_sos.get_deleted_keys(accounts)
    # match each leaked key to the account that owns it, skipping keys
    # that were already deleted
    keys_to_delete = []
    for key in all_leaked_keys:
        for account, user_keys in existing_keys.items():
            account_keys = [uk for uks in user_keys.values() for uk in uks]
            if key in account_keys and key not in deleted_keys[account]:
                keys_to_delete.append({'account': account, 'key': key})
    aws_sos.act(dry_run, gitlab_project_id, accounts, keys_to_delete)
Exemplo n.º 10
0
    def init_ecr_auth_tokens(self, accounts):
        """Fetch ECR authorization tokens for accounts that define ecrs.

        Populates self.auth_tokens, keyed by "<account>/<region>".
        No-op when no account defines ecrs.
        """
        accounts_with_ecr = [a for a in accounts if a.get('ecrs')]
        if not accounts_with_ecr:
            return

        results = threaded.run(self.get_tf_secrets, accounts_with_ecr,
                               self.thread_pool_size)
        account_secrets = dict(results)

        auth_tokens = {}
        for account in accounts_with_ecr:
            account_name = account['name']
            secret = account_secrets[account_name]
            for ecr in account['ecrs']:
                region_name = ecr['region']
                session = boto3.Session(
                    aws_access_key_id=secret['aws_access_key_id'],
                    aws_secret_access_key=secret['aws_secret_access_key'],
                    region_name=region_name,
                )
                token = session.client('ecr').get_authorization_token()
                auth_tokens[f"{account_name}/{region_name}"] = token

        self.auth_tokens = auth_tokens
Exemplo n.º 11
0
def get_build_history_pool(jenkins_map, jobs,
                           timestamp_limit, thread_pool_size):
    """Fetch build history for every job and summarize success counts.

    :param jenkins_map: mapping of instance name -> jenkins client
    :param jobs: mapping of instance name -> list of job dicts; each job
        dict is mutated in place with 'jenkins' and 'timestamp_limit'
    :param timestamp_limit: oldest build timestamp to consider
    :param thread_pool_size: worker threads for the parallel fetch
    :return: dict of job name -> {'total': int, 'success': int}; jobs
        with no build history are omitted
    """
    history_to_get = []
    # fix: the inner loop variable used to shadow the `jobs` parameter;
    # use a distinct name for the per-instance job list
    for instance, instance_jobs in jobs.items():
        jenkins = jenkins_map[instance]
        for job in instance_jobs:
            job['jenkins'] = jenkins
            job['timestamp_limit'] = timestamp_limit
            history_to_get.append(job)

    result = run(func=get_build_history,
                 iterable=history_to_get,
                 thread_pool_size=thread_pool_size)

    history = {}
    for job in result:
        build_history = job.get('build_history')
        if not build_history:
            continue
        # `_` was previously (mis)used as a bound loop variable here
        successes = [r for r in build_history if r == 'SUCCESS']
        history[job['name']] = {
            "total": len(build_history),
            "success": len(successes)
        }
    return history
Exemplo n.º 12
0
def run(dry_run,
        gitlab_project_id=None,
        thread_pool_size=10,
        enable_deletion=False,
        send_mails=False):
    """Find users to delete based on github company and notify or delete.

    Without --enable-deletion (and without send_mails) only a warning is
    logged for each candidate.
    """
    settings = queries.get_app_interface_settings()
    users = queries.get_users()
    g = init_github()

    results = threaded.run(get_user_company, users, thread_pool_size, github=g)
    users_to_delete = get_users_to_delete(results)

    # the MR client is only needed when deletions will actually happen
    if not dry_run and enable_deletion:
        mr_cli = mr_client_gateway.init(gitlab_project_id=gitlab_project_id)

    for user in users_to_delete:
        username = user['username']
        paths = user['paths']
        logging.info(['delete_user', username])

        if dry_run:
            continue
        if send_mails:
            send_email_notification(user, settings)
        elif enable_deletion:
            CreateDeleteUser(username, paths).submit(cli=mr_cli)
        else:
            logging.warning('\'delete\' action is not enabled. '
                            'Please run the integration manually '
                            'with the \'--enable-deletion\' flag.')
Exemplo n.º 13
0
    def run(self):
        """Collect image manifest vulnerabilities per cluster and post them."""
        clusters = queries.get_clusters()

        oc_map = OC_Map(clusters=clusters,
                        integration=QONTRACT_INTEGRATION,
                        settings=self.settings,
                        use_jump_host=True,
                        thread_pool_size=self.thread_pool_size)

        manifests = threaded.run(
            func=self._get_imagemanifestvuln,
            iterable=oc_map.clusters(),
            thread_pool_size=self.thread_pool_size,
            oc_map=oc_map,
        )
        threaded.run(
            func=self._post,
            iterable=manifests,
            thread_pool_size=self.thread_pool_size,
        )
Exemplo n.º 14
0
 def init_specs(self):
     """Run `terraform init` in every working dir and record the handles."""
     wd_specs = [{'name': name, 'wd': wd}
                 for name, wd in self.working_dirs.items()]
     init_results = threaded.run(self.terraform_init, wd_specs,
                                 self.thread_pool_size)
     self.specs = [{'name': name, 'tf': tf} for name, tf in init_results]
Exemplo n.º 15
0
def fetch_data(namespaces,
               thread_pool_size,
               internal,
               use_jump_host,
               init_api_resources=False):
    """Fetch the current resource state for the given namespaces.

    Returns an (OC_Map, ResourceInventory) tuple; the caller owns
    oc_map cleanup.
    """
    ri = ResourceInventory()
    settings = queries.get_app_interface_settings()
    oc_map = OC_Map(namespaces=namespaces,
                    integration=QONTRACT_INTEGRATION,
                    settings=settings,
                    internal=internal,
                    use_jump_host=use_jump_host,
                    thread_pool_size=thread_pool_size,
                    init_api_resources=init_api_resources)
    specs = ob.init_specs_to_fetch(ri, oc_map, namespaces=namespaces)
    threaded.run(fetch_states, specs, thread_pool_size, ri=ri)
    return oc_map, ri
Exemplo n.º 16
0
    def apply(self):
        """Run `terraform apply` over all specs in parallel.

        :return: True when any spec reported an error, False otherwise
        """
        results = threaded.run(self.terraform_apply, self.specs,
                               self.thread_pool_size)
        # any() replaces the manual boolean-accumulator loop
        return any(results)
Exemplo n.º 17
0
def fetch_current_state(thread_pool_size, internal, use_jump_host):
    """Collect the existing users of every cluster.

    Returns (oc_map, current_state) where current_state is a flat list
    of per-cluster user entries.
    """
    clusters = queries.get_clusters(minimal=True)
    settings = queries.get_app_interface_settings()
    oc_map = OC_Map(clusters=clusters, integration=QONTRACT_INTEGRATION,
                    settings=settings, internal=internal,
                    use_jump_host=use_jump_host,
                    thread_pool_size=thread_pool_size)
    results = threaded.run(get_cluster_users, oc_map.clusters(),
                           thread_pool_size, oc_map=oc_map)
    current_state = [user for cluster_users in results
                     for user in cluster_users]
    return oc_map, current_state
def fetch_current_state(dry_run, namespaces, thread_pool_size,
                        internal, use_jump_host, account_name):
    """Fetch the current Secret state of the given namespaces.

    On dry_run returns an empty inventory and oc_map=None; callers must
    handle the None oc_map.
    """
    ri = ResourceInventory()
    if dry_run:
        return ri, None
    settings = queries.get_app_interface_settings()
    oc_map = OC_Map(namespaces=namespaces, integration=QONTRACT_INTEGRATION,
                    settings=settings, internal=internal,
                    use_jump_host=use_jump_host,
                    thread_pool_size=thread_pool_size)
    state_specs = ob.init_specs_to_fetch(
        ri,
        oc_map,
        namespaces=namespaces,
        override_managed_types=['Secret'],
    )
    threaded.run(populate_oc_resources, state_specs, thread_pool_size,
                 ri=ri, account_name=account_name)
    return ri, oc_map
Exemplo n.º 19
0
def fetch_current_state(thread_pool_size):
    """Collect current group state from all OCM-enabled clusters.

    :param thread_pool_size: worker threads for the parallel fetch
    :return: (ocm_map, current_state) where current_state is a flat list
        of per-group state entries
    """
    clusters = queries.get_clusters()
    clusters = [c for c in clusters if c.get('ocm') is not None]
    settings = queries.get_app_interface_settings()
    ocm_map = OCMMap(clusters=clusters, integration=QONTRACT_INTEGRATION,
                     settings=settings)
    groups_list = openshift_groups.create_groups_list(clusters, oc_map=ocm_map)
    results = threaded.run(get_cluster_state, groups_list, thread_pool_size,
                           ocm_map=ocm_map)

    # removed dead `current_state = []` initialization; it was always
    # overwritten by the flattening below
    current_state = [item for sublist in results for item in sublist]
    return ocm_map, current_state
Exemplo n.º 20
0
    def _check_images(self, options):
        """Validate every image referenced by the given resources.

        :param options: dict with saas_file_name, resource_template_name,
            html_url, resources, image_auth and image_patterns keys
        :return: True when at least one image fails validation
        """
        saas_file_name = options['saas_file_name']
        resource_template_name = options['resource_template_name']
        html_url = options['html_url']
        resources = options['resources']
        image_auth = options['image_auth']
        image_patterns = options['image_patterns']
        error_prefix = \
            f"[{saas_file_name}/{resource_template_name}] {html_url}:"

        images_list = threaded.run(self._collect_images, resources,
                                   self.available_thread_pool_size)
        # de-duplicate across all resources
        images = {image for sublist in images_list for image in sublist}
        if not images:
            return False  # no errors
        errors = threaded.run(self._check_image, images,
                              self.available_thread_pool_size,
                              image_patterns=image_patterns,
                              image_auth=image_auth,
                              error_prefix=error_prefix)
        return True in errors
Exemplo n.º 21
0
def run(dry_run, thread_pool_size=10):
    """Mirror every ECR terraform resource that defines a mirror source."""
    namespaces = queries.get_namespaces()

    # collect ecr terraform resources that request mirroring
    tfrs_to_mirror = [
        tfr
        for namespace in namespaces
        if namespace['terraformResources'] is not None
        for tfr in namespace['terraformResources']
        if tfr['provider'] == 'ecr' and tfr['mirror'] is not None
    ]

    work_list = threaded.run(EcrMirror,
                             tfrs_to_mirror,
                             thread_pool_size=thread_pool_size,
                             dry_run=dry_run)
    threaded.run(worker, work_list, thread_pool_size=thread_pool_size)
Exemplo n.º 22
0
def run(dry_run, gitlab_project_id=None, thread_pool_size=10):
    """Open a delete-user MR for every user flagged for deletion."""
    users = init_users()
    user_specs = threaded.run(init_user_spec, users, thread_pool_size)
    users_to_delete = [(username, paths)
                       for username, delete, paths in user_specs
                       if delete]

    if not dry_run:
        mr_cli = mr_client_gateway.init(gitlab_project_id=gitlab_project_id,
                                        sqs_or_gitlab='gitlab')

    for username, paths in users_to_delete:
        logging.info(['delete_user', username])
        if not dry_run:
            CreateDeleteUser(username, paths).submit(cli=mr_cli)
Exemplo n.º 23
0
    def plan(self, enable_deletion):
        """Run `terraform plan` over all specs in parallel.

        Records users scheduled for deletion on self.deleted_users.

        :param enable_deletion: whether destructive changes are allowed
        :return: (disabled_deletions_detected, errors) booleans
        """
        results = threaded.run(self.terraform_plan,
                               self.specs,
                               self.thread_pool_size,
                               enable_deletion=enable_deletion)

        errors = False
        disabled_deletions_detected = False
        self.deleted_users = []
        for disabled_deletion_detected, deleted_users, error in results:
            if error:
                errors = True
            if disabled_deletion_detected:
                disabled_deletions_detected = True
                self.deleted_users.extend(deleted_users)
        return disabled_deletions_detected, errors
Exemplo n.º 24
0
def fetch_current_state(thread_pool_size, internal, use_jump_host):
    """Collect current group state across all clusters.

    :return: (oc_map, current_state, ocm_clusters) where ocm_clusters is
        the list of OCM-enabled cluster names and current_state is a
        flat list of per-group state entries
    """
    clusters = queries.get_clusters()
    ocm_clusters = [c['name'] for c in clusters if c.get('ocm') is not None]
    settings = queries.get_app_interface_settings()
    oc_map = OC_Map(clusters=clusters, integration=QONTRACT_INTEGRATION,
                    settings=settings, internal=internal,
                    use_jump_host=use_jump_host,
                    thread_pool_size=thread_pool_size)

    groups_list = create_groups_list(clusters, oc_map)
    results = threaded.run(get_cluster_state, groups_list, thread_pool_size,
                           oc_map=oc_map)

    # removed dead `current_state = []` initialization; it was always
    # overwritten by the flattening below
    current_state = [item for sublist in results for item in sublist]
    return oc_map, current_state, ocm_clusters
Exemplo n.º 25
0
 def init_sessions_and_resources(self, accounts):
     """Create one boto3 session per account from its terraform secret.

     Populates self.sessions (account -> boto3.Session) and
     self.resources (account -> empty dict, filled later).
     """
     results = threaded.run(self.get_tf_secrets, accounts,
                            self.thread_pool_size)
     self.sessions = {}
     self.resources = {}
     for account, secret in results:
         # NOTE(review): assumes each secret carries access key id,
         # secret key and region -- confirm against get_tf_secrets
         self.sessions[account] = boto3.Session(
             aws_access_key_id=secret['aws_access_key_id'],
             aws_secret_access_key=secret['aws_secret_access_key'],
             region_name=secret['region'],
         )
         self.resources[account] = {}
def run(dry_run, thread_pool_size=10):
    """Ensure app-sre users are maintainers of every gitlab repo."""
    instance = queries.get_gitlab_instance()
    settings = queries.get_app_interface_settings()
    gl = GitLabApi(instance, settings=settings)
    repos = queries.get_repos(server=gl.server)
    app_sre = gl.get_app_sre_group_users()
    results = threaded.run(get_members_to_add, repos, thread_pool_size,
                           gl=gl, app_sre=app_sre)

    members_to_add = [member for sublist in results for member in sublist]
    for m in members_to_add:
        logging.info(['add_maintainer', m["repo"], m["user"].username])
        if not dry_run:
            gl.add_project_member(m["repo"], m["user"])
def run(dry_run,
        thread_pool_size=10,
        internal=None,
        use_jump_host=True,
        defer=None):
    """Create any namespace that is desired but missing on its cluster."""
    oc_map, desired_state = get_desired_state(internal, use_jump_host,
                                              thread_pool_size)
    defer(lambda: oc_map.cleanup())
    results = threaded.run(check_ns_exists, desired_state, thread_pool_size,
                           oc_map=oc_map)

    for spec, create in results:
        if not create:
            continue
        logging.info(['create', spec['cluster'], spec['namespace']])
        if not dry_run:
            create_new_project(spec, oc_map)
def run(dry_run, thread_pool_size=10, io_dir='throughput/'):
    """Process saas-file-owners diffs in parallel; exit 1 on any failure."""
    saas_file_owners_diffs = read_saas_file_owners_diffs(io_dir)
    if not saas_file_owners_diffs:
        return

    available_thread_pool_size = \
        threaded.estimate_available_thread_pool_size(
            thread_pool_size,
            len(saas_file_owners_diffs))

    exit_codes = threaded.run(
        osd_run_wrapper,
        saas_file_owners_diffs,
        thread_pool_size,
        dry_run=dry_run,
        available_thread_pool_size=available_thread_pool_size)

    # fail the whole run if any wrapped run returned a non-zero code
    if any(exit_codes):
        sys.exit(1)
Exemplo n.º 29
0
def check_prometheus_rules(rules, thread_pool_size):
    '''Returns a list of dicts with failed rule checks'''
    # flatten the path -> cluster -> namespace nesting into one work list
    rules_to_check = [
        {
            'path': path,
            'cluster': cluster,
            'namespace': namespace,
            'spec': rule_data['spec'],
        }
        for path, cluster_data in rules.items()
        for cluster, namespace_data in cluster_data.items()
        for namespace, rule_data in namespace_data.items()
    ]

    result = threaded.run(func=check_rule,
                          iterable=rules_to_check,
                          thread_pool_size=thread_pool_size)

    # return invalid rules
    return [rule for rule in result if not rule['check_result']]
Exemplo n.º 30
0
def run(dry_run,
        print_only=False,
        enable_deletion=False,
        io_dir='throughput/',
        thread_pool_size=10,
        internal=None,
        use_jump_host=True,
        light=False,
        vault_output_path='',
        extra_labels=None):
    """Run terraform reconciliation for the accounts owned by this shard."""
    # round-robin shard assignment over the sorted account names
    sorted_names = sorted(get_accounts_names())
    account_names = [
        name for index, name in enumerate(sorted_names)
        if is_in_shard_round_robin(name, index)
    ]

    if not account_names:
        logging.warning("No accounts in shards")
        return

    exit_codes = threaded.run(
        tfr_run_wrapper,
        account_names,
        thread_pool_size,
        dry_run=dry_run,
        print_only=print_only,
        enable_deletion=enable_deletion,
        io_dir=io_dir,
        internal_thread_pool_size=thread_pool_size,
        internal=internal,
        use_jump_host=use_jump_host,
        light=light,
        vault_output_path=vault_output_path,
        extra_labels=extra_labels,
    )

    if any(exit_codes):
        sys.exit(ExitCodes.ERROR)