Example #1
def run(dry_run, gitlab_project_id=None, thread_pool_size=10):
    accounts = queries.get_aws_accounts()
    settings = queries.get_app_interface_settings()
    aws = AWSApi(thread_pool_size, accounts, settings=settings)
    existing_keys = aws.get_users_keys()
    existing_keys_list = [
        key for user_key in existing_keys.values()
        for keys in user_key.values() for key in keys
    ]
    logging.info('found {} existing keys'.format(len(existing_keys_list)))

    app_int_github_repos = queries.get_repos(server='https://github.com')
    all_repos = get_all_repos_to_scan(app_int_github_repos)
    logging.info('about to scan {} repos'.format(len(all_repos)))

    results = threaded.run(git_secrets.scan_history,
                           all_repos,
                           thread_pool_size,
                           existing_keys=existing_keys_list)
    all_leaked_keys = [key for keys in results for key in keys]

    deleted_keys = aws_sos.get_deleted_keys(accounts)
    keys_to_delete = \
        [{'account': account, 'key': key}
         for key in all_leaked_keys
         for account, user_keys in existing_keys.items()
         if key in [uk for uks in user_keys.values() for uk in uks]
         and key not in deleted_keys[account]]
    aws_sos.act(dry_run, gitlab_project_id, accounts, keys_to_delete)
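
A note on the flattening above: judging from the comprehension, aws.get_users_keys() appears to return a nested mapping of account name to IAM user to a list of access key IDs. The standalone sketch below (made-up data, assumed shape) performs the same flattening:

existing_keys = {
    'account-a': {'user-1': ['AKIA111', 'AKIA222'], 'user-2': ['AKIA333']},
    'account-b': {'user-3': ['AKIA444']},
}
existing_keys_list = [
    key for user_key in existing_keys.values()
    for keys in user_key.values() for key in keys
]
print(existing_keys_list)  # ['AKIA111', 'AKIA222', 'AKIA333', 'AKIA444']
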
Example #2
    def __init__(self, accounts, settings=None):
        queue_url = os.environ['gitlab_pr_submitter_queue_url']
        account = self.get_queue_account(accounts, queue_url)
        aws_api = AWSApi(1, accounts, settings=settings)
        session = aws_api.get_session(account)

        self.sqs = session.client('sqs')
        self.queue_url = queue_url
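
The constructor above only builds the boto3 SQS client and stores the queue URL; the publishing side is not shown. As a hedged illustration, a gateway like this would typically send messages with the standard boto3 call below (the send_message wrapper itself is an assumption, not taken from the source):

    def send_message(self, body):
        # Publish a JSON-encoded message to the queue configured in __init__
        # (assumes `import json` at module level).
        self.sqs.send_message(
            QueueUrl=self.queue_url,
            MessageBody=json.dumps(body),
        )
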
Example #3
    def __init__(self, integration, accounts, settings=None):
        """Initiates S3 client from AWSApi."""
        self.state_path = f"state/{integration}"
        self.bucket = os.environ['APP_INTERFACE_STATE_BUCKET']
        account = os.environ['APP_INTERFACE_STATE_BUCKET_ACCOUNT']
        accounts = [a for a in accounts if a['name'] == account]
        aws_api = AWSApi(1, accounts, settings=settings)
        session = aws_api.get_session(account)

        self.client = session.client('s3')
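
This constructor only wires an S3 client to a state/<integration> prefix in the bucket. The helpers below are an assumed sketch (not from the source) of how such a state class could read and write entries using the standard boto3 get_object/put_object calls:

    def get(self, key):
        # Read a JSON document stored under the integration's state path
        # (assumes `import json` at module level).
        obj = self.client.get_object(
            Bucket=self.bucket,
            Key=f"{self.state_path}/{key}",
        )
        return json.loads(obj['Body'].read())

    def add(self, key, value):
        # Write a JSON document under the integration's state path.
        self.client.put_object(
            Bucket=self.bucket,
            Key=f"{self.state_path}/{key}",
            Body=json.dumps(value),
        )
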
Example #4
def run(dry_run=False,
        thread_pool_size=10,
        disable_service_account_keys=False,
        defer=None):
    accounts = queries.get_aws_accounts()
    settings = queries.get_app_interface_settings()
    aws = AWSApi(thread_pool_size, accounts, settings=settings)
    keys_to_delete = get_keys_to_delete(accounts)
    working_dirs = init_tf_working_dirs(accounts, thread_pool_size, settings)
    defer(lambda: cleanup(working_dirs))
    error = aws.delete_keys(dry_run, keys_to_delete, working_dirs,
                            disable_service_account_keys)
    if error:
        sys.exit(1)
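
The defer argument is injected by a decorator in the calling framework and registers cleanup callbacks (here, removing the Terraform working directories) that run when the function exits. A minimal, self-contained sketch of that pattern, as an assumption about the mechanism rather than the project's actual decorator:

from functools import wraps

def defer_decorator(func):
    # Hypothetical: pass a `defer` callable into the wrapped function and run
    # all registered callbacks in reverse order once it returns or raises.
    @wraps(func)
    def wrapper(*args, **kwargs):
        callbacks = []
        try:
            return func(*args, defer=callbacks.append, **kwargs)
        finally:
            for cb in reversed(callbacks):
                cb()
    return wrapper

@defer_decorator
def job(defer=None):
    defer(lambda: print('cleanup'))
    print('work')

job()  # prints 'work', then 'cleanup'
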
Example #5
def run(dry_run, thread_pool_size=10, io_dir='throughput/'):
    accounts = [a for a in queries.get_aws_accounts()
                if a.get('garbageCollection')]
    settings = queries.get_app_interface_settings()
    aws = AWSApi(thread_pool_size, accounts, settings=settings)
    if dry_run:
        aws.simulate_deleted_users(io_dir)
    aws.map_resources()
    aws.delete_resources_without_owner(dry_run)
Example #6
def run(dry_run=False,
        thread_pool_size=10,
        enable_deletion=False,
        io_dir='throughput/'):
    aws = AWSApi(thread_pool_size)
    if dry_run:
        aws.simulate_deleted_users(io_dir)
    aws.map_resources()
    aws.delete_resources_without_owner(dry_run, enable_deletion)
Example #7
def run(dry_run=False,
        gitlab_project_id=None,
        thread_pool_size=10,
        enable_deletion=False):
    accounts = queries.get_aws_accounts()
    settings = queries.get_app_interface_settings()
    aws = AWSApi(thread_pool_size, accounts, settings=settings)
    deleted_keys = get_deleted_keys(accounts)
    existing_keys = aws.get_users_keys()
    aws_support_cases = aws.get_support_cases()
    keys_to_delete_from_cases = get_keys_to_delete(aws_support_cases)
    keys_to_delete = [
        ktd for ktd in keys_to_delete_from_cases
        if ktd['key'] not in deleted_keys[ktd['account']]
        and ktd['key'] in existing_keys[ktd['account']]
    ]
    act(dry_run, gitlab_project_id, accounts, keys_to_delete)
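
The filter keeps only keys reported in AWS support cases that have not already been recorded as deleted and still exist in IAM. A standalone toy example of the same filter, with the per-account collections simplified to flat lists of key IDs:

deleted_keys = {'account-a': ['AKIA111']}
existing_keys = {'account-a': ['AKIA222', 'AKIA333']}
keys_to_delete_from_cases = [
    {'account': 'account-a', 'key': 'AKIA111'},  # already deleted -> dropped
    {'account': 'account-a', 'key': 'AKIA222'},  # still active -> kept
    {'account': 'account-a', 'key': 'AKIA999'},  # no longer exists -> dropped
]
keys_to_delete = [
    ktd for ktd in keys_to_delete_from_cases
    if ktd['key'] not in deleted_keys[ktd['account']]
    and ktd['key'] in existing_keys[ktd['account']]
]
print(keys_to_delete)  # [{'account': 'account-a', 'key': 'AKIA222'}]
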
Example #8
def run(dry_run=False,
        thread_pool_size=10,
        enable_deletion=False,
        io_dir='throughput/'):
    accounts = queries.get_aws_accounts()
    settings = queries.get_app_interface_settings()
    aws = AWSApi(thread_pool_size, accounts, settings=settings)
    if dry_run:
        aws.simulate_deleted_users(io_dir)
    aws.map_resources()
    aws.delete_resources_without_owner(dry_run, enable_deletion)
Example #9
def run(dry_run, vault_output_path=''):
    accounts = [a for a in queries.get_aws_accounts() if a.get('ecrs')]
    settings = queries.get_app_interface_settings()
    aws = AWSApi(1, accounts, settings=settings, init_ecr_auth_tokens=True)
    for account, data in aws.auth_tokens.items():
        dockercfg_secret_data = construct_dockercfg_secret_data(data)
        basic_auth_secret_data = construct_basic_auth_secret_data(data)
        write_output_to_vault(dry_run, vault_output_path, account,
                              dockercfg_secret_data, 'dockercfg')
        write_output_to_vault(dry_run, vault_output_path, account,
                              basic_auth_secret_data, 'basic-auth')
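
construct_dockercfg_secret_data and construct_basic_auth_secret_data are not shown here. Based on the ECR authorization data that AWSApi exposes later in these examples (a proxyEndpoint plus a base64 authorizationToken of the form AWS:<password>), a hedged sketch of the dockercfg variant could look like this (the helper body is an assumption, not the project's implementation):

import json

def construct_dockercfg_secret_data(data):
    # Turn ECR authorization data into a .dockerconfigjson-style document
    # keyed by the registry endpoint; the token already encodes "AWS:<password>".
    auth_data = data['authorizationData'][0]
    server = auth_data['proxyEndpoint']
    token = auth_data['authorizationToken']
    return {
        '.dockerconfigjson': json.dumps({
            'auths': {server: {'auth': token}}
        })
    }
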
Example #10
def run(dry_run=False, thread_pool_size=10):
    settings = queries.get_app_interface_settings()
    zones = queries.get_dns_zones()

    desired_state, err = build_desired_state(zones)
    if err:
        sys.exit(ExitCodes.ERROR)

    participating_accounts = [z['account'] for z in zones]
    awsapi = AWSApi(thread_pool_size, participating_accounts, settings)
    current_state, err = build_current_state(awsapi)
    if err:
        sys.exit(ExitCodes.ERROR)

    actions, err = reconcile_state(current_state, desired_state)
    if err:
        sys.exit(ExitCodes.ERROR)

    for action in actions:
        err = action[0](dry_run, awsapi, *action[1:])
        if err:
            sys.exit(ExitCodes.ERROR)
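
Each action produced by reconcile_state is dispatched as a (function, *args) tuple, with dry_run and the AWS client passed first. A minimal illustration of that dispatch pattern with a stand-in action (the names below are illustrative only):

def create_record(dry_run, awsapi, zone, record):
    # Stand-in action: a real action would call into awsapi here.
    print(f'create {record} in {zone} (dry_run={dry_run})')
    return None  # no error

actions = [(create_record, 'example.com', 'www')]
for action in actions:
    err = action[0](True, None, *action[1:])
    if err:
        raise SystemExit(1)
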
Example #11
def build_desired_state_cluster(clusters, ocm_map, settings):
    """
    Fetch state for VPC peerings between two OCM clusters
    """
    desired_state = []
    error = False

    for cluster_info in clusters:
        cluster_name = cluster_info['name']

        # Find an aws account with the "network-mgmt" access level on the
        # requester cluster and use that as the account for the requester
        req_aws = aws_account_from_infrastructure_access(
            cluster_info, 'network-mgmt', ocm_map)
        if not req_aws:
            msg = f"could not find an AWS account with the " \
                  f"'network-mgmt' access level on the cluster {cluster_name}"
            logging.error(msg)
            error = True
            continue
        req_aws['assume_region'] = cluster_info['spec']['region']
        req_aws['assume_cidr'] = cluster_info['network']['vpc']

        peering_info = cluster_info['peering']
        peer_connections = peering_info['connections']
        for peer_connection in peer_connections:
            # We only care about cluster-vpc-requester peering providers
            peer_connection_provider = peer_connection['provider']
            if not peer_connection_provider == 'cluster-vpc-requester':
                continue

            peer_connection_name = peer_connection['name']
            peer_cluster = peer_connection['cluster']
            peer_cluster_name = peer_cluster['name']
            requester_manage_routes = peer_connection.get('manageRoutes')

            # Ensure we have a matching peering connection
            peer_info = find_matching_peering(cluster_info, peer_connection,
                                              peer_cluster,
                                              'cluster-vpc-accepter')
            if not peer_info:
                msg = f"could not find a matching peering connection for " \
                      f"cluster {cluster_name}, " \
                      f"connection {peer_connection_name}"
                logging.error(msg)
                error = True
                continue
            accepter_manage_routes = peer_info.get('manageRoutes')

            aws_api = AWSApi(1, [req_aws], settings=settings)
            requester_vpc_id, requester_route_table_ids = \
                aws_api.get_cluster_vpc_id(
                    req_aws,
                    route_tables=requester_manage_routes
                )
            if requester_vpc_id is None:
                msg = f'[{cluster_name}] could not find VPC ID for cluster'
                logging.error(msg)
                error = True
                continue
            requester = {
                'cidr_block': cluster_info['network']['vpc'],
                'region': cluster_info['spec']['region'],
                'vpc_id': requester_vpc_id,
                'route_table_ids': requester_route_table_ids,
                'account': req_aws
            }

            # Find an aws account with the "network-mgmt" access level on the
            # peer cluster and use that as the account for the accepter
            acc_aws = aws_account_from_infrastructure_access(
                peer_cluster, 'network-mgmt', ocm_map)
            if not acc_aws:
                msg = "could not find an AWS account with the " \
                    "'network-mgmt' access level on the cluster"
                logging.error(msg)
                error = True
                continue
            acc_aws['assume_region'] = peer_cluster['spec']['region']
            acc_aws['assume_cidr'] = peer_cluster['network']['vpc']

            aws_api = AWSApi(1, [acc_aws], settings=settings)
            accepter_vpc_id, accepter_route_table_ids = \
                aws_api.get_cluster_vpc_id(
                    acc_aws,
                    route_tables=accepter_manage_routes
                )
            if accepter_vpc_id is None:
                msg = f'[{peer_cluster_name}] could not find VPC ID for cluster'
                logging.error(msg)
                error = True
                continue
            requester['peer_owner_id'] = acc_aws['assume_role'].split(':')[4]
            accepter = {
                'cidr_block': peer_cluster['network']['vpc'],
                'region': peer_cluster['spec']['region'],
                'vpc_id': accepter_vpc_id,
                'route_table_ids': accepter_route_table_ids,
                'account': acc_aws
            }

            item = {
                'connection_provider': peer_connection_provider,
                'connection_name': peer_connection_name,
                'requester': requester,
                'accepter': accepter,
                'deleted': peer_connection.get('delete', False)
            }
            desired_state.append(item)

    return desired_state, error
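
The peer_owner_id above comes from field 4 of the accepter account's assume-role ARN, which in the standard ARN layout (arn:aws:iam::<account-id>:role/<name>) is the AWS account ID:

assume_role = 'arn:aws:iam::123456789012:role/network-mgmt'
print(assume_role.split(':')[4])  # 123456789012
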
Example #12
def build_desired_state_vpc(clusters, ocm_map, settings):
    """
    Fetch state for VPC peerings between a cluster and a VPC (account)
    """
    desired_state = []
    error = False

    for cluster_info in clusters:
        cluster = cluster_info['name']
        ocm = ocm_map.get(cluster)
        peering_info = cluster_info['peering']
        peer_connections = peering_info['connections']
        for peer_connection in peer_connections:
            # We only care about account-vpc peering providers
            peer_connection_provider = peer_connection['provider']
            if not peer_connection_provider == 'account-vpc':
                continue
            # requester is the cluster's AWS account
            requester = {
                'cidr_block': cluster_info['network']['vpc'],
                'region': cluster_info['spec']['region']
            }

            connection_name = peer_connection['name']
            peer_vpc = peer_connection['vpc']
            # accepter is the peered AWS account
            accepter = {
                'vpc_id': peer_vpc['vpc_id'],
                'cidr_block': peer_vpc['cidr_block'],
                'region': peer_vpc['region']
            }
            account = peer_vpc['account']
            # assume_role is the role to assume to provision the
            # peering connection request, through the accepter AWS account.
            account['assume_role'] = \
                ocm.get_aws_infrastructure_access_terraform_assume_role(
                    cluster,
                    peer_vpc['account']['uid'],
                    peer_vpc['account']['terraformUsername']
                )
            account['assume_region'] = requester['region']
            account['assume_cidr'] = requester['cidr_block']
            aws_api = AWSApi(1, [account], settings=settings)
            requester_vpc_id, requester_route_table_ids = \
                aws_api.get_cluster_vpc_id(
                    account,
                    route_tables=peer_connection.get('manageRoutes')
                )

            if requester_vpc_id is None:
                logging.error(f'[{cluster}] could not find VPC ID for cluster')
                error = True
                continue
            requester['vpc_id'] = requester_vpc_id
            requester['route_table_ids'] = requester_route_table_ids
            requester['account'] = account
            accepter['account'] = account
            item = {
                'connection_provider': peer_connection_provider,
                'connection_name': connection_name,
                'requester': requester,
                'accepter': accepter,
                'deleted': peer_connection.get('delete', False)
            }
            desired_state.append(item)
    return desired_state, error
Example #13
    def __init__(self, dry_run, instance):
        self.dry_run = dry_run
        self.settings = queries.get_app_interface_settings()

        cluster_info = instance['hiveCluster']
        hive_cluster = instance['hiveCluster']['name']

        # Getting the OCM Client for the hive cluster
        ocm_map = OCMMap(clusters=[cluster_info],
                         integration=QONTRACT_INTEGRATION,
                         settings=self.settings)

        self.ocm_cli = ocm_map.get(hive_cluster)
        if not self.ocm_cli:
            raise OcpReleaseEcrMirrorError(f"Can't create ocm client for "
                                           f"cluster {hive_cluster}")

        # Getting the OC Client for the hive cluster
        oc_map = OC_Map(clusters=[cluster_info],
                        integration=QONTRACT_INTEGRATION,
                        settings=self.settings)
        self.oc_cli = oc_map.get(hive_cluster)
        if not self.oc_cli:
            raise OcpReleaseEcrMirrorError(f"Can't create oc client for "
                                           f"cluster {hive_cluster}")

        namespace = instance['ecrResourcesNamespace']
        ocp_release_identifier = instance['ocpReleaseEcrIdentifier']
        ocp_art_dev_identifier = instance['ocpArtDevEcrIdentifier']

        ocp_release_info = self._get_tf_resource_info(namespace,
                                                      ocp_release_identifier)
        if ocp_release_info is None:
            raise OcpReleaseEcrMirrorError(f"Could not find rds "
                                           f"identifier "
                                           f"{ocp_release_identifier} in "
                                           f"namespace {namespace['name']}")

        ocp_art_dev_info = self._get_tf_resource_info(namespace,
                                                      ocp_art_dev_identifier)
        if ocp_art_dev_info is None:
            raise OcpReleaseEcrMirrorError(f"Could not find rds identifier"
                                           f" {ocp_art_dev_identifier} in"
                                           f"namespace {namespace['name']}")

        # Getting the AWS Client for the accounts
        aws_accounts = [
            self._get_aws_account_info(account=ocp_release_info['account']),
            self._get_aws_account_info(account=ocp_art_dev_info['account'])
        ]
        self.aws_cli = AWSApi(thread_pool_size=1,
                              accounts=aws_accounts,
                              settings=self.settings,
                              init_ecr_auth_tokens=True)
        self.aws_cli.map_ecr_resources()

        self.ocp_release_ecr_uri = self._get_image_uri(
            account=ocp_release_info['account'],
            repository=ocp_release_identifier)
        if self.ocp_release_ecr_uri is None:
            raise OcpReleaseEcrMirrorError(f"Could not find the "
                                           f"ECR repository "
                                           f"{ocp_release_identifier}")

        self.ocp_art_dev_ecr_uri = self._get_image_uri(
            account=ocp_art_dev_info['account'],
            repository=ocp_art_dev_identifier)
        if self.ocp_art_dev_ecr_uri is None:
            raise OcpReleaseEcrMirrorError(f"Could not find the "
                                           f"ECR repository "
                                           f"{ocp_art_dev_identifier}")

        # Getting all the credentials
        quay_creds = self._get_quay_creds()
        ocp_release_creds = self._get_ecr_creds(
            account=ocp_release_info['account'],
            region=ocp_release_info['region'])
        ocp_art_dev_creds = self._get_ecr_creds(
            account=ocp_art_dev_info['account'],
            region=ocp_art_dev_info['region'])

        # Creating a single dictionary with all credentials to be used by the
        # "oc adm release mirror" command
        self.registry_creds = {
            'auths': {
                **quay_creds['auths'],
                **ocp_release_creds['auths'],
                **ocp_art_dev_creds['auths'],
            }
        }
Example #14
class OcpReleaseEcrMirror:
    def __init__(self, dry_run, instance):
        self.dry_run = dry_run
        self.settings = queries.get_app_interface_settings()

        cluster_info = instance['hiveCluster']
        hive_cluster = instance['hiveCluster']['name']

        # Getting the OCM Client for the hive cluster
        ocm_map = OCMMap(clusters=[cluster_info],
                         integration=QONTRACT_INTEGRATION,
                         settings=self.settings)

        self.ocm_cli = ocm_map.get(hive_cluster)
        if not self.ocm_cli:
            raise OcpReleaseEcrMirrorError(f"Can't create ocm client for "
                                           f"cluster {hive_cluster}")

        # Getting the OC Client for the hive cluster
        oc_map = OC_Map(clusters=[cluster_info],
                        integration=QONTRACT_INTEGRATION,
                        settings=self.settings)
        self.oc_cli = oc_map.get(hive_cluster)
        if not self.oc_cli:
            raise OcpReleaseEcrMirrorError(f"Can't create oc client for "
                                           f"cluster {hive_cluster}")

        namespace = instance['ecrResourcesNamespace']
        ocp_release_identifier = instance['ocpReleaseEcrIdentifier']
        ocp_art_dev_identifier = instance['ocpArtDevEcrIdentifier']

        ocp_release_info = self._get_tf_resource_info(namespace,
                                                      ocp_release_identifier)
        if ocp_release_info is None:
            raise OcpReleaseEcrMirrorError(f"Could not find rds "
                                           f"identifier "
                                           f"{ocp_release_identifier} in "
                                           f"namespace {namespace['name']}")

        ocp_art_dev_info = self._get_tf_resource_info(namespace,
                                                      ocp_art_dev_identifier)
        if ocp_art_dev_info is None:
            raise OcpReleaseEcrMirrorError(f"Could not find rds identifier"
                                           f" {ocp_art_dev_identifier} in"
                                           f"namespace {namespace['name']}")

        # Getting the AWS Client for the accounts
        aws_accounts = [
            self._get_aws_account_info(account=ocp_release_info['account']),
            self._get_aws_account_info(account=ocp_art_dev_info['account'])
        ]
        self.aws_cli = AWSApi(thread_pool_size=1,
                              accounts=aws_accounts,
                              settings=self.settings,
                              init_ecr_auth_tokens=True)
        self.aws_cli.map_ecr_resources()

        self.ocp_release_ecr_uri = self._get_image_uri(
            account=ocp_release_info['account'],
            repository=ocp_release_identifier)
        if self.ocp_release_ecr_uri is None:
            raise OcpReleaseEcrMirrorError(f"Could not find the "
                                           f"ECR repository "
                                           f"{ocp_release_identifier}")

        self.ocp_art_dev_ecr_uri = self._get_image_uri(
            account=ocp_art_dev_info['account'],
            repository=ocp_art_dev_identifier)
        if self.ocp_art_dev_ecr_uri is None:
            raise OcpReleaseEcrMirrorError(f"Could not find the "
                                           f"ECR repository "
                                           f"{ocp_art_dev_identifier}")

        # Getting all the credentials
        quay_creds = self._get_quay_creds()
        ocp_release_creds = self._get_ecr_creds(
            account=ocp_release_info['account'],
            region=ocp_release_info['region'])
        ocp_art_dev_creds = self._get_ecr_creds(
            account=ocp_art_dev_info['account'],
            region=ocp_art_dev_info['region'])

        # Creating a single dictionary with all credentials to be used by the
        # "oc adm release mirror" command
        self.registry_creds = {
            'auths': {
                **quay_creds['auths'],
                **ocp_release_creds['auths'],
                **ocp_art_dev_creds['auths'],
            }
        }

    def run(self):
        ocp_releases = self._get_ocp_releases()
        if not ocp_releases:
            raise RuntimeError('No OCP Releases found')

        for ocp_release in ocp_releases:
            tag = ocp_release.split(':')[-1]
            dest_ocp_release = f'{self.ocp_release_ecr_uri}:{tag}'
            self._run_mirror(ocp_release=ocp_release,
                             dest_ocp_release=dest_ocp_release,
                             dest_ocp_art_dev=self.ocp_art_dev_ecr_uri)

    def _run_mirror(self, ocp_release, dest_ocp_release, dest_ocp_art_dev):
        # Checking if the image is already there
        if self._is_image_there(dest_ocp_release):
            LOG.info(f'Image {ocp_release} already in '
                     f'the mirror. Skipping.')
            return

        LOG.info(f'Mirroring {ocp_release} to {dest_ocp_art_dev} '
                 f'to_release {dest_ocp_release}')

        if self.dry_run:
            return

        # Creating a new, bare, OC client since we don't
        # want to run this against any cluster or via
        # a jump host
        oc_cli = OC(server='',
                    token='',
                    jh=None,
                    settings=None,
                    init_projects=False,
                    init_api_resources=False)
        oc_cli.release_mirror(from_release=ocp_release,
                              to=dest_ocp_art_dev,
                              to_release=dest_ocp_release,
                              dockerconfig=self.registry_creds)

    def _is_image_there(self, image):
        image_obj = Image(image)

        for registry, creds in self.registry_creds['auths'].items():
            # Getting the credentials for the image_obj
            registry_obj = urlparse(registry)
            if registry_obj.netloc != image_obj.registry:
                continue
            image_obj.auth = (creds['username'], creds['password'])

            # Checking if the image is already
            # in the registry
            if image_obj:
                return True

        return False

    @staticmethod
    def _get_aws_account_info(account):
        for account_info in queries.get_aws_accounts():
            if 'name' not in account_info:
                continue
            if account_info['name'] != account:
                continue
            return account_info

    def _get_ocp_releases(self):
        ocp_releases = list()
        clusterimagesets = self.oc_cli.get_all('clusterimageset')
        for clusterimageset in clusterimagesets['items']:
            release_image = clusterimageset['spec']['releaseImage']
            # There are images in some ClusterImagesSets not coming
            # from quay.io, e.g.:
            # registry.svc.ci.openshift.org/ocp/release:4.2.0-0.nightly-2020-11-04-053758
            # Let's filter out everything not from quay.io
            if not release_image.startswith('quay.io'):
                continue
            ocp_releases.append(release_image)
        return ocp_releases

    def _get_quay_creds(self):
        return self.ocm_cli.get_pull_secrets()

    def _get_ecr_creds(self, account, region):
        if region is None:
            region = self.aws_cli.accounts[account]['resourcesDefaultRegion']
        auth_token = f'{account}/{region}'
        data = self.aws_cli.auth_tokens[auth_token]
        auth_data = data['authorizationData'][0]
        server = auth_data['proxyEndpoint']
        token = auth_data['authorizationToken']
        password = base64.b64decode(token).decode('utf-8').split(':')[1]

        return {
            'auths': {
                server: {
                    'username': '******',
                    'password': password,
                    'email': '*****@*****.**',
                    'auth': token
                }
            }
        }

    @staticmethod
    def _get_tf_resource_info(namespace, identifier):
        tf_resources = namespace['terraformResources']
        for tf_resource in tf_resources:
            if 'identifier' not in tf_resource:
                continue

            if tf_resource['identifier'] != identifier:
                continue

            if tf_resource['provider'] != 'ecr':
                continue

            return {
                'account': tf_resource['account'],
                'region': tf_resource.get('region'),
            }

    def _get_image_uri(self, account, repository):
        for repo in self.aws_cli.resources[account]['ecr']:
            if repo['repositoryName'] == repository:
                return repo['repositoryUri']
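
_get_ecr_creds relies on the fact that an ECR authorizationToken is the base64 encoding of "AWS:<password>", so field 1 of the decoded value is the password. A quick standalone check with a fabricated token:

import base64

token = base64.b64encode(b'AWS:s3cr3t-password').decode('utf-8')
password = base64.b64decode(token).decode('utf-8').split(':')[1]
print(password)  # s3cr3t-password
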
Example #15
def run(dry_run=False, thread_pool_size=10, enable_deletion=False):
    aws = AWSApi(thread_pool_size)
    keys_to_delete = fetch_keys_to_delete()
    aws.delete_keys(dry_run, keys_to_delete)