Exemplo n.º 1
0
def run(dry_run,
        gitlab_project_id=None,
        thread_pool_size=10,
        enable_deletion=False):
    """Delete AWS access keys flagged in AWS support cases.

    Keys mentioned in support cases are filtered down to those that were
    not already deleted and that still exist in the account, then handed
    to act() for processing.
    """
    accounts = queries.get_aws_accounts()
    settings = queries.get_app_interface_settings()
    aws = AWSApi(thread_pool_size, accounts, settings=settings)
    deleted_keys = get_deleted_keys(accounts)
    existing_keys = aws.get_users_keys()
    support_cases = aws.get_support_cases()

    def _should_delete(candidate):
        account = candidate['account']
        key = candidate['key']
        already_deleted = deleted_keys.get(account)
        if already_deleted and key in already_deleted:
            # key was handled in a previous run
            return False
        per_user_keys = existing_keys.get(account)
        if per_user_keys:
            # per_user_keys maps user -> [keys]; flatten before testing
            all_keys = itertools.chain.from_iterable(per_user_keys.values())
            if key not in all_keys:
                # key no longer exists in the account
                return False
        return True

    keys_to_delete = [
        ktd for ktd in get_keys_to_delete(support_cases)
        if _should_delete(ktd)
    ]
    act(dry_run, gitlab_project_id, accounts, keys_to_delete)
Exemplo n.º 2
0
def clusters_egress_ips(ctx):
    """Print NAT gateway egress IPs for OCM clusters with AWS infra access."""
    settings = queries.get_app_interface_settings()
    eligible = [
        cluster for cluster in queries.get_clusters()
        if cluster.get('ocm') is not None
        and cluster.get('awsInfrastructureAccess') is not None
    ]
    ocm_map = OCMMap(clusters=eligible, settings=settings)

    results = []
    for cluster in eligible:
        name = cluster['name']
        # resolve the 'network-mgmt' access-level account for this cluster
        account = tfvpc.aws_account_from_infrastructure_access(
            cluster, 'network-mgmt', ocm_map)
        aws_api = AWSApi(1, [account], settings=settings)
        ips = aws_api.get_cluster_nat_gateways_egress_ips(account)
        results.append({
            'cluster': name,
            'egress_ips': ', '.join(sorted(ips)),
        })

    print_output(ctx.obj['options'], results, ['cluster', 'egress_ips'])
Exemplo n.º 3
0
def clusters_egress_ips(ctx):
    """Print NAT gateway egress IPs for OCM clusters with AWS infra management accounts."""
    settings = queries.get_app_interface_settings()
    candidates = [
        cluster for cluster in queries.get_clusters()
        if cluster.get('ocm') is not None
        and cluster.get('awsInfrastructureManagementAccounts') is not None
    ]
    ocm_map = OCMMap(clusters=candidates, settings=settings)

    results = []
    for cluster in candidates:
        name = cluster['name']
        mgmt_account = tfvpc._get_default_management_account(cluster)
        assume_account = tfvpc._build_infrastructure_assume_role(
            mgmt_account,
            cluster,
            ocm_map.get(name)
        )
        # the assume-role account inherits the management account's region
        assume_account['resourcesDefaultRegion'] = \
            mgmt_account['resourcesDefaultRegion']
        aws_api = AWSApi(1, [assume_account], settings=settings)
        ips = aws_api.get_cluster_nat_gateways_egress_ips(assume_account)
        results.append({
            'cluster': name,
            'egress_ips': ', '.join(sorted(ips)),
        })

    print_output(ctx.obj['options'], results, ['cluster', 'egress_ips'])
Exemplo n.º 4
0
def run(dry_run, gitlab_project_id=None, thread_pool_size=10):
    """Scan git history of all known repos for leaked AWS keys and delete them."""
    accounts = queries.get_aws_accounts()
    settings = queries.get_app_interface_settings()
    aws = AWSApi(thread_pool_size, accounts, settings=settings)
    existing_keys = aws.get_users_keys()
    # flatten account -> {user: [keys]} into a single list of keys
    existing_keys_list = [
        key
        for per_user in existing_keys.values()
        for user_keys in per_user.values()
        for key in user_keys
    ]
    logging.info(f"found {len(existing_keys_list)} existing keys")

    app_int_github_repos = queries.get_repos(server="https://github.com")
    all_repos = get_all_repos_to_scan(app_int_github_repos)
    logging.info(f"about to scan {len(all_repos)} repos")

    results = threaded.run(
        git_secrets.scan_history,
        all_repos,
        thread_pool_size,
        existing_keys=existing_keys_list,
    )
    all_leaked_keys = [key for keys in results for key in keys]

    deleted_keys = aws_sos.get_deleted_keys(accounts)
    keys_to_delete = []
    for leaked_key in all_leaked_keys:
        for account, per_user in existing_keys.items():
            account_keys = [k for keys in per_user.values() for k in keys]
            # only act on keys that still exist and were not deleted yet
            if leaked_key in account_keys and \
                    leaked_key not in deleted_keys[account]:
                keys_to_delete.append({
                    "account": account,
                    "key": leaked_key,
                })
    aws_sos.act(dry_run, gitlab_project_id, accounts, keys_to_delete)
Exemplo n.º 5
0
def run(dry_run):
    """Reset AWS console passwords for users who requested it via app-interface."""
    accounts = queries.get_aws_accounts(reset_passwords=True)
    settings = queries.get_app_interface_settings()
    state = State(integration=QONTRACT_INTEGRATION,
                  accounts=accounts,
                  settings=settings)

    for account in accounts:
        account_name = account['name']
        requests = account.get('resetPasswords')
        if not requests:
            continue
        # created lazily: only instantiated when a reset is actually done
        aws_api = None
        for request in requests:
            user_name = request['user']['org_username']
            request_id = request['requestId']
            state_key = f"{account_name}/{user_name}/{request_id}"
            if state.exists(state_key):
                # this request was already fulfilled in a previous run
                continue

            logging.info(['reset_password', account_name, user_name])
            if dry_run:
                continue

            if aws_api is None:
                aws_api = AWSApi(1, [account], settings=settings)

            aws_api.reset_password(account_name, user_name)
            state.add(state_key)
Exemplo n.º 6
0
    def __init__(self, accounts, settings=None):
        """Initialize an SQS client bound to the gitlab PR submitter queue."""
        self.queue_url = os.environ['gitlab_pr_submitter_queue_url']
        account = self.get_queue_account(accounts, self.queue_url)
        session = AWSApi(1, accounts, settings=settings).get_session(account)
        self.sqs = session.client('sqs')
Exemplo n.º 7
0
    def __init__(self, accounts, settings=None):
        """Initialize an SQS client bound to the gitlab PR submitter queue."""
        queue_url = os.environ["gitlab_pr_submitter_queue_url"]
        account = self.get_queue_account(accounts, queue_url)
        # only the account that owns the queue is needed
        relevant_accounts = [a for a in accounts if a["name"] == account]
        aws_api = AWSApi(1, relevant_accounts, settings=settings,
                         init_users=False)
        session = aws_api.get_session(account)

        self.sqs = session.client("sqs")
        self.queue_url = queue_url
Exemplo n.º 8
0
    def __init__(self, integration, accounts, settings=None):
        """Initiates S3 client from AWSApi."""
        self.state_path = f"state/{integration}"
        self.bucket = os.environ['APP_INTERFACE_STATE_BUCKET']
        account = os.environ['APP_INTERFACE_STATE_BUCKET_ACCOUNT']
        # restrict to the account that owns the state bucket
        bucket_accounts = [a for a in accounts if a['name'] == account]
        aws_api = AWSApi(1, bucket_accounts, settings=settings)
        self.client = aws_api.get_session(account).client('s3')
Exemplo n.º 9
0
def run(dry_run, thread_pool_size=10,
        disable_service_account_keys=False, defer=None):
    """Delete flagged IAM access keys using terraform working dirs."""
    accounts = queries.get_aws_accounts()
    settings = queries.get_app_interface_settings()
    aws = AWSApi(thread_pool_size, accounts, settings=settings)
    keys_to_delete = get_keys_to_delete(accounts)
    working_dirs = init_tf_working_dirs(accounts, thread_pool_size, settings)
    # always clean up the working dirs, even on failure
    defer(lambda: cleanup(working_dirs))
    failed = aws.delete_keys(
        dry_run,
        keys_to_delete,
        working_dirs,
        disable_service_account_keys,
    )
    if failed:
        sys.exit(1)
Exemplo n.º 10
0
def run(dry_run):
    """Share AMIs between AWS accounts and replicate their tags.

    For every sharing-enabled account with an 'ami' provider share,
    matching AMIs from the source account are shared with the destination
    account, marked with MANAGED_TAG, and missing source tags are copied
    onto the destination AMI. With dry_run, actions are only logged.
    """
    accounts = queries.get_aws_accounts(sharing=True)
    sharing_accounts = filter_accounts(accounts)
    settings = queries.get_app_interface_settings()
    aws_api = AWSApi(1, sharing_accounts, settings=settings, init_users=False)

    for src_account in sharing_accounts:
        sharing = src_account.get("sharing")
        if not sharing:
            continue
        for share in sharing:
            # only the 'ami' sharing provider is handled here
            if share["provider"] != "ami":
                continue
            dst_account = share["account"]
            regex = share["regex"]
            region = get_region(share, src_account, dst_account)
            # look up matching AMIs as seen from both ends of the share
            src_amis = aws_api.get_amis_details(src_account, src_account, regex, region)
            dst_amis = aws_api.get_amis_details(dst_account, src_account, regex, region)

            for src_ami in src_amis:
                src_ami_id = src_ami["image_id"]
                found_dst_amis = [d for d in dst_amis if d["image_id"] == src_ami_id]
                if not found_dst_amis:
                    # AMI not visible in the destination yet - share it
                    logging.info(
                        [
                            "share_ami",
                            src_account["name"],
                            dst_account["name"],
                            src_ami_id,
                        ]
                    )
                    if not dry_run:
                        aws_api.share_ami(
                            src_account, dst_account["uid"], src_ami_id, region
                        )
                    # we assume an unshared ami does not have tags
                    found_dst_amis = [{"image_id": src_ami_id, "tags": []}]

                dst_ami = found_dst_amis[0]
                dst_ami_id = dst_ami["image_id"]
                dst_ami_tags = dst_ami["tags"]
                # mark the shared AMI as managed by this integration
                if MANAGED_TAG not in dst_ami_tags:
                    logging.info(
                        ["tag_shared_ami", dst_account["name"], dst_ami_id, MANAGED_TAG]
                    )
                    if not dry_run:
                        aws_api.create_tag(dst_account, dst_ami_id, MANAGED_TAG)
                # replicate source tags that are missing on the destination
                src_ami_tags = src_ami["tags"]
                for src_tag in src_ami_tags:
                    if src_tag not in dst_ami_tags:
                        logging.info(
                            ["tag_shared_ami", dst_account["name"], dst_ami_id, src_tag]
                        )
                        if not dry_run:
                            aws_api.create_tag(dst_account, dst_ami_id, src_tag)
def run(dry_run, thread_pool_size=10, io_dir="throughput/"):
    """Garbage-collect AWS resources that have no owner."""
    gc_accounts = [
        account for account in queries.get_aws_accounts()
        if account.get("garbageCollection")
    ]
    settings = queries.get_app_interface_settings()
    aws = AWSApi(thread_pool_size, gc_accounts, settings=settings)
    if dry_run:
        # dry runs simulate user deletions using data under io_dir
        aws.simulate_deleted_users(io_dir)
    aws.map_resources()
    aws.delete_resources_without_owner(dry_run)
Exemplo n.º 12
0
def build_desired_state_vpc_single_cluster(cluster_info, ocm: OCM,
                                           awsapi: AWSApi):
    """Build desired-state items for a cluster's 'account-vpc' peerings.

    :param cluster_info: cluster definition (name, network, spec, peering)
    :param ocm: OCM client used to resolve the terraform assume-role
    :param awsapi: AWSApi used to look up the requester VPC details
    :return: list of peering items (requester/accepter pairs)
    :raises BadTerraformPeeringState: if the cluster's VPC ID is not found
    """
    desired_state = []

    peering_info = cluster_info['peering']
    peer_connections = peering_info['connections']
    cluster = cluster_info['name']

    for peer_connection in peer_connections:
        # We only care about account-vpc peering providers
        peer_connection_provider = peer_connection['provider']
        if not peer_connection_provider == 'account-vpc':
            continue
        # requester is the cluster's AWS account
        requester = {
            'cidr_block': cluster_info['network']['vpc'],
            'region': cluster_info['spec']['region']
        }
        connection_name = peer_connection['name']
        peer_vpc = peer_connection['vpc']
        # accepter is the peered AWS account
        accepter = {
            'vpc_id': peer_vpc['vpc_id'],
            'cidr_block': peer_vpc['cidr_block'],
            'region': peer_vpc['region']
        }
        account = peer_vpc['account']
        # assume_role is the role to assume to provision the peering
        # connection request, through the accepter AWS account.
        account['assume_role'] = \
            ocm.get_aws_infrastructure_access_terraform_assume_role(
            cluster,
            peer_vpc['account']['uid'],
            peer_vpc['account']['terraformUsername']
        )
        account['assume_region'] = requester['region']
        account['assume_cidr'] = requester['cidr_block']
        # resolve the requester VPC (and optionally its route tables)
        requester_vpc_id, requester_route_table_ids, _ = \
            awsapi.get_cluster_vpc_details(
                account,
                route_tables=peer_connection.get('manageRoutes')
            )

        if requester_vpc_id is None:
            raise BadTerraformPeeringState(
                f'[{cluster} could not find VPC ID for cluster')
        requester['vpc_id'] = requester_vpc_id
        requester['route_table_ids'] = requester_route_table_ids
        # both sides are provisioned through the same (accepter) account
        requester['account'] = account
        accepter['account'] = account
        item = {
            'connection_provider': peer_connection_provider,
            'connection_name': connection_name,
            'requester': requester,
            'accepter': accepter,
            'deleted': peer_connection.get('delete', False)
        }
        desired_state.append(item)
    return desired_state
Exemplo n.º 13
0
def aws_api(accounts, mocker):
    """Build an AWSApi whose SecretReader is mocked with static credentials."""
    secret_reader = mocker.patch(
        "reconcile.utils.aws_api.SecretReader", autospec=True)
    secret_reader.return_value.read_all.return_value = {
        "aws_access_key_id": "key_id",
        "aws_secret_access_key": "access_key",
        "region": "tf_state_bucket_region",
    }
    return AWSApi(1, accounts, init_users=False)
def run(dry_run,
        gitlab_project_id=None,
        thread_pool_size=10,
        enable_deletion=False):
    """Delete AWS access keys flagged in AWS support cases.

    Fixes two defects in the previous comprehension-based filter:
    1. ``ktd['key'] in existing_keys[account]`` tested membership against
       a dict keyed by user names, never against the actual key lists.
    2. Accounts missing from deleted_keys/existing_keys were skipped
       entirely, although such keys should still be acted on.
    The filter now matches the loop-based sibling implementation.
    """
    accounts = queries.get_aws_accounts()
    settings = queries.get_app_interface_settings()
    aws = AWSApi(thread_pool_size, accounts, settings=settings)
    deleted_keys = get_deleted_keys(accounts)
    existing_keys = aws.get_users_keys()
    aws_support_cases = aws.get_support_cases()
    keys_to_delete_from_cases = get_keys_to_delete(aws_support_cases)
    keys_to_delete = []
    for ktd in keys_to_delete_from_cases:
        account = ktd['account']
        key = ktd['key']
        # skip keys that were already deleted in a previous run
        account_deleted_keys = deleted_keys.get(account)
        if account_deleted_keys and key in account_deleted_keys:
            continue
        # existing_keys maps account -> {user: [keys]}; flatten the
        # per-user key lists before testing membership
        account_existing_keys = existing_keys.get(account)
        if account_existing_keys:
            keys_only = itertools.chain.from_iterable(
                account_existing_keys.values())
            if key not in keys_only:
                # key no longer exists in the account
                continue
        keys_to_delete.append(ktd)
    act(dry_run, gitlab_project_id, accounts, keys_to_delete)
Exemplo n.º 15
0
    def __init__(self, instance, dry_run):
        """Prepare an ECR mirroring worker for one mirror instance.

        Resolves the target ECR repository URI and its credentials, and
        (optionally) the pull credentials for the source image. On a
        missing ECR repository, self.error is set instead of raising.
        """
        self.dry_run = dry_run
        self.instance = instance
        self.settings = queries.get_app_interface_settings()
        self.secret_reader = SecretReader(settings=self.settings)
        self.skopeo_cli = Skopeo(dry_run)
        self.error = False

        identifier = instance["identifier"]
        account = instance["account"]
        region = instance.get("region")

        self.aws_cli = AWSApi(
            thread_pool_size=1,
            accounts=[self._get_aws_account_info(account)],
            settings=self.settings,
            init_ecr_auth_tokens=True,
        )

        # build the ECR resource map so the repository URI can be resolved
        self.aws_cli.map_ecr_resources()

        self.ecr_uri = self._get_image_uri(
            account=account,
            repository=identifier,
        )
        if self.ecr_uri is None:
            # flag the problem and continue; callers check self.error
            self.error = True
            LOG.error(f"Could not find the ECR repository {identifier}")

        self.ecr_username, self.ecr_password = self._get_ecr_creds(
            account=account,
            region=region,
        )
        self.ecr_auth = f"{self.ecr_username}:{self.ecr_password}"

        # credentials for pulling the source image (optional)
        self.image_username = None
        self.image_password = None
        self.image_auth = None
        pull_secret = self.instance["mirror"]["pullCredentials"]
        if pull_secret is not None:
            raw_data = self.secret_reader.read_all(pull_secret)
            self.image_username = raw_data["user"]
            self.image_password = raw_data["token"]
            self.image_auth = f"{self.image_username}:{self.image_password}"
def run(dry_run, vault_output_path=""):
    """Write ECR auth tokens to vault as dockercfg and basic-auth secrets."""
    ecr_accounts = [a for a in queries.get_aws_accounts() if a.get("ecrs")]
    settings = queries.get_app_interface_settings()
    aws = AWSApi(1, ecr_accounts, settings=settings,
                 init_ecr_auth_tokens=True)
    for account_name, token_data in aws.auth_tokens.items():
        write_output_to_vault(
            dry_run, vault_output_path, account_name,
            construct_dockercfg_secret_data(token_data), "dockercfg")
        write_output_to_vault(
            dry_run, vault_output_path, account_name,
            construct_basic_auth_secret_data(token_data), "basic-auth")
Exemplo n.º 17
0
    def __init__(self,
                 integration: str,
                 accounts: Iterable[Mapping[str, Any]],
                 settings: Optional[Mapping[str, Any]] = None) -> None:
        """Initiates S3 client from AWSApi."""
        self.state_path = f'state/{integration}' if integration else 'state'
        self.bucket = os.environ['APP_INTERFACE_STATE_BUCKET']
        account = os.environ['APP_INTERFACE_STATE_BUCKET_ACCOUNT']
        # only the bucket-owning account is needed for the session
        bucket_accounts = [a for a in accounts if a['name'] == account]
        aws_api = AWSApi(1, bucket_accounts, settings=settings,
                         init_users=False)
        self.client = aws_api.get_session(account).client('s3')

        # fail fast if the state bucket cannot be reached
        try:
            self.client.head_bucket(Bucket=self.bucket)
        except ClientError as details:
            raise StateInaccessibleException(
                f"Bucket {self.bucket} is not accessible - {str(details)}")
Exemplo n.º 18
0
def run(
    dry_run,
    thread_pool_size=10,
    disable_service_account_keys=False,
    account_name=None,
    defer=None,
):
    """Delete flagged IAM access keys, optionally sharded to one account."""
    accounts = filter_accounts(queries.get_aws_accounts(), account_name)
    if not accounts:
        logging.debug("nothing to do here")
        # using return because terraform-resources
        # may be the calling entity, and has more to do
        return

    settings = queries.get_app_interface_settings()
    aws = AWSApi(thread_pool_size, accounts, settings=settings)
    keys_to_delete = get_keys_to_delete(accounts)
    working_dirs = init_tf_working_dirs(accounts, thread_pool_size, settings)
    # always clean up the working dirs, even on failure
    defer(lambda: cleanup(working_dirs))
    failed = aws.delete_keys(
        dry_run,
        keys_to_delete,
        working_dirs,
        disable_service_account_keys,
    )
    if failed:
        sys.exit(1)
def setup(print_to_file, thread_pool_size: int) \
        -> tuple[list[dict[str, Any]], dict[str, str], bool, AWSApi]:
    """Prepare terraform-users state.

    Returns the AWS accounts, the terraform working dirs, the population
    error flag and an AWSApi handle.
    """
    gqlapi = gql.get_api()
    accounts = queries.get_aws_accounts()
    settings = queries.get_app_interface_settings()
    roles = expiration.filter(gqlapi.query(TF_QUERY)['roles'])
    # keep only roles that actually carry AWS content
    tf_roles = [
        role for role in roles
        if role['aws_groups'] is not None
        or role['user_policies'] is not None
    ]
    ts = Terrascript(QONTRACT_INTEGRATION,
                     QONTRACT_TF_PREFIX,
                     thread_pool_size,
                     accounts,
                     settings=settings)
    err = ts.populate_users(tf_roles)
    working_dirs = ts.dump(print_to_file)
    aws_api = AWSApi(1, accounts, settings=settings, init_users=False)
    return accounts, working_dirs, err, aws_api
def setup(dry_run, print_to_file, thread_pool_size, internal, use_jump_host,
          account_name, extra_labels):
    """Prepare state for a terraform-resources run.

    Fetches current cluster state, initializes terraform working dirs,
    populates terrascript resources and returns what the caller needs to
    plan/apply.

    :return: (ri, oc_map, tf, tf_namespaces)
    :raises ValueError: if account_name does not match any AWS account
    """
    gqlapi = gql.get_api()
    accounts = queries.get_aws_accounts()
    if account_name:
        # shard the run down to a single AWS account
        accounts = [n for n in accounts if n['name'] == account_name]
        if not accounts:
            raise ValueError(f"aws account {account_name} is not found")
        extra_labels['shard_key'] = account_name
    settings = queries.get_app_interface_settings()
    namespaces = gqlapi.query(TF_NAMESPACES_QUERY)['namespaces']
    tf_namespaces = filter_tf_namespaces(namespaces, account_name)
    ri, oc_map = fetch_current_state(dry_run, tf_namespaces, thread_pool_size,
                                     internal, use_jump_host, account_name)
    ts, working_dirs = init_working_dirs(accounts,
                                         thread_pool_size,
                                         settings=settings)
    aws_api = AWSApi(1, accounts, settings=settings, init_users=False)
    tf = Terraform(QONTRACT_INTEGRATION, QONTRACT_INTEGRATION_VERSION,
                   QONTRACT_TF_PREFIX, accounts, working_dirs,
                   thread_pool_size, aws_api)
    # existing terraform output secrets feed resource population below
    existing_secrets = tf.get_terraform_output_secrets()
    clusters = [c for c in queries.get_clusters() if c.get('ocm') is not None]
    if clusters:
        ocm_map = OCMMap(clusters=clusters,
                         integration=QONTRACT_INTEGRATION,
                         settings=settings)
    else:
        # no OCM clusters present; populate_resources accepts None
        ocm_map = None
    ts.populate_resources(tf_namespaces,
                          existing_secrets,
                          account_name,
                          ocm_map=ocm_map)
    ts.dump(print_to_file, existing_dirs=working_dirs)

    return ri, oc_map, tf, tf_namespaces
Exemplo n.º 21
0
def build_desired_state(zones: Iterable[Mapping],
                        all_accounts: Iterable[Mapping],
                        settings: Mapping) -> list[dict]:
    """
    Build the desired state from the app-interface resources

    :param zones: List of zone resources to build state for
    :type zones: list of dict
    :param all_accounts: all AWS accounts; used to resolve the account
        owning a '_target_namespace_zone'
    :type all_accounts: list of dict
    :param settings: app-interface settings, passed to AWSApi
    :type settings: dict
    :return: State
    :rtype: list of dict
    """

    desired_state = []
    for zone in zones:
        account = zone["account"]
        account_name = account["name"]

        zone_name = zone["name"]
        zone_values = {
            "name": zone_name,
            "account_name": account_name,
            "records": []
        }

        # a vpc will be referenced for a zone to be considered private
        vpc = zone.get("vpc")
        if vpc:
            zone_values["vpc"] = {
                "vpc_id": vpc["vpc_id"],
                "vpc_region": vpc["region"]
            }

        # Check if we have unmanaged_record_names (urn) and compile them
        # all as regular expressions
        urn_compiled = []
        for urn in zone.get("unmanaged_record_names", []):
            urn_compiled.append(re.compile(urn))

        for record in zone["records"]:
            record_name = record["name"]
            record_type = record["type"]

            # Check if this record should be ignored
            # as per 'unmanaged_record_names'
            ignored = False
            for regex in urn_compiled:
                if regex.fullmatch(record["name"]):
                    logging.debug(f"{zone_name}: excluding unmanaged "
                                  f"record {record_name} because it matched "
                                  f"unmanaged_record_names pattern "
                                  f"'{regex.pattern}'")
                    ignored = True
            if ignored:
                continue

            # We use the record object as-is from the list as the terraform
            # data to apply. This makes things simpler and map 1-to-1 with
            # Terraform's capabilities. As such we need to remove (pop) some of
            # the keys we use for our own features

            # Process '_target_cluster': resolve the record values from the
            # target cluster's ELB FQDN (A records resolve it to IPs)
            target_cluster = record.pop("_target_cluster", None)
            if target_cluster:
                target_cluster_elb = target_cluster["elbFQDN"]

                if target_cluster_elb is None or target_cluster_elb == "":
                    msg = (f"{zone_name}: field `_target_cluster` for record "
                           f"{record_name} of type {record_type} points to a "
                           f"cluster that has an empty elbFQDN field.")
                    logging.error(msg)
                    sys.exit(ExitCodes.ERROR)

                record_values = []
                if record_type == "A":
                    record_values = dnsutils.get_a_records(target_cluster_elb)
                elif record_type == "CNAME":
                    record_values = [target_cluster_elb]
                else:
                    # only A and CNAME records support _target_cluster
                    msg = (f"{zone_name}: field `_target_cluster` found "
                           f"for record {record_name} of type {record_type}. "
                           f"The use of _target_cluster on this record type "
                           f"is not supported by the integration.")
                    logging.error(msg)
                    sys.exit(ExitCodes.ERROR)

                if not record_values:
                    msg = (f"{zone_name}: field `_target_cluster` found "
                           f"for record {record_name} of type {record_type} "
                           f"has no values! (invalid elb FQDN?)")
                    logging.error(msg)
                    sys.exit(ExitCodes.ERROR)

                msg = (f"{zone_name}: field `_target_cluster` found "
                       f"for record {record_name} of type {record_type}. "
                       f"Value will be set to {record_values}")
                logging.debug(msg)
                record["records"] = record_values

            # Process '_target_namespace_zone': resolve the NS records of a
            # route53-zone terraform resource defined in another namespace
            target_namespace_zone = record.pop("_target_namespace_zone", None)
            if target_namespace_zone:
                tf_resources = target_namespace_zone["namespace"][
                    "terraformResources"]
                tf_zone_name = target_namespace_zone["name"]
                tf_zone_resources = [
                    tfr for tfr in tf_resources
                    if tfr["provider"] == "route53-zone"
                    and tfr["name"] == tf_zone_name
                ]
                if not tf_zone_resources:
                    logging.error(
                        f"{zone_name}: field `_target_namespace_zone` found "
                        f"for record {record_name}, but target zone not found: "
                        f"{tf_zone_name}")
                    sys.exit(ExitCodes.ERROR)
                tf_zone_resource = tf_zone_resources[0]
                tf_zone_account_name = tf_zone_resource["account"]
                # resolve the AWS account that owns the target zone
                zone_account = [
                    a for a in all_accounts
                    if a["name"] == tf_zone_account_name
                ][0]
                awsapi = AWSApi(1, [zone_account],
                                settings=settings,
                                init_users=False)
                tf_zone_region = (tf_zone_resource.get("region")
                                  or zone_account["resourcesDefaultRegion"])
                tf_zone_ns_records = awsapi.get_route53_zone_ns_records(
                    tf_zone_account_name, tf_zone_name, tf_zone_region)
                if not tf_zone_ns_records:
                    # target zone may not exist yet; skip, don't fail
                    logging.warning(
                        f"{zone_name}: field `_target_namespace_zone` found "
                        f"for record {record_name}, but target zone not found (yet): "
                        f"{tf_zone_name}")
                    continue
                logging.debug(
                    f"{zone_name}: field `_target_namespace_zone` found "
                    f"for record {record_name}, Values are: "
                    f"{tf_zone_ns_records}")
                record["records"] = tf_zone_ns_records

            # Process '_healthcheck'
            healthcheck = record.pop("_healthcheck", None)
            if healthcheck:
                logging.debug(f"{zone_name}: field `_healthcheck` found "
                              f"for record {record_name}. Values are: "
                              f"{healthcheck}")
                record["healthcheck"] = healthcheck

            zone_values["records"].append(record)

        desired_state.append(zone_values)
    return desired_state
Exemplo n.º 22
0
def build_desired_state_cluster(clusters, ocm_map, settings):
    """
    Fetch state for VPC peerings between two OCM clusters

    :param clusters: cluster definitions with peering connections
    :param ocm_map: OCMMap used to resolve infrastructure-access accounts
    :param settings: app-interface settings, passed to AWSApi
    :return: (desired_state, error); error is True if any cluster could
        not be fully resolved (processing continues for the others)
    """
    desired_state = []
    error = False

    for cluster_info in clusters:
        cluster_name = cluster_info['name']

        # Find an aws account with the "network-mgmt" access level on the
        # requester cluster and use that as the account for the requester
        req_aws = aws_account_from_infrastructure_access(
            cluster_info, 'network-mgmt', ocm_map)
        if not req_aws:
            msg = f"could not find an AWS account with the " \
                  f"'network-mgmt' access level on the cluster {cluster_name}"
            logging.error(msg)
            error = True
            continue
        req_aws['assume_region'] = cluster_info['spec']['region']
        req_aws['assume_cidr'] = cluster_info['network']['vpc']

        peering_info = cluster_info['peering']
        peer_connections = peering_info['connections']
        for peer_connection in peer_connections:
            # We only care about cluster-vpc-requester peering providers
            peer_connection_provider = peer_connection['provider']
            if not peer_connection_provider == 'cluster-vpc-requester':
                continue

            peer_connection_name = peer_connection['name']
            peer_cluster = peer_connection['cluster']
            peer_cluster_name = peer_cluster['name']
            requester_manage_routes = peer_connection.get('manageRoutes')

            # Ensure we have a matching peering connection
            peer_info = find_matching_peering(cluster_info, peer_connection,
                                              peer_cluster,
                                              'cluster-vpc-accepter')
            if not peer_info:
                msg = f"could not find a matching peering connection for " \
                      f"cluster {cluster_name}, " \
                      f"connection {peer_connection_name}"
                logging.error(msg)
                error = True
                continue
            accepter_manage_routes = peer_info.get('manageRoutes')

            # resolve the requester cluster's VPC details
            aws_api = AWSApi(1, [req_aws], settings=settings)
            requester_vpc_id, requester_route_table_ids, _ = \
                aws_api.get_cluster_vpc_details(
                    req_aws,
                    route_tables=requester_manage_routes
                )
            if requester_vpc_id is None:
                msg = f'[{cluster_name} could not find VPC ID for cluster'
                logging.error(msg)
                error = True
                continue
            requester = {
                'cidr_block': cluster_info['network']['vpc'],
                'region': cluster_info['spec']['region'],
                'vpc_id': requester_vpc_id,
                'route_table_ids': requester_route_table_ids,
                'account': req_aws
            }

            # Find an aws account with the "network-mgmt" access level on the
            # peer cluster and use that as the account for the accepter
            acc_aws = aws_account_from_infrastructure_access(
                peer_cluster, 'network-mgmt', ocm_map)
            if not acc_aws:
                msg = "could not find an AWS account with the " \
                    "'network-mgmt' access level on the cluster"
                logging.error(msg)
                error = True
                continue
            acc_aws['assume_region'] = peer_cluster['spec']['region']
            acc_aws['assume_cidr'] = peer_cluster['network']['vpc']

            # resolve the accepter cluster's VPC details
            aws_api = AWSApi(1, [acc_aws], settings=settings)
            accepter_vpc_id, accepter_route_table_ids, _ = \
                aws_api.get_cluster_vpc_details(
                    acc_aws,
                    route_tables=accepter_manage_routes
                )
            if accepter_vpc_id is None:
                msg = f'[{peer_cluster_name} could not find VPC ID for cluster'
                logging.error(msg)
                error = True
                continue
            # peer owner id is the account ID embedded in the assume_role ARN
            requester['peer_owner_id'] = acc_aws['assume_role'].split(':')[4]
            accepter = {
                'cidr_block': peer_cluster['network']['vpc'],
                'region': peer_cluster['spec']['region'],
                'vpc_id': accepter_vpc_id,
                'route_table_ids': accepter_route_table_ids,
                'account': acc_aws
            }

            item = {
                'connection_provider': peer_connection_provider,
                'connection_name': peer_connection_name,
                'requester': requester,
                'accepter': accepter,
                'deleted': peer_connection.get('delete', False)
            }
            desired_state.append(item)

    return desired_state, error
def build_desired_state_tgw_attachments(clusters, ocm_map, settings):
    """
    Fetch state for TGW attachments between a cluster and all TGWs
    in an account in the same region as the cluster.

    :param clusters: list of cluster dicts (app-interface cluster files)
        with a 'peering' section
    :param ocm_map: OCMMap used to resolve the terraform assume-role
    :param settings: app-interface settings passed through to AWSApi
    :return: (desired_state, error) tuple; error is True when a cluster's
             VPC ID could not be resolved
    """
    desired_state = []
    error = False

    for cluster_info in clusters:
        cluster = cluster_info['name']
        ocm = ocm_map.get(cluster)
        peering_info = cluster_info['peering']
        peer_connections = peering_info['connections']
        for peer_connection in peer_connections:
            # We only care about account-tgw peering providers
            peer_connection_provider = peer_connection['provider']
            if peer_connection_provider != 'account-tgw':
                continue
            # accepter is the cluster's AWS account
            cluster_region = cluster_info['spec']['region']
            cluster_cidr_block = cluster_info['network']['vpc']
            accepter = {
                'cidr_block': cluster_cidr_block,
                'region': cluster_region
            }

            account = peer_connection['account']
            # assume_role is the role to assume to provision the
            # peering connection request, through the accepter AWS account.
            account['assume_role'] = \
                ocm.get_aws_infrastructure_access_terraform_assume_role(
                    cluster,
                    account['uid'],
                    account['terraformUsername']
                )
            account['assume_region'] = accepter['region']
            account['assume_cidr'] = accepter['cidr_block']
            aws_api = AWSApi(1, [account], settings=settings)
            accepter_vpc_id, accepter_route_table_ids, \
                accepter_subnets_id_az = \
                aws_api.get_cluster_vpc_details(
                    account,
                    route_tables=peer_connection.get('manageRoutes'),
                    subnets=True,
                )

            if accepter_vpc_id is None:
                # fixed: closing bracket was missing from the log message
                logging.error(f'[{cluster}] could not find VPC ID for cluster')
                error = True
                continue
            accepter['vpc_id'] = accepter_vpc_id
            accepter['route_table_ids'] = accepter_route_table_ids
            accepter['subnets_id_az'] = accepter_subnets_id_az
            accepter['account'] = account

            account_tgws = \
                aws_api.get_tgws_details(
                    account,
                    cluster_region,
                    cluster_cidr_block,
                    # 'tags' may be absent/None: fall back to the JSON
                    # string '{}' — the previous fallback of a dict {}
                    # made json.loads raise TypeError
                    tags=json.loads(peer_connection.get('tags') or '{}'),
                    route_tables=peer_connection.get('manageRoutes'),
                    security_groups=peer_connection.get(
                        'manageSecurityGroups'),
                )
            # one desired-state item per TGW found in the account/region
            for tgw in account_tgws:
                tgw_id = tgw['tgw_id']
                connection_name = \
                    f"{peer_connection['name']}_" + \
                    f"{account['name']}-{tgw_id}"
                requester = {
                    'tgw_id': tgw_id,
                    'tgw_arn': tgw['tgw_arn'],
                    'region': tgw['region'],
                    'routes': tgw.get('routes'),
                    'rules': tgw.get('rules'),
                    'cidr_block': peer_connection.get('cidrBlock'),
                    'account': account,
                }
                item = {
                    'connection_provider': peer_connection_provider,
                    'connection_name': connection_name,
                    'requester': requester,
                    'accepter': accepter,
                    'deleted': peer_connection.get('delete', False)
                }
                desired_state.append(item)

    return desired_state, error
Exemplo n.º 24
0
def build_desired_state_vpc(clusters, ocm_map, settings):
    """
    Fetch state for VPC peerings between a cluster and a VPC (account).

    :param clusters: list of cluster dicts with a 'peering' section
    :param ocm_map: OCMMap used to resolve the terraform assume-role
    :param settings: app-interface settings passed through to AWSApi
    :return: (desired_state, error) tuple; error is True when a cluster's
             VPC ID could not be resolved
    """
    desired_state = []
    error = False

    for cluster_info in clusters:
        cluster = cluster_info['name']
        ocm = ocm_map.get(cluster)
        peering_info = cluster_info['peering']
        peer_connections = peering_info['connections']
        for peer_connection in peer_connections:
            # We only care about account-vpc peering providers
            peer_connection_provider = peer_connection['provider']
            if peer_connection_provider != 'account-vpc':
                continue
            # requester is the cluster's AWS account
            requester = {
                'cidr_block': cluster_info['network']['vpc'],
                'region': cluster_info['spec']['region']
            }

            connection_name = peer_connection['name']
            peer_vpc = peer_connection['vpc']
            # accepter is the peered AWS account
            accepter = {
                'vpc_id': peer_vpc['vpc_id'],
                'cidr_block': peer_vpc['cidr_block'],
                'region': peer_vpc['region']
            }
            account = peer_vpc['account']
            # assume_role is the role to assume to provision the
            # peering connection request, through the accepter AWS account.
            account['assume_role'] = \
                ocm.get_aws_infrastructure_access_terraform_assume_role(
                    cluster,
                    peer_vpc['account']['uid'],
                    peer_vpc['account']['terraformUsername']
                )
            account['assume_region'] = requester['region']
            account['assume_cidr'] = requester['cidr_block']
            aws_api = AWSApi(1, [account], settings=settings)
            requester_vpc_id, requester_route_table_ids, _ = \
                aws_api.get_cluster_vpc_details(
                    account,
                    route_tables=peer_connection.get('manageRoutes')
                )

            if requester_vpc_id is None:
                # fixed: closing bracket was missing from the log message
                logging.error(f'[{cluster}] could not find VPC ID for cluster')
                error = True
                continue
            requester['vpc_id'] = requester_vpc_id
            requester['route_table_ids'] = requester_route_table_ids
            requester['account'] = account
            accepter['account'] = account
            item = {
                'connection_provider': peer_connection_provider,
                'connection_name': connection_name,
                'requester': requester,
                'accepter': accepter,
                'deleted': peer_connection.get('delete', False)
            }
            desired_state.append(item)
    return desired_state, error
def run(dry_run,
        print_to_file=None,
        enable_deletion=False,
        thread_pool_size=10,
        defer=None):
    """Reconcile TGW attachments between clusters and AWS accounts.

    Builds the desired TGW-attachment state, renders it to terraform via
    Terrascript, then plans (and, unless dry_run, applies) with Terraform.

    :param dry_run: when True, stop after the terraform plan
    :param print_to_file: optional path; dump terraform config and exit
    :param enable_deletion: passed to tf.plan to allow resource deletion
    :param thread_pool_size: parallelism for Terrascript/Terraform
    :param defer: cleanup-registration callable (presumably supplied by a
        defer decorator outside this view); tf.cleanup is registered on it
    """
    settings = queries.get_app_interface_settings()
    clusters = [
        c for c in queries.get_clusters() if c.get("peering") is not None
    ]
    with_ocm = any(c.get("ocm") for c in clusters)
    if with_ocm:
        ocm_map = OCMMap(clusters=clusters,
                         integration=QONTRACT_INTEGRATION,
                         settings=settings)
    else:
        # this is a case for an OCP cluster which is not provisioned
        # through OCM. it is expected that an 'assume_role' is provided
        # on the tgw definition in the cluster file.
        ocm_map = {}

    accounts = queries.get_aws_accounts()
    # init_users=False: only API access is needed here, not IAM user state
    awsapi = AWSApi(1, accounts, settings=settings, init_users=False)

    # Fetch desired state for TGW attachments between clusters and accounts
    desired_state, err = build_desired_state_tgw_attachments(
        clusters, ocm_map, awsapi)
    if err:
        sys.exit(1)

    # check there are no repeated vpc connection names
    connection_names = [c["connection_name"] for c in desired_state]
    if len(set(connection_names)) != len(connection_names):
        logging.error("duplicate vpc connection names found")
        sys.exit(1)

    # restrict the account list to accounts that actually take part in a
    # desired attachment (requester or accepter side)
    participating_accounts = [
        item["requester"]["account"] for item in desired_state
    ]
    participating_accounts += [
        item["accepter"]["account"] for item in desired_state
    ]
    participating_account_names = [a["name"] for a in participating_accounts]
    accounts = [
        a for a in queries.get_aws_accounts()
        if a["name"] in participating_account_names
    ]

    ts = Terrascript(QONTRACT_INTEGRATION,
                     "",
                     thread_pool_size,
                     accounts,
                     settings=settings)
    ts.populate_additional_providers(participating_accounts)
    ts.populate_tgw_attachments(desired_state)
    working_dirs = ts.dump(print_to_file=print_to_file)
    # second AWSApi instance scoped to the participating accounts only
    aws_api = AWSApi(1, accounts, settings=settings, init_users=False)

    if print_to_file:
        sys.exit()

    tf = Terraform(
        QONTRACT_INTEGRATION,
        QONTRACT_INTEGRATION_VERSION,
        "",
        accounts,
        working_dirs,
        thread_pool_size,
        aws_api,
    )

    if tf is None:
        sys.exit(1)

    # ensure terraform working dirs are cleaned up even on early exit
    defer(tf.cleanup)

    disabled_deletions_detected, err = tf.plan(enable_deletion)
    if err:
        sys.exit(1)
    if disabled_deletions_detected:
        sys.exit(1)

    if dry_run:
        return

    err = tf.apply()
    if err:
        sys.exit(1)
def build_desired_state_vpc_single_cluster(cluster_info, ocm: Optional[OCM],
                                           awsapi: AWSApi):
    """Build the desired account-vpc peering state for one cluster.

    Returns a list of peering items (requester/accepter pairs) for every
    'account-vpc' connection defined on the cluster.

    :raises KeyError: when a connection has no assumeRole and no OCM
        client is available to obtain one
    :raises BadTerraformPeeringState: when the cluster VPC ID cannot be
        resolved
    """
    cluster = cluster_info['name']
    items = []

    for conn in cluster_info['peering']['connections']:
        # skip anything that is not an account-vpc peering
        if conn['provider'] != 'account-vpc':
            continue

        connection_name = conn['name']
        peer_vpc = conn['vpc']
        account = peer_vpc['account']

        # requester side: the cluster's own AWS account
        requester = {
            'cidr_block': cluster_info['network']['vpc'],
            'region': cluster_info['spec']['region'],
        }
        # accepter side: the peered account's VPC
        accepter = {
            'vpc_id': peer_vpc['vpc_id'],
            'cidr_block': peer_vpc['cidr_block'],
            'region': peer_vpc['region'],
        }

        # the role to assume to provision the peering connection request,
        # through the accepter AWS account; an explicitly provided
        # assumeRole wins over asking OCM (likely means no OCM at all)
        role = conn.get('assumeRole')
        if role:
            account['assume_role'] = role
        elif ocm is not None:
            account['assume_role'] = \
                ocm.get_aws_infrastructure_access_terraform_assume_role(
                    cluster,
                    peer_vpc['account']['uid'],
                    peer_vpc['account']['terraformUsername']
                )
        else:
            raise KeyError(
                f'[{cluster}] peering connection '
                f'{connection_name} must either specify assumeRole '
                'or ocm should be defined to obtain role to assume')
        account['assume_region'] = requester['region']
        account['assume_cidr'] = requester['cidr_block']

        vpc_id, route_table_ids, _ = awsapi.get_cluster_vpc_details(
            account,
            route_tables=conn.get('manageRoutes'),
        )
        if vpc_id is None:
            raise BadTerraformPeeringState(
                f'[{cluster}] could not find VPC ID for cluster'
            )

        requester.update({
            'vpc_id': vpc_id,
            'route_table_ids': route_table_ids,
            'account': account,
        })
        accepter['account'] = account

        items.append({
            'connection_provider': conn['provider'],
            'connection_name': connection_name,
            'requester': requester,
            'accepter': accepter,
            'deleted': conn.get('delete', False),
        })

    return items
def build_desired_state_single_cluster(cluster_info, ocm: OCM,
                                       awsapi: AWSApi):
    """Build desired cluster-to-cluster VPC peering state for one cluster.

    For every 'cluster-vpc-requester' connection on *cluster_info*, finds
    the matching 'cluster-vpc-accepter' connection on the peer cluster,
    resolves the AWS assume-roles through OCM and looks up both VPCs.

    :return: list of peering items (requester/accepter pairs)
    :raises BadTerraformPeeringState: when no matching accepter connection
        exists or a VPC ID cannot be resolved
    """
    cluster_name = cluster_info['name']

    peerings = []

    peering_info = cluster_info['peering']
    peer_connections = peering_info['connections']
    for peer_connection in peer_connections:
        # We only care about cluster-vpc-requester peering providers
        peer_connection_provider = peer_connection['provider']
        if peer_connection_provider != 'cluster-vpc-requester':
            continue

        peer_connection_name = peer_connection['name']
        peer_cluster = peer_connection['cluster']
        peer_cluster_name = peer_cluster['name']
        requester_manage_routes = peer_connection.get('manageRoutes')
        # Ensure we have a matching peering connection on the peer side
        peer_info = find_matching_peering(cluster_info,
                                          peer_cluster,
                                          'cluster-vpc-accepter')
        if not peer_info:
            raise BadTerraformPeeringState(
                "[no_matching_peering] could not find a matching peering "
                f"connection for cluster {cluster_name}, connection "
                f"{peer_connection_name}"
            )

        accepter_manage_routes = peer_info.get('manageRoutes')

        # resolve assume-roles for both sides of the peering via OCM
        req_aws, acc_aws = aws_assume_roles_for_cluster_vpc_peering(
            cluster_info,
            peer_info,
            peer_cluster,
            ocm
        )

        requester_vpc_id, requester_route_table_ids, _ = \
            awsapi.get_cluster_vpc_details(
                req_aws,
                route_tables=requester_manage_routes
            )
        if requester_vpc_id is None:
            raise BadTerraformPeeringState(
                f'[{cluster_name}] could not find VPC ID for cluster'
            )

        requester = {
            'cidr_block': cluster_info['network']['vpc'],
            'region': cluster_info['spec']['region'],
            'vpc_id': requester_vpc_id,
            'route_table_ids': requester_route_table_ids,
            'account': req_aws
        }

        accepter_vpc_id, accepter_route_table_ids, _ = \
            awsapi.get_cluster_vpc_details(
                acc_aws,
                route_tables=accepter_manage_routes
            )
        if accepter_vpc_id is None:
            raise BadTerraformPeeringState(
                f'[{peer_cluster_name}] could not find VPC ID for cluster'
            )

        # index 4 of the assume-role ARN is the AWS account ID
        # (arn:partition:service:region:account-id:resource)
        requester['peer_owner_id'] = acc_aws['assume_role'].split(':')[4]
        accepter = {
            'cidr_block': peer_cluster['network']['vpc'],
            'region': peer_cluster['spec']['region'],
            'vpc_id': accepter_vpc_id,
            'route_table_ids': accepter_route_table_ids,
            'account': acc_aws
        }

        item = {
            'connection_provider': peer_connection_provider,
            'connection_name': peer_connection_name,
            'requester': requester,
            'accepter': accepter,
            'deleted': peer_connection.get('delete', False)
        }
        peerings.append(item)

    return peerings
Exemplo n.º 28
0
    def __init__(self, dry_run, instance):
        """Set up OCM/OC clients, ECR mirror URIs and registry credentials.

        :param dry_run: stored on the instance; presumably consulted by
            later mutating methods (not visible here)
        :param instance: ocp-release-mirror instance definition (dict)
            describing the hive cluster, ECR resources and quay targets
        :raises OcpReleaseMirrorError: when a required client, terraform
            resource, ECR repository or quay push token cannot be found,
            or when two quay targets share the same registry URL
        """
        self.dry_run = dry_run
        self.settings = queries.get_app_interface_settings()

        cluster_info = instance['hiveCluster']
        hive_cluster = instance['hiveCluster']['name']

        # Getting the OCM Client for the hive cluster
        ocm_map = OCMMap(clusters=[cluster_info],
                         integration=QONTRACT_INTEGRATION,
                         settings=self.settings)

        self.ocm_cli = ocm_map.get(hive_cluster)
        if not self.ocm_cli:
            raise OcpReleaseMirrorError(f"Can't create ocm client for "
                                        f"cluster {hive_cluster}")

        # Getting the OC Client for the hive cluster
        oc_map = OC_Map(clusters=[cluster_info],
                        integration=QONTRACT_INTEGRATION,
                        settings=self.settings)
        self.oc_cli = oc_map.get(hive_cluster)
        if not self.oc_cli:
            raise OcpReleaseMirrorError(f"Can't create oc client for "
                                        f"cluster {hive_cluster}")

        namespace = instance['ecrResourcesNamespace']
        ocp_release_identifier = instance['ocpReleaseEcrIdentifier']
        ocp_art_dev_identifier = instance['ocpArtDevEcrIdentifier']

        ocp_release_info = self._get_tf_resource_info(namespace,
                                                      ocp_release_identifier)
        if ocp_release_info is None:
            raise OcpReleaseMirrorError(f"Could not find rds "
                                        f"identifier "
                                        f"{ocp_release_identifier} in "
                                        f"namespace {namespace['name']}")

        ocp_art_dev_info = self._get_tf_resource_info(namespace,
                                                      ocp_art_dev_identifier)
        if ocp_art_dev_info is None:
            raise OcpReleaseMirrorError(f"Could not find rds identifier"
                                        f" {ocp_art_dev_identifier} in"
                                        f"namespace {namespace['name']}")

        # Getting the AWS Client for the accounts
        aws_accounts = [
            self._get_aws_account_info(account=ocp_release_info['account']),
            self._get_aws_account_info(account=ocp_art_dev_info['account'])
        ]
        self.aws_cli = AWSApi(thread_pool_size=1,
                              accounts=aws_accounts,
                              settings=self.settings,
                              init_ecr_auth_tokens=True)
        self.aws_cli.map_ecr_resources()

        self.ocp_release_ecr_uri = self._get_image_uri(
            account=ocp_release_info['account'],
            repository=ocp_release_identifier)
        if self.ocp_release_ecr_uri is None:
            raise OcpReleaseMirrorError(f"Could not find the "
                                        f"ECR repository "
                                        f"{ocp_release_identifier}")

        self.ocp_art_dev_ecr_uri = self._get_image_uri(
            account=ocp_art_dev_info['account'],
            repository=ocp_art_dev_identifier)
        if self.ocp_art_dev_ecr_uri is None:
            raise OcpReleaseMirrorError(f"Could not find the "
                                        f"ECR repository "
                                        f"{ocp_art_dev_identifier}")

        # Process all the quayOrgTargets
        quay_api_store = get_quay_api_store()
        self.quay_target_orgs = []
        for quayTargetOrg in instance['quayTargetOrgs']:
            org_name = quayTargetOrg['name']
            instance_name = quayTargetOrg['instance']['name']
            org_key = OrgKey(instance_name, org_name)
            org_info = quay_api_store[org_key]

            if not org_info['push_token']:
                raise OcpReleaseMirrorError(
                    f'{org_key} has no push_token defined.')

            url = org_info['url']
            user = org_info['push_token']['user']
            token = org_info['push_token']['token']

            self.quay_target_orgs.append({
                'url': url,
                'dest_ocp_release': f"{url}/{org_name}/ocp-release",
                'dest_ocp_art_dev': f"{url}/{org_name}/ocp-v4.0-art-dev",
                'auths': self._build_quay_auths(url, user, token)
            })

        # Getting all the credentials
        quay_creds = self._get_quay_creds()
        ocp_release_creds = self._get_ecr_creds(
            account=ocp_release_info['account'],
            region=ocp_release_info['region'])
        ocp_art_dev_creds = self._get_ecr_creds(
            account=ocp_art_dev_info['account'],
            region=ocp_art_dev_info['region'])

        # Creating a single dictionary with all credentials to be used by the
        # "oc adm release mirror" command
        self.registry_creds = {
            'auths': {
                **quay_creds['auths'],
                **ocp_release_creds['auths'],
                **ocp_art_dev_creds['auths'],
            }
        }

        # Append quay_target_orgs auths to registry_creds
        for quay_target_org in self.quay_target_orgs:
            url = quay_target_org['url']

            if url in self.registry_creds['auths']:
                # fixed: the exception was previously constructed but
                # never raised, silently allowing duplicate registry URLs
                raise OcpReleaseMirrorError(
                    'Cannot mirror to the same Quay '
                    f'instance multiple times: {url}')

            self.registry_creds['auths'].update(quay_target_org['auths'])

        # Initiate channel groups
        self.channel_groups = instance['mirrorChannels']
def build_desired_state_tgw_attachments(clusters, ocm_map: OCMMap,
                                        awsapi: AWSApi):
    """
    Fetch state for TGW attachments between a cluster and all TGWs
    in an account in the same region as the cluster.

    :param clusters: list of cluster dicts with a 'peering' section
    :param ocm_map: OCMMap used to resolve the terraform assume-role
        (only consulted when a connection has no explicit assumeRole)
    :param awsapi: AWSApi client used for the VPC and TGW lookups
    :return: (desired_state, error) tuple; error is True when a cluster's
             VPC ID could not be resolved
    """
    desired_state = []
    error = False

    for cluster_info in clusters:
        cluster = cluster_info["name"]
        ocm = ocm_map.get(cluster)
        peering_info = cluster_info["peering"]
        peer_connections = peering_info["connections"]
        for peer_connection in peer_connections:
            # We only care about account-tgw peering providers
            peer_connection_provider = peer_connection["provider"]
            if peer_connection_provider != "account-tgw":
                continue
            # accepter is the cluster's AWS account
            cluster_region = cluster_info["spec"]["region"]
            cluster_cidr_block = cluster_info["network"]["vpc"]
            accepter = {
                "cidr_block": cluster_cidr_block,
                "region": cluster_region
            }

            account = peer_connection["account"]
            # assume_role is the role to assume to provision the
            # peering connection request, through the accepter AWS account.
            provided_assume_role = peer_connection.get("assumeRole")
            # if an assume_role is provided, it means we don't need
            # to get the information from OCM. it likely means that
            # there is no OCM at all.
            if provided_assume_role:
                account["assume_role"] = provided_assume_role
            else:
                account["assume_role"] = \
                    ocm.get_aws_infrastructure_access_terraform_assume_role(
                        cluster, account["uid"], account["terraformUsername"])
            account["assume_region"] = accepter["region"]
            account["assume_cidr"] = accepter["cidr_block"]
            (
                accepter_vpc_id,
                accepter_route_table_ids,
                accepter_subnets_id_az,
            ) = awsapi.get_cluster_vpc_details(
                account,
                route_tables=peer_connection.get("manageRoutes"),
                subnets=True,
            )

            if accepter_vpc_id is None:
                # fixed: closing bracket was missing from the log message
                logging.error(f"[{cluster}] could not find VPC ID for cluster")
                error = True
                continue
            accepter["vpc_id"] = accepter_vpc_id
            accepter["route_table_ids"] = accepter_route_table_ids
            accepter["subnets_id_az"] = accepter_subnets_id_az
            accepter["account"] = account

            # requester side: one desired-state item per TGW found
            account_tgws = awsapi.get_tgws_details(
                account,
                cluster_region,
                cluster_cidr_block,
                tags=json.loads(peer_connection.get("tags") or "{}"),
                route_tables=peer_connection.get("manageRoutes"),
                security_groups=peer_connection.get("manageSecurityGroups"),
            )
            for tgw in account_tgws:
                tgw_id = tgw["tgw_id"]
                connection_name = (f"{peer_connection['name']}_" +
                                   f"{account['name']}-{tgw_id}")
                requester = {
                    "tgw_id": tgw_id,
                    "tgw_arn": tgw["tgw_arn"],
                    "region": tgw["region"],
                    "routes": tgw.get("routes"),
                    "rules": tgw.get("rules"),
                    "cidr_block": peer_connection.get("cidrBlock"),
                    "account": account,
                }
                item = {
                    "connection_provider": peer_connection_provider,
                    "connection_name": connection_name,
                    "requester": requester,
                    "accepter": accepter,
                    "deleted": peer_connection.get("delete", False),
                }
                desired_state.append(item)

    return desired_state, error
Exemplo n.º 30
0
class OcpReleaseMirror:
    """Mirror OCP release images to ECR repositories and Quay organizations.

    Discovers releases from the hive cluster's ClusterImageSet objects and
    mirrors each enabled release (filtered by channel group) to:
      * the two ECR repositories configured in the instance, and
      * every configured Quay target organization,
    using ``oc adm release mirror`` with a merged registry credentials dict.
    """

    def __init__(self, dry_run, instance):
        """Build all clients and credentials needed for mirroring.

        :param dry_run: when True, run() logs what would happen but does
                        not execute the actual mirror command.
        :param instance: app-interface instance config dict (hive cluster,
                         ECR identifiers, Quay target orgs, channels).
        :raises OcpReleaseMirrorError: when any required client, resource
                                       or repository cannot be resolved.
        """
        self.dry_run = dry_run
        self.settings = queries.get_app_interface_settings()

        cluster_info = instance['hiveCluster']
        hive_cluster = instance['hiveCluster']['name']

        # Getting the OCM Client for the hive cluster
        ocm_map = OCMMap(clusters=[cluster_info],
                         integration=QONTRACT_INTEGRATION,
                         settings=self.settings)

        self.ocm_cli = ocm_map.get(hive_cluster)
        if not self.ocm_cli:
            raise OcpReleaseMirrorError(f"Can't create ocm client for "
                                        f"cluster {hive_cluster}")

        # Getting the OC Client for the hive cluster
        oc_map = OC_Map(clusters=[cluster_info],
                        integration=QONTRACT_INTEGRATION,
                        settings=self.settings)
        self.oc_cli = oc_map.get(hive_cluster)
        if not self.oc_cli:
            raise OcpReleaseMirrorError(f"Can't create oc client for "
                                        f"cluster {hive_cluster}")

        namespace = instance['ecrResourcesNamespace']
        ocp_release_identifier = instance['ocpReleaseEcrIdentifier']
        ocp_art_dev_identifier = instance['ocpArtDevEcrIdentifier']

        # _get_tf_resource_info only matches provider == 'ecr', so the
        # error messages below name 'ecr' (the original said 'rds').
        ocp_release_info = self._get_tf_resource_info(namespace,
                                                      ocp_release_identifier)
        if ocp_release_info is None:
            raise OcpReleaseMirrorError(f"Could not find ecr "
                                        f"identifier "
                                        f"{ocp_release_identifier} in "
                                        f"namespace {namespace['name']}")

        ocp_art_dev_info = self._get_tf_resource_info(namespace,
                                                      ocp_art_dev_identifier)
        if ocp_art_dev_info is None:
            raise OcpReleaseMirrorError(f"Could not find ecr identifier"
                                        f" {ocp_art_dev_identifier} in "
                                        f"namespace {namespace['name']}")

        # Getting the AWS Client for the accounts
        aws_accounts = [
            self._get_aws_account_info(account=ocp_release_info['account']),
            self._get_aws_account_info(account=ocp_art_dev_info['account'])
        ]
        self.aws_cli = AWSApi(thread_pool_size=1,
                              accounts=aws_accounts,
                              settings=self.settings,
                              init_ecr_auth_tokens=True)
        self.aws_cli.map_ecr_resources()

        self.ocp_release_ecr_uri = self._get_image_uri(
            account=ocp_release_info['account'],
            repository=ocp_release_identifier)
        if self.ocp_release_ecr_uri is None:
            raise OcpReleaseMirrorError(f"Could not find the "
                                        f"ECR repository "
                                        f"{ocp_release_identifier}")

        self.ocp_art_dev_ecr_uri = self._get_image_uri(
            account=ocp_art_dev_info['account'],
            repository=ocp_art_dev_identifier)
        if self.ocp_art_dev_ecr_uri is None:
            raise OcpReleaseMirrorError(f"Could not find the "
                                        f"ECR repository "
                                        f"{ocp_art_dev_identifier}")

        # Process all the quayOrgTargets
        quay_api_store = get_quay_api_store()
        self.quay_target_orgs = []
        for quayTargetOrg in instance['quayTargetOrgs']:
            org_name = quayTargetOrg['name']
            instance_name = quayTargetOrg['instance']['name']
            org_key = OrgKey(instance_name, org_name)
            org_info = quay_api_store[org_key]

            if not org_info['push_token']:
                raise OcpReleaseMirrorError(
                    f'{org_key} has no push_token defined.')

            url = org_info['url']
            user = org_info['push_token']['user']
            token = org_info['push_token']['token']

            self.quay_target_orgs.append({
                'url':
                url,
                'dest_ocp_release':
                f"{url}/{org_name}/ocp-release",
                'dest_ocp_art_dev':
                f"{url}/{org_name}/ocp-v4.0-art-dev",
                'auths':
                self._build_quay_auths(url, user, token)
            })

        # Getting all the credentials
        quay_creds = self._get_quay_creds()
        ocp_release_creds = self._get_ecr_creds(
            account=ocp_release_info['account'],
            region=ocp_release_info['region'])
        ocp_art_dev_creds = self._get_ecr_creds(
            account=ocp_art_dev_info['account'],
            region=ocp_art_dev_info['region'])

        # Creating a single dictionary with all credentials to be used by the
        # "oc adm release mirror" command
        self.registry_creds = {
            'auths': {
                **quay_creds['auths'],
                **ocp_release_creds['auths'],
                **ocp_art_dev_creds['auths'],
            }
        }

        # Append quay_target_orgs auths to registry_creds
        for quay_target_org in self.quay_target_orgs:
            url = quay_target_org['url']

            if url in self.registry_creds['auths'].keys():
                # BUGFIX: the original built this exception but never
                # raised it, silently overwriting credentials instead.
                raise OcpReleaseMirrorError(
                    'Cannot mirror to the same Quay '
                    f'instance multiple times: {url}')

            self.registry_creds['auths'].update(quay_target_org['auths'])

        # Initiate channel groups
        self.channel_groups = instance['mirrorChannels']

    def run(self):
        """Mirror every discovered OCP release to ECR and all Quay orgs.

        :raises RuntimeError: when no eligible ClusterImageSet is found.
        """
        ocp_releases = self._get_ocp_releases()
        if not ocp_releases:
            raise RuntimeError('No OCP Releases found')

        for ocp_release_info in ocp_releases:
            ocp_release = ocp_release_info.ocp_release
            tag = ocp_release_info.tag

            # mirror to ecr
            dest_ocp_release = f'{self.ocp_release_ecr_uri}:{tag}'
            self._run_mirror(ocp_release=ocp_release,
                             dest_ocp_release=dest_ocp_release,
                             dest_ocp_art_dev=self.ocp_art_dev_ecr_uri)

            # mirror to all quay target orgs
            for quay_target_org in self.quay_target_orgs:
                dest_ocp_release = (f'{quay_target_org["dest_ocp_release"]}:'
                                    f'{tag}')
                dest_ocp_art_dev = quay_target_org["dest_ocp_art_dev"]
                self._run_mirror(ocp_release=ocp_release,
                                 dest_ocp_release=dest_ocp_release,
                                 dest_ocp_art_dev=dest_ocp_art_dev)

    def _run_mirror(self, ocp_release, dest_ocp_release, dest_ocp_art_dev):
        """Run one ``oc adm release mirror``, skipping if already mirrored."""
        # Checking if the image is already there
        if self._is_image_there(dest_ocp_release):
            LOG.debug(f'Image {ocp_release} already in '
                      f'the mirror. Skipping.')
            return

        LOG.info(f'Mirroring {ocp_release} to {dest_ocp_art_dev} '
                 f'to_release {dest_ocp_release}')

        if self.dry_run:
            return

        # Creating a new, bare, OC client since we don't
        # want to run this against any cluster or via
        # a jump host
        oc_cli = OC('cluster', None, None, local=True)
        oc_cli.release_mirror(from_release=ocp_release,
                              to=dest_ocp_art_dev,
                              to_release=dest_ocp_release,
                              dockerconfig=self.registry_creds)

    def _is_image_there(self, image):
        """Return True when *image* exists in its registry.

        Finds the credentials whose registry host matches the image's
        registry, then relies on Image truthiness to probe existence.
        """
        image_obj = Image(image)

        for registry, creds in self.registry_creds['auths'].items():
            # urlparse needs a scheme-relative form to populate netloc
            if '//' not in registry:
                registry = '//' + registry
            registry_obj = urlparse(registry)

            if registry_obj.netloc != image_obj.registry:
                continue
            image_obj.auth = (creds['username'], creds['password'])

            # Checking if the image is already
            # in the registry
            if image_obj:
                return True

        return False

    @staticmethod
    def _get_aws_account_info(account):
        """Return the app-interface account dict named *account*, or None."""
        for account_info in queries.get_aws_accounts():
            if 'name' not in account_info:
                continue
            if account_info['name'] != account:
                continue
            return account_info

    def _get_ocp_releases(self):
        """Return OcpReleaseInfo for every eligible ClusterImageSet.

        Eligible means: image comes from quay.io, the set is enabled, and
        its channel group is one of self.channel_groups.
        """
        ocp_releases = []
        clusterimagesets = self.oc_cli.get_all('ClusterImageSet')
        for clusterimageset in clusterimagesets['items']:
            release_image = clusterimageset['spec']['releaseImage']
            # There are images in some ClusterImagesSets not coming
            # from quay.io, e.g.:
            # registry.svc.ci.openshift.org/ocp/release:4.2.0-0.nightly-2020-11-04-053758
            # Let's filter out everything not from quay.io
            if not release_image.startswith('quay.io'):
                continue
            labels = clusterimageset['metadata']['labels']
            name = clusterimageset['metadata']['name']
            # ClusterImagesSets may be enabled or disabled.
            # Let's only mirror enabled ones
            enabled = labels['api.openshift.com/enabled']
            if enabled == 'false':
                continue
            # ClusterImageSets may be in different channels.
            channel_group = labels['api.openshift.com/channel-group']
            if channel_group not in self.channel_groups:
                continue
            ocp_releases.append(OcpReleaseInfo(release_image, name))
        return ocp_releases

    def _get_quay_creds(self):
        """Return the cluster pull secrets from OCM (dockerconfig form)."""
        return self.ocm_cli.get_pull_secrets()

    def _get_ecr_creds(self, account, region):
        """Build a dockerconfig-style auths dict for one ECR registry.

        Falls back to the account's resourcesDefaultRegion when *region*
        is None. The password is the second field of the base64-decoded
        ECR authorization token.
        """
        if region is None:
            region = self.aws_cli.accounts[account]['resourcesDefaultRegion']
        auth_token = f'{account}/{region}'
        data = self.aws_cli.auth_tokens[auth_token]
        auth_data = data['authorizationData'][0]
        server = auth_data['proxyEndpoint']
        token = auth_data['authorizationToken']
        password = base64.b64decode(token).decode('utf-8').split(':')[1]

        return {
            'auths': {
                server: {
                    'username': '******',
                    'password': password,
                    'email': '*****@*****.**',
                    'auth': token
                }
            }
        }

    @staticmethod
    def _get_tf_resource_info(namespace, identifier):
        """Return {'account', 'region'} for the ECR terraform resource
        matching *identifier* in *namespace*, or None when absent.
        """
        tf_resources = namespace['terraformResources']
        for tf_resource in tf_resources:
            if 'identifier' not in tf_resource:
                continue

            if tf_resource['identifier'] != identifier:
                continue

            # only ECR repositories are relevant here
            if tf_resource['provider'] != 'ecr':
                continue

            return {
                'account': tf_resource['account'],
                'region': tf_resource.get('region'),
            }

    def _get_image_uri(self, account, repository):
        """Return the ECR repositoryUri for *repository*, or None."""
        for repo in self.aws_cli.resources[account]['ecr']:
            if repo['repositoryName'] == repository:
                return repo['repositoryUri']

    @staticmethod
    def _build_quay_auths(url, user, token):
        """Build a dockerconfig auths entry for one Quay registry."""
        auth_bytes = bytes(f"{user}:{token}", 'utf-8')
        auth = base64.b64encode(auth_bytes).decode('utf-8')
        return {
            url: {
                'username': user,
                'password': token,
                'email': '',
                'auth': auth
            }
        }