def main():
    args = handle_arguments()

    if args.sosreport:
        gather_sosreport_data(output_dir=args.dest)

    client = create_client(url=args.inventory_url, timeout=CONNECTION_TIMEOUT)
    if args.cluster_id:
        cluster = client.cluster_get(args.cluster_id)
        # Round-trip through JSON to flatten the cluster model into plain,
        # JSON-serializable types before handing it to download_logs.
        download_logs(client, json.loads(json.dumps(cluster.to_dict(), sort_keys=True, default=str)), args.dest,
                      args.must_gather, args.update_by_events, pull_secret=args.pull_secret)
    else:
        clusters = get_clusters(client, args.download_all)

        if not clusters:
            log.info('No clusters were found')
            return

        for cluster in clusters:
            if args.download_all or should_download_logs(cluster):
                download_logs(client, cluster, args.dest, args.must_gather, args.update_by_events,
                              pull_secret=args.pull_secret)

        log.info("Cluster installation statuses: %s",
                 dict(Counter(cluster["status"] for cluster in clusters).items()))
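Each example is an excerpt from a larger module, so imports are omitted. Below is a minimal sketch of the standard-library names the example above relies on, plus the usual entry-point guard; the remaining helpers (handle_arguments, create_client, download_logs, get_clusters, should_download_logs, gather_sosreport_data) come from the surrounding project, and the timeout value is only a placeholder:

import json
import logging
from collections import Counter

log = logging.getLogger(__name__)

# Placeholder value; the real constant is defined by the source project.
CONNECTION_TIMEOUT = 30

if __name__ == '__main__':
    main()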
Example #2
def execute_day2_flow(cluster_id, args, day2_type_flag, has_ipv4):
    utils.recreate_folder(consts.IMAGE_FOLDER, force_recreate=False)
    client = assisted_service_api.create_client(
        url=utils.get_assisted_service_url_by_args(args=args))
    cluster = client.cluster_get(cluster_id=cluster_id)
    cluster_name = cluster.name
    openshift_version = cluster.openshift_version
    api_vip_dnsname = "api." + cluster_name + "." + cluster.base_dns_domain
    api_vip_ip = cluster.api_vip
    terraform_cluster_dir_prefix = cluster_name
    if day2_type_flag == "ocp":
        terraform_cluster_dir_prefix = "test-infra-cluster-assisted-installer"
    else:
        cluster_id = str(uuid.uuid4())
        copy_proxy_from_cluster = cluster
        cluster = client.create_day2_cluster(
            cluster_name + "-day2", cluster_id,
            **_day2_cluster_create_params(openshift_version, api_vip_dnsname))
        set_cluster_pull_secret(client, cluster_id, args.pull_secret)
        set_cluster_proxy(client, cluster_id, copy_proxy_from_cluster, args)

    config_etc_hosts(api_vip_ip, api_vip_dnsname)
    image_path = os.path.join(consts.IMAGE_FOLDER,
                              f'{args.namespace}-installer-image.iso')
    client.generate_and_download_image(
        cluster_id=cluster.id,
        image_path=image_path,
        ssh_key=args.ssh_key,
    )

    day2_nodes_flow(client, terraform_cluster_dir_prefix, cluster, has_ipv4,
                    args.number_of_day2_workers, api_vip_ip, api_vip_dnsname,
                    args.namespace, args.install_cluster, day2_type_flag)
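_day2_cluster_create_params is not shown in the excerpt; the call site only guarantees that it returns a mapping suitable for ** expansion into create_day2_cluster. A hypothetical reconstruction, assuming the service accepts exactly these two fields:

def _day2_cluster_create_params(openshift_version, api_vip_dnsname):
    # Hypothetical sketch; the real helper lives in the source project.
    return {
        "openshift_version": openshift_version,
        "api_vip_dnsname": api_vip_dnsname,
    }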
Example #3
def get_api_client(offline_token=env_variables['offline_token'], **kwargs):
    url = env_variables['remote_service_url']

    if not url:
        url = utils.get_local_assisted_service_url(
            utils.get_env('PROFILE'), utils.get_env('NAMESPACE'), 'assisted-service', utils.get_env('DEPLOY_TARGET'))
    
    return assisted_service_api.create_client(url, offline_token, **kwargs)
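Note that the offline_token default above is bound once, when the defining module is imported. If the token should be re-read on every call instead, a common alternative looks like this (a sketch, not the project's code):

def get_api_client(offline_token=None, **kwargs):
    # Resolve the token and URL at call time rather than at import time.
    offline_token = offline_token or env_variables['offline_token']
    url = env_variables['remote_service_url'] or utils.get_local_assisted_service_url(
        utils.get_env('PROFILE'), utils.get_env('NAMESPACE'),
        'assisted-service', utils.get_env('DEPLOY_TARGET'))
    return assisted_service_api.create_client(url, offline_token, **kwargs)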
Example #4
def get_ocp_cluster(args):
    if not args.cluster_id:
        cluster_name = f'{args.cluster_name or consts.CLUSTER_PREFIX}-{args.namespace}'
        tf_folder = utils.get_tf_folder(cluster_name, args.namespace)
        args.cluster_id = utils.get_tfvars(tf_folder).get('cluster_inventory_id')
    client = assisted_service_api.create_client(
        url=utils.get_assisted_service_url_by_args(args=args))
    return client.cluster_get(cluster_id=args.cluster_id)
Example #5
def api_client():
    url = env_variables['remote_service_url']

    if not url:
        url = utils.get_local_assisted_service_url(
            utils.get_env('PROFILE'), utils.get_env('NAMESPACE'),
            'assisted-service', utils.get_env('DEPLOY_TARGET'))

    yield assisted_service_api.create_client(url)
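api_client yields instead of returning, which is the shape of a pytest fixture. A minimal sketch of how such a fixture is typically registered and consumed; the test is hypothetical and only calls clusters_list, which appears in the examples below:

import pytest

@pytest.fixture
def api_client():
    url = env_variables['remote_service_url']
    if not url:
        url = utils.get_local_assisted_service_url(
            utils.get_env('PROFILE'), utils.get_env('NAMESPACE'),
            'assisted-service', utils.get_env('DEPLOY_TARGET'))
    yield assisted_service_api.create_client(url)

def test_clusters_endpoint(api_client):
    # pytest injects the client yielded by the fixture above.
    assert isinstance(api_client.clusters_list(), list)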
Example #6
def execute_day2_flow(cluster_id, args, day2_type_flag, has_ipv6):
    utils.recreate_folder(consts.IMAGE_FOLDER, force_recreate=False)
    client = assisted_service_api.create_client(
        url=utils.get_assisted_service_url_by_args(args=args))
    cluster = client.cluster_get(cluster_id=cluster_id)
    cluster_name = cluster.name
    openshift_version = cluster.openshift_version
    api_vip_dnsname = "api." + cluster_name + "." + cluster.base_dns_domain
    api_vip_ip = cluster.api_vip
    terraform_cluster_dir_prefix = cluster_name
    if day2_type_flag == "ocp":
        terraform_cluster_dir_prefix = f"{consts.CLUSTER_PREFIX}-{consts.DEFAULT_NAMESPACE}"
    else:
        cluster_id = str(uuid.uuid4())
        copy_proxy_from_cluster = cluster
        cluster = client.create_day2_cluster(
            cluster_name + "-day2", cluster_id,
            **_day2_cluster_create_params(openshift_version, api_vip_dnsname))
        set_cluster_pull_secret(client, cluster_id, args.pull_secret)
        set_cluster_proxy(client, cluster_id, copy_proxy_from_cluster, args)

    config_etc_hosts(api_vip_ip, api_vip_dnsname)
    image_path = os.path.join(consts.IMAGE_FOLDER,
                              f'{args.namespace}-installer-image.iso')

    tf_folder = utils.get_tf_folder(terraform_cluster_dir_prefix,
                                    args.namespace)
    set_day2_tf_configuration(tf_folder, args.number_of_day2_workers,
                              api_vip_ip, api_vip_dnsname)

    static_network_config = None
    if args.with_static_network_config:
        static_network_config = static_network.generate_day2_static_network_data_from_tf(
            tf_folder, args.number_of_day2_workers)

    client.generate_and_download_image(
        cluster_id=cluster.id,
        image_path=image_path,
        ssh_key=args.ssh_key,
        static_network_config=static_network_config)

    day2_nodes_flow(
        client,
        terraform_cluster_dir_prefix,
        tf_folder,
        cluster,
        has_ipv6,
        args.number_of_day2_workers,
        api_vip_ip,
        api_vip_dnsname,
        args.install_cluster,
        day2_type_flag,
        args.with_static_network_config,
        cluster_name,
    )
Example #7
def try_get_cluster():
    if args.cluster_id:
        try:
            client = assisted_service_api.create_client(
                url=utils.get_assisted_service_url_by_args(args=args))
            return client, client.cluster_get(cluster_id=args.cluster_id)

        except ApiException as e:
            log.warning(f"Can't retrieve cluster_id={args.cluster_id}, {e}")

    return None, None
Example #8
    # Excerpted __init__ method; the enclosing class statement is not part of this snippet.
    def __init__(self, inventory_url: str, es_server: str, es_user: str,
                 es_pass: str, backup_destination: str):
        self.client = create_client(url=inventory_url)

        self.index = INDEX
        self.es = elasticsearch.Elasticsearch(es_server,
                                              http_auth=(es_user, es_pass))

        self.backup_destination = backup_destination
        if self.backup_destination and not os.path.exists(
                self.backup_destination):
            os.makedirs(self.backup_destination)
Example #9
def try_to_delete_cluster(namespace, tfvars):
    """Try to delete the cluster if assisted-service is up and such a
    cluster exists.
    """
    cluster_id = tfvars.get('cluster_inventory_id')
    if args.kube_api or not cluster_id:
        return

    args.namespace = namespace
    client = assisted_service_api.create_client(
        url=utils.get_assisted_service_url_by_args(args=args, wait=False))
    client.delete_cluster(cluster_id=cluster_id)
Example #10
def execute_day1_flow(cluster_name):
    client = None
    cluster = {}
    if args.managed_dns_domains:
        args.base_dns_domain = args.managed_dns_domains.split(":")[0]

    if not args.vm_network_cidr:
        net_cidr = IPNetwork('192.168.126.0/24')
        net_cidr += args.ns_index
        args.vm_network_cidr = str(net_cidr)

    if not args.vm_network_cidr6:
        net_cidr = IPNetwork('1001:db8::/120')
        net_cidr += args.ns_index
        args.vm_network_cidr6 = str(net_cidr)

    if not args.network_bridge:
        args.network_bridge = f'tt{args.ns_index}'

    image_path = None

    if not args.image:
        utils.recreate_folder(consts.IMAGE_FOLDER, force_recreate=False)
        client = assisted_service_api.create_client(
            url=utils.get_assisted_service_url_by_args(args=args))
        if args.cluster_id:
            cluster = client.cluster_get(cluster_id=args.cluster_id)
        else:
            cluster = client.create_cluster(cluster_name,
                                            ssh_public_key=args.ssh_key,
                                            **_cluster_create_params())

        image_path = os.path.join(consts.IMAGE_FOLDER,
                                  f'{args.namespace}-installer-image.iso')
        client.generate_and_download_image(
            cluster_id=cluster.id,
            image_path=image_path,
            ssh_key=args.ssh_key,
        )

    # ISO only: the cluster will be created and the ISO downloaded, but no VMs will be created
    if not args.iso_only:
        try:
            nodes_flow(client, cluster_name, cluster, args.image or image_path)
        finally:
            if not image_path or args.keep_iso:
                return
            log.info('deleting iso: %s', image_path)
            os.unlink(image_path)

    return cluster.id
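The per-namespace subnet shift above (net_cidr += args.ns_index) relies on netaddr's IPNetwork arithmetic: adding N advances the network by N subnets of the same prefix length. A minimal sketch:

from netaddr import IPNetwork

net = IPNetwork('192.168.126.0/24')
net += 2
print(net)  # 192.168.128.0/24 -- shifted forward by two /24 subnets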
Example #11
def execute_day1_flow():
    client, cluster = try_get_cluster()
    cluster_name = f'{args.cluster_name or consts.CLUSTER_PREFIX}-{args.namespace}'

    if cluster:
        args.base_dns_domain = cluster.base_dns_domain
        cluster_name = cluster.name

    elif args.managed_dns_domains:
        args.base_dns_domain = args.managed_dns_domains.split(":")[0]

    log.info('Cluster name: %s', cluster_name)

    machine_net = MachineNetwork(args.ipv4, args.ipv6, args.vm_network_cidr,
                                 args.vm_network_cidr6, args.ns_index)
    image_path = os.path.join(consts.IMAGE_FOLDER,
                              f'{args.namespace}-installer-image.iso')
    set_tf_config(cluster_name)

    if not args.image:
        utils.recreate_folder(consts.IMAGE_FOLDER, force_recreate=False)
        if not client:
            client = assisted_service_api.create_client(
                url=utils.get_assisted_service_url_by_args(args=args))
        if args.cluster_id:
            cluster = client.cluster_get(cluster_id=args.cluster_id)
        else:
            cluster = client.create_cluster(cluster_name,
                                            ssh_public_key=args.ssh_key,
                                            **_cluster_create_params())

        static_network_config = apply_static_network_config(
            cluster_name=cluster_name,
            kube_client=None,
        )

        client.generate_and_download_image(
            cluster_id=cluster.id,
            image_path=image_path,
            image_type=args.iso_image_type,
            ssh_key=args.ssh_key,
            static_network_config=static_network_config,
        )

    # ISO only: the cluster will be created and the ISO downloaded, but no VMs will be created
    if not args.iso_only:
        run_nodes_flow(client, cluster_name, cluster, machine_net, image_path)

    return cluster.id
Example #12
def main():
    args = handle_arguments()
    client = create_client(url=args.inventory_url)

    if args.cluster_id:
        cluster = client.cluster_get(args.cluster_id)
        # Round-trip through JSON to flatten the cluster model into plain, JSON-serializable types.
        download_logs(client, json.loads(json.dumps(cluster.to_dict(), sort_keys=True, default=str)),
                      args.dest, args.must_gather)
    else:
        clusters = client.clusters_list()

        if not clusters:
            log.info('No clusters were found')
            return

        for cluster in clusters:
            if args.download_all or should_download_logs(cluster):
                download_logs(client, cluster, args.dest, args.must_gather)

        print(Counter(map(lambda cluster: cluster['status'], clusters)))
Example #13
def main():
    _verify_kube_download_folder(args.kubeconfig_path)
    log.info("Creating assisted service client")
    # if no cluster id is given, read it from the latest run's Terraform variables
    tf = None
    if not args.cluster_id:
        cluster_name = f'{args.cluster_name or consts.CLUSTER_PREFIX}-{args.namespace}'
        tf_folder = utils.get_tf_folder(cluster_name, args.namespace)
        args.cluster_id = utils.get_tfvars(tf_folder).get(
            'cluster_inventory_id')
        tf = terraform_utils.TerraformUtils(working_dir=tf_folder)

    client = assisted_service_api.create_client(
        url=utils.get_assisted_service_url_by_args(args=args, wait=False))
    run_install_flow(client=client,
                     cluster_id=args.cluster_id,
                     kubeconfig_path=args.kubeconfig_path,
                     pull_secret=args.pull_secret,
                     tf=tf)
Example #14
def execute_day1_flow(cluster_name):
    client = None
    cluster = {}
    if args.managed_dns_domains:
        args.base_dns_domain = args.managed_dns_domains.split(":")[0]

    if not args.vm_network_cidr:
        net_cidr = IPNetwork('192.168.126.0/24')
        net_cidr += args.ns_index
        args.vm_network_cidr = str(net_cidr)

    if not args.vm_network_cidr6:
        net_cidr = IPNetwork('1001:db8::/120')
        net_cidr += args.ns_index
        args.vm_network_cidr6 = str(net_cidr)

    if not args.network_bridge:
        args.network_bridge = f'tt{args.ns_index}'

    set_tf_config(cluster_name)
    image_path = None
    image_url = None
    image_type = args.iso_image_type
    kube_client = None
    cluster_deployment = None

    machine_net = MachineNetwork(args.ipv4, args.ipv6, args.vm_network_cidr,
                                 args.vm_network_cidr6, args.ns_index)

    if not args.image:
        utils.recreate_folder(consts.IMAGE_FOLDER, force_recreate=False)
        client = assisted_service_api.create_client(
            url=utils.get_assisted_service_url_by_args(args=args))
        if args.cluster_id:
            cluster = client.cluster_get(cluster_id=args.cluster_id)

        elif args.kube_api:
            kube_client = create_kube_api_client(
                str(pathlib.Path("~/.kube/config").expanduser()))
            cluster_deployment = ClusterDeployment(kube_api_client=kube_client,
                                                   name=cluster_name,
                                                   namespace=args.namespace)

            secret = Secret(
                kube_api_client=kube_client,
                name=cluster_name,
                namespace=args.namespace,
            )
            with contextlib.suppress(ApiException):
                secret.delete()

            secret.create(pull_secret=args.pull_secret)

            ipv4 = args.ipv4 and args.ipv4.lower() in MachineNetwork.YES_VALUES
            ipv6 = args.ipv6 and args.ipv6.lower() in MachineNetwork.YES_VALUES
            api_vip, ingress_vip = "", ""

            with contextlib.suppress(ApiException):
                cluster_deployment.delete()

            cluster_deployment.create(
                platform=Platform(
                    api_vip=api_vip,
                    ingress_vip=ingress_vip,
                ),
                install_strategy=InstallStrategy(
                    host_prefix=args.host_prefix
                    if ipv4 else args.host_prefix6,
                    machine_cidr=machine_net.machine_cidr_addresses[0],
                    cluster_cidr=args.cluster_network
                    if ipv4 else args.cluster_network6,
                    service_cidr=args.service_network
                    if ipv4 else args.service_network6,
                    ssh_public_key=args.ssh_key,
                    control_plane_agents=args.master_count,
                    worker_agents=args.number_of_workers,
                ),
                secret=secret,
                base_domain=args.base_dns_domain,
            )
            cluster_deployment.wait_for_state("insufficient")

            http_proxy, https_proxy, no_proxy = _get_http_proxy_params(
                ipv4=ipv4, ipv6=ipv6)
            install_env = InstallEnv(kube_api_client=kube_client,
                                     name=f"{cluster_name}-install-env",
                                     namespace=args.namespace)
            with contextlib.suppress(ApiException):
                install_env.delete()

            install_env.create(cluster_deployment=cluster_deployment,
                               secret=secret,
                               proxy=Proxy(http_proxy=http_proxy,
                                           https_proxy=https_proxy,
                                           no_proxy=no_proxy))
            install_env.status()
            image_url = install_env.get_iso_download_url()
            cluster = client.cluster_get(
                cluster_id=install_env.get_cluster_id())

        else:
            cluster = client.create_cluster(cluster_name,
                                            ssh_public_key=args.ssh_key,
                                            **_cluster_create_params())

        image_path = os.path.join(consts.IMAGE_FOLDER,
                                  f'{args.namespace}-installer-image.iso')

        if args.with_static_network_config:
            tf_folder = utils.get_tf_folder(cluster_name, args.namespace)
            static_network_config = static_network.generate_static_network_data_from_tf(
                tf_folder)
        else:
            static_network_config = None

        if image_url is not None:
            utils.download_iso(image_url, image_path)
        else:
            client.generate_and_download_image(
                cluster_id=cluster.id,
                image_path=image_path,
                image_type=image_type,
                ssh_key=args.ssh_key,
                static_network_config=static_network_config,
            )

    # ISO only: the cluster will be created and the ISO downloaded, but no VMs will be created
    if not args.iso_only:
        try:
            nodes_flow(client, cluster_name, cluster, machine_net, kube_client,
                       cluster_deployment)
        finally:
            if not image_path or args.keep_iso:
                return
            log.info('deleting iso: %s', image_path)
            os.unlink(image_path)

    return cluster.id
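The delete-then-create sequences wrapped in contextlib.suppress(ApiException) make the kube-api path tolerant of leftovers from earlier runs: a stale Secret, ClusterDeployment or InstallEnv is deleted if present, and the API error is swallowed if it is not. A generic sketch of the pattern, with a stand-in resource and exception type:

import contextlib

class ApiException(Exception):
    """Stand-in for the client library's error type."""

def recreate(resource, **create_kwargs):
    # Delete any leftover object, ignoring API errors from the attempt,
    # then create it fresh.
    with contextlib.suppress(ApiException):
        resource.delete()
    resource.create(**create_kwargs)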