# Example #1
def execute_day2_flow(cluster_id, args, day2_type_flag, has_ipv6):
    """Run the day-2 (add-workers) flow against an existing cluster.

    Fetches the day-1 cluster, prepares the terraform configuration for the
    extra workers, generates and downloads a discovery ISO, and delegates
    node creation/installation to ``day2_nodes_flow``.

    :param cluster_id: id of the existing day-1 cluster.
    :param args: parsed CLI arguments (namespace, ssh_key, pull_secret, ...).
    :param day2_type_flag: ``"ocp"`` reuses the day-1 terraform directory;
        any other value registers a separate day-2 cluster under a fresh uuid.
    :param has_ipv6: forwarded to ``day2_nodes_flow``.
    """
    utils.recreate_folder(consts.IMAGE_FOLDER, force_recreate=False)

    client = ClientFactory.create_client(
        url=utils.get_assisted_service_url_by_args(args=args),
        offline_token=utils.get_env("OFFLINE_TOKEN"))

    cluster = client.cluster_get(cluster_id=cluster_id)
    cluster_name = cluster.name
    openshift_version = cluster.openshift_version
    # API endpoint of the day-1 cluster; new day-2 hosts join through it.
    api_vip_dnsname = "api." + cluster_name + "." + cluster.base_dns_domain
    api_vip_ip = cluster.api_vip
    terraform_cluster_dir_prefix = cluster_name
    if day2_type_flag == "ocp":
        terraform_cluster_dir_prefix = f"{consts.CLUSTER_PREFIX}-{consts.DEFAULT_NAMESPACE}"
    else:
        # Register a dedicated day-2 cluster under a new id, then copy the
        # pull secret and proxy settings over from the day-1 cluster.
        cluster_id = str(uuid.uuid4())
        copy_proxy_from_cluster = cluster
        cluster = client.create_day2_cluster(
            cluster_name + "-day2", cluster_id,
            **_day2_cluster_create_params(openshift_version, api_vip_dnsname))
        set_cluster_pull_secret(client, cluster_id, args.pull_secret)
        set_cluster_proxy(client, cluster_id, copy_proxy_from_cluster, args)

    config_etc_hosts(api_vip_ip, api_vip_dnsname)
    image_path = os.path.join(consts.IMAGE_FOLDER,
                              f'{args.namespace}-installer-image.iso')

    tf_folder = os.path.join(
        utils.get_tf_folder(terraform_cluster_dir_prefix, args.namespace),
        consts.Platforms.BARE_METAL)
    set_day2_tf_configuration(tf_folder, args.number_of_day2_workers,
                              api_vip_ip, api_vip_dnsname)

    # Optional static network config for the workers, derived from terraform.
    static_network_config = None
    if args.with_static_network_config:
        static_network_config = static_network.generate_day2_static_network_data_from_tf(
            tf_folder, args.number_of_day2_workers)

    # Note: uses cluster.id (the day-2 cluster when one was created above).
    client.generate_and_download_image(
        cluster_id=cluster.id,
        image_path=image_path,
        ssh_key=args.ssh_key,
        static_network_config=static_network_config)

    day2_nodes_flow(
        client,
        terraform_cluster_dir_prefix,
        tf_folder,
        cluster,
        has_ipv6,
        args.number_of_day2_workers,
        api_vip_ip,
        api_vip_dnsname,
        args.install_cluster,
        day2_type_flag,
        args.with_static_network_config,
        cluster_name,
    )
# Example #2
def get_api_client(offline_token=None, **kwargs) -> InventoryClient:
    """Build an InventoryClient for the assisted service.

    Prefers the configured remote service URL; when none is configured,
    resolves the URL of the locally deployed service instead.
    """
    token = offline_token or global_variables.offline_token
    service_url = global_variables.remote_service_url

    if not service_url:
        deploy_target = utils.get_env('DEPLOY_TARGET')
        service_url = utils.get_local_assisted_service_url(
            global_variables.namespace, 'assisted-service', deploy_target)

    return ClientFactory.create_client(service_url, token, **kwargs)
# Example #3
    def get_api_client(self, offline_token=None, **kwargs) -> InventoryClient:
        """Return an InventoryClient, resolving the service URL as needed.

        Falls back to the locally deployed assisted-service when no remote
        service URL is configured on this instance.
        """
        token = offline_token or self.offline_token
        service_url = self.remote_service_url

        if not service_url:
            deploy_target = utils.get_env("DEPLOY_TARGET")
            service_url = utils.get_local_assisted_service_url(
                self.namespace, "assisted-service", deploy_target
            )

        return ClientFactory.create_client(service_url, token, **kwargs)
def try_get_cluster():
    """Best-effort lookup of the cluster identified by ``args.cluster_id``.

    Returns a ``(client, cluster)`` pair on success, or ``(None, None)``
    when no cluster id was supplied or the API lookup fails (the failure
    is logged as a warning, not raised).
    """
    if not args.cluster_id:
        return None, None

    try:
        api_client = ClientFactory.create_client(
            url=utils.get_assisted_service_url_by_args(args=args),
            offline_token=utils.get_env("OFFLINE_TOKEN"))
        return api_client, api_client.cluster_get(cluster_id=args.cluster_id)
    except ApiException as e:
        log.warning(f"Can't retrieve cluster_id={args.cluster_id}, {e}")
        return None, None
# Example #5
    def __init__(self, inventory_url: str, offline_token: str, index: str, es_server: str, es_user:str, es_pass:str, backup_destination: str):
        """Wire up the assisted-service API client and the Elasticsearch sink.

        :param inventory_url: base URL of the assisted-service inventory API.
        :param offline_token: token used to authenticate the API client.
        :param es_server: Elasticsearch server address.
        :param es_user: Elasticsearch basic-auth username.
        :param es_pass: Elasticsearch basic-auth password.
        :param index: Elasticsearch index name to write into.
        :param backup_destination: local directory for backups; created here
            if set and missing. Falsy value disables backups.
        """
        self.inventory_url = inventory_url
        self.client = ClientFactory.create_client(url=self.inventory_url, offline_token=offline_token)

        self.index = index
        self.es = elasticsearch.Elasticsearch(es_server, http_auth=(es_user, es_pass))

        self.backup_destination = backup_destination
        if self.backup_destination and not os.path.exists(self.backup_destination):
            os.makedirs(self.backup_destination)

        # Cache of event counts keyed per cluster.  NOTE(review): populated
        # by methods outside this view; exact semantics not visible here.
        self.cache_event_count_per_cluster = dict()
def execute_day1_flow():
    """Run the day-1 flow: ensure a cluster exists, build/download the
    discovery ISO, and (unless iso-only) boot the nodes and install.

    Reads the module-level ``args`` and may mutate ``args.base_dns_domain``.

    :return: the cluster id, or None when no cluster was created or found.
    """
    client, cluster = try_get_cluster()
    cluster_name = f'{args.cluster_name or consts.CLUSTER_PREFIX}-{args.namespace}'

    if cluster:
        # Reuse naming/DNS details from the already-existing cluster.
        args.base_dns_domain = cluster.base_dns_domain
        cluster_name = cluster.name

    elif args.managed_dns_domains:
        # Take the domain part before the first ":".  NOTE(review): the
        # "domain:..." format is assumed from this split — confirm against
        # the argument parser.
        args.base_dns_domain = args.managed_dns_domains.split(":")[0]

    log.info('Cluster name: %s', cluster_name)

    machine_net = MachineNetwork(args.ipv4, args.ipv6, args.vm_network_cidr,
                                 args.vm_network_cidr6, args.ns_index)
    image_path = args.image or os.path.join(
        consts.IMAGE_FOLDER, f'{args.namespace}-installer-image.iso')
    set_tf_config(cluster_name)

    # A user-supplied --image skips cluster registration and ISO generation.
    if not args.image:
        utils.recreate_folder(consts.IMAGE_FOLDER, force_recreate=False)
        if not client:
            client = ClientFactory.create_client(
                url=utils.get_assisted_service_url_by_args(args=args),
                offline_token=utils.get_env("OFFLINE_TOKEN"))

        if args.cluster_id:
            cluster = client.cluster_get(cluster_id=args.cluster_id)
        else:
            cluster = client.create_cluster(cluster_name,
                                            ssh_public_key=args.ssh_key,
                                            **_cluster_create_params(client))

        static_network_config = apply_static_network_config(
            cluster_name=cluster_name,
            kube_client=None,
        )

        client.generate_and_download_image(
            cluster_id=cluster.id,
            image_path=image_path,
            image_type=args.iso_image_type,
            ssh_key=args.ssh_key,
            static_network_config=static_network_config,
        )

    # Iso only, cluster will be up and iso downloaded but vm will not be created
    if not args.iso_only:
        run_nodes_flow(client, cluster_name, cluster, machine_net, image_path)

    return cluster.id if cluster else None
# Example #7
def try_to_delete_cluster(namespace, tfvars):
    """Delete the cluster recorded in the terraform vars, when one exists.

    A no-op in kube-api mode or when ``tfvars`` carries no cluster id.
    Sets ``args.namespace`` as a side effect before resolving the service
    URL from ``args``.
    """
    inventory_id = tfvars.get('cluster_inventory_id')
    if args.kube_api or not inventory_id:
        return

    args.namespace = namespace
    api_client = ClientFactory.create_client(
        url=utils.get_assisted_service_url_by_args(args=args, wait=False),
        offline_token=utils.get_env("OFFLINE_TOKEN"))
    api_client.delete_cluster(cluster_id=inventory_id)
def main():
    """Entry point for the install flow.

    Resolves the cluster id (from args, or from the latest terraform run
    when not given) and hands off to ``run_install_flow``.
    """
    _verify_kube_download_folder(args.kubeconfig_path)
    log.info("Creating assisted service client")
    # If no cluster id was given, read it from the latest terraform run and
    # keep a TerraformUtils handle for that working dir.
    tf = None
    if not args.cluster_id:
        cluster_name = f'{args.cluster_name or consts.CLUSTER_PREFIX}-{args.namespace}'
        tf_folder = utils.get_tf_folder(cluster_name, args.namespace)
        args.cluster_id = utils.get_tfvars(tf_folder).get(
            'cluster_inventory_id')
        tf = terraform_utils.TerraformUtils(working_dir=tf_folder)

    client = ClientFactory.create_client(
        url=utils.get_assisted_service_url_by_args(args=args, wait=False),
        offline_token=utils.get_env("OFFLINE_TOKEN"))

    run_install_flow(client=client,
                     cluster_id=args.cluster_id,
                     kubeconfig_path=args.kubeconfig_path,
                     pull_secret=args.pull_secret,
                     tf=tf)
# Example #9
def main():
    """Download logs for one cluster (when --cluster-id is given) or for all
    matching clusters, optionally gathering sosreport data first, then log a
    summary of installation statuses.
    """
    args = handle_arguments()

    if args.sosreport:
        gather_sosreport_data(output_dir=args.dest)

    client = ClientFactory.create_client(
        url=args.inventory_url,
        timeout=CONNECTION_TIMEOUT,
        offline_token=utils.get_env("OFFLINE_TOKEN"))
    if args.cluster_id:
        # Round-trip through JSON to get a plain, sorted dict with all
        # non-serializable values stringified (default=str).
        cluster = client.cluster_get(args.cluster_id)
        download_cluster_logs(client,
                              json.loads(
                                  json.dumps(cluster.to_dict(),
                                             sort_keys=True,
                                             default=str)),
                              args.dest,
                              args.must_gather,
                              args.update_by_events,
                              pull_secret=args.pull_secret)
    else:
        clusters = get_clusters(client, args.download_all)

        if not clusters:
            log.info('No clusters were found')
            return

        for cluster in clusters:
            if args.download_all or should_download_logs(cluster):
                download_cluster_logs(client,
                                      cluster,
                                      args.dest,
                                      args.must_gather,
                                      args.update_by_events,
                                      pull_secret=args.pull_secret)

        # Summarize how many clusters are in each installation status.
        log.info(
            "Cluster installation statuses: %s",
            dict(Counter(cluster["status"] for cluster in clusters).items()))
# Example #10
    def __init__(self, inventory_url: str, type: str, offline_token: str):
        """Apply the manage action named *type* to all clusters older than
        the configured threshold.

        The action is looked up in manageable_options.yaml; the user is
        asked for confirmation before any cluster is processed.

        :raises ValueError: when *type* is not a known option.
        """
        self.client = ClientFactory.create_client(url=inventory_url,
                                                  offline_token=offline_token)

        with open("discovery-infra/manage/manageable_options.yaml", "r") as f:
            options = yaml.load(f, Loader=yaml.FullLoader)

        manage_config = options.get(type, None)
        if not manage_config:
            raise ValueError(
                f"{type} is not a valid manageable_options option")

        days_back = manage_config["days_back"]
        measure_field = manage_config["measure_field"]

        # Collect the ids of clusters older than the threshold.
        clusters_to_process = [
            cluster["id"]
            for cluster in self.get_clusters()
            if is_older_then(cluster[measure_field], days_back)
        ]

        log.info(f"Running {type} of {len(clusters_to_process)} clusters")

        if not query_yes_no():
            return

        # Resolve the configured client method once, then apply it per cluster.
        action = getattr(self.client, manage_config["method"])
        for cluster_id in clusters_to_process:
            try:
                action(cluster_id=cluster_id)
            except ApiException as e:
                log.warning(f"Can't process cluster_id={cluster_id}, {e}")
# Example #11
def create_kube_api_client(kubeconfig_path: Optional[str] = None) -> ApiClient:
    """Deprecated shim that forwards to ClientFactory.create_kube_api_client.

    Falls back to the installer kubeconfig path from ``env_variables`` when
    no path is supplied.
    """
    warnings.warn("create_kube_api_client is deprecated. Use ClientFactory.create_kube_api_client instead.",
                  DeprecationWarning)
    path = kubeconfig_path or env_variables["installer_kubeconfig_path"]
    return ClientFactory.create_kube_api_client(path)