def update_config(config, operators=None):
    """Scale the node-resource fields of *config* for the requested OLM operators.

    Each resource attribute is passed through ``operators_utils.resource_param``,
    which raises it to whatever the selected operators require.

    :param config: mutable config object with worker/master resource attributes;
        it is modified in place.
    :param operators: list of OLM operator names; when ``None`` the list is read
        from the environment.
    :return: the same *config* object, for call-chaining.
    """
    if operators is None:
        operators = operators_utils.parse_olm_operators_from_env()

    # (attribute name, operator-resource key) pairs; each attribute is read,
    # scaled for the active operators, and written back to the same attribute.
    # BUG FIX: the vCPU entries previously read config.worker_vcpu /
    # config.master_vcpu but wrote config.worker_cpu / config.master_cpu,
    # leaving the *_vcpu attributes (the ones every other consumer reads)
    # unscaled.  They now read and write worker_vcpu / master_vcpu, matching
    # the TerraformConfig variant of this function.
    resource_fields = (
        ("worker_memory", consts.OperatorResource.WORKER_MEMORY_KEY),
        ("master_memory", consts.OperatorResource.MASTER_MEMORY_KEY),
        ("worker_vcpu", consts.OperatorResource.WORKER_VCPU_KEY),
        ("master_vcpu", consts.OperatorResource.MASTER_VCPU_KEY),
        ("workers_count", consts.OperatorResource.WORKER_COUNT_KEY),
        ("worker_disk", consts.OperatorResource.WORKER_DISK_KEY),
        ("master_disk", consts.OperatorResource.MASTER_DISK_KEY),
        ("master_disk_count", consts.OperatorResource.MASTER_DISK_COUNT_KEY),
        ("worker_disk_count", consts.OperatorResource.WORKER_DISK_COUNT_KEY),
    )
    for attr, key in resource_fields:
        setattr(config, attr,
                operators_utils.resource_param(getattr(config, attr), key, operators))
    return config
def update_config(tf_config: TerraformConfig = None,
                  cluster_config: ClusterConfig = None,
                  operators=None):
    """Scale terraform and cluster resource settings for the requested OLM operators.

    :param tf_config: terraform node-resource config, modified in place;
        a fresh ``TerraformConfig()`` is created when ``None``.
    :param cluster_config: cluster config, modified in place;
        a fresh ``ClusterConfig()`` is created when ``None``.
    :param operators: list of OLM operator names; when ``None`` the list is read
        from the environment.
    :return: ``(tf_config, cluster_config)`` for convenience.

    BUG FIX: the defaults were ``TerraformConfig()`` / ``ClusterConfig()``,
    evaluated once at function-definition time — classic mutable-default
    arguments, so every no-arg call mutated the same shared instances.  The
    defaults are now ``None`` sentinels and fresh objects are built per call.
    """
    if tf_config is None:
        tf_config = TerraformConfig()
    if cluster_config is None:
        cluster_config = ClusterConfig()
    if operators is None:
        operators = parse_olm_operators_from_env()

    # Scale every terraform resource attribute for the active operators.
    tf_resource_fields = (
        ("worker_memory", OperatorResource.WORKER_MEMORY_KEY),
        ("master_memory", OperatorResource.MASTER_MEMORY_KEY),
        ("worker_vcpu", OperatorResource.WORKER_VCPU_KEY),
        ("master_vcpu", OperatorResource.MASTER_VCPU_KEY),
        ("workers_count", OperatorResource.WORKER_COUNT_KEY),
        ("worker_disk", OperatorResource.WORKER_DISK_KEY),
        ("master_disk", OperatorResource.MASTER_DISK_KEY),
        ("master_disk_count", OperatorResource.MASTER_DISK_COUNT_KEY),
        ("worker_disk_count", OperatorResource.WORKER_DISK_COUNT_KEY),
    )
    for attr, key in tf_resource_fields:
        setattr(tf_config, attr, resource_param(getattr(tf_config, attr), key, operators))

    cluster_config.workers_count = resource_param(
        cluster_config.workers_count, OperatorResource.WORKER_COUNT_KEY, operators)
    cluster_config.nodes_count = cluster_config.masters_count + cluster_config.workers_count
    # BUG FIX: `operators` is already the parsed list of operator names;
    # the previous `[operators]` nested it inside another list, unlike every
    # other assignment of olm_operators in this file.
    cluster_config.olm_operators = operators
    return tf_config, cluster_config
def _cluster_create_params(client: assisted_service_api.InventoryClient):
    """Assemble the keyword parameters for a cluster-create API request.

    Network CIDRs and prefixes are chosen from the IPv4 or IPv6 command-line
    arguments depending on which stacks are enabled.

    :param client: inventory client used to resolve the OpenShift version.
    :return: dict of cluster-creation parameters.
    """
    def enabled(value):
        # "yes"-style CLI flag: truthy string whose lowercase form is accepted.
        return value and value.lower() in MachineNetwork.YES_VALUES

    ipv4 = enabled(args.ipv4)
    ipv6 = enabled(args.ipv6)

    # NTP source comes from the IPv6 VM network only on a pure-IPv6 setup.
    ntp_cidr = args.vm_network_cidr6 if ipv6 and not ipv4 else args.vm_network_cidr
    ntp_source = _get_host_ip_from_cidr(ntp_cidr)

    user_managed_networking = is_user_managed_networking()
    http_proxy, https_proxy, no_proxy = _get_http_proxy_params(ipv4=ipv4, ipv6=ipv6)

    if args.master_count == 1:
        ha_mode = consts.HighAvailabilityMode.NONE
    else:
        ha_mode = consts.HighAvailabilityMode.FULL

    olm_operators = [{'name': name}
                     for name in operators_utils.parse_olm_operators_from_env()]

    return {
        "openshift_version": utils.get_openshift_version(allow_default=True, client=client),
        "base_dns_domain": args.base_dns_domain,
        "cluster_network_cidr": args.cluster_network if ipv4 else args.cluster_network6,
        "cluster_network_host_prefix": args.host_prefix if ipv4 else args.host_prefix6,
        "service_network_cidr": args.service_network if ipv4 else args.service_network6,
        "pull_secret": args.pull_secret,
        "http_proxy": http_proxy,
        "https_proxy": https_proxy,
        "no_proxy": no_proxy,
        # DHCP VIP allocation is incompatible with user-managed networking.
        "vip_dhcp_allocation": bool(args.vip_dhcp_allocation) and not user_managed_networking,
        "additional_ntp_source": ntp_source,
        "user_managed_networking": user_managed_networking,
        "high_availability_mode": ha_mode,
        "hyperthreading": args.hyperthreading,
        "olm_operators": olm_operators,
    }
def _create_node_details(cluster_name):
    """Build the terraform variable dict describing the libvirt nodes.

    Resource values from the command line are scaled up to satisfy the OLM
    operators requested via the environment.

    :param cluster_name: name used for the cluster and its terraform variables.
    :return: dict of terraform variables.
    """
    operators = operators_utils.parse_olm_operators_from_env()
    res = consts.OperatorResource

    def scaled(value, key):
        # Raise a base resource value to whatever the active operators require.
        return operators_utils.resource_param(value, key, operators)

    single_node = args.master_count == 1
    # A single-node (bootstrap-in-place) master gets double memory and CPU.
    master_memory = args.master_memory * 2 if single_node else args.master_memory
    master_cpu = args.master_cpu * 2 if single_node else args.master_cpu

    return {
        "libvirt_worker_memory": scaled(args.worker_memory, res.WORKER_MEMORY_KEY),
        "libvirt_master_memory": scaled(master_memory, res.MASTER_MEMORY_KEY),
        "libvirt_worker_vcpu": scaled(args.worker_cpu, res.WORKER_VCPU_KEY),
        "libvirt_master_vcpu": scaled(master_cpu, res.MASTER_VCPU_KEY),
        "worker_count": scaled(args.number_of_workers, res.WORKER_COUNT_KEY),
        "cluster_name": cluster_name,
        "cluster_domain": args.base_dns_domain,
        "libvirt_network_name": consts.TEST_NETWORK + args.namespace,
        "libvirt_network_mtu": args.network_mtu,
        "libvirt_network_if": args.network_bridge,
        "libvirt_worker_disk": scaled(args.worker_disk, res.WORKER_DISK_KEY),
        "libvirt_master_disk": scaled(args.master_disk, res.MASTER_DISK_KEY),
        "libvirt_secondary_network_name": consts.TEST_SECONDARY_NETWORK + args.namespace,
        "libvirt_secondary_network_if": f's{args.network_bridge}',
        "bootstrap_in_place": single_node,
        "master_disk_count": scaled(args.master_disk_count, res.MASTER_DISK_COUNT_KEY),
        "worker_disk_count": scaled(args.worker_disk_count, res.WORKER_DISK_COUNT_KEY),
    }
def __post_init__(self):
    """Populate ``olm_operators`` from the environment after dataclass init."""
    requested_operators = operators_utils.parse_olm_operators_from_env()
    self._set("olm_operators", requested_operators)
"cluster_cidr": utils.get_env('CLUSTER_CIDR', '10.128.0.0/14'), "host_prefix": int(utils.get_env('HOST_PREFIX', '23')), "iso_image_type": utils.get_env('ISO_IMAGE_TYPE', consts.ImageType.FULL_ISO), "worker_vcpu": utils.get_env('WORKER_CPU', consts.WORKER_CPU), "master_vcpu": utils.get_env('MASTER_CPU', consts.MASTER_CPU), "test_teardown": bool(util.strtobool(utils.get_env('TEST_TEARDOWN', 'true'))), "namespace": utils.get_env('NAMESPACE', consts.DEFAULT_NAMESPACE), "olm_operators": operators_utils.parse_olm_operators_from_env(), "platform": utils.get_env("PLATFORM", consts.Platforms.BARE_METAL), "hyperthreading": utils.get_env("HYPERTHREADING"), "user_managed_networking": False, "high_availability_mode": consts.HighAvailabilityMode.FULL, "download_image": bool(util.strtobool(utils.get_env("DOWNLOAD_IMAGE", default="True"))), "is_ipv6": bool(util.strtobool(utils.get_env("IPv6", default="False"))), "cluster_id": utils.get_env("CLUSTER_ID"), "additional_ntp_source":