def main():
    """Entry point: download logs for one cluster or for all qualifying clusters.

    Reads CLI arguments, optionally gathers sosreport data first, then either
    downloads logs for the explicitly requested cluster id or iterates every
    cluster returned by the service and downloads logs for those that qualify.
    """
    args = handle_arguments()

    if args.sosreport:
        gather_sosreport_data(output_dir=args.dest)

    client = ClientFactory.create_client(
        url=args.inventory_url,
        timeout=CONNECTION_TIMEOUT,
        offline_token=get_env("OFFLINE_TOKEN"),
    )

    if args.cluster_id:
        cluster = client.cluster_get(args.cluster_id)
        # Round-trip through JSON to normalize the cluster object into a plain
        # dict; default=str stringifies values that are not JSON-serializable.
        download_cluster_logs(
            client,
            json.loads(json.dumps(cluster.to_dict(), sort_keys=True, default=str)),
            args.dest,
            args.must_gather,
            args.update_by_events,
            pull_secret=args.pull_secret,
        )
    else:
        clusters = get_clusters(client, args.download_all)

        if not clusters:
            log.info("No clusters were found")
            return

        for cluster in clusters:
            if args.download_all or should_download_logs(cluster):
                download_cluster_logs(
                    client,
                    cluster,
                    args.dest,
                    args.must_gather,
                    args.update_by_events,
                    pull_secret=args.pull_secret,
                )

        # Counter is already a dict subclass, so dict(...) alone converts it;
        # the former .items() round-trip was redundant.
        log.info(
            "Cluster installation statuses: %s",
            dict(Counter(cluster["status"] for cluster in clusters)),
        )
def __init__(self, env_var: str, default_prefix: str, prefix: str = None, suffix: str = None):
    """Resolve the name prefix and suffix for this object.

    An explicit ``prefix``/``suffix`` wins; otherwise the prefix falls back to
    the ``env_var`` environment variable (defaulting to ``default_prefix``) and
    the suffix to ``get_name_suffix()``.
    """
    self._default_prefix = default_prefix

    if prefix is None:
        prefix = utils.get_env(env_var, default_prefix)
    self.prefix = prefix

    if suffix is None:
        suffix = get_name_suffix()
    self.suffix = suffix
def try_get_cluster():
    """Best-effort lookup of (client, cluster) for ``args.cluster_id``.

    Returns ``(None, None)`` when no cluster id is configured or when the
    service call fails with an ApiException (logged as a warning).
    """
    if not args.cluster_id:
        return None, None
    try:
        service_url = utils.get_assisted_service_url_by_args(args=args)
        client = ClientFactory.create_client(url=service_url, offline_token=utils.get_env("OFFLINE_TOKEN"))
        return client, client.cluster_get(cluster_id=args.cluster_id)
    except ApiException as e:
        log.warning(f"Can't retrieve cluster_id={args.cluster_id}, {e}")
    return None, None
def get_ocp_cluster(args):
    """Fetch the cluster object, resolving the cluster id from tfvars if absent.

    When ``args.cluster_id`` is unset, it is recovered from the terraform vars
    of the latest run for the derived cluster name/namespace.
    """
    if not args.cluster_id:
        name = f'{args.cluster_name or consts.CLUSTER_PREFIX}-{args.namespace}'
        tfvars = utils.get_tfvars(utils.get_tf_folder(name, args.namespace))
        args.cluster_id = tfvars.get('cluster_inventory_id')

    api = ClientFactory.create_client(
        url=utils.get_assisted_service_url_by_args(args=args),
        offline_token=utils.get_env("OFFLINE_TOKEN"),
    )
    return api.cluster_get(cluster_id=args.cluster_id)
def execute_day1_flow():
    """Day-1 flow: resolve or create the cluster, build its ISO, boot nodes.

    Returns the cluster id, or None when no cluster object was obtained.
    """
    client, cluster = try_get_cluster()
    cluster_name = f'{args.cluster_name or consts.CLUSTER_PREFIX}-{args.namespace}'

    if cluster:
        # An existing cluster dictates the DNS domain and name.
        args.base_dns_domain = cluster.base_dns_domain
        cluster_name = cluster.name
    elif args.managed_dns_domains:
        args.base_dns_domain = args.managed_dns_domains.split(":")[0]

    log.info('Cluster name: %s', cluster_name)

    machine_net = MachineNetwork(args.ipv4, args.ipv6, args.vm_network_cidr, args.vm_network_cidr6, args.ns_index)
    default_iso = os.path.join(consts.IMAGE_FOLDER, f'{args.namespace}-installer-image.iso')
    image_path = args.image or default_iso
    set_tf_config(cluster_name)

    if not args.image:
        utils.recreate_folder(consts.IMAGE_FOLDER, force_recreate=False)

    if not client:
        client = ClientFactory.create_client(
            url=utils.get_assisted_service_url_by_args(args=args),
            offline_token=utils.get_env("OFFLINE_TOKEN"),
        )

    if args.cluster_id:
        cluster = client.cluster_get(cluster_id=args.cluster_id)
    else:
        cluster = client.create_cluster(
            cluster_name, ssh_public_key=args.ssh_key, **_cluster_create_params(client))

    static_network_config = apply_static_network_config(
        cluster_name=cluster_name,
        kube_client=None,
    )

    client.generate_and_download_image(
        cluster_id=cluster.id,
        image_path=image_path,
        image_type=args.iso_image_type,
        ssh_key=args.ssh_key,
        static_network_config=static_network_config,
    )

    # Iso only, cluster will be up and iso downloaded but vm will not be created
    if not args.iso_only:
        run_nodes_flow(client, cluster_name, cluster, machine_net, image_path)

    return cluster.id if cluster else None
def main():
    """Run the install flow for an existing cluster, writing its kubeconfig."""
    _verify_kube_download_folder(args.kubeconfig_path)
    log.info("Creating assisted service client")

    tf = None
    # When no cluster id is given, recover it from the latest terraform run.
    if not args.cluster_id:
        name = f'{args.cluster_name or consts.CLUSTER_PREFIX}-{args.namespace}'
        folder = utils.get_tf_folder(name, args.namespace)
        args.cluster_id = utils.get_tfvars(folder).get('cluster_inventory_id')
        tf = terraform_utils.TerraformUtils(working_dir=folder)

    service_client = ClientFactory.create_client(
        url=utils.get_assisted_service_url_by_args(args=args, wait=False),
        offline_token=utils.get_env("OFFLINE_TOKEN"),
    )
    run_install_flow(
        client=service_client,
        cluster_id=args.cluster_id,
        kubeconfig_path=args.kubeconfig_path,
        pull_secret=args.pull_secret,
        tf=tf,
    )
class _EnvVariablesDefaults(ABC):
    """Global configuration defaults resolved from environment variables.

    Every default below is evaluated once, at class-definition time, via
    ``get_env``. The class enforces a singleton: constructing a second
    instance raises. Subclasses are expected to be dataclasses — the
    generated ``__init__`` invokes ``__post_init__`` below.
    """

    # Credentials and service endpoints.
    ssh_public_key: str = get_env("SSH_PUB_KEY")
    remote_service_url: str = get_env("REMOTE_SERVICE_URL")
    pull_secret: str = get_env("PULL_SECRET")
    offline_token: str = get_env("OFFLINE_TOKEN")
    openshift_version: str = ""
    base_dns_domain: str = get_env("BASE_DOMAIN", env_defaults.DEFAULT_BASE_DNS_DOMAIN)

    # Node counts. MASTERS_COUNT / WORKERS_COUNT take precedence over the
    # legacy NUM_MASTERS / NUM_WORKERS variables.
    masters_count: int = int(
        get_env("MASTERS_COUNT", get_env("NUM_MASTERS", env_defaults.DEFAULT_NUMBER_OF_MASTERS)))
    workers_count: int = int(
        get_env("WORKERS_COUNT", get_env("NUM_WORKERS", env_defaults.DEFAULT_WORKERS_COUNT)))
    nodes_count: int = masters_count + workers_count
    num_day2_workers: int = int(
        get_env("NUM_DAY2_WORKERS", env_defaults.DEFAULT_DAY2_WORKERS_COUNT))
    vip_dhcp_allocation: bool = bool(
        strtobool(get_env("VIP_DHCP_ALLOCATION", str(env_defaults.DEFAULT_VIP_DHCP_ALLOCATION))))

    # Per-node VM resources (units defined by the resources module).
    worker_memory: int = int(get_env("WORKER_MEMORY", resources.DEFAULT_WORKER_MEMORY))
    master_memory: int = int(get_env("MASTER_MEMORY", resources.DEFAULT_MASTER_MEMORY))
    network_mtu: int = int(get_env("NETWORK_MTU", resources.DEFAULT_MTU))
    worker_disk: int = int(get_env("WORKER_DISK", resources.DEFAULT_WORKER_DISK))
    master_disk: int = int(get_env("MASTER_DISK", resources.DEFAULT_MASTER_DISK))
    master_disk_count: int = int(get_env("MASTER_DISK_COUNT", resources.DEFAULT_DISK_COUNT))
    worker_disk_count: int = int(get_env("WORKER_DISK_COUNT", resources.DEFAULT_DISK_COUNT))

    # Paths and folders.
    storage_pool_path: str = get_env("STORAGE_POOL_PATH", env_defaults.DEFAULT_STORAGE_POOL_PATH)
    private_ssh_key_path: Path = Path(
        get_env("PRIVATE_KEY_PATH", env_defaults.DEFAULT_SSH_PRIVATE_KEY_PATH))
    installer_kubeconfig_path: str = get_env(
        "INSTALLER_KUBECONFIG", env_defaults.DEFAULT_INSTALLER_KUBECONFIG)
    log_folder: str = get_env("LOG_FOLDER", env_defaults.DEFAULT_LOG_FOLDER)

    is_static_ip: bool = bool(
        strtobool(get_env("STATIC_IPS", default=str(env_defaults.DEFAULT_STATIC_IPS))))
    iso_image_type: str = get_env("ISO_IMAGE_TYPE", env_defaults.DEFAULT_IMAGE_TYPE)
    worker_vcpu: str = get_env("WORKER_CPU", resources.DEFAULT_WORKER_CPU)
    master_vcpu: str = get_env("MASTER_CPU", resources.DEFAULT_MASTER_CPU)
    test_teardown: bool = bool(
        strtobool(get_env("TEST_TEARDOWN", str(env_defaults.DEFAULT_TEST_TEARDOWN))))
    namespace: str = get_env("NAMESPACE", consts.DEFAULT_NAMESPACE)
    spoke_namespace: str = get_env("SPOKE_NAMESPACE", consts.DEFAULT_SPOKE_NAMESPACE)
    # Mutable default: needs a dataclass field factory; the real value is
    # resolved from the environment in __post_init__.
    olm_operators: List[str] = field(default_factory=list)
    platform: str = get_env("PLATFORM", env_defaults.DEFAULT_PLATFORM)
    user_managed_networking: bool = env_defaults.DEFAULT_USER_MANAGED_NETWORKING
    high_availability_mode: str = env_defaults.DEFAULT_HIGH_AVAILABILITY_MODE
    download_image: bool = bool(
        strtobool(get_env("DOWNLOAD_IMAGE", str(env_defaults.DEFAULT_DOWNLOAD_IMAGE))))
    verify_download_iso_ssl: bool = bool(
        strtobool(get_env("VERIFY_DOWNLOAD_ISO_SSL", str(env_defaults.DEFAULT_VERIFY_SSL))))
    is_ipv4: bool = bool(strtobool(get_env("IPv4", str(env_defaults.DEFAULT_IS_IPV4))))
    is_ipv6: bool = bool(strtobool(get_env("IPv6", str(env_defaults.DEFAULT_IS_IPV6))))
    cluster_id: str = get_env("CLUSTER_ID")
    additional_ntp_source: str = get_env(
        "ADDITIONAL_NTP_SOURCE", env_defaults.DEFAULT_ADDITIONAL_NTP_SOURCE)
    network_name: str = get_env("NETWORK_NAME", env_defaults.DEFAULT_NETWORK_NAME)
    bootstrap_in_place: bool = bool(
        strtobool(get_env("BOOTSTRAP_IN_PLACE", str(env_defaults.DEFAULT_BOOTSTRAP_IN_PLACE))))
    single_node_ip: str = get_env("SINGLE_NODE_IP", env_defaults.DEFAULT_SINGLE_NODE_IP)
    worker_cpu_mode: str = get_env("WORKER_CPU_MODE", env_defaults.DEFAULT_TF_CPU_MODE)
    master_cpu_mode: str = get_env("MASTER_CPU_MODE", env_defaults.DEFAULT_TF_CPU_MODE)
    iso_download_path: str = get_env(
        "ISO_DOWNLOAD_PATH", get_env("ISO"))  # todo replace ISO env var->ISO_DOWNLOAD_PATH
    hyperthreading: str = get_env("HYPERTHREADING")
    network_type: str = get_env("NETWORK_TYPE", env_defaults.DEFAULT_NETWORK_TYPE)
    disk_encryption_mode: str = get_env(
        "DISK_ENCRYPTION_MODE", env_defaults.DEFAULT_DISK_ENCRYPTION_MODE)
    disk_encryption_roles: str = get_env(
        "DISK_ENCRYPTION_ROLES", env_defaults.DEFAULT_DISK_ENCRYPTION_ROLES)
    is_kube_api: bool = bool(strtobool(get_env("KUBE_API", str(env_defaults.DEFAULT_IS_KUBE_API))))

    # vSphere platform settings.
    vsphere_cluster: str = get_env("VSPHERE_CLUSTER")
    vsphere_username: str = get_env("VSPHERE_USERNAME")
    vsphere_password: str = get_env("VSPHERE_PASSWORD")
    vsphere_network: str = get_env("VSPHERE_NETWORK")
    vsphere_vcenter: str = get_env("VSPHERE_VCENTER")
    vsphere_datacenter: str = get_env("VSPHERE_DATACENTER")
    vsphere_datastore: str = get_env("VSPHERE_DATASTORE")

    # Singleton guard; name-mangled, so each subclass tracks its own instance.
    __instance: ClassVar = None

    def __new__(cls, *args, **kwargs):
        """Prevent creating another env_var instance.

        Raises:
            Exception: if an instance of this class already exists.
        """
        if isinstance(cls.__instance, cls):
            # Error message grammar fixed (was "Can't initialized more then one ...").
            raise Exception(
                "Can't initialize more than one global configuration object")
        cls.__instance = object.__new__(cls, *args, **kwargs)
        return cls.__instance

    def __post_init__(self):
        # Resolve the operator list from the environment after dataclass init.
        self._set("olm_operators", operators_utils.parse_olm_operators_from_env())

    def _set(self, key: str, value: Any):
        """Assign ``value`` to an existing attribute via ``__setattr__`` on the
        superclass (bypasses any frozen-dataclass restriction in subclasses).

        Raises:
            AttributeError: if ``key`` is not an existing attribute.
        """
        if not hasattr(self, key):
            raise AttributeError(f"Invalid key {key}")
        super().__setattr__(key, value)