def update_ovdc_k8s_provider_metadata(sysadmin_client: vcd_client.Client,
                                      ovdc_id,
                                      k8s_provider_data=None,
                                      k8s_provider=None):
    """Set the k8s provider metadata for given ovdc.

    :param pyvcloud.vcd.client.Client sysadmin_client:
    :param str ovdc_id:
    :param dict k8s_provider_data: k8s provider context details
    :param K8sProvider k8s_provider:

    :return:
    """
    vcd_utils.raise_error_if_user_not_from_system_org(sysadmin_client)

    ovdc = vcd_utils.get_vdc(sysadmin_client, vdc_id=ovdc_id)
    ovdc_name = ovdc.get_resource().get('name')

    metadata = {K8S_PROVIDER_KEY: k8s_provider or K8sProvider.NONE}

    if k8s_provider != K8sProvider.PKS:
        LOGGER.debug(f"Remove existing metadata for ovdc:{ovdc_name}")
        _remove_metadata_from_ovdc(ovdc, PksCache.get_pks_keys())
        LOGGER.debug(f"Updated metadata for {k8s_provider}:{metadata}")
    else:
        k8s_provider_data.pop('username')
        k8s_provider_data.pop('secret')
        k8s_provider_data.pop('nsxt')
        metadata.update(k8s_provider_data)

    # set ovdc metadata into VCD
    LOGGER.debug(f"On ovdc:{ovdc_name}, setting metadata:{metadata}")
    return ovdc.set_multiple_metadata(metadata,
                                      vcd_client.MetadataDomain.SYSTEM,
                                      vcd_client.MetadataVisibility.PRIVATE)
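# --- Illustrative usage (not part of the original module) -------------------
# A minimal sketch of how the function above might be invoked to clear the
# k8s provider metadata on an org VDC. `sysadmin_client` is assumed to be an
# authenticated system-org vcd_client.Client and `ovdc_id` a valid VDC id;
# both names are placeholders, not values defined in this module.
def _example_clear_k8s_provider_metadata(sysadmin_client, ovdc_id):
    # Stamps the ovdc with K8sProvider.NONE and strips any stale PKS keys.
    return update_ovdc_k8s_provider_metadata(
        sysadmin_client, ovdc_id, k8s_provider=K8sProvider.NONE)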
def _follow_task(op_ctx: ctx.OperationContext, task_href: str, ovdc_id: str):
    try:
        task = vcd_task.Task(client=op_ctx.sysadmin_client)
        session = op_ctx.sysadmin_client.get_vcloud_session()
        vdc = vcd_utils.get_vdc(op_ctx.sysadmin_client, vdc_id=ovdc_id)
        org = vcd_utils.get_org(op_ctx.sysadmin_client)
        user_name = session.get('user')
        user_href = org.get_user(user_name).get('href')
        msg = "Remove ovdc compute policy"
        # TODO(pyvcloud): Add method to retrieve task from task href
        t = task.update(
            status=vcd_task.TaskStatus.RUNNING.value,
            namespace='vcloud.cse',
            operation=msg,
            operation_name=msg,
            details='',
            progress=None,
            owner_href=vdc.href,
            owner_name=vdc.name,
            owner_type=vcd_client.EntityType.VDC.value,
            user_href=user_href,
            user_name=user_name,
            org_href=op_ctx.user.org_href,
            task_href=task_href)
        op_ctx.sysadmin_client.get_task_monitor().wait_for_status(t)
    except Exception as err:
        logger.SERVER_LOGGER.error(f"{err}")
    finally:
        if op_ctx.sysadmin_client:
            op_ctx.end()
def update_ovdc(operation_context: ctx.OperationContext,
                ovdc_id: str,
                ovdc_spec: common_models.Ovdc) -> dict:
    """Update ovdc with the updated k8s runtimes list.

    :param ctx.OperationContext operation_context: context for the request
    :param str ovdc_id: id of the org VDC to update
    :param common_models.Ovdc ovdc_spec: Ovdc object having the updated
        k8s runtime list

    :return: dictionary containing the task href for the update operation
    :rtype: dict
    """
    # NOTE: For CSE 3.0, if the `enable_tkg_plus` flag in the config is set to
    # false, prevent enable/disable of OVDC for the TKG+ k8s runtime by
    # throwing an exception.
    msg = "Updating OVDC placement policies"
    task = vcd_task.Task(operation_context.sysadmin_client)
    org = vcd_utils.get_org(operation_context.client)
    user_href = org.get_user(operation_context.user.name).get('href')
    vdc = vcd_utils.get_vdc(operation_context.sysadmin_client,
                            vdc_id=ovdc_id,
                            is_admin_operation=True)
    logger.SERVER_LOGGER.debug(msg)
    task_resource = task.update(
        status=vcd_client.TaskStatus.RUNNING.value,
        namespace='vcloud.cse',
        operation=msg,
        operation_name='OVDC Update',
        details='',
        progress=None,
        owner_href=vdc.href,
        owner_name=vdc.name,
        owner_type=vcd_client.EntityType.VDC.value,
        user_href=user_href,
        user_name=operation_context.user.name,
        org_href=operation_context.user.org_href,
        task_href=None,
        error_message=None,
        stack_trace=None)
    task_href = task_resource.get('href')
    operation_context.is_async = True
    # NOTE: Telemetry is currently handled in the async function as it is not
    # possible to know the operation (enable/disable) without comparing it to
    # the current k8s runtimes.
    if ClusterEntityKind.TKG_PLUS.value in ovdc_spec.k8s_runtime and \
            not server_utils.is_tkg_plus_enabled():
        msg = "TKG+ is not enabled on CSE server. Please enable TKG+ in " \
              "the server and try again."
        logger.SERVER_LOGGER.debug(msg)
        raise Exception(msg)
    policy_list = \
        [RUNTIME_DISPLAY_NAME_TO_INTERNAL_NAME_MAP[p] for p in ovdc_spec.k8s_runtime]  # noqa: E501
    _update_ovdc_using_placement_policy_async(
        operation_context=operation_context,
        task=task,
        task_href=task_href,
        user_href=user_href,
        policy_list=policy_list,
        ovdc_id=ovdc_id,
        vdc=vdc,
        org_name=ovdc_spec.org_name,
        remove_cp_from_vms_on_disable=ovdc_spec.remove_cp_from_vms_on_disable)
    return {'task_href': task_href}
def _remove_compute_policy_from_vdc_async(self, *args,
                                          ovdc_id,
                                          compute_policy_href,
                                          task_resource,
                                          force=False):
    vdc = vcd_utils.get_vdc(self._sysadmin_client,
                            vdc_id=ovdc_id,
                            is_admin_operation=True)
    task_href = task_resource.get('href')
    user_href = task_resource.User.get('href')
    org_href = task_resource.Organization.get('href')
    task = Task(client=self._sysadmin_client)
    try:
        self.remove_compute_policy_from_vdc_sync(
            vdc=vdc,
            compute_policy_href=compute_policy_href,
            task_resource=task_resource,
            force=force)
        task.update(
            status=vcd_client.TaskStatus.SUCCESS.value,
            namespace='vcloud.cse',
            operation=f"Removed compute policy (href: "
                      f"{compute_policy_href}) from org VDC '{vdc.name}'",
            operation_name='Updating VDC',
            details='',
            progress=None,
            owner_href=vdc.href,
            owner_name=vdc.name,
            owner_type=vcd_client.EntityType.VDC.value,
            user_href=user_href,
            user_name=self._session.get('user'),
            task_href=task_href,
            org_href=org_href)
    except Exception as err:
        msg = f'Failed to remove compute policy: {compute_policy_href} ' \
              f'from the OVDC: {vdc.name}'
        logger.SERVER_LOGGER.error(msg)
        task.update(
            status=vcd_client.TaskStatus.ERROR.value,
            namespace='vcloud.cse',
            operation=msg,
            operation_name='Remove org VDC compute policy',
            details='',
            progress=None,
            owner_href=vdc.href,
            owner_name=vdc.name,
            owner_type=vcd_client.EntityType.VDC.value,
            user_href=user_href,
            user_name=self._session.get('user'),
            task_href=task_href,
            org_href=org_href,
            error_message=f"{err}",
            stack_trace='')
def list_ovdc_compute_policies(self, ovdc_name, org_name):
    """List an ovdc's compute policies.

    :param str ovdc_name: Name of the org VDC whose compute policies are
        to be listed
    :param str org_name: Name of org that @ovdc_name belongs to

    :rtype: dict
    """
    ovdc = get_vdc(self.client,
                   vdc_name=ovdc_name,
                   org_name=org_name,
                   is_admin_operation=True)
    ovdc_id = utils.extract_id(ovdc.get_resource().get('id'))
    return self._ovdc_api.list_ovdc_compute_policies(ovdc_id)
def get_ovdc_k8s_runtime_details(
        sysadmin_client: vcd_client.Client,
        ovdc_id=None,
        ovdc_name=None,
        org_name=None,
        cpm: Optional[compute_policy_manager.ComputePolicyManager] = None,
        log_wire=False) -> common_models.Ovdc:
    """Get k8s runtime details for an ovdc.

    Either ovdc_id, or both org_name and ovdc_name, must be provided. The
    additional call to fetch ovdc details can be avoided by providing both
    ovdc_id and ovdc_name.

    :param vcd_client.Client sysadmin_client: vcd sysadmin client
    :param str ovdc_id:
    :param str ovdc_name:
    :param str org_name:
    :param compute_policy_manager.ComputePolicyManager cpm:
    :param bool log_wire:

    :return: Ovdc object with k8s runtimes
    :rtype: common_models.Ovdc
    """
    vcd_utils.raise_error_if_user_not_from_system_org(sysadmin_client)
    if not cpm:
        cpm = compute_policy_manager.ComputePolicyManager(sysadmin_client,
                                                          log_wire=log_wire)
    if not (org_name and ovdc_name) and not ovdc_id:
        msg = "Unable to fetch OVDC k8s runtime details with the " \
              "provided parameters"
        logger.SERVER_LOGGER.error(msg)
        raise Exception(msg)
    if not ovdc_id or not ovdc_name:
        # populate ovdc_id and ovdc_name
        ovdc = vcd_utils.get_vdc(client=sysadmin_client,
                                 vdc_id=ovdc_id,
                                 vdc_name=ovdc_name,
                                 org_name=org_name,
                                 is_admin_operation=True)
        ovdc_id = vcd_utils.extract_id(ovdc.get_resource().get('id'))
        ovdc_name = ovdc.get_resource().get('name')
    policies = []
    for cse_policy in \
            compute_policy_manager.list_cse_placement_policies_on_vdc(cpm, ovdc_id):  # noqa: E501
        policies.append(
            RUNTIME_INTERNAL_NAME_TO_DISPLAY_NAME_MAP[cse_policy['display_name']])  # noqa: E501
    return common_models.Ovdc(ovdc_name=ovdc_name,
                              ovdc_id=ovdc_id,
                              k8s_runtime=policies)
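# --- Illustrative usage (not part of the original module) -------------------
# A minimal sketch showing the two lookup styles the function above accepts,
# per its docstring. `sysadmin_client` is a placeholder for an authenticated
# system-org client; the org/vdc names are hypothetical.
def _example_get_runtime_details(sysadmin_client):
    # Look up by org and vdc name (an extra call resolves the ovdc id).
    by_name = get_ovdc_k8s_runtime_details(sysadmin_client,
                                           ovdc_name='my-vdc',
                                           org_name='my-org')
    # Look up by id and name together (avoids the extra lookup).
    by_id = get_ovdc_k8s_runtime_details(sysadmin_client,
                                         ovdc_id=by_name.ovdc_id,
                                         ovdc_name=by_name.ovdc_name)
    return by_name.k8s_runtime, by_id.k8s_runtime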
def info_ovdc(self, ovdc_name, org_name):
    """Get k8s details of the given ovdc for the container provider.

    :param str ovdc_name: Name of the org VDC to fetch details for
    :param str org_name: Name of org that @ovdc_name belongs to

    :rtype: dict
    """
    ovdc = get_vdc(self.client,
                   vdc_name=ovdc_name,
                   org_name=org_name,
                   is_admin_operation=True)
    ovdc_id = utils.extract_id(ovdc.get_resource().get('id'))
    return self._pks_ovdc_api.get_ovdc(ovdc_id)
def get_ovdc_k8s_provider_metadata(sysadmin_client: vcd_client.Client,
                                   org_name=None,
                                   ovdc_name=None,
                                   ovdc_id=None,
                                   include_credentials=False,
                                   include_nsxt_info=False):
    """Get k8s provider metadata for an org VDC.

    :param sysadmin_client:
    :param org_name:
    :param ovdc_name:
    :param ovdc_id:
    :param include_credentials:
    :param include_nsxt_info:

    :return: Dictionary with k8s provider metadata
    """
    vcd_utils.raise_error_if_user_not_from_system_org(sysadmin_client)

    ovdc = vcd_utils.get_vdc(client=sysadmin_client,
                             vdc_name=ovdc_name,
                             vdc_id=ovdc_id,
                             org_name=org_name,
                             is_admin_operation=True)
    all_metadata = pyvcd_utils.metadata_to_dict(ovdc.get_all_metadata())
    k8s_provider = all_metadata.get(K8S_PROVIDER_KEY, K8sProvider.NONE)

    result = {K8S_PROVIDER_KEY: k8s_provider}

    if k8s_provider == K8sProvider.PKS:
        result.update({k: all_metadata[k] for k in PksCache.get_pks_keys()})
        result[PKS_PLANS_KEY] = result[PKS_PLANS_KEY].split(',')

        # Get the credentials from PksCache
        if include_credentials or include_nsxt_info:
            pks_cache = server_utils.get_pks_cache()
            pvdc_info = pks_cache.get_pvdc_info(
                vcd_utils.get_pvdc_id(sysadmin_client, ovdc))
        if include_credentials:
            # TODO in case only ovdc_id is provided, we need a way to get
            # org_name
            pks_info = pks_cache.get_pks_account_info(org_name, pvdc_info.vc)
            result.update(pks_info.credentials._asdict())
        if include_nsxt_info:
            nsxt_info = pks_cache.get_nsxt_info(pvdc_info.vc)
            result['nsxt'] = nsxt_info

    return result
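# --- Illustrative usage (not part of the original module) -------------------
# A minimal sketch of reading back the metadata written by
# update_ovdc_k8s_provider_metadata, including PKS credentials and NSX-T info
# when the ovdc is PKS-enabled. `sysadmin_client` is a placeholder for an
# authenticated system-org client; the org/vdc names are hypothetical.
def _example_read_k8s_provider_metadata(sysadmin_client):
    metadata = get_ovdc_k8s_provider_metadata(sysadmin_client,
                                              org_name='my-org',
                                              ovdc_name='my-vdc',
                                              include_credentials=True,
                                              include_nsxt_info=True)
    # The provider key is always present; PKS-specific keys only if enabled.
    return metadata[K8S_PROVIDER_KEY]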
def _construct_pks_compute_profile_name(sysadmin_client: vcd_client.Client,
                                        vdc_id):
    """Construct pks compute profile name.

    :param pyvcloud.vcd.client.Client sysadmin_client:
    :param str vdc_id: UUID of the vdc in vcd

    :return: pks compute profile name
    :rtype: str
    """
    vcd_utils.raise_error_if_user_not_from_system_org(sysadmin_client)

    vdc = vcd_utils.get_vdc(client=sysadmin_client, vdc_id=vdc_id)
    return f"cp--{vdc_id}--{vdc.name}"
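# Illustrative note (not in the original source): for a hypothetical vdc named
# 'my-vdc' with id '01234567-89ab-cdef-0123-456789abcdef', the helper above
# would return 'cp--01234567-89ab-cdef-0123-456789abcdef--my-vdc'; both values
# are made-up placeholders used only to show the name format.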
def remove_vdc_compute_policy_from_vdc(self,
                                       ovdc_id,
                                       compute_policy_href,
                                       force=False):
    """Delete the compute policy from the specified vdc.

    :param str ovdc_id: id of the vdc to remove the policy from
    :param compute_policy_href: policy href to remove
    :param bool force: If True, will set affected VMs' compute policy to
        'System Default'

    :return: dictionary containing 'task_href'.
    """
    vdc = vcd_utils.get_vdc(self._sysadmin_client, vdc_id=ovdc_id)

    # TODO the following org will be associated with 'System' org.
    # task created should be associated with the corresponding org of the
    # vdc object.
    org = vcd_utils.get_org(self._sysadmin_client)
    org.reload()
    user_name = self._session.get('user')
    user_href = org.get_user(user_name).get('href')

    task = Task(self._sysadmin_client)
    task_resource = task.update(
        status=vcd_client.TaskStatus.RUNNING.value,
        namespace='vcloud.cse',
        operation=f"Removing compute policy (href: {compute_policy_href})"
                  f" from org VDC (vdc id: {ovdc_id})",
        operation_name='Remove org VDC compute policy',
        details='',
        progress=None,
        owner_href=vdc.href,
        owner_name=vdc.name,
        owner_type=vcd_client.EntityType.VDC.value,
        user_href=user_href,
        user_name=user_name,
        org_href=org.href)

    task_href = task_resource.get('href')
    self._remove_compute_policy_from_vdc_async(
        ovdc_id=ovdc_id,
        compute_policy_href=compute_policy_href,
        task_resource=task_resource,
        force=force)
    return {'task_href': task_href}
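# --- Illustrative usage (not part of the original module) -------------------
# A minimal sketch of removing a policy and tracking the returned task.
# `cpm` is assumed to be an instance of the manager class this method belongs
# to, and `ovdc_id`/`policy_href` are placeholders for real values.
def _example_remove_policy(cpm, ovdc_id, policy_href):
    result = cpm.remove_vdc_compute_policy_from_vdc(ovdc_id,
                                                    policy_href,
                                                    force=True)
    # The removal itself runs asynchronously; callers poll this task href.
    return result['task_href']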
def add_compute_policy_to_vdc(self, vdc_id, compute_policy_href):
    """Add compute policy to the given vdc.

    :param str vdc_id: id of the vdc to assign the policy
    :param compute_policy_href: policy href that is created using cloudapi

    :return: an object containing VdcComputePolicyReferences XML element that
        refers to individual VdcComputePolicies.

    :rtype: lxml.objectify.ObjectifiedElement
    """
    self._raise_error_if_not_supported()
    vdc = vcd_utils.get_vdc(self._sysadmin_client,
                            vdc_id=vdc_id,
                            is_admin_operation=True)
    return vdc.add_compute_policy(compute_policy_href)
def update_ovdc(self, ovdc_name, k8s_runtime, enable=True, org_name=None,
                remove_cp_from_vms_on_disable=False):
    """Enable/Disable ovdc for k8s for the given k8s provider.

    :param str ovdc_name: Name of org VDC to update
    :param List[str] k8s_runtime: k8s runtimes of the k8s provider to
        enable/disable for the ovdc
    :param bool enable: If set to True, the vdc will be enabled for the
        particular k8s_runtime; if set to False, k8s support on the vdc
        will be disabled.
    :param str org_name: Name of org that @ovdc_name belongs to
    :param bool remove_cp_from_vms_on_disable: If set to True and enable is
        False, then all the vms in the ovdc having policies for the
        k8s_runtime are deleted.

    :rtype: dict
    """
    ovdc = get_vdc(self.client,
                   vdc_name=ovdc_name,
                   org_name=org_name,
                   is_admin_operation=True)
    ovdc_id = utils.extract_id(ovdc.get_resource().get('id'))
    curr_ovdc = self._ovdc_api.get_ovdc(ovdc_id)
    runtimes = curr_ovdc.k8s_runtime
    for k in k8s_runtime:
        if enable:
            if k in runtimes:
                raise Exception(
                    f"OVDC {ovdc_name} already enabled for {k8s_runtime}")
            runtimes.append(k)
        else:
            if k not in runtimes:
                raise Exception(
                    f"OVDC {ovdc_name} already disabled for {k8s_runtime}")
            runtimes.remove(k)
    updated_ovdc = common_models.Ovdc(
        k8s_runtime=runtimes,
        org_name=org_name,
        remove_cp_from_vms_on_disable=remove_cp_from_vms_on_disable)
    return self._ovdc_api.update_ovdc(ovdc_id, updated_ovdc)
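# --- Illustrative usage (not part of the original module) -------------------
# A minimal sketch of enabling a k8s runtime on an org VDC through the client
# wrapper above. `ovdc_client` is assumed to be an instance of the class this
# method belongs to; the org/vdc/runtime names are hypothetical placeholders.
def _example_enable_runtime(ovdc_client):
    return ovdc_client.update_ovdc('my-vdc',
                                   k8s_runtime=['native'],
                                   enable=True,
                                   org_name='my-org')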
def construct_k8s_metadata_from_pks_cache(sysadmin_client: vcd_client.Client,
                                          ovdc_id, org_name, pks_plans,
                                          pks_cluster_domain, k8s_provider):
    vcd_utils.raise_error_if_user_not_from_system_org(sysadmin_client)

    ctr_prov_context = {
        K8S_PROVIDER_KEY: k8s_provider,
    }
    if k8s_provider == K8sProvider.PKS:
        if not server_utils.is_pks_enabled():
            raise e.CseServerError('CSE is not configured to work with PKS.')

        ovdc = vcd_utils.get_vdc(client=sysadmin_client,
                                 vdc_id=ovdc_id,
                                 is_admin_operation=True)
        pks_cache = server_utils.get_pks_cache()
        pvdc_id = vcd_utils.get_pvdc_id(sysadmin_client, ovdc)
        pvdc_info = pks_cache.get_pvdc_info(pvdc_id)
        if not pvdc_info:
            LOGGER.debug(f"pvdc '{pvdc_id}' is not backed "
                         f"by PKS-managed-vSphere resources")
            raise e.CseServerError(f"VDC '{ovdc.get_resource().get('name')}'"
                                   " is not eligible to provide resources"
                                   " for PKS clusters.")
        pks_account_info = pks_cache.get_pks_account_info(
            org_name, pvdc_info.vc)
        nsxt_info = pks_cache.get_nsxt_info(pvdc_info.vc)

        pks_compute_profile_name = _construct_pks_compute_profile_name(
            sysadmin_client, ovdc_id)
        ctr_prov_context = construct_pks_context(
            pks_account_info=pks_account_info,
            pvdc_info=pvdc_info,
            nsxt_info=nsxt_info,
            pks_compute_profile_name=pks_compute_profile_name,
            pks_plans=pks_plans,
            pks_cluster_domain=pks_cluster_domain,
            credentials_required=True)
    return ctr_prov_context
def update_ovdc_compute_policies(self, ovdc_name, org_name,
                                 compute_policy_name, action,
                                 remove_compute_policy_from_vms):
    """Update an ovdc's compute policies.

    :param str ovdc_name: Name of org VDC to update
    :param str org_name: Name of org that @ovdc_name belongs to
    :param str compute_policy_name: Name of compute policy to add or remove
    :param ComputePolicyAction action:
    :param bool remove_compute_policy_from_vms:

    :rtype: dict
    """
    ovdc = get_vdc(self.client,
                   vdc_name=ovdc_name,
                   org_name=org_name,
                   is_admin_operation=True)
    ovdc_id = utils.extract_id(ovdc.get_resource().get('id'))
    return self._ovdc_api.update_ovdc_compute_policies(
        ovdc_id,
        compute_policy_name,
        action,
        force_remove=remove_compute_policy_from_vms)
def update_ovdc(self, enable, ovdc_name, org_name=None, pks_plan=None,
                pks_cluster_domain=None):
    """Enable/Disable ovdc for k8s for the given container provider.

    :param bool enable: If set to True, the vdc will be enabled for the
        particular k8s_provider; if set to False, k8s support on the vdc
        will be disabled.
    :param str ovdc_name: Name of org VDC to update
    :param str org_name: Name of org that @ovdc_name belongs to
    :param str pks_plan: PKS plan
    :param str pks_cluster_domain: Suffix of the domain name, which will be
        used to construct FQDN of the clusters.

    :rtype: dict
    """
    ovdc = get_vdc(self.client,
                   vdc_name=ovdc_name,
                   org_name=org_name,
                   is_admin_operation=True)
    ovdc_id = utils.extract_id(ovdc.get_resource().get('id'))
    k8s_provider = server_constants.K8sProvider.PKS
    if not enable:
        k8s_provider = server_constants.K8sProvider.NONE
        pks_plan = None
        pks_cluster_domain = None
    return self._pks_ovdc_api.update_ovdc_by_ovdc_id(
        ovdc_id,
        k8s_provider,
        ovdc_name=ovdc_name,
        org_name=org_name,
        pks_plan=pks_plan,
        pks_cluster_domain=pks_cluster_domain)
def init_environment(config_filepath=BASE_CONFIG_FILEPATH):
    """Set up module variables according to config dict.

    :param str config_filepath:
    """
    global AMQP_USERNAME, AMQP_PASSWORD, CLIENT, ORG_HREF, VDC_HREF, \
        CATALOG_NAME, TEARDOWN_INSTALLATION, TEARDOWN_CLUSTERS, \
        TEMPLATE_DEFINITIONS, TEST_ALL_TEMPLATES, SYS_ADMIN_LOGIN_CMD, \
        ORG_ADMIN_LOGIN_CMD, K8_AUTHOR_LOGIN_CMD, USERNAME_TO_LOGIN_CMD, \
        USERNAME_TO_CLUSTER_NAME, TEST_ORG_HREF, TEST_VDC_HREF, \
        VCD_API_VERSION_TO_USE, TEMPLATE_COOKBOOK_VERSION

    config = testutils.yaml_to_dict(config_filepath)

    rtm = RemoteTemplateManager(
        config['broker']['remote_template_cookbook_url'],
        legacy_mode=config['service']['legacy_mode'])
    template_cookbook = rtm.get_filtered_remote_template_cookbook()
    TEMPLATE_COOKBOOK_VERSION = rtm.cookbook_version
    TEMPLATE_DEFINITIONS = template_cookbook['templates']
    rtm.download_all_template_scripts(force_overwrite=True)

    CLIENT = Client(config['vcd']['host'],
                    api_version=config['vcd']['api_version'],
                    verify_ssl_certs=config['vcd']['verify'])
    credentials = BasicLoginCredentials(config['vcd']['username'],
                                        shared_constants.SYSTEM_ORG_NAME,
                                        config['vcd']['password'])
    CLIENT.set_credentials(credentials)

    VCD_API_VERSION_TO_USE = config['vcd']['api_version']
    CATALOG_NAME = config['broker']['catalog']
    AMQP_USERNAME = config['amqp']['username']
    AMQP_PASSWORD = config['amqp']['password']

    SYS_ADMIN_LOGIN_CMD = f"login {config['vcd']['host']} system " \
                          f"{config['vcd']['username']} " \
                          f"-iwp {config['vcd']['password']} " \
                          f"-V {VCD_API_VERSION_TO_USE}"
    ORG_ADMIN_LOGIN_CMD = f"login {config['vcd']['host']} " \
                          f"{TEST_ORG}" \
                          f" {ORG_ADMIN_NAME} -iwp {ORG_ADMIN_PASSWORD} " \
                          f"-V {VCD_API_VERSION_TO_USE}"
    K8_AUTHOR_LOGIN_CMD = f"login {config['vcd']['host']} " \
                          f"{TEST_ORG} " \
                          f"{K8_AUTHOR_NAME} -iwp {K8_AUTHOR_PASSWORD}" \
                          f" -V {VCD_API_VERSION_TO_USE}"

    USERNAME_TO_LOGIN_CMD = {
        'sys_admin': SYS_ADMIN_LOGIN_CMD,
        'org_admin': ORG_ADMIN_LOGIN_CMD,
        'k8_author': K8_AUTHOR_LOGIN_CMD
    }
    USERNAME_TO_CLUSTER_NAME = {
        'sys_admin': SYS_ADMIN_TEST_CLUSTER_NAME,
        'org_admin': ORG_ADMIN_TEST_CLUSTER_NAME,
        'k8_author': K8_AUTHOR_TEST_CLUSTER_NAME
    }

    # hrefs for Org and VDC that host the catalog
    org = pyvcloud_utils.get_org(CLIENT, org_name=config['broker']['org'])
    vdc = pyvcloud_utils.get_vdc(CLIENT, vdc_name=config['broker']['vdc'],
                                 org=org)
    ORG_HREF = org.href
    VDC_HREF = vdc.href

    # hrefs for Org and VDC that test cluster operations
    test_org = pyvcloud_utils.get_org(CLIENT, org_name=TEST_ORG)
    test_vdc = pyvcloud_utils.get_vdc(CLIENT, vdc_name=TEST_VDC, org=test_org)
    TEST_ORG_HREF = test_org.href
    TEST_VDC_HREF = test_vdc.href

    create_k8_author_role(config['vcd'])
def init_rde_environment(config_filepath=BASE_CONFIG_FILEPATH,
                         logger=NULL_LOGGER):
    """Set up module variables according to config dict.

    :param str config_filepath:
    :param logging.Logger logger:
    """
    global CLIENT, ORG_HREF, VDC_HREF, \
        CATALOG_NAME, TEARDOWN_INSTALLATION, TEARDOWN_CLUSTERS, \
        TEST_ALL_TEMPLATES, SYS_ADMIN_LOGIN_CMD, \
        CLUSTER_ADMIN_LOGIN_CMD, CLUSTER_AUTHOR_LOGIN_CMD, \
        USERNAME_TO_LOGIN_CMD, USERNAME_TO_CLUSTER_NAME, TEST_ORG_HREF, \
        TEST_VDC_HREF, VCD_API_VERSION_TO_USE, VCD_SITE

    logger.debug("Setting up RDE environment")
    config = testutils.yaml_to_dict(config_filepath)
    logger.debug(f"Config file used: {config}")

    sysadmin_client = Client(config['vcd']['host'],
                             verify_ssl_certs=config['vcd']['verify'])
    sysadmin_client.set_credentials(
        BasicLoginCredentials(config['vcd']['username'],
                              shared_constants.SYSTEM_ORG_NAME,
                              config['vcd']['password']))

    vcd_supported_api_versions = \
        set(sysadmin_client.get_supported_versions_list())
    cse_supported_api_versions = \
        set(shared_constants.SUPPORTED_VCD_API_VERSIONS)
    common_supported_api_versions = \
        list(cse_supported_api_versions.intersection(vcd_supported_api_versions))  # noqa: E501
    common_supported_api_versions.sort()
    max_api_version = get_max_api_version(common_supported_api_versions)

    CLIENT = Client(config['vcd']['host'],
                    api_version=max_api_version,
                    verify_ssl_certs=config['vcd']['verify'])
    credentials = BasicLoginCredentials(config['vcd']['username'],
                                        shared_constants.SYSTEM_ORG_NAME,
                                        config['vcd']['password'])
    CLIENT.set_credentials(credentials)
    VCD_API_VERSION_TO_USE = max_api_version
    logger.debug(f"Using VCD api version: {VCD_API_VERSION_TO_USE}")

    CATALOG_NAME = config['broker']['catalog']
    VCD_SITE = f"https://{config['vcd']['host']}"

    SYS_ADMIN_LOGIN_CMD = f"login {config['vcd']['host']} system " \
                          f"{config['vcd']['username']} " \
                          f"-iwp {config['vcd']['password']} " \
                          f"-V {VCD_API_VERSION_TO_USE}"
    CLUSTER_ADMIN_LOGIN_CMD = f"login {config['vcd']['host']} " \
                              f"{TEST_ORG}" \
                              f" {CLUSTER_ADMIN_NAME} " \
                              f"-iwp {CLUSTER_ADMIN_PASSWORD} " \
                              f"-V {VCD_API_VERSION_TO_USE}"
    CLUSTER_AUTHOR_LOGIN_CMD = f"login {config['vcd']['host']} " \
                               f"{TEST_ORG}" \
                               f" {CLUSTER_AUTHOR_NAME} " \
                               f"-iwp {CLUSTER_AUTHOR_PASSWORD} " \
                               f"-V {VCD_API_VERSION_TO_USE}"

    USERNAME_TO_LOGIN_CMD = {
        SYS_ADMIN_NAME: SYS_ADMIN_LOGIN_CMD,
        CLUSTER_ADMIN_NAME: CLUSTER_ADMIN_LOGIN_CMD,
        CLUSTER_AUTHOR_NAME: CLUSTER_AUTHOR_LOGIN_CMD
    }

    # hrefs for Org and VDC that host the catalog
    org = pyvcloud_utils.get_org(CLIENT, org_name=config['broker']['org'])
    vdc = pyvcloud_utils.get_vdc(CLIENT, vdc_name=config['broker']['vdc'],
                                 org=org)
    ORG_HREF = org.href
    VDC_HREF = vdc.href
    logger.debug(f"Using template org {org.get_name()} with href {ORG_HREF}")
    logger.debug(f"Using template vdc {vdc.name} with href {VDC_HREF}")

    # hrefs for Org and VDC that test cluster operations
    test_org = pyvcloud_utils.get_org(CLIENT, org_name=TEST_ORG)
    test_vdc = pyvcloud_utils.get_vdc(CLIENT, vdc_name=TEST_VDC, org=test_org)
    TEST_ORG_HREF = test_org.href
    TEST_VDC_HREF = test_vdc.href
    logger.debug(f"Using test org {test_org.get_name()} "
                 f"with href {TEST_ORG_HREF}")
    logger.debug(f"Using test vdc {test_vdc.name} with href {TEST_VDC_HREF}")

    if SHOULD_INSTALL_PREREQUISITES:
        create_cluster_admin_role(config['vcd'], logger=logger)
        create_cluster_author_role(config['vcd'], logger=logger)

        # create and publish sizing class sc1 to TEST_VDC
        cpm = ComputePolicyManager(sysadmin_client=sysadmin_client,
                                   log_wire=True)
        created_policy = None
        try:
            created_policy = cpm.add_vdc_compute_policy(
                SIZING_CLASS_NAME,
                description=SIZING_CLASS_DESCRIPTION,
                cpu_count=2,
                memory_mb=2048)
        except HTTPError as err:
            if 'already exists' in err.response.text:
                logger.debug(
                    f"Compute policy {SIZING_CLASS_NAME} already exists")
                created_policy = cpm.get_vdc_compute_policy(SIZING_CLASS_NAME)
            else:
                logger.error(f"Request to create sizing policy "
                             f"{SIZING_CLASS_NAME} failed.")
                raise
        try:
            cpm.add_compute_policy_to_vdc(
                pyvcloud_utils.extract_id(
                    test_vdc.get_resource_admin().get('id')),
                created_policy['id'])
        except Exception as err:
            logger.error(f"Error publishing sizing policy "
                         f"{SIZING_CLASS_NAME} to vdc {TEST_VDC}: {err}")
def __init__(self, client, sys_admin_client, build_params, org=None, vdc=None,
             ssh_key=None, logger=NULL_LOGGER,
             msg_update_callback=NullPrinter(), log_wire=False):
    """Initialize the template builder with the given build parameters.

    :param pyvcloud.vcd.Client client:
    :param pyvcloud.vcd.Client sys_admin_client:
    :param dict build_params:
    :param pyvcloud.vcd.org.Org org: specific org to use. Will override the
        org_name specified in build_params, can be used to save few vCD
        calls to create the Org object.
    :param pyvcloud.vcd.vdc.VDC vdc: specific vdc to use. Will override the
        vdc_name specified in build_params, can be used to save few vCD
        calls to create the Vdc object.
    :param str ssh_key: public ssh key to place into the template vApp(s).
    :param logging.Logger logger: logger object.
    :param utils.ConsoleMessagePrinter msg_update_callback: Callback object.
    """
    self._is_valid = False

    self.client = client
    self.sys_admin_client = sys_admin_client
    self.ssh_key = ssh_key
    self.logger = logger
    self.msg_update_callback = msg_update_callback

    if self.client is None or self.sys_admin_client is None:
        return

    # validate and populate required fields
    self.template_name = build_params.get(TemplateBuildKey.TEMPLATE_NAME)
    self.template_revision = build_params.get(
        TemplateBuildKey.TEMPLATE_REVISION)
    self.ova_name = build_params.get(TemplateBuildKey.SOURCE_OVA_NAME)
    self.ova_href = build_params.get(TemplateBuildKey.SOURCE_OVA_HREF)
    self.ova_sha256 = build_params.get(TemplateBuildKey.SOURCE_OVA_SHA256)

    if org:
        self.org = org
        self.org_name = org.get_name()
    else:
        self.org_name = build_params.get(TemplateBuildKey.ORG_NAME)
        self.org = get_org(self.client, org_name=self.org_name)
    if vdc:
        self.vdc = vdc
        self.vdc.get_resource()  # to make sure vdc.resource is populated
        self.vdc_name = vdc.name
    else:
        self.vdc_name = build_params.get(TemplateBuildKey.VDC_NAME)
        self.vdc = get_vdc(self.client, vdc_name=self.vdc_name, org=self.org)

    self.catalog_name = build_params.get(TemplateBuildKey.CATALOG_NAME)
    self.catalog_item_name = build_params.get(
        TemplateBuildKey.CATALOG_ITEM_NAME)
    self.catalog_item_description = build_params.get(
        TemplateBuildKey.CATALOG_ITEM_DESCRIPTION)
    self.temp_vapp_name = build_params.get(TemplateBuildKey.TEMP_VAPP_NAME)
    self.temp_vm_name = build_params.get(TemplateBuildKey.TEMP_VM_NAME)
    self.cpu = build_params.get(TemplateBuildKey.CPU)
    self.memory = build_params.get(TemplateBuildKey.MEMORY)
    self.network_name = build_params.get(TemplateBuildKey.NETWORK_NAME)
    self.ip_allocation_mode = build_params.get(
        TemplateBuildKey.IP_ALLOCATION_MODE)
    self.storage_profile = build_params.get(TemplateBuildKey.STORAGE_PROFILE)
    self.cse_placement_policy = build_params.get(
        TemplateBuildKey.CSE_PLACEMENT_POLICY)
    self.remote_cookbook_version = build_params.get(
        TemplateBuildKey.REMOTE_COOKBOOK_VERSION)
    self.log_wire = log_wire

    if self.template_name and self.template_revision and \
            self.ova_name and self.ova_href and self.ova_sha256 and \
            self.org and self.org_name and self.vdc and self.vdc_name and \
            self.catalog_name and self.catalog_item_name and \
            self.catalog_item_description and self.temp_vapp_name and \
            self.temp_vm_name and self.cpu and self.memory and \
            self.network_name and self.ip_allocation_mode and \
            self.storage_profile:
        self._is_valid = True
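# --- Illustrative usage (not part of the original module) -------------------
# A minimal sketch of the build_params dict this constructor expects, using
# only the TemplateBuildKey entries referenced above. Every value is a made-up
# placeholder; the real values come from the CSE template cookbook and server
# config, and `client`/`sys_admin_client` are assumed to be authenticated
# pyvcloud clients created elsewhere.
def _example_build_params():
    return {
        TemplateBuildKey.TEMPLATE_NAME: 'example-template',
        TemplateBuildKey.TEMPLATE_REVISION: 1,
        TemplateBuildKey.SOURCE_OVA_NAME: 'example.ova',
        TemplateBuildKey.SOURCE_OVA_HREF: 'https://example.com/example.ova',
        TemplateBuildKey.SOURCE_OVA_SHA256: '<sha256-of-the-ova>',
        TemplateBuildKey.ORG_NAME: 'my-org',
        TemplateBuildKey.VDC_NAME: 'my-vdc',
        TemplateBuildKey.CATALOG_NAME: 'cse-catalog',
        TemplateBuildKey.CATALOG_ITEM_NAME: 'example-template_rev1',
        TemplateBuildKey.CATALOG_ITEM_DESCRIPTION: 'Example CSE template',
        TemplateBuildKey.TEMP_VAPP_NAME: 'example-temp-vapp',
        TemplateBuildKey.TEMP_VM_NAME: 'example-temp-vm',
        TemplateBuildKey.CPU: 2,
        TemplateBuildKey.MEMORY: 2048,
        TemplateBuildKey.NETWORK_NAME: 'my-org-network',
        TemplateBuildKey.IP_ALLOCATION_MODE: 'pool',
        TemplateBuildKey.STORAGE_PROFILE: '*',
    }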
def validate(
        self,
        cloudapi_client: CloudApiClient,
        sysadmin_client: Client,
        entity_id: str = None,
        entity: dict = None,
        operation: BehaviorOperation = BehaviorOperation.CREATE_CLUSTER,
        **kwargs) -> bool:
    """Validate the input request.

    This method performs
    1. Basic validation of the entity by simply casting the input entity
       dict to the model class dictated by the api_version specified in the
       request. This is usually performed for the "create" operation.
    2. Operation (create, update, delete) specific validation.
        - create: "entity" is the only required parameter.
        - update: both "entity" and "entity_id" are required parameters.
        - delete: "entity_id" is the only required parameter.
        - kubeconfig: "entity_id" is the only required parameter.

    :param cloudapi_client: cloud api client
    :param sysadmin_client:
    :param dict entity: dict form of the native entity to be validated
    :param entity_id: entity id to be validated
    :param BehaviorOperation operation: CSE operation key

    :return: is validation successful or failure
    :rtype: bool
    """
    is_tkgm_cluster = kwargs.get('is_tkgm_cluster', False)
    if not entity_id and not entity:
        raise ValueError(
            'Either entity_id or entity is required to validate.')
    entity_svc = DefEntityService(cloudapi_client=cloudapi_client)

    api_version: str = cloudapi_client.get_api_version()
    rde_version_introduced_at_api_version: str = \
        rde_utils.get_rde_version_introduced_at_api_version(api_version)

    # TODO Reject the request if payload_version does not match with
    # either rde_in_use (or) rde_version_introduced_at_api_version

    # Cast the entity to the model class based on the user-specified
    # api_version. This can be considered as a basic request validation.
    # Any operation specific validation is handled further down.
    native_entity_class: AbstractNativeEntity = \
        rde_factory.get_rde_model(rde_version_introduced_at_api_version)

    input_entity = None
    if entity:
        try:
            input_entity: AbstractNativeEntity = \
                native_entity_class.from_dict(entity)
        except Exception as err:
            msg = f"Failed to parse request body: {err}"
            raise BadRequestError(msg)

        # Need to ensure that sizing class along with cpu/memory is not
        # present in the request
        if isinstance(input_entity, rde_2_1_0.NativeEntity):
            # cpu and mem are properties of only rde 2.0.0
            bad_request_msg = ""
            if input_entity.spec.topology.workers.sizing_class and \
                    (input_entity.spec.topology.workers.cpu or
                     input_entity.spec.topology.workers.memory):
                bad_request_msg = \
                    "Cannot specify both sizing class and cpu/memory for Workers nodes."  # noqa: E501
            if input_entity.spec.topology.control_plane.sizing_class and \
                    (input_entity.spec.topology.control_plane.cpu or
                     input_entity.spec.topology.control_plane.memory):
                bad_request_msg = \
                    "Cannot specify both sizing class and cpu/memory for Control Plane nodes."  # noqa: E501
            if bad_request_msg:
                raise BadRequestError(bad_request_msg)

    # The basic validation above is sufficient for the create operation
    # (which is also the default when no operation is specified).
    if operation == BehaviorOperation.CREATE_CLUSTER:
        return True

    # TODO: validators for the rest of the CSE operations in V36 will be
    # implemented as and when v36/def_cluster_handler.py gets other handler
    # functions
    if operation == BehaviorOperation.UPDATE_CLUSTER:
        if not entity_id or not entity:
            raise ValueError('Both entity_id and entity are required to '
                             'validate the Update operation.')
        current_entity: AbstractNativeEntity = \
            entity_svc.get_entity(entity_id).entity
        input_entity_spec: rde_2_1_0.ClusterSpec = input_entity.spec
        current_entity_status: rde_2_1_0.Status = current_entity.status
        is_tkgm_with_default_sizing_in_control_plane = False
        is_tkgm_with_default_sizing_in_workers = False
        if is_tkgm_cluster:
            # NOTE: For a TKGm cluster created without a sizing class, a
            # default sizing class is assigned by VCD. If we find the
            # default sizing policy in the status section, validate
            # cpu/memory and sizing policy.
            # Also note that at this point in code, we are sure that only
            # one of sizing class or cpu/mem will be associated with
            # control plane and workers.
            vdc: VDC = get_vdc(
                sysadmin_client,
                vdc_name=current_entity_status.cloud_properties.virtual_data_center_name,  # noqa: E501
                org_name=current_entity_status.cloud_properties.org_name)
            vdc_resource = vdc.get_resource_admin()
            default_cp_name = vdc_resource.DefaultComputePolicy.get('name')
            control_plane_sizing_class = \
                current_entity_status.nodes.control_plane.sizing_class
            is_tkgm_with_default_sizing_in_control_plane = \
                (control_plane_sizing_class == default_cp_name)
            is_tkgm_with_default_sizing_in_workers = \
                (len(current_entity_status.nodes.workers) > 0 and
                 current_entity_status.nodes.workers[0].sizing_class == default_cp_name)  # noqa: E501
        current_entity_spec = \
            rde_utils.construct_cluster_spec_from_entity_status(
                current_entity_status,
                rde_constants.RDEVersion.RDE_2_1_0.value,
                is_tkgm_with_default_sizing_in_control_plane=is_tkgm_with_default_sizing_in_control_plane,  # noqa: E501
                is_tkgm_with_default_sizing_in_workers=is_tkgm_with_default_sizing_in_workers)  # noqa: E501
        return validate_cluster_update_request_and_check_cluster_upgrade(
            input_entity_spec, current_entity_spec, is_tkgm_cluster)

    # TODO check the reason why there was an unreachable raise statement
    raise NotImplementedError(f"Validator for {operation.name} not found")
def init_rde_environment(config_filepath=BASE_CONFIG_FILEPATH,
                         logger=NULL_LOGGER):
    """Set up module variables according to config dict.

    :param str config_filepath:
    :param logging.Logger logger:
    """
    global CLIENT, ORG_HREF, VDC_HREF, \
        CATALOG_NAME, TEARDOWN_INSTALLATION, TEARDOWN_CLUSTERS, \
        TEMPLATE_DEFINITIONS, TEST_ALL_TEMPLATES, SYS_ADMIN_LOGIN_CMD, \
        CLUSTER_ADMIN_LOGIN_CMD, CLUSTER_AUTHOR_LOGIN_CMD, \
        USERNAME_TO_LOGIN_CMD, USERNAME_TO_CLUSTER_NAME, TEST_ORG_HREF, \
        TEST_VDC_HREF, VCD_API_VERSION_TO_USE

    logger.debug("Setting up RDE environment")
    config = testutils.yaml_to_dict(config_filepath)
    logger.debug(f"Config file used: {config}")

    # download all remote template scripts
    rtm = RemoteTemplateManager(
        config['broker']['remote_template_cookbook_url'],
        legacy_mode=config['service']['legacy_mode'])
    template_cookbook = rtm.get_filtered_remote_template_cookbook()
    TEMPLATE_DEFINITIONS = template_cookbook['templates']
    rtm.download_all_template_scripts(force_overwrite=True)

    sysadmin_client = Client(config['vcd']['host'],
                             verify_ssl_certs=config['vcd']['verify'])
    sysadmin_client.set_credentials(
        BasicLoginCredentials(config['vcd']['username'],
                              shared_constants.SYSTEM_ORG_NAME,
                              config['vcd']['password']))

    vcd_supported_api_versions = \
        set(sysadmin_client.get_supported_versions_list())
    cse_supported_api_versions = \
        set(shared_constants.SUPPORTED_VCD_API_VERSIONS)
    common_supported_api_versions = \
        list(cse_supported_api_versions.intersection(vcd_supported_api_versions))  # noqa: E501
    common_supported_api_versions.sort()
    max_api_version = get_max_api_version(common_supported_api_versions)

    CLIENT = Client(config['vcd']['host'],
                    api_version=max_api_version,
                    verify_ssl_certs=config['vcd']['verify'])
    credentials = BasicLoginCredentials(config['vcd']['username'],
                                        shared_constants.SYSTEM_ORG_NAME,
                                        config['vcd']['password'])
    CLIENT.set_credentials(credentials)
    VCD_API_VERSION_TO_USE = max_api_version
    logger.debug(f"Using VCD api version: {VCD_API_VERSION_TO_USE}")

    CATALOG_NAME = config['broker']['catalog']

    SYS_ADMIN_LOGIN_CMD = f"login {config['vcd']['host']} system " \
                          f"{config['vcd']['username']} " \
                          f"-iwp {config['vcd']['password']} " \
                          f"-V {VCD_API_VERSION_TO_USE}"
    CLUSTER_ADMIN_LOGIN_CMD = f"login {config['vcd']['host']} " \
                              f"{TEST_ORG}" \
                              f" {CLUSTER_ADMIN_NAME} " \
                              f"-iwp {CLUSTER_ADMIN_PASSWORD} " \
                              f"-V {VCD_API_VERSION_TO_USE}"
    CLUSTER_AUTHOR_LOGIN_CMD = f"login {config['vcd']['host']} " \
                               f"{TEST_ORG}" \
                               f" {CLUSTER_AUTHOR_NAME} " \
                               f"-iwp {CLUSTER_AUTHOR_PASSWORD} " \
                               f"-V {VCD_API_VERSION_TO_USE}"

    USERNAME_TO_LOGIN_CMD = {
        SYS_ADMIN_NAME: SYS_ADMIN_LOGIN_CMD,
        CLUSTER_ADMIN_NAME: CLUSTER_ADMIN_LOGIN_CMD,
        CLUSTER_AUTHOR_NAME: CLUSTER_AUTHOR_LOGIN_CMD
    }

    # hrefs for Org and VDC that host the catalog
    org = pyvcloud_utils.get_org(CLIENT, org_name=config['broker']['org'])
    vdc = pyvcloud_utils.get_vdc(CLIENT, vdc_name=config['broker']['vdc'],
                                 org=org)
    ORG_HREF = org.href
    VDC_HREF = vdc.href
    logger.debug(f"Using template org {org.get_name()} with href {ORG_HREF}")
    logger.debug(f"Using template vdc {vdc.name} with href {VDC_HREF}")

    # hrefs for Org and VDC that test cluster operations
    test_org = pyvcloud_utils.get_org(CLIENT, org_name=TEST_ORG)
    test_vdc = pyvcloud_utils.get_vdc(CLIENT, vdc_name=TEST_VDC, org=test_org)
    TEST_ORG_HREF = test_org.href
    TEST_VDC_HREF = test_vdc.href
    logger.debug(f"Using test org {test_org.get_name()} "
                 f"with href {TEST_ORG_HREF}")
    logger.debug(f"Using test vdc {test_vdc.name} with href {TEST_VDC_HREF}")

    create_cluster_admin_role(config['vcd'], logger=logger)
    create_cluster_author_role(config['vcd'], logger=logger)