def get_ovdc(operation_context: ctx.OperationContext, ovdc_id: str) -> dict:
    """Get ovdc info for a particular ovdc.

    :param ctx.OperationContext operation_context: context for the request
    :param str ovdc_id: ID of the ovdc

    :return: dictionary containing the ovdc information
    :rtype: dict
    """
    # Telemetry: record which ovdc was inspected and the caller's user agent.
    telemetry_handler.record_user_action_details(
        cse_operation=CseOperation.OVDC_INFO,
        cse_params={
            RequestKey.OVDC_ID: ovdc_id,
            PayloadKey.SOURCE_DESCRIPTION:
                thread_local_data.get_thread_local_data(
                    ThreadLocalData.USER_AGENT)
        })

    server_config = server_utils.get_server_runtime_config()
    wire_logging = utils.str_to_bool(
        server_config.get('service', {}).get('log_wire'))
    ovdc_info = asdict(
        get_ovdc_k8s_runtime_details(
            operation_context.sysadmin_client,
            ovdc_id=ovdc_id,
            log_wire=wire_logging))

    # NOTE: For CSE 3.0, if `enable_tkg_plus` flag in config is set to false,
    # prevent showing information about TKG+ by dropping it from the result.
    tkg_plus = ClusterEntityKind.TKG_PLUS.value
    if tkg_plus in ovdc_info['k8s_runtime'] \
            and not server_utils.is_tkg_plus_enabled():
        ovdc_info['k8s_runtime'].remove(tkg_plus)
    # TODO: Find a better way to avoid sending remove_cp_from_vms_on_disable
    del ovdc_info['remove_cp_from_vms_on_disable']
    return ovdc_info
def connect_vcd_user_via_token(
        tenant_auth_token: str,
        is_jwt_token: bool,
        api_version: Optional[str]):
    """Create a tenant VCD client rehydrated from an auth/JWT token.

    :param str tenant_auth_token: token to rehydrate the client from
    :param bool is_jwt_token: True if the token is a JWT
    :param str api_version: API version to use; falls back to the server's
        configured default when not provided

    :return: logged-in tenant client
    """
    server_config = get_server_runtime_config()
    effective_api_version = \
        api_version or server_config['service']['default_api_version']
    verify_ssl = server_config['vcd']['verify']
    if not verify_ssl:
        # Suppress urllib3 warnings since verification is deliberately off.
        requests.packages.urllib3.disable_warnings()

    wire_logging = str_to_bool(server_config['service'].get('log_wire'))
    wire_log_file = SERVER_DEBUG_WIRELOG_FILEPATH if wire_logging else None

    tenant_client = vcd_client.Client(
        uri=server_config['vcd']['host'],
        api_version=effective_api_version,
        verify_ssl_certs=verify_ssl,
        log_file=wire_log_file,
        log_requests=wire_logging,
        log_headers=wire_logging,
        log_bodies=wire_logging)
    tenant_client.rehydrate_from_token(tenant_auth_token, is_jwt_token)
    return tenant_client
def _get_cse_ovdc_list(sysadmin_client: vcd_client.Client, ovdc_list: list):
    """Collect k8s runtime details for each ovdc in the given list.

    :param vcd_client.Client sysadmin_client: sysadmin client used to query
        compute policies and runtime details
    :param list ovdc_list: list of ovdc records (dicts with 'name' and 'id')

    :return: list of dicts, one per ovdc, with its k8s runtime details
    :rtype: list
    """
    config = server_utils.get_server_runtime_config()
    wire_logging = utils.str_to_bool(
        config.get('service', {}).get('log_wire'))
    cpm = compute_policy_manager.ComputePolicyManager(
        sysadmin_client, log_wire=wire_logging)

    tkg_plus = ClusterEntityKind.TKG_PLUS.value
    ovdcs = []
    for ovdc in ovdc_list:
        details = asdict(
            get_ovdc_k8s_runtime_details(
                sysadmin_client,
                ovdc_id=vcd_utils.extract_id(ovdc.get('id')),
                ovdc_name=ovdc.get('name'),
                cpm=cpm,
                log_wire=wire_logging))
        # NOTE: For CSE 3.0, if `enable_tkg_plus` flag in config is set to
        # false, prevent showing information about TKG+ by dropping it.
        if tkg_plus in details['k8s_runtime'] \
                and not server_utils.is_tkg_plus_enabled():
            details['k8s_runtime'].remove(tkg_plus)
        # TODO: Find a better way to remove remove_cp_from_vms_on_disable
        del details['remove_cp_from_vms_on_disable']
        ovdcs.append(details)
    return ovdcs
def get_sys_admin_client(api_version: Optional[str]):
    """Build and log in a sysadmin VCD client from server config.

    :param str api_version: API version to use; falls back to the server's
        configured default when not provided

    :return: logged-in sysadmin client
    """
    server_config = get_server_runtime_config()
    effective_api_version = \
        api_version or server_config.get_value_at('service.default_api_version')
    verify_ssl = server_config.get_value_at('vcd.verify')
    if not verify_ssl:
        # Suppress urllib3 warnings since verification is deliberately off.
        requests.packages.urllib3.disable_warnings()

    wire_logging = str_to_bool(server_config.get_value_at('service.log_wire'))
    wire_log_file = SERVER_DEBUG_WIRELOG_FILEPATH if wire_logging else None

    client = vcd_client.Client(
        uri=server_config.get_value_at('vcd.host'),
        api_version=effective_api_version,
        verify_ssl_certs=verify_ssl,
        log_file=wire_log_file,
        log_requests=wire_logging,
        log_headers=wire_logging,
        log_bodies=wire_logging)
    client.set_credentials(vcd_client.BasicLoginCredentials(
        server_config.get_value_at('vcd.username'),
        shared_constants.SYSTEM_ORG_NAME,
        server_config.get_value_at('vcd.password')))
    return client
def decorator_wrapper(*args, **kwargs):
    """Enforce CSE rights on the decorated broker method.

    Right checks only run when `service.enforce_authorization` is enabled
    in the server config and the closure's `required_rights` is non-empty;
    otherwise the wrapped callable is invoked directly.

    :raises Exception: if the logged-in user lacks any required right.
    """
    server_config = server_utils.get_server_runtime_config()
    if (server_config.get_value_at('service.enforce_authorization')
            and required_rights is not None
            and len(required_rights) > 0):
        # The decorated callable is expected to be a broker method, so the
        # first positional arg is the broker instance carrying the context.
        class_instance: abstract_broker.AbstractBroker = args[0]
        user_rights = class_instance.context.user.rights
        missing_rights = []
        for right_name in required_rights:
            # Rights are namespaced as "{<CSE namespace>}:<right name>".
            right_name_with_namespace = \
                f"{{{CSE_SERVICE_NAMESPACE}}}:{right_name}"
            if right_name_with_namespace not in user_rights:
                missing_rights.append(right_name_with_namespace)
        if len(missing_rights) > 0:
            LOGGER.debug(f"Authorization failed for user "
                         f"'{class_instance.context.user.name}'. "
                         f"Missing required rights: "
                         f"{missing_rights}")
            raise Exception(f'Access forbidden. Missing required '
                            f'rights: {missing_rights}')
    return func(*args, **kwargs)
def record_user_action(cse_operation, status=OperationStatus.SUCCESS,
                       message=None, telemetry_settings=None):
    """Record CSE user action information in telemetry server.

    No exceptions should be leaked. Catch all exceptions and log them.

    :param CseOperation cse_operation:
    :param OperationStatus status: SUCCESS/FAILURE of the user action
    :param str message: any information about failure or custom message
    :param dict telemetry_settings: telemetry section CSE config->service
    """
    try:
        if not telemetry_settings:
            server_config = get_server_runtime_config()
            telemetry_settings = \
                server_config.get_value_at('service.telemetry') \
                if server_config else None
        if not telemetry_settings:
            LOGGER.debug('No telemetry settings found.')
        elif telemetry_settings.get('enable'):
            payload = get_payload_for_user_action(
                cse_operation, status, message)
            _send_data_to_telemetry_server(payload, telemetry_settings)
    except Exception as err:
        # Telemetry must never break the calling operation.
        LOGGER.warning(
            f"Error in recording user action information:{str(err)}",
            exc_info=True)
def ovdc_compute_policy_list(request_data, op_ctx: ctx.OperationContext):
    """Request handler for ovdc compute-policy list operation.

    Required data: ovdc_id

    :return: list of dicts, one per CSE sizing policy on the ovdc, each with
        'name', 'id' and 'href'.
    """
    req_utils.validate_payload(request_data, [RequestKey.OVDC_ID])

    config = server_utils.get_server_runtime_config()
    cpm = compute_policy_manager.ComputePolicyManager(
        op_ctx.sysadmin_client,
        log_wire=utils.str_to_bool(config['service'].get('log_wire')))

    sizing_policies = compute_policy_manager.list_cse_sizing_policies_on_vdc(
        cpm, request_data[RequestKey.OVDC_ID])
    return [
        {
            'name': cp['display_name'],
            'id': cp['id'],
            'href': cp['href']
        }
        for cp in sizing_policies
    ]
def template_list(request_data, op_ctx):
    """Request handler for template list operation.

    :return: List of dictionaries with template info, sorted by
        (name, revision) descending.
    """
    config = server_utils.get_server_runtime_config()
    catalog = config['broker']['catalog']
    # The default template is identified by its (name, revision) pair.
    default_key = (config['broker']['default_template_name'],
                   str(config['broker']['default_template_revision']))

    templates = []
    for tmpl in config['broker']['templates']:
        name = tmpl[LocalTemplateKey.NAME]
        revision = str(tmpl[LocalTemplateKey.REVISION])
        templates.append({
            'catalog': catalog,
            'catalog_item': tmpl[LocalTemplateKey.CATALOG_ITEM_NAME],
            'cni': tmpl[LocalTemplateKey.CNI],
            'cni_version': tmpl[LocalTemplateKey.CNI_VERSION],
            'deprecated': tmpl[LocalTemplateKey.DEPRECATED],
            'description':
                tmpl[LocalTemplateKey.DESCRIPTION].replace("\\n", ", "),
            'docker_version': tmpl[LocalTemplateKey.DOCKER_VERSION],
            'is_default': 'Yes' if (name, revision) == default_key else 'No',
            'kind': tmpl[LocalTemplateKey.KIND],
            'kubernetes': tmpl[LocalTemplateKey.KUBERNETES],
            'kubernetes_version': tmpl[LocalTemplateKey.KUBERNETES_VERSION],
            'name': name,
            'os': tmpl[LocalTemplateKey.OS],
            'revision': revision
        })
    templates.sort(key=lambda item: (item['name'], item['revision']),
                   reverse=True)
    return templates
def __init__(self, pks_ctx, op_ctx: ctx.OperationContext):
    """Initialize PKS broker.

    :param dict pks_ctx: A dictionary with which should at least have the
        following keys in it ['username', 'secret', 'host', 'port',
        'uaac_port'], 'proxy' and 'pks_compute_profile_name' are optional
        keys. Currently all callers of this method is using ovdc cache
        (subject to change) to initialize PKS broker.
    :param ctx.OperationContext op_ctx: operation context passed to the
        parent broker.

    :raises ValueError: if pks_ctx is empty/None.
    """
    self.context: ctx.OperationContext = None
    # populates above attributes
    super().__init__(op_ctx)

    if not pks_ctx:
        raise ValueError(
            "PKS context is required to establish connection to PKS")

    # Credentials and endpoints for the PKS API and its UAA auth service.
    self.username = pks_ctx['username']
    self.secret = pks_ctx['secret']
    self.pks_host_uri = f"https://{pks_ctx['host']}:{pks_ctx['port']}"
    self.uaac_uri = f"https://{pks_ctx['host']}:{pks_ctx['uaac_port']}"
    # Optional HTTP proxy; port 80 is assumed for the proxy endpoint.
    self.proxy_uri = None
    if pks_ctx.get('proxy'):
        self.proxy_uri = f"http://{pks_ctx['proxy']}:80"
    self.compute_profile = pks_ctx.get(PKS_COMPUTE_PROFILE_KEY, None)
    # NSX-T info is looked up from the PKS cache by vCenter name
    # ('vc' key); may be None if no NSX-T server is registered.
    self.nsxt_server = \
        server_utils.get_pks_cache().get_nsxt_info(pks_ctx.get('vc'))
    self.nsxt_client = None
    self.pks_wire_logger = NULL_LOGGER
    nsxt_wire_logger = NULL_LOGGER
    config = server_utils.get_server_runtime_config()
    # Wire loggers stay as NULL_LOGGER unless service.log_wire is enabled.
    if utils.str_to_bool(config.get_value_at('service.log_wire')):
        nsxt_wire_logger = SERVER_NSXT_WIRE_LOGGER
        self.pks_wire_logger = SERVER_PKS_WIRE_LOGGER
    if self.nsxt_server:
        self.nsxt_client = NSXTClient(
            host=self.nsxt_server.get('host'),
            username=self.nsxt_server.get('username'),
            password=self.nsxt_server.get('password'),
            logger_debug=SERVER_LOGGER,
            logger_wire=nsxt_wire_logger,
            http_proxy=self.nsxt_server.get('proxy'),
            https_proxy=self.nsxt_server.get('proxy'),
            verify_ssl=self.nsxt_server.get('verify'))
    # TODO() Add support in pyvcloud to send metadata values with their
    # types intact.
    # 'verify' may arrive as a bool or as a string (metadata round-trip
    # loses the type); default to True when absent or of another type.
    verify_ssl = pks_ctx.get('verify')
    self.verify = True
    if isinstance(verify_ssl, bool):
        self.verify = verify_ssl
    elif isinstance(verify_ssl, str):
        self.verify = utils.str_to_bool(verify_ssl)

    self.pks_client = self._get_pks_client(self._get_token())
def cloudapi_client(self):
    """Return the cloudapi client for this context, building it lazily.

    The client is created once from `self.client` and cached; wire logging
    follows the `service.log_wire` setting in the server config.
    """
    if self._cloudapi_client is not None:
        return self._cloudapi_client

    wire_logging = server_utils.get_server_runtime_config().get(
        'service', {}).get('log_wire', False)
    wire_logger = logger.SERVER_CLOUDAPI_WIRE_LOGGER if wire_logging \
        else logger.NULL_LOGGER
    self._cloudapi_client = vcd_utils.get_cloudapi_client_from_vcd_client(
        self.client, logger.SERVER_LOGGER, wire_logger)
    return self._cloudapi_client
def tkgm_template_list(request_data, op_ctx):
    """Request handler for TKGm template list operation.

    :return: List of dictionaries with template info, sorted by
        (name, revision) descending.
    """
    config = server_utils.get_server_runtime_config()
    catalog = config.get_value_at('broker.catalog')

    templates = []
    for tmpl in config.get_value_at('broker.tkgm_templates'):
        os_label = \
            f"{tmpl[TKGmTemplateKey.OS]}-{tmpl[TKGmTemplateKey.OS_VERSION]}"
        templates.append({
            'catalog': catalog,
            'catalog_item': tmpl[TKGmTemplateKey.NAME],
            'cni': tmpl[TKGmTemplateKey.CNI],
            'cni_version': tmpl[TKGmTemplateKey.CNI_VERSION],
            'cse_version': tmpl[TKGmTemplateKey.CSE_VERSION],
            # TKGm templates carry no deprecation/description/docker info.
            'deprecated': None,
            'description': None,
            'docker_version': None,
            'container_runtime': tmpl[TKGmTemplateKey.CONTAINER_RUNTIME],
            'container_runtime_version':
                tmpl[TKGmTemplateKey.CONTAINER_RUNTIME_VERSION],
            'is_default': False,
            'kind': tmpl[TKGmTemplateKey.KIND],
            'kubernetes': tmpl[TKGmTemplateKey.KUBERNETES],
            'kubernetes_version': tmpl[TKGmTemplateKey.KUBERNETES_VERSION],
            'name': tmpl[TKGmTemplateKey.NAME],
            'os': os_label,
            'revision': int(tmpl[TKGmTemplateKey.REVISION])
        })
    templates.sort(key=lambda item: (item['name'], item['revision']),
                   reverse=True)
    return templates
def info(self, get_sysadmin_info=False):
    """Return CSE server info, optionally with sysadmin-only details.

    :param bool get_sysadmin_info: when True, include thread counts,
        in-flight request count, config file path and server status;
        when False, the 'python' entry is removed from the result.

    :return: dict of server information
    """
    server_config = server_utils.get_server_runtime_config()
    service_section = server_config['service']

    result = utils.get_cse_info()
    result[shared_constants.CSE_SERVER_API_VERSION] = \
        service_section['default_api_version']
    result[shared_constants.CSE_SERVER_SUPPORTED_API_VERSIONS] = \
        service_section['supported_api_versions']
    result[shared_constants.CSE_SERVER_LEGACY_MODE] = \
        service_section['legacy_mode']

    if not get_sysadmin_info:
        del result['python']
        return result

    result['all_consumer_threads'] = \
        0 if self.consumer is None else self.consumer.get_num_total_threads()
    result['all_threads'] = threading.activeCount()
    result['requests_in_progress'] = self.active_requests_count()
    result['config_file'] = self.config_file
    result['status'] = self.get_status()
    return result
def _get_gateway(
    client: vcd_client.Client,
    org_name: str,
    network_name: str,
):
    """Find the edge gateway backing the named routed org VDC network.

    Pages through all accessible org VDC networks matching the name and
    keeps the one belonging to `org_name` with type NAT_ROUTED.

    :param vcd_client.Client client: client used for the cloudapi queries
    :param str org_name: organization the network must belong to
    :param str network_name: name of the routed network

    :return: Gateway object for the network's router
    :raises MultipleRecordsException: if more than one matching routed
        network is found (network names must be unique).
    :raises EntityNotFoundException: if no matching routed network exists.
    """
    config = server_utils.get_server_runtime_config()
    logger_wire = NULL_LOGGER
    if utils.str_to_bool(config.get_value_at('service.log_wire')):
        logger_wire = SERVER_CLOUDAPI_WIRE_LOGGER
    cloudapi_client = vcd_utils.get_cloudapi_client_from_vcd_client(
        client=client, logger_debug=LOGGER, logger_wire=logger_wire)

    gateway_name, gateway_href, gateway_exists = None, None, False
    # pageSize=1: each response holds a single network record; pageCount
    # from the response drives the loop bound.
    page, page_count = 1, 1
    base_path = f'{cloudapi_constants.CloudApiResource.ORG_VDC_NETWORKS}?filter=name=={network_name};_context==includeAccessible&pageSize=1&page='  # noqa: E501
    while page <= page_count:
        response, headers = cloudapi_client.do_request(
            method=RequestMethod.GET,
            cloudapi_version=cloudapi_constants.CloudApiVersion.VERSION_1_0_0,
            resource_url_relative_path=base_path + f'{page}',
            return_response_headers=True)
        for entry in response['values']:
            # only routed networks allowed
            is_target_network = entry['orgRef']['name'] == org_name and \
                entry['networkType'] == 'NAT_ROUTED'
            if is_target_network:
                if gateway_exists:
                    raise MultipleRecordsException(f"Multiple routed networks named {network_name} found. CSE Server expects unique network names.")  # noqa: E501
                gateway_exists = True
                gateway_name = entry['connection']['routerRef']['name']
                # Router id is urn-style; take the trailing uuid segment.
                gateway_id = entry['connection']['routerRef']['id'].split(
                    ':').pop()
                # Build the legacy API href from the cloudapi base URL in
                # the Content-Location response header.
                gateway_href = headers['Content-Location'].split('cloudapi')[
                    0] + f'api/admin/edgeGateway/{gateway_id}'
        page += 1
        page_count = response['pageCount']

    if not gateway_exists:
        raise EntityNotFoundException(
            f"No routed networks named {network_name} found.")
    gateway = vcd_gateway.Gateway(client, name=gateway_name,
                                  href=gateway_href)
    return gateway
def _update_user_context_map(self, api_version: Optional[str]):
    """Create and cache a UserContext for the given API version.

    Connects a tenant client from the stored auth token, builds the
    matching cloudapi client, and stores both in the per-version map.
    """
    client = vcd_utils.connect_vcd_user_via_token(
        tenant_auth_token=self._auth_token,
        is_jwt_token=self._is_jwt,
        api_version=api_version)

    wire_logging = server_utils.get_server_runtime_config() \
        .get('service', {}).get('log_wire', False)
    wire_logger = logger.SERVER_CLOUDAPI_WIRE_LOGGER if wire_logging \
        else logger.NULL_LOGGER
    cloudapi_client = vcd_utils.get_cloudapi_client_from_vcd_client(
        client, logger.SERVER_LOGGER, wire_logger)

    self._user_context_map[api_version] = user_context.UserContext(
        client=client, cloud_api_client=cloudapi_client)
def connect_vcd_user_via_token(tenant_auth_token, is_jwt_token):
    """Create a tenant VCD client rehydrated from an auth/JWT token.

    Host, API version and SSL verification come from the server config.

    :param tenant_auth_token: token to rehydrate the client from
    :param bool is_jwt_token: True if the token is a JWT

    :return: logged-in tenant client
    """
    server_config = get_server_runtime_config()
    wire_logging = str_to_bool(server_config['service'].get('log_wire'))
    wire_log_file = SERVER_DEBUG_WIRELOG_FILEPATH if wire_logging else None

    tenant_client = vcd_client.Client(
        uri=server_config['vcd']['host'],
        api_version=server_config['vcd']['api_version'],
        verify_ssl_certs=server_config['vcd']['verify'],
        log_file=wire_log_file,
        log_requests=wire_logging,
        log_headers=wire_logging,
        log_bodies=wire_logging)
    tenant_client.rehydrate_from_token(tenant_auth_token, is_jwt_token)
    return tenant_client
def _get_nsxt_backed_gateway_service(client: vcd_client.Client,
                                     org_name: str,
                                     network_name: str):
    """Build an NsxtBackedGatewayService for the network's edge gateway.

    :raises Exception: if no gateway is found for the network, or if the
        gateway is not NSX-T backed.
    """
    gateway: vcd_gateway.Gateway = _get_gateway(
        client=client, org_name=org_name, network_name=network_name)
    if not gateway:
        raise Exception(f'No gateway found for network: {network_name}')
    if not gateway.is_nsxt_backed():
        raise Exception('Gateway is not NSX-T backed for exposing cluster.')

    config = server_utils.get_server_runtime_config()
    wire_logger = NULL_LOGGER
    if utils.str_to_bool(config.get_value_at('service.log_wire')):
        wire_logger = SERVER_CLOUDAPI_WIRE_LOGGER
    return NsxtBackedGatewayService(
        gateway=gateway,
        client=client,
        logger_debug=LOGGER,
        logger_wire=wire_logger)
def record_user_action_details(cse_operation, cse_params,
                               telemetry_settings=None):
    """Record CSE user operation details in telemetry server.

    No exception should be leaked. Catch all exceptions and log them.

    :param CseOperation cse_operation: CSE operation information
    :param dict cse_params: CSE operation parameters
    :param dict telemetry_settings: telemetry section of config->service
    """
    try:
        settings = telemetry_settings or \
            get_server_runtime_config()['service']['telemetry']
        if settings['enable']:
            payload = OPERATION_TO_PAYLOAD_GENERATOR[cse_operation](
                cse_params)
            _send_data_to_telemetry_server(payload, settings)
    except Exception as err:
        # Telemetry must never break the calling operation.
        LOGGER.warning(
            f"Error in recording CSE operation details :{str(err)}")
def _update_sysadmin_user_context_map(self, api_version: Optional[str]):
    """Create and cache a sysadmin UserContext for the given API version.

    Builds a sysadmin client plus its cloudapi client and stores both in
    the per-version sysadmin map.
    """
    sysadmin_client = vcd_utils.get_sys_admin_client(
        api_version=api_version)

    wire_logging = server_utils.get_server_runtime_config() \
        .get('service', {}).get('log_wire', False)
    wire_logger = logger.SERVER_CLOUDAPI_WIRE_LOGGER if wire_logging \
        else logger.NULL_LOGGER
    sysadmin_cloudapi_client = \
        vcd_utils.get_cloudapi_client_from_vcd_client(
            sysadmin_client, logger.SERVER_LOGGER, wire_logger)

    self._sysadmin_user_context_map[api_version] = \
        user_context.UserContext(
            client=sysadmin_client,
            cloud_api_client=sysadmin_cloudapi_client)
def get_sys_admin_client():
    """Build and log in a sysadmin VCD client from server config.

    Uses the configured host, API version and credentials; warns and
    disables urllib3 warnings when SSL verification is off.

    :return: logged-in sysadmin client
    """
    server_config = get_server_runtime_config()
    verify_ssl = server_config['vcd']['verify']
    if not verify_ssl:
        SERVER_LOGGER.warning("InsecureRequestWarning: Unverified HTTPS "
                              "request is being made. Adding certificate "
                              "verification is strongly advised.")
        requests.packages.urllib3.disable_warnings()

    wire_logging = str_to_bool(server_config['service'].get('log_wire'))
    wire_log_file = SERVER_DEBUG_WIRELOG_FILEPATH if wire_logging else None

    client = vcd_client.Client(
        uri=server_config['vcd']['host'],
        api_version=server_config['vcd']['api_version'],
        verify_ssl_certs=verify_ssl,
        log_file=wire_log_file,
        log_requests=wire_logging,
        log_headers=wire_logging,
        log_bodies=wire_logging)
    client.set_credentials(vcd_client.BasicLoginCredentials(
        server_config['vcd']['username'],
        SYSTEM_ORG_NAME,
        server_config['vcd']['password']))
    return client
def get_server_config(request_data, op_ctx: ctx.OperationContext):
    """Return a redacted copy of the CSE server config (sysadmin only).

    :raises e.UnauthorizedRequestError: if the caller is not a sysadmin.
    """
    if not op_ctx.client.is_sysadmin:
        raise e.UnauthorizedRequestError(
            error_message='Unauthorized to access CSE server configuration.')

    # TODO: Find a better way to access the config dict inside the
    # ServerConfig object.
    redacted = deepcopy(server_utils.get_server_runtime_config()._config)
    # Never expose secrets: mask tokens and passwords in the copy.
    redacted['mqtt']['token'] = "REDACTED"
    redacted['mqtt']['token_id'] = "REDACTED"
    for vc in redacted.get('vcs', []):
        vc['password'] = "******"
    redacted['vcd']['password'] = "******"
    # Replace the RDE version object with its printable form.
    rde_version = redacted['service']['rde_version_in_use']
    redacted['service']['rde_version_in_use'] = \
        f"{rde_version.major}.{rde_version.minor}.{rde_version.patch}"
    return redacted
def ovdc_compute_policy_update(request_data, op_ctx: ctx.OperationContext):
    """Request handler for ovdc compute-policy update operation.

    Required data: ovdc_id, compute_policy_action, compute_policy_name

    :return: for ADD, a confirmation message string; for REMOVE, the
        response dict from the async removal (contains 'task_href').
    :raises e.BadRequestError: if the policy is not found or the action is
        not supported.
    """
    required = [
        RequestKey.OVDC_ID,
        RequestKey.COMPUTE_POLICY_ACTION,
        RequestKey.COMPUTE_POLICY_NAME
    ]
    defaults = {
        RequestKey.REMOVE_COMPUTE_POLICY_FROM_VMS: False,
    }
    # Defaults first so explicit request values win.
    validated_data = {**defaults, **request_data}
    req_utils.validate_payload(validated_data, required)

    action = validated_data[RequestKey.COMPUTE_POLICY_ACTION]
    cp_name = validated_data[RequestKey.COMPUTE_POLICY_NAME]
    ovdc_id = validated_data[RequestKey.OVDC_ID]
    remove_compute_policy_from_vms = validated_data[
        RequestKey.REMOVE_COMPUTE_POLICY_FROM_VMS]
    try:
        config = server_utils.get_server_runtime_config()
        cpm = compute_policy_manager.ComputePolicyManager(
            op_ctx.sysadmin_client,
            log_wire=utils.str_to_bool(
                config['service'].get('log_wire')))
        cp_href = None
        cp_id = None
        if cp_name == SYSTEM_DEFAULT_COMPUTE_POLICY_NAME:
            # The system default policy is not a CSE policy; search the
            # vdc's own policy list for it.
            for _cp in cpm.list_compute_policies_on_vdc(ovdc_id):
                if _cp['name'] == cp_name:
                    cp_href = _cp['href']
                    cp_id = _cp['id']
        else:
            try:
                _cp = compute_policy_manager.get_cse_vdc_compute_policy(
                    cpm, cp_name)
                cp_href = _cp['href']
                cp_id = _cp['id']
            except vcd_e.EntityNotFoundException:
                # Leave cp_href as None; handled uniformly below.
                pass

        if cp_href is None:
            raise e.BadRequestError(f"Compute policy '{cp_name}' not found.")

        if action == ComputePolicyAction.ADD:
            cpm.add_compute_policy_to_vdc(ovdc_id, cp_href)
            # Record telemetry data
            record_user_action(CseOperation.OVDC_COMPUTE_POLICY_ADD)
            return f"Added compute policy '{cp_name}' ({cp_id}) to ovdc " \
                   f"({ovdc_id})"

        if action == ComputePolicyAction.REMOVE:
            # TODO: fix remove_compute_policy by implementing a proper way
            # for calling async methods without having to pass op_ctx
            # outside handlers.
            op_ctx.is_async = True
            response = cpm.remove_vdc_compute_policy_from_vdc(
                ovdc_id,
                cp_href,
                force=remove_compute_policy_from_vms)
            # Follow task_href to completion in a different thread and end
            # operation context
            _follow_task(op_ctx, response['task_href'], ovdc_id)
            # Record telemetry data
            record_user_action(CseOperation.OVDC_COMPUTE_POLICY_REMOVE)
            return response

        raise e.BadRequestError("Unsupported compute policy action")
    except Exception as err:
        # Record telemetry data failure
        if action == ComputePolicyAction.ADD:
            record_user_action(CseOperation.OVDC_COMPUTE_POLICY_ADD,
                               status=OperationStatus.FAILED)
        elif action == ComputePolicyAction.REMOVE:
            record_user_action(CseOperation.OVDC_COMPUTE_POLICY_REMOVE,
                               status=OperationStatus.FAILED)
        raise err
def _update_ovdc_using_placement_policy_async(
        operation_context: ctx.OperationContext,
        task: vcd_task.Task,
        task_href,
        user_href,
        policy_list,
        ovdc_id,
        vdc,
        org_name,
        remove_cp_from_vms_on_disable=False):
    """Enable ovdc using placement policies.

    :param ctx.OperationContext operation_context: operation context object
    :param vcd_task.Task task: Task resource to track progress
    :param str task_href: href of the task
    :param str user_href:
    :param List[str] policy_list: The new list of policies associated with
        the ovdc
    :param str ovdc_id:
    :param pyvcloud.vcd.vdc.VDC vdc: VDC object
    :param str org_name: name of the organization that vdc provides resource
    :param bool remove_cp_from_vms_on_disable: Set to true if placement
        policies need to be removed from the vms before removing from the
        VDC.
    """
    operation_name = "Update OVDC with placement policies"
    k8s_runtimes_added = ''
    k8s_runtimes_deleted = ''
    try:
        config = server_utils.get_server_runtime_config()
        log_wire = utils.str_to_bool(config.get('service', {}).get('log_wire'))
        cpm = compute_policy_manager.ComputePolicyManager(
            operation_context.sysadmin_client, log_wire=log_wire)
        # Diff the requested policy list against what is already on the vdc.
        existing_policies = []
        for cse_policy in \
                compute_policy_manager.list_cse_placement_policies_on_vdc(cpm, ovdc_id):  # noqa: E501
            existing_policies.append(cse_policy['display_name'])

        logger.SERVER_LOGGER.debug(policy_list)
        logger.SERVER_LOGGER.debug(existing_policies)
        policies_to_add = set(policy_list) - set(existing_policies)
        policies_to_delete = set(existing_policies) - set(policy_list)

        # Telemetry for 'vcd cse ovdc enable' command
        # TODO: Update telemetry request to handle 'k8s_runtime' array
        k8s_runtimes_added = ','.join(policies_to_add)
        if k8s_runtimes_added:
            cse_params = {
                RequestKey.K8S_PROVIDER: k8s_runtimes_added,
                RequestKey.OVDC_ID: ovdc_id,
                RequestKey.ORG_NAME: org_name,
                PayloadKey.SOURCE_DESCRIPTION:
                    thread_local_data.get_thread_local_data(
                        ThreadLocalData.USER_AGENT)
            }
            telemetry_handler.record_user_action_details(
                cse_operation=CseOperation.OVDC_ENABLE,
                cse_params=cse_params)

        # Telemetry for 'vcd cse ovdc disable' command
        # TODO: Update telemetry request to handle 'k8s_runtime' array
        # Fixed: join with ',' (was '.') so the delimiter matches the
        # enable-path telemetry above.
        k8s_runtimes_deleted = ','.join(policies_to_delete)
        if k8s_runtimes_deleted:
            cse_params = {
                RequestKey.K8S_PROVIDER: k8s_runtimes_deleted,
                RequestKey.OVDC_ID: ovdc_id,
                RequestKey.ORG_NAME: org_name,
                RequestKey.REMOVE_COMPUTE_POLICY_FROM_VMS: remove_cp_from_vms_on_disable,  # noqa: E501
                PayloadKey.SOURCE_DESCRIPTION:
                    thread_local_data.get_thread_local_data(
                        ThreadLocalData.USER_AGENT)
            }
            telemetry_handler.record_user_action_details(
                cse_operation=CseOperation.OVDC_DISABLE,
                cse_params=cse_params)

        # Attach each newly requested placement policy to the vdc.
        for cp_name in policies_to_add:
            msg = f"Adding k8s provider {cp_name} to OVDC {vdc.name}"
            logger.SERVER_LOGGER.debug(msg)
            task.update(status=vcd_client.TaskStatus.RUNNING.value,
                        namespace='vcloud.cse',
                        operation=msg,
                        operation_name=operation_name,
                        details='',
                        progress=None,
                        owner_href=vdc.href,
                        owner_name=vdc.name,
                        owner_type=vcd_client.EntityType.VDC.value,
                        user_href=user_href,
                        user_name=operation_context.user.name,
                        task_href=task_href,
                        org_href=operation_context.user.org_href)
            policy = compute_policy_manager.get_cse_vdc_compute_policy(
                cpm, cp_name, is_placement_policy=True)
            cpm.add_compute_policy_to_vdc(vdc_id=ovdc_id,
                                          compute_policy_href=policy['href'])

        # Detach each no-longer-requested policy; the removal is done
        # synchronously and reports progress on the same task resource.
        for cp_name in policies_to_delete:
            msg = f"Removing k8s provider {RUNTIME_INTERNAL_NAME_TO_DISPLAY_NAME_MAP[cp_name]} from OVDC {ovdc_id}"  # noqa: E501
            logger.SERVER_LOGGER.debug(msg)
            task_resource = \
                task.update(status=vcd_client.TaskStatus.RUNNING.value,
                            namespace='vcloud.cse',
                            operation=msg,
                            operation_name=operation_name,
                            details='',
                            progress=None,
                            owner_href=vdc.href,
                            owner_name=vdc.name,
                            owner_type=vcd_client.EntityType.VDC.value,
                            user_href=user_href,
                            user_name=operation_context.user.name,
                            task_href=task_href,
                            org_href=operation_context.user.org_href)
            policy = compute_policy_manager.get_cse_vdc_compute_policy(
                cpm, cp_name, is_placement_policy=True)
            cpm.remove_compute_policy_from_vdc_sync(
                vdc=vdc,
                compute_policy_href=policy['href'],
                force=remove_cp_from_vms_on_disable,
                is_placement_policy=True,
                task_resource=task_resource)

        msg = f"Successfully updated OVDC: {vdc.name}"
        logger.SERVER_LOGGER.debug(msg)
        task.update(status=vcd_client.TaskStatus.SUCCESS.value,
                    namespace='vcloud.cse',
                    operation="Operation success",
                    operation_name=operation_name,
                    details=msg,
                    progress=None,
                    owner_href=vdc.href,
                    owner_name=vdc.name,
                    owner_type=vcd_client.EntityType.VDC.value,
                    user_href=user_href,
                    user_name=operation_context.user.name,
                    task_href=task_href,
                    org_href=operation_context.user.org_href)
        # Record telemetry
        if k8s_runtimes_added:
            telemetry_handler.record_user_action(
                CseOperation.OVDC_ENABLE, status=OperationStatus.SUCCESS)
        if k8s_runtimes_deleted:
            telemetry_handler.record_user_action(
                CseOperation.OVDC_DISABLE, status=OperationStatus.SUCCESS)
    except Exception as err:
        # Record telemetry
        if k8s_runtimes_added:
            telemetry_handler.record_user_action(CseOperation.OVDC_ENABLE,
                                                 status=OperationStatus.FAILED)
        if k8s_runtimes_deleted:
            telemetry_handler.record_user_action(CseOperation.OVDC_DISABLE,
                                                 status=OperationStatus.FAILED)
        logger.SERVER_LOGGER.error(err)
        task.update(status=vcd_client.TaskStatus.ERROR.value,
                    namespace='vcloud.cse',
                    operation='Failed to update OVDC',
                    operation_name=operation_name,
                    details=f'Failed with error: {err}',
                    progress=None,
                    owner_href=vdc.href,
                    owner_name=vdc.name,
                    owner_type=vcd_client.EntityType.VDC.value,
                    user_href=user_href,
                    user_name=operation_context.user.name,
                    task_href=task_href,
                    org_href=operation_context.user.org_href,
                    error_message=f"{err}")
    finally:
        # Always release the operation context for this async worker.
        if operation_context.sysadmin_client:
            operation_context.end()
def _reload_templates_async(op_ctx, task_href):
    """Reload native and TKGm template definitions into the server config.

    Runs as an async worker: progress is reported on the task identified
    by `task_href`. Native templates are skipped (set to []) when
    'no VC communication' mode is on; TKGm templates are always reloaded.

    :param op_ctx: operation context for the requesting user
    :param str task_href: href of the task used to report progress
    """
    user_context = None
    task = None
    user_href = None
    try:
        user_context = op_ctx.get_user_context(api_version=None)
        user_client = user_context.client
        org = vcd_utils.get_org(user_client, user_context.org_name)
        user_href = org.get_user(user_context.name).get('href')
        task = Task(user_client)
        server_config = server_utils.get_server_runtime_config()
        if not server_utils.is_no_vc_communication_mode():
            # Refresh native templates from the catalog and publish them
            # into the live server config.
            native_templates = \
                template_reader.read_native_template_definition_from_catalog(
                    config=server_config
                )
            server_config.set_value_at('broker.templates', native_templates)
            task.update(status=TaskStatus.RUNNING.value,
                        namespace='vcloud.cse',
                        operation="Finished reloading native templates.",
                        operation_name='template reload',
                        details='',
                        progress=None,
                        owner_href=user_context.org_href,
                        owner_name=user_context.org_name,
                        owner_type='application/vnd.vmware.vcloud.org+xml',
                        user_href=user_href,
                        user_name=user_context.name,
                        org_href=user_context.org_href,
                        task_href=task_href)
        else:
            msg = "Skipping loading k8s template definition from catalog " \
                  "since `No communication with VCenter` mode is on."
            logger.SERVER_LOGGER.info(msg)
            # No VC access: clear native templates instead of reloading.
            server_config.set_value_at('broker.templates', [])
            task.update(status=TaskStatus.RUNNING.value,
                        namespace='vcloud.cse',
                        operation=msg,
                        operation_name='template reload',
                        details='',
                        progress=None,
                        owner_href=user_context.org_href,
                        owner_name=user_context.org_name,
                        owner_type='application/vnd.vmware.vcloud.org+xml',
                        user_href=user_href,
                        user_name=user_context.name,
                        org_href=user_context.org_href,
                        task_href=task_href)
        task.update(status=TaskStatus.RUNNING.value,
                    namespace='vcloud.cse',
                    operation="Reloading TKG templates.",
                    operation_name='template reload',
                    details='',
                    progress=None,
                    owner_href=user_context.org_href,
                    owner_name=user_context.org_name,
                    owner_type='application/vnd.vmware.vcloud.org+xml',
                    user_href=user_href,
                    user_name=user_context.name,
                    org_href=user_context.org_href,
                    task_href=task_href)
        # TKGm templates are reloaded regardless of VC communication mode.
        tkgm_templates = \
            template_reader.read_tkgm_template_definition_from_catalog(
                config=server_config
            )
        server_config.set_value_at('broker.tkgm_templates', tkgm_templates)
        task.update(status=TaskStatus.SUCCESS.value,
                    namespace='vcloud.cse',
                    operation="Finished reloading all templates.",
                    operation_name='template reload',
                    details='',
                    progress=None,
                    owner_href=user_context.org_href,
                    owner_name=user_context.org_name,
                    owner_type='application/vnd.vmware.vcloud.org+xml',
                    user_href=user_href,
                    user_name=user_context.name,
                    org_href=user_context.org_href,
                    task_href=task_href)
    except Exception:
        msg = "Error reloading templates."
        logger.SERVER_LOGGER.error(msg, exc_info=True)
        # Only report failure on the task if setup got far enough to have
        # a task object and the identifiers it needs.
        if task and user_context and user_href:
            task.update(status=TaskStatus.ERROR.value,
                        namespace='vcloud.cse',
                        operation=msg,
                        operation_name='template reload',
                        details='',
                        progress=None,
                        owner_href=user_context.org_href,
                        owner_name=user_context.org_name,
                        owner_type='application/vnd.vmware.vcloud.org+xml',
                        user_href=user_href,
                        user_name=user_context.name,
                        org_href=user_context.org_href,
                        task_href=task_href)
    finally:
        # Always release the operation context for this async worker.
        op_ctx.end()