Example #1
    def __init__(self, pks_ctx, op_ctx: ctx.OperationContext):
        """Initialize PKS broker.

        :param dict pks_ctx: A dictionary which should at least have the
            following keys: ['username', 'secret', 'host', 'port',
            'uaac_port']; 'proxy' and 'pks_compute_profile_name' are optional
            keys. Currently all callers of this method use the ovdc cache
            (subject to change) to initialize the PKS broker.
        """
        self.context: ctx.OperationContext = None
        # populates above attributes
        super().__init__(op_ctx)

        if not pks_ctx:
            raise ValueError(
                "PKS context is required to establish connection to PKS")

        self.username = pks_ctx['username']
        self.secret = pks_ctx['secret']
        self.pks_host_uri = f"https://{pks_ctx['host']}:{pks_ctx['port']}"
        self.uaac_uri = f"https://{pks_ctx['host']}:{pks_ctx['uaac_port']}"
        self.proxy_uri = None
        if pks_ctx.get('proxy'):
            self.proxy_uri = f"http://{pks_ctx['proxy']}:80"
        self.compute_profile = pks_ctx.get(PKS_COMPUTE_PROFILE_KEY, None)
        self.nsxt_server = \
            server_utils.get_pks_cache().get_nsxt_info(pks_ctx.get('vc'))
        self.nsxt_client = None
        self.pks_wire_logger = NULL_LOGGER
        nsxt_wire_logger = NULL_LOGGER
        config = server_utils.get_server_runtime_config()
        if utils.str_to_bool(config.get_value_at('service.log_wire')):
            nsxt_wire_logger = SERVER_NSXT_WIRE_LOGGER
            self.pks_wire_logger = SERVER_PKS_WIRE_LOGGER
        if self.nsxt_server:
            self.nsxt_client = NSXTClient(
                host=self.nsxt_server.get('host'),
                username=self.nsxt_server.get('username'),
                password=self.nsxt_server.get('password'),
                logger_debug=SERVER_LOGGER,
                logger_wire=nsxt_wire_logger,
                http_proxy=self.nsxt_server.get('proxy'),
                https_proxy=self.nsxt_server.get('proxy'),
                verify_ssl=self.nsxt_server.get('verify'))
        # TODO() Add support in pyvcloud to send metadata values with their
        # types intact.
        verify_ssl = pks_ctx.get('verify')
        self.verify = True
        if isinstance(verify_ssl, bool):
            self.verify = verify_ssl
        elif isinstance(verify_ssl, str):
            self.verify = utils.str_to_bool(verify_ssl)

        self.pks_client = self._get_pks_client(self._get_token())
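Every example on this page funnels loosely typed values ('true'/'false' strings from the config file, environment variables, vCD metadata) through str_to_bool before branching on them. The helper itself is never shown here; as a rough orientation only, a minimal sketch of such a conversion might look like the following (an assumption, the real core_utils.str_to_bool in container-service-extension may handle more cases):

# Hedged sketch only, not the actual container-service-extension implementation.
def str_to_bool(value) -> bool:
    # Coerce via str() so booleans and None are handled too; only the
    # case-insensitive string 'true' maps to True, everything else to False.
    return str(value).strip().lower() == 'true'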
Example #2
def _get_cse_ovdc_list(sysadmin_client: vcd_client.Client, ovdc_list: list):
    ovdcs = []
    config = server_utils.get_server_runtime_config()
    log_wire = utils.str_to_bool(config.get('service', {}).get('log_wire'))
    cpm = compute_policy_manager.ComputePolicyManager(sysadmin_client,
                                                      log_wire=log_wire)
    for ovdc in ovdc_list:
        ovdc_name = ovdc.get('name')
        ovdc_id = vcd_utils.extract_id(ovdc.get('id'))
        # obtain ovdc runtime details for the ovdc
        ovdc_details = asdict(
            get_ovdc_k8s_runtime_details(sysadmin_client,
                                         ovdc_id=ovdc_id,
                                         ovdc_name=ovdc_name,
                                         cpm=cpm,
                                         log_wire=log_wire))
        # NOTE: For CSE 3.0, if the `enable_tkg_plus` flag in the
        # config is set to false, prevent showing information
        # about TKG+ by omitting TKG+ from the result.
        if ClusterEntityKind.TKG_PLUS.value in ovdc_details['k8s_runtime'] \
                and not server_utils.is_tkg_plus_enabled():  # noqa: E501
            ovdc_details['k8s_runtime'].remove(
                ClusterEntityKind.TKG_PLUS.value)  # noqa: E501
        # TODO: Find a better way to remove remove_cp_from_vms_on_disable
        del ovdc_details['remove_cp_from_vms_on_disable']
        ovdcs.append(ovdc_details)
    return ovdcs
Example #3
def get_sys_admin_client(api_version: Optional[str]):
    server_config = get_server_runtime_config()
    if not api_version:
        api_version = server_config.get_value_at('service.default_api_version')
    verify_ssl_certs = server_config.get_value_at('vcd.verify')
    if not verify_ssl_certs:
        requests.packages.urllib3.disable_warnings()
    log_filename = None
    log_wire = str_to_bool(server_config.get_value_at('service.log_wire'))
    if log_wire:
        log_filename = SERVER_DEBUG_WIRELOG_FILEPATH

    client = vcd_client.Client(uri=server_config.get_value_at('vcd.host'),
                               api_version=api_version,
                               verify_ssl_certs=verify_ssl_certs,
                               log_file=log_filename,
                               log_requests=log_wire,
                               log_headers=log_wire,
                               log_bodies=log_wire)
    credentials = vcd_client.BasicLoginCredentials(
        server_config.get_value_at('vcd.username'),
        shared_constants.SYSTEM_ORG_NAME,
        server_config.get_value_at('vcd.password'))
    client.set_credentials(credentials)
    return client
def ovdc_compute_policy_list(request_data, op_ctx: ctx.OperationContext):
    """Request handler for ovdc compute-policy list operation.

    Required data: ovdc_id

    :return: list of dicts, each with a compute policy's name, id and href.
    """
    required = [RequestKey.OVDC_ID]
    req_utils.validate_payload(request_data, required)

    config = server_utils.get_server_runtime_config()
    cpm = compute_policy_manager.ComputePolicyManager(
        op_ctx.sysadmin_client,
        log_wire=utils.str_to_bool(config['service'].get('log_wire')))
    compute_policies = []
    for cp in \
            compute_policy_manager.list_cse_sizing_policies_on_vdc(
                cpm,
                request_data[RequestKey.OVDC_ID]):
        policy = {
            'name': cp['display_name'],
            'id': cp['id'],
            'href': cp['href']
        }
        compute_policies.append(policy)
    return compute_policies
Example #5
def get_ovdc(operation_context: ctx.OperationContext, ovdc_id: str) -> dict:
    """Get ovdc info for a particular ovdc.

    :param ctx.OperationContext operation_context: context for the request
    :param str ovdc_id: ID of the ovdc
    :return: dictionary containing the ovdc information
    :rtype: dict
    """
    # NOTE: For CSE 3.0, if the `enable_tkg_plus` flag in config is set to
    # false, prevent showing information about TKG+ by omitting it from
    # the result.
    cse_params = {
        RequestKey.OVDC_ID:
        ovdc_id,
        PayloadKey.SOURCE_DESCRIPTION:
        thread_local_data.get_thread_local_data(
            ThreadLocalData.USER_AGENT)  # noqa: E501
    }
    telemetry_handler.record_user_action_details(
        cse_operation=CseOperation.OVDC_INFO,  # noqa: E501
        cse_params=cse_params)
    config = server_utils.get_server_runtime_config()
    log_wire = utils.str_to_bool(config.get('service', {}).get('log_wire'))
    result = asdict(
        get_ovdc_k8s_runtime_details(
            operation_context.sysadmin_client,  # noqa: E501
            ovdc_id=ovdc_id,
            log_wire=log_wire))
    # TODO: Find a better way to avoid sending remove_cp_from_vms_on_disable
    # flag
    if ClusterEntityKind.TKG_PLUS.value in result['k8s_runtime'] \
            and not server_utils.is_tkg_plus_enabled():
        result['k8s_runtime'].remove(ClusterEntityKind.TKG_PLUS.value)
    del result['remove_cp_from_vms_on_disable']
    return result
def connect_vcd_user_via_token(
        tenant_auth_token: str,
        is_jwt_token: bool,
        api_version: Optional[str]):
    server_config = get_server_runtime_config()
    if not api_version:
        api_version = server_config['service']['default_api_version']
    verify_ssl_certs = server_config['vcd']['verify']
    if not verify_ssl_certs:
        requests.packages.urllib3.disable_warnings()
    log_filename = None
    log_wire = str_to_bool(server_config['service'].get('log_wire'))
    if log_wire:
        log_filename = SERVER_DEBUG_WIRELOG_FILEPATH

    client_tenant = vcd_client.Client(
        uri=server_config['vcd']['host'],
        api_version=api_version,
        verify_ssl_certs=verify_ssl_certs,
        log_file=log_filename,
        log_requests=log_wire,
        log_headers=log_wire,
        log_bodies=log_wire)
    client_tenant.rehydrate_from_token(tenant_auth_token, is_jwt_token)
    return client_tenant
Example #7
    def share_cluster(self,
                      cluster_id,
                      cluster_name,
                      users: list,
                      access_level_id,
                      org=None,
                      vdc=None):
        """Share the cluster with the users in user_name_to_id_dict.

        :param str cluster_id: cluster id
        :param str cluster_name: cluster name
        :param list users: users to share cluster with
        :param str access_level_id: access level id of shared users
        :param str vdc: name of the vdc where the cluster is
        :param str org: name of the org where the users are
        """
        if not cluster_id:
            cluster_id = self.get_cluster_id_by_name(cluster_name, org, vdc)

        # Ensure current cluster user access level is not reduced
        org_href = self._client.get_org_by_name(org).get('href')
        name_to_id: dict = client_utils.create_user_name_to_id_dict(
            self._client, set(users), org_href)
        org_user_id_to_name_dict = vcd_utils.create_org_user_id_to_name_dict(
            self._client, org)
        logger_wire = logger.CLIENT_WIRE_LOGGER \
            if utils.str_to_bool(
                os.getenv(cli_constants.ENV_CSE_CLIENT_WIRE_LOGGING)
            ) \
            else logger.NULL_LOGGER
        acl_svc = cluster_acl_svc.ClusterACLService(
            cluster_id=cluster_id,
            client=self._client,
            logger_debug=logger.CLIENT_LOGGER,
            logger_wire=logger_wire)
        for acl_entry in acl_svc.list_def_entity_acl_entries():
            username = org_user_id_to_name_dict.get(acl_entry.memberId)
            if name_to_id.get(username):
                curr_access_level = acl_entry.accessLevelId  # noqa: E501
                if client_utils.access_level_reduced(access_level_id,
                                                     curr_access_level):
                    raise Exception(f'{username} currently has higher access '
                                    f'level: {curr_access_level}')

        # share TKG-S def entity
        acl_entry = common_models.ClusterAclEntry(
            grantType=shared_constants.MEMBERSHIP_GRANT_TYPE,
            accessLevelId=access_level_id,
            memberId=None)
        for _, user_id in name_to_id.items():
            acl_entry.memberId = user_id
            acl_svc.share_def_entity(acl_entry)
def should_use_mqtt_protocol(config):
    """Return true if should use the mqtt protocol; false otherwise.

    The MQTT protocol should be used if the config file contains "mqtt" key
        and the CSE server is not being run in legacy mode.

    :param dict config: config yaml file as a dictionary

    :return: whether to use the mqtt protocol
    :rtype: bool
    """
    return config.get('mqtt') is not None and \
        not utils.str_to_bool(config['service'].get('legacy_mode'))
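A quick illustration of the two conditions, using plain dictionaries shaped like the config file (the values below are made up for the example):

cfg_with_mqtt = {'mqtt': {}, 'service': {'legacy_mode': 'false'}}
cfg_amqp_only = {'amqp': {}, 'service': {'legacy_mode': 'false'}}

should_use_mqtt_protocol(cfg_with_mqtt)   # True: 'mqtt' section present, not legacy mode
should_use_mqtt_protocol(cfg_amqp_only)   # False: no 'mqtt' section at all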
def read_native_template_definition_from_catalog(
        config: ServerConfig,
        msg_update_callback=utils.NullPrinter()
):
    # NOTE: If `enable_tkg_plus` in the config file is set to false,
    # the CSE server will skip loading the TKG+ template, which will
    # prevent users from performing TKG+ related operations.
    msg = "Loading k8s template definition from catalog"
    logger.SERVER_LOGGER.info(msg)
    msg_update_callback.general_no_color(msg)

    client = None
    try:
        log_filename = None
        log_wire = \
            utils.str_to_bool(config.get_value_at('service.log_wire'))
        if log_wire:
            log_filename = logger.SERVER_DEBUG_WIRELOG_FILEPATH

        client = Client(
            uri=config.get_value_at('vcd.host'),
            api_version=config.get_value_at('service.default_api_version'),  # noqa: E501
            verify_ssl_certs=config.get_value_at('vcd.verify'),
            log_file=log_filename,
            log_requests=log_wire,
            log_headers=log_wire,
            log_bodies=log_wire
        )
        credentials = BasicLoginCredentials(
            config.get_value_at('vcd.username'),
            shared_constants.SYSTEM_ORG_NAME,
            config.get_value_at('vcd.password')
        )
        client.set_credentials(credentials)

        legacy_mode = config.get_value_at('service.legacy_mode')
        org_name = config.get_value_at('broker.org')
        catalog_name = config.get_value_at('broker.catalog')

        k8_templates = ltm.get_valid_k8s_local_template_definition(
            client=client, catalog_name=catalog_name, org_name=org_name,
            legacy_mode=legacy_mode,
            is_tkg_plus_enabled=server_utils.is_tkg_plus_enabled(config),
            logger_debug=logger.SERVER_LOGGER,
            msg_update_callback=msg_update_callback)

        return k8_templates
    finally:
        if client:
            client.logout()
Example #10
def sanitize_typing(user_input: List[Tuple[str, str]],
                    known_extra_options: Dict):
    """Convert list of key-value pair to correctly typed dictionary."""
    sanitized_user_input = {}
    # sanitize user input to correct type
    for k, v in user_input:
        if k in known_extra_options:
            if known_extra_options[k]["type"] == bool:
                sanitized_user_input[k] = utils.str_to_bool(v)
            else:
                sanitized_user_input[k] = known_extra_options[k]["type"](v)
        else:
            sanitized_user_input[k] = v
    return sanitized_user_input
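For instance, given a hypothetical option spec (only the "type" entry is consulted by sanitize_typing), the conversion behaves like this:

known_extra_options = {
    'verbose': {'type': bool},   # routed through utils.str_to_bool
    'count': {'type': int},      # routed through int()
}
user_input = [('verbose', 'true'), ('count', '3'), ('note', 'hello')]

sanitize_typing(user_input, known_extra_options)
# -> {'verbose': True, 'count': 3, 'note': 'hello'}; unknown keys pass through unchanged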
def read_tkgm_template_definition_from_catalog(
        config: ServerConfig,
        msg_update_callback=utils.NullPrinter()
):
    msg = "Loading TKGm template definition from catalog"
    logger.SERVER_LOGGER.info(msg)
    msg_update_callback.general_no_color(msg)

    client = None
    try:
        log_filename = None
        log_wire = utils.str_to_bool(
            config.get_value_at('service.log_wire')
        )
        if log_wire:
            log_filename = logger.SERVER_DEBUG_WIRELOG_FILEPATH

        client = Client(
            uri=config.get_value_at('vcd.host'),
            api_version=config.get_value_at('service.default_api_version'),  # noqa: E501
            verify_ssl_certs=config.get_value_at('vcd.verify'),
            log_file=log_filename,
            log_requests=log_wire,
            log_headers=log_wire,
            log_bodies=log_wire
        )
        credentials = BasicLoginCredentials(
            config.get_value_at('vcd.username'),
            shared_constants.SYSTEM_ORG_NAME,
            config.get_value_at('vcd.password')
        )
        client.set_credentials(credentials)

        org_name = config.get_value_at('broker.org')
        catalog_name = config.get_value_at('broker.catalog')

        tkgm_templates = ttm.read_all_tkgm_template(
            client=client,
            org_name=org_name,
            catalog_name=catalog_name,
            logger=logger.SERVER_LOGGER,
            msg_update_callback=msg_update_callback
        )

        return tkgm_templates
    finally:
        if client:
            client.logout()
    def __init__(self, client):
        logger_wire = logger.CLIENT_WIRE_LOGGER \
            if core_utils.str_to_bool(
                os.getenv(cli_constants.ENV_CSE_CLIENT_WIRE_LOGGING)
            ) \
            else logger.NULL_LOGGER
        self._cloudapi_client = \
            vcd_utils.get_cloudapi_client_from_vcd_client(
                client=client,
                logger_debug=logger.CLIENT_LOGGER,
                logger_wire=logger_wire
            )
        self._native_cluster_api = NativeClusterApi(client)
        self._client = client
        schema_service = def_schema_svc.DefSchemaService(self._cloudapi_client)
        self._server_rde_version = \
            schema_service.get_latest_registered_schema_version()
def _get_gateway(
    client: vcd_client.Client,
    org_name: str,
    network_name: str,
):
    config = server_utils.get_server_runtime_config()
    logger_wire = NULL_LOGGER
    if utils.str_to_bool(config.get_value_at('service.log_wire')):
        logger_wire = SERVER_CLOUDAPI_WIRE_LOGGER
    cloudapi_client = vcd_utils.get_cloudapi_client_from_vcd_client(
        client=client, logger_debug=LOGGER, logger_wire=logger_wire)

    gateway_name, gateway_href, gateway_exists = None, None, False
    page, page_count = 1, 1
    base_path = f'{cloudapi_constants.CloudApiResource.ORG_VDC_NETWORKS}?filter=name=={network_name};_context==includeAccessible&pageSize=1&page='  # noqa: E501

    while page <= page_count:
        response, headers = cloudapi_client.do_request(
            method=RequestMethod.GET,
            cloudapi_version=cloudapi_constants.CloudApiVersion.VERSION_1_0_0,
            resource_url_relative_path=base_path + f'{page}',
            return_response_headers=True)
        for entry in response['values']:
            # only routed networks allowed
            is_target_network = entry['orgRef']['name'] == org_name and \
                entry['networkType'] == 'NAT_ROUTED'
            if is_target_network:
                if gateway_exists:
                    raise MultipleRecordsException(
                        f"Multiple routed networks named {network_name} found. CSE Server expects unique network names."
                    )  # noqa: E501
                gateway_exists = True
                gateway_name = entry['connection']['routerRef']['name']
                gateway_id = entry['connection']['routerRef']['id'].split(
                    ':').pop()  # noqa: E501
                gateway_href = headers['Content-Location'].split('cloudapi')[
                    0] + f'api/admin/edgeGateway/{gateway_id}'  # noqa: E501
        page += 1
        page_count = response['pageCount']

    if not gateway_exists:
        raise EntityNotFoundException(
            f"No routed networks named {network_name} found.")  # noqa: E501

    gateway = vcd_gateway.Gateway(client, name=gateway_name, href=gateway_href)
    return gateway
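A hypothetical call, assuming a sysadmin vcd_client.Client is already available (the names are placeholders):

gateway = _get_gateway(client, org_name='acme-org', network_name='routed-net-01')
print(gateway.name)  # name of the edge gateway backing the routed network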
Example #14
    def list_share_entries(self, cluster_id, cluster_name, org=None, vdc=None):
        if not cluster_id:
            cluster_id = self.get_cluster_id_by_name(cluster_name, org, vdc)

        org_user_id_to_name_dict = vcd_utils.create_org_user_id_to_name_dict(
            self._client, org)
        # Consider system users if client is from system org
        if self._client.is_sysadmin():
            sys_org_user_id_to_name_dict = vcd_utils.create_org_user_id_to_name_dict(  # noqa:E501
                self._client, shared_constants.SYSTEM_ORG_NAME)
            org_user_id_to_name_dict.update(sys_org_user_id_to_name_dict)

        logger_wire = logger.CLIENT_WIRE_LOGGER \
            if utils.str_to_bool(
                os.getenv(cli_constants.ENV_CSE_CLIENT_WIRE_LOGGING)
            ) \
            else logger.NULL_LOGGER
        acl_svc = cluster_acl_svc.ClusterACLService(
            cluster_id=cluster_id,
            client=self._client,
            logger_debug=logger.CLIENT_LOGGER,
            logger_wire=logger_wire)
        page_num = result_count = 0
        while True:
            page_num += 1
            response_body = acl_svc.get_def_entity_acl_response(
                page_num, cli_constants.CLI_ENTRIES_PER_PAGE)
            result_total = response_body[
                shared_constants.PaginationKey.RESULT_TOTAL]  # noqa: E501
            values = response_body[shared_constants.PaginationKey.VALUES]
            if len(values) == 0:
                break
            acl_values = []
            for entry in values:
                acl_entry = common_models.ClusterAclEntry(**entry)
                # If there is no username found, the user must be a system
                # user, so a generic name is shown
                acl_entry.username = org_user_id_to_name_dict.get(
                    acl_entry.memberId,
                    shared_constants.SYSTEM_USER_GENERIC_NAME)  # noqa: E501
                acl_values.append(
                    acl_entry.construct_filtered_dict(
                        include=CLUSTER_ACL_LIST_FIELDS))
            result_count += len(values)
            yield acl_values, result_count < result_total
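Note that list_share_entries is a generator: each iteration yields a page of ACL entries together with a flag telling the caller whether more pages remain. A hypothetical consumption loop (the cluster object stands in for an instance of the class this method belongs to):

for acl_page, more_pages in cluster.list_share_entries(cluster_id=None,
                                                        cluster_name='my-cluster'):
    # the generator stops on its own after the last non-empty page
    for entry in acl_page:
        print(entry)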
Example #15
def connect_vcd_user_via_token(tenant_auth_token, is_jwt_token):
    server_config = get_server_runtime_config()
    vcd_uri = server_config['vcd']['host']
    version = server_config['vcd']['api_version']
    verify_ssl_certs = server_config['vcd']['verify']
    log_filename = None
    log_wire = str_to_bool(server_config['service'].get('log_wire'))
    if log_wire:
        log_filename = SERVER_DEBUG_WIRELOG_FILEPATH
    client_tenant = vcd_client.Client(uri=vcd_uri,
                                      api_version=version,
                                      verify_ssl_certs=verify_ssl_certs,
                                      log_file=log_filename,
                                      log_requests=log_wire,
                                      log_headers=log_wire,
                                      log_bodies=log_wire)
    client_tenant.rehydrate_from_token(tenant_auth_token, is_jwt_token)
    return client_tenant
Example #16
def should_use_mqtt_protocol(config: ServerConfig) -> bool:
    """Return true if should use the mqtt protocol; false otherwise.

    The MQTT protocol should be used if the config file contains "mqtt" key
        and the CSE server is not being run in legacy mode.

    :param dict config: config yaml file as a dictionary

    :return: whether to use the mqtt protocol
    :rtype: bool
    """
    try:
        has_mqtt_section = config.get_value_at('mqtt') is not None
        running_in_legacy_mode = utils.str_to_bool(
            config.get_value_at('service.legacy_mode'))  # noqa: E501
        return has_mqtt_section and not running_in_legacy_mode
    except KeyError:
        return False
def is_tkg_plus_enabled(config: dict = None):
    """
    Check if TKG plus is enabled by the provider in the config.

    :param dict config: configuration provided by the user.
    :rtype: bool
    """
    if not config:
        try:
            config = get_server_runtime_config()
        except Exception:
            return False
    service_section = config.get('service', {})
    tkg_plus_enabled = service_section.get('enable_tkg_plus', False)
    if isinstance(tkg_plus_enabled, bool):
        return tkg_plus_enabled
    elif isinstance(tkg_plus_enabled, str):
        return utils.str_to_bool(tkg_plus_enabled)
    return False
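The flag may be stored either as a real boolean or as a string in the config, which is why both isinstance branches exist; for example:

is_tkg_plus_enabled({'service': {'enable_tkg_plus': 'True'}})   # True (string form)
is_tkg_plus_enabled({'service': {'enable_tkg_plus': False}})    # False (boolean form)
is_tkg_plus_enabled({'service': {}})                            # False (key absent, defaults to False)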
def _get_nsxt_backed_gateway_service(client: vcd_client.Client, org_name: str,
                                     network_name: str):
    # Check if NSX-T backed gateway
    gateway: vcd_gateway.Gateway = _get_gateway(client=client,
                                                org_name=org_name,
                                                network_name=network_name)
    if not gateway:
        raise Exception(f'No gateway found for network: {network_name}')
    if not gateway.is_nsxt_backed():
        raise Exception('Gateway is not NSX-T backed for exposing cluster.')

    config = server_utils.get_server_runtime_config()
    logger_wire = NULL_LOGGER
    if utils.str_to_bool(config.get_value_at('service.log_wire')):
        logger_wire = SERVER_CLOUDAPI_WIRE_LOGGER
    return NsxtBackedGatewayService(gateway=gateway,
                                    client=client,
                                    logger_debug=LOGGER,
                                    logger_wire=logger_wire)
Example #19
    def unshare_cluster(self,
                        cluster_id,
                        cluster_name,
                        users: list,
                        org=None,
                        vdc=None):
        if not cluster_id:
            cluster_id = self.get_cluster_id_by_name(cluster_name, org, vdc)

        # Get acl entry ids for users
        org_href = self._client.get_org_by_name(org).get('href')
        name_to_id: dict = client_utils.create_user_name_to_id_dict(
            self._client, set(users), org_href)
        users_ids: set = {user_id for _, user_id in name_to_id.items()}
        logger_wire = logger.CLIENT_WIRE_LOGGER \
            if utils.str_to_bool(
                os.getenv(cli_constants.ENV_CSE_CLIENT_WIRE_LOGGING)
            )\
            else logger.NULL_LOGGER
        acl_svc = cluster_acl_svc.ClusterACLService(
            cluster_id=cluster_id,
            client=self._client,
            logger_debug=logger.CLIENT_LOGGER,
            logger_wire=logger_wire)
        delete_acl_ids = []
        for acl_entry in acl_svc.list_def_entity_acl_entries():
            if acl_entry.memberId in users_ids:
                delete_acl_ids.append(acl_entry.id)
                users_ids.remove(acl_entry.memberId)

        if len(users_ids) > 0:
            org_user_id_to_name_dict = \
                vcd_utils.create_org_user_id_to_name_dict(self._client, org)
            missing_users = [
                org_user_id_to_name_dict[user_id] for user_id in users_ids
            ]  # noqa: E501
            raise Exception(f'Cluster {cluster_name or cluster_id} is not '
                            f'currently shared with: {missing_users}')

        # Delete cluster acl entries
        for acl_id in delete_acl_ids:
            acl_svc.unshare_def_entity(acl_id)
Example #20
def is_tkg_plus_enabled(config: Optional[ServerConfig] = None) -> bool:
    """
    Check if TKG plus is enabled by the provider in the config.

    :param ServerConfig config: configuration provided by the user.

    :return: whether TKG+ is enabled or not.
    :rtype: bool
    """
    if not config:
        try:
            config = get_server_runtime_config()
        except Exception:
            return False
    try:
        tkg_plus_enabled = config.get_value_at('service.enable_tkg_plus')
    except KeyError:
        return False
    if isinstance(tkg_plus_enabled, bool):
        return tkg_plus_enabled
    elif isinstance(tkg_plus_enabled, str):
        return utils.str_to_bool(tkg_plus_enabled)
    return False
Example #21
def get_sys_admin_client():
    server_config = get_server_runtime_config()
    if not server_config['vcd']['verify']:
        SERVER_LOGGER.warning("InsecureRequestWarning: Unverified HTTPS "
                              "request is being made. Adding certificate "
                              "verification is strongly advised.")
        requests.packages.urllib3.disable_warnings()
    log_filename = None
    log_wire = str_to_bool(server_config['service'].get('log_wire'))
    if log_wire:
        log_filename = SERVER_DEBUG_WIRELOG_FILEPATH
    client = vcd_client.Client(uri=server_config['vcd']['host'],
                               api_version=server_config['vcd']['api_version'],
                               verify_ssl_certs=server_config['vcd']['verify'],
                               log_file=log_filename,
                               log_requests=log_wire,
                               log_headers=log_wire,
                               log_bodies=log_wire)
    credentials = vcd_client.BasicLoginCredentials(
        server_config['vcd']['username'], SYSTEM_ORG_NAME,
        server_config['vcd']['password'])
    client.set_credentials(credentials)
    return client
Example #22
def is_no_vc_communication_mode(
        config: Optional[ServerConfig] = None) -> bool:  # noqa: E501
    """Check if TKGm only mode is enabled by the provider in the config.

    :param ServerConfig config: configuration provided by the user.

    :return: whether TKGm only mode is enabled or not.
    :rtype: bool
    """
    if not config:
        try:
            config = get_server_runtime_config()
        except Exception:
            return False
    try:
        is_no_vc_comm = config.get_value_at('service.no_vc_communication_mode')
    except KeyError:
        return False

    if isinstance(is_no_vc_comm, bool):
        return is_no_vc_comm
    elif isinstance(is_no_vc_comm, str):
        return utils.str_to_bool(is_no_vc_comm)
    return False
def get_validated_config(
        config_file_name,
        pks_config_file_name=None,
        skip_config_decryption=False,
        decryption_password=None,
        log_wire_file=None,
        logger_debug=NULL_LOGGER,
        msg_update_callback=NullPrinter()
):
    """Get the config file as a dictionary and check for validity.

    Ensures that all properties exist and all values are the expected type.
    Checks that AMQP connection is available, and vCD/VCs are valid.
    Does not guarantee that CSE has been installed according to this
    config file. Additionally populates certain key-value pairs in the
    config dict to avoid repeated computation of those, e.g.
    supported api versions, feature flags, RDE version in use, etc.

    :param str config_file_name: path to config file.
    :param str pks_config_file_name: path to PKS config file.
    :param bool skip_config_decryption: do not decrypt the config file.
    :param str decryption_password: password to decrypt the config file.
    :param str log_wire_file: log_wire_file to use if needed to wire log
        pyvcloud requests and responses
    :param logging.Logger logger_debug: logger to log with.
    :param utils.ConsoleMessagePrinter msg_update_callback: Callback object.

    :return: CSE config

    :rtype: dict

    :raises KeyError: if config file has missing or extra properties.
    :raises TypeError: if the value type for a config file property
        is incorrect.
    :raises container_service_extension.exceptions.AmqpConnectionError:
        (when not using MQTT) if the AMQP connection failed (host, password,
        port, username, or vhost is invalid).
    :raises requests.exceptions.ConnectionError: if 'vcd' 'host' is invalid.
    :raises pyvcloud.vcd.exceptions.VcdException: if 'vcd' 'username' or
        'password' is invalid.
    :raises pyVmomi.vim.fault.InvalidLogin: if 'vcs' 'username' or 'password'
        is invalid.
    """
    check_file_permissions(config_file_name,
                           msg_update_callback=msg_update_callback)
    if skip_config_decryption:
        with open(config_file_name) as config_file:
            config = yaml.safe_load(config_file) or {}
    else:
        msg_update_callback.info(
            f"Decrypting '{config_file_name}'")
        try:
            config = yaml.safe_load(
                get_decrypted_file_contents(
                    config_file_name,
                    decryption_password
                )
            ) or {}
        except cryptography.fernet.InvalidToken:
            raise Exception(CONFIG_DECRYPTION_ERROR_MSG)

    msg_update_callback.info(
        f"Validating config file '{config_file_name}'"
    )

    is_no_vc_communication_mode = \
        server_utils.is_no_vc_communication_mode(ServerConfig(config))

    use_mqtt = server_utils.should_use_mqtt_protocol(ServerConfig(config))
    sample_message_queue_config = SAMPLE_AMQP_CONFIG if not use_mqtt \
        else SAMPLE_MQTT_CONFIG

    # This allows us to compare top-level config keys and value types
    sample_config = {
        **sample_message_queue_config,
        **SAMPLE_VCD_CONFIG,
        **SAMPLE_SERVICE_CONFIG,
        **SAMPLE_BROKER_CONFIG
    }
    if not is_no_vc_communication_mode:
        sample_config.update(SAMPLE_VCS_CONFIG)
    else:
        if 'vcs' in config:
            del config['vcs']

    log_wire = str_to_bool(config.get('service', {}).get('log_wire'))
    nsxt_wire_logger = NULL_LOGGER
    if log_wire:
        nsxt_wire_logger = SERVER_NSXT_WIRE_LOGGER
    else:
        log_wire_file = None

    check_keys_and_value_types(
        config,
        sample_config,
        location='config file',
        msg_update_callback=msg_update_callback
    )
    # MQTT validation not required because no MQTT host, exchange, etc.
    # is needed in the config file since the server code creates and
    # registers the MQTT extension directly using server constants
    if not use_mqtt:
        _validate_amqp_config(config['amqp'], msg_update_callback)

    # Validation of service properties is done first as those properties are
    # used in broker validation.
    check_keys_and_value_types(
        config['service'],
        SAMPLE_SERVICE_CONFIG['service'],
        location="config file 'service' section",
        excluded_keys=['log_wire'],
        msg_update_callback=msg_update_callback
    )

    try:
        if is_no_vc_communication_mode:
            _validate_vcd_config(
                config['vcd'],
                msg_update_callback,
                log_file=log_wire_file,
                log_wire=log_wire
            )
        else:
            _validate_vcd_and_vcs_config(
                config['vcd'],
                config['vcs'],
                msg_update_callback,
                log_file=log_wire_file,
                log_wire=log_wire
            )
    except vim.fault.InvalidLogin:
        raise Exception(VCENTER_LOGIN_ERROR_MSG)
    except requests.exceptions.SSLError as err:
        raise Exception(f"SSL verification failed: {str(err)}")
    except requests.exceptions.ConnectionError as err:
        raise Exception(f"Cannot connect to {err.request.url}.")

    check_keys_and_value_types(
        config['service']['telemetry'],
        SAMPLE_SERVICE_CONFIG['service']['telemetry'],
        location="config file 'service->telemetry' section",
        msg_update_callback=msg_update_callback
    )

    _validate_broker_config(
        config['broker'],
        legacy_mode=config['service']['legacy_mode'],
        msg_update_callback=msg_update_callback,
        logger_debug=logger_debug
    )

    config = add_additional_details_to_config(
        config=config,
        vcd_host=config['vcd']['host'],
        vcd_username=config['vcd']['username'],
        vcd_password=config['vcd']['password'],
        verify_ssl=config['vcd']['verify'],
        is_legacy_mode=config['service']['legacy_mode'],
        is_mqtt_exchange=server_utils.should_use_mqtt_protocol(
            ServerConfig(config)
        ),
        log_wire=log_wire,
        log_wire_file=log_wire_file
    )
    _raise_error_if_amqp_not_supported(
        use_mqtt,
        config['service']['default_api_version'],
        logger=logger_debug
    )

    msg_update_callback.general(
        f"Config file '{config_file_name}' is valid"
    )

    if pks_config_file_name:
        check_file_permissions(pks_config_file_name,
                               msg_update_callback=msg_update_callback)
        if skip_config_decryption:
            with open(pks_config_file_name) as f:
                pks_config = yaml.safe_load(f) or {}
        else:
            msg_update_callback.info(
                f"Decrypting '{pks_config_file_name}'")
            pks_config = yaml.safe_load(
                get_decrypted_file_contents(pks_config_file_name,
                                            decryption_password)) or {}
        msg_update_callback.info(
            f"Validating PKS config file '{pks_config_file_name}'")
        _validate_pks_config_structure(pks_config, msg_update_callback)
        try:
            _validate_pks_config_data_integrity(pks_config,
                                                msg_update_callback,
                                                logger_debug=logger_debug,
                                                logger_wire=nsxt_wire_logger)
        except requests.exceptions.SSLError as err:
            raise Exception(f"SSL verification failed: {str(err)}")

        msg_update_callback.general(
            f"PKS Config file '{pks_config_file_name}' is valid")
        config['pks_config'] = pks_config
    else:
        config['pks_config'] = None

    return config
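A hypothetical invocation for an unencrypted config file (the file name is a placeholder; msg_update_callback defaults to a NullPrinter as in the signature above):

config = get_validated_config(
    'config.yaml',
    skip_config_decryption=True,
    msg_update_callback=NullPrinter()
)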
def add_additional_details_to_config(
    config: Dict,
    vcd_host: str,
    vcd_username: str,
    vcd_password: str,
    verify_ssl: bool,
    is_legacy_mode: bool,
    is_mqtt_exchange: bool,
    log_wire: bool,
    log_wire_file: str
):
    """Update config dict with computed key-value pairs.

    :param dict config:
    :param str vcd_host:
    :param str vcd_username:
    :param str vcd_password:
    :param bool verify_ssl:
    :param bool is_legacy_mode:
    :param bool is_mqtt_exchange:
    :param bool log_wire:
    :param str log_wire_file:

    :return: the updated config file
    :rtype: dict
    """
    # Compute common supported api versions by the CSE server and vCD
    sysadmin_client = None
    try:
        sysadmin_client = Client(
            vcd_host,
            verify_ssl_certs=verify_ssl,
            log_file=log_wire_file,
            log_requests=log_wire,
            log_headers=log_wire,
            log_bodies=log_wire
        )
        sysadmin_client.set_credentials(
            BasicLoginCredentials(
                vcd_username,
                SYSTEM_ORG_NAME,
                vcd_password
            )
        )

        vcd_supported_api_versions = \
            set(sysadmin_client.get_supported_versions_list())
        cse_supported_api_versions = set(SUPPORTED_VCD_API_VERSIONS)
        common_supported_api_versions = \
            list(cse_supported_api_versions.intersection(vcd_supported_api_versions))  # noqa: E501
        common_supported_api_versions.sort()

        if is_legacy_mode:
            common_supported_api_versions = \
                [x for x in common_supported_api_versions
                 if VCDApiVersion(x) < VcdApiVersionObj.VERSION_35.value]
        else:
            common_supported_api_versions = \
                [x for x in common_supported_api_versions
                 if VCDApiVersion(x) >= VcdApiVersionObj.VERSION_35.value]
        config['service']['supported_api_versions'] = \
            common_supported_api_versions
    finally:
        if sysadmin_client:
            sysadmin_client.logout()

    # Convert legacy_mode flag in service_section to corresponding
    # feature flags
    if 'feature_flags' not in config:
        config['feature_flags'] = {}
    config['feature_flags']['legacy_api'] = str_to_bool(is_legacy_mode)
    config['feature_flags']['non_legacy_api'] = \
        not str_to_bool(is_legacy_mode)

    # Compute the default api version as the max supported version
    # Also compute the RDE version in use
    max_vcd_api_version_supported: str = get_max_api_version(config['service']['supported_api_versions'])  # noqa: E501
    config['service']['default_api_version'] = max_vcd_api_version_supported
    config['service']['rde_version_in_use'] = semantic_version.Version(
        rde_utils.get_runtime_rde_version_by_vcd_api_version(
            max_vcd_api_version_supported
        )
    )

    # Update the config dict with telemetry specific key value pairs
    update_with_telemetry_settings(
        config_dict=config,
        vcd_host=vcd_host,
        vcd_username=vcd_username,
        vcd_password=vcd_password,
        verify_ssl=verify_ssl,
        is_mqtt_exchange=is_mqtt_exchange
    )

    return config
import os

import requests

import container_service_extension.client.constants as cli_constants
from container_service_extension.common.constants.shared_constants import ERROR_DESCRIPTION_KEY  # noqa: E501
from container_service_extension.common.constants.shared_constants import ERROR_MINOR_CODE_KEY  # noqa: E501
from container_service_extension.common.constants.shared_constants import RESPONSE_MESSAGE_KEY  # noqa: E501
from container_service_extension.common.constants.shared_constants import UNKNOWN_ERROR_MESSAGE  # noqa: E501
from container_service_extension.common.utils.core_utils import str_to_bool
from container_service_extension.exception.exceptions import CseResponseError
from container_service_extension.exception.minor_error_codes import MinorErrorCode  # noqa: E501
from container_service_extension.logging.logger import CLIENT_WIRE_LOGGER
from container_service_extension.logging.logger import NULL_LOGGER

wire_logger = NULL_LOGGER
if str_to_bool(os.getenv(cli_constants.ENV_CSE_CLIENT_WIRE_LOGGING)):
    wire_logger = CLIENT_WIRE_LOGGER


def process_response(response):
    """Process the given response dictionary with following keys.

    Log the response if wire logging is enabled.

    If the value of status code is 2xx, return the response content, else
    raise exception with proper error message

    :param requests.models.Response response: object with attributes viz.
        status code and content
        status_code: http status code
        content: response result as string
Example #26
    def _load_template_definition_from_catalog(
        self, msg_update_callback=utils.NullPrinter()):
        # NOTE: If `enable_tkg_plus` in the config file is set to false,
        # the CSE server will skip loading the TKG+ template, which will
        # prevent users from performing TKG+ related operations.
        msg = "Loading k8s template definition from catalog"
        logger.SERVER_LOGGER.info(msg)
        msg_update_callback.general_no_color(msg)

        client = None
        try:
            log_filename = None
            log_wire = \
                utils.str_to_bool(self.config['service'].get('log_wire'))
            if log_wire:
                log_filename = logger.SERVER_DEBUG_WIRELOG_FILEPATH

            # Since the config param has been read from file by the
            # get_validated_config method, we can safely use the
            # default_api_version key; it will be set to the highest api
            # version supported by both VCD and CSE.
            client = Client(
                self.config['vcd']['host'],
                api_version=self.config['service']['default_api_version'],
                verify_ssl_certs=self.config['vcd']['verify'],
                log_file=log_filename,
                log_requests=log_wire,
                log_headers=log_wire,
                log_bodies=log_wire)
            credentials = BasicLoginCredentials(
                self.config['vcd']['username'],
                shared_constants.SYSTEM_ORG_NAME,  # noqa: E501
                self.config['vcd']['password'])
            client.set_credentials(credentials)

            is_tkg_plus_enabled = server_utils.is_tkg_plus_enabled(self.config)
            legacy_mode = self.config['service']['legacy_mode']
            org_name = self.config['broker']['org']
            catalog_name = self.config['broker']['catalog']
            k8_templates = ltm.get_valid_k8s_local_template_definition(
                client=client,
                catalog_name=catalog_name,
                org_name=org_name,
                legacy_mode=legacy_mode,
                is_tkg_plus_enabled=is_tkg_plus_enabled,
                logger_debug=logger.SERVER_LOGGER,
                msg_update_callback=msg_update_callback)

            if not k8_templates:
                msg = "No valid K8 templates were found in catalog " \
                      f"'{catalog_name}'. Unable to start CSE server."
                msg_update_callback.error(msg)
                logger.SERVER_LOGGER.error(msg)
                sys.exit(1)

            # Check that default k8s template exists in vCD at the correct
            # revision
            default_template_name = \
                self.config['broker']['default_template_name']
            default_template_revision = \
                str(self.config['broker']['default_template_revision'])
            found_default_template = False
            for template in k8_templates:
                if str(template[server_constants.LocalTemplateKey.REVISION]) == default_template_revision and \
                        template[server_constants.LocalTemplateKey.NAME] == default_template_name:  # noqa: E501
                    found_default_template = True

            if not found_default_template:
                msg = f"Default template {default_template_name} with " \
                      f"revision {default_template_revision} not found." \
                      " Unable to start CSE server."
                msg_update_callback.error(msg)
                logger.SERVER_LOGGER.error(msg)
                sys.exit(1)

            self.config['broker']['templates'] = k8_templates
        finally:
            if client:
                client.logout()
Example #27
    def _load_def_schema(self, msg_update_callback=utils.NullPrinter()):
        """Load cluster interface and cluster entity type to global context.

        If the defined entity framework is supported by the vCD api version,
        load the defined entity interface and defined entity type registered
        during server install.

        :param utils.NullMessagePrinter msg_update_callback:
        """
        sysadmin_client = None
        try:
            sysadmin_client = vcd_utils.get_sys_admin_client(api_version=None)
            logger_wire = logger.NULL_LOGGER
            if utils.str_to_bool(
                    self.config['service'].get('log_wire', False)):
                logger_wire = logger.SERVER_CLOUDAPI_WIRE_LOGGER

            cloudapi_client = \
                vcd_utils.get_cloudapi_client_from_vcd_client(sysadmin_client,
                                                              logger.SERVER_LOGGER,  # noqa: E501
                                                              logger_wire)
            raise_error_if_def_not_supported(cloudapi_client)

            server_rde_version = server_utils.get_rde_version_in_use()
            msg_update_callback.general(
                f"Using RDE version: {server_rde_version}")  # noqa: E501

            schema_svc = def_schema_svc.DefSchemaService(cloudapi_client)
            def_metadata_dict: dict = def_utils.get_rde_metadata(
                server_rde_version)  # noqa: E501
            entity_type: common_models.DefEntityType = \
                def_metadata_dict[def_constants.RDEMetadataKey.ENTITY_TYPE]  # noqa: E501
            interfaces: List[common_models.DefInterface] = \
                def_metadata_dict[def_constants.RDEMetadataKey.INTERFACES]  # noqa: E501

            for interface in interfaces:
                # TODO change _kubernetesInterface to an array once additional
                # interface for CSE is added.
                self._kubernetesInterface = \
                    schema_svc.get_interface(interface.get_id())

            self._nativeEntityType = \
                schema_svc.get_entity_type(entity_type.get_id())

            msg = f"Successfully loaded defined entity schema " \
                  f"{entity_type.get_id()} to global context"
            msg_update_callback.general(msg)
            logger.SERVER_LOGGER.debug(msg)
        except cse_exception.DefNotSupportedException:
            msg = "Skipping initialization of defined entity type" \
                  " and defined entity interface"
            msg_update_callback.info(msg)
            logger.SERVER_LOGGER.debug(msg)
        except cse_exception.DefSchemaServiceError as e:
            msg = f"Error while loading defined entity schema: {e.error_message}"  # noqa: E501
            msg_update_callback.error(msg)
            logger.SERVER_LOGGER.debug(msg)
            raise
        except Exception as e:
            msg = f"Failed to load defined entity schema to global context: {str(e)}"  # noqa: E501
            msg_update_callback.error(msg)
            logger.SERVER_LOGGER.error(msg)
            raise
        finally:
            if sysadmin_client:
                sysadmin_client.logout()
Example #28
    vcd_client.ApiVersion.VERSION_34.value: {
        cli_constants.GroupKey.CLUSTER: {
            cli_constants.CommandNameKey.CREATE: ['sizing_class'],
            cli_constants.CommandNameKey.DELETE: ['k8_runtime', 'cluster_id'],
            cli_constants.CommandNameKey.INFO: ['k8_runtime', 'cluster_id'],
            cli_constants.CommandNameKey.UPGRADE: ['k8_runtime'],
            cli_constants.CommandNameKey.UPGRADE_PLAN: ['k8_runtime'],
            cli_constants.CommandNameKey.CONFIG: ['k8_runtime', 'cluster_id']
        }
    },
    vcd_client.ApiVersion.VERSION_35.value: {
        cli_constants.GroupKey.CLUSTER: {
            cli_constants.CommandNameKey.CREATE: ['cpu', 'memory']
        },
        cli_constants.GroupKey.OVDC: {
            cli_constants.CommandNameKey.ENABLE: [] if str_to_bool(
                os.getenv(cli_constants.ENV_CSE_TKG_PLUS_ENABLED)) else
            ['enable_tkg_plus'],  # noqa: E501
            cli_constants.CommandNameKey.DISABLE: [] if str_to_bool(
                os.getenv(cli_constants.ENV_CSE_TKG_PLUS_ENABLED)) else
            ['disable_tkg_plus']  # noqa: E501
        }
    }
}

UNSUPPORTED_COMMANDS_WITH_SERVER_NOT_RUNNING_BY_VERSION = {
    vcd_client.ApiVersion.VERSION_35.value: [
        cli_constants.GroupKey.VERSION, cli_constants.GroupKey.OVDC,
        cli_constants.GroupKey.SYSTEM, cli_constants.GroupKey.TEMPLATE,
        cli_constants.GroupKey.PKS
    ]
}
def ovdc_compute_policy_update(request_data, op_ctx: ctx.OperationContext):
    """Request handler for ovdc compute-policy update operation.

    Required data: ovdc_id, compute_policy_action, compute_policy_names

    :return: Dictionary with task href.
    """
    required = [
        RequestKey.OVDC_ID, RequestKey.COMPUTE_POLICY_ACTION,
        RequestKey.COMPUTE_POLICY_NAME
    ]
    defaults = {
        RequestKey.REMOVE_COMPUTE_POLICY_FROM_VMS: False,
    }
    validated_data = {**defaults, **request_data}
    req_utils.validate_payload(validated_data, required)

    action = validated_data[RequestKey.COMPUTE_POLICY_ACTION]
    cp_name = validated_data[RequestKey.COMPUTE_POLICY_NAME]
    ovdc_id = validated_data[RequestKey.OVDC_ID]
    remove_compute_policy_from_vms = validated_data[
        RequestKey.REMOVE_COMPUTE_POLICY_FROM_VMS]  # noqa: E501
    try:
        config = server_utils.get_server_runtime_config()
        cpm = compute_policy_manager.ComputePolicyManager(
            op_ctx.sysadmin_client,
            log_wire=utils.str_to_bool(
                config['service'].get('log_wire')))  # noqa: E501
        cp_href = None
        cp_id = None
        if cp_name == SYSTEM_DEFAULT_COMPUTE_POLICY_NAME:
            for _cp in cpm.list_compute_policies_on_vdc(ovdc_id):
                if _cp['name'] == cp_name:
                    cp_href = _cp['href']
                    cp_id = _cp['id']
        else:
            try:
                _cp = compute_policy_manager.get_cse_vdc_compute_policy(
                    cpm, cp_name)  # noqa: E501
                cp_href = _cp['href']
                cp_id = _cp['id']
            except vcd_e.EntityNotFoundException:
                pass

        if cp_href is None:
            raise e.BadRequestError(f"Compute policy '{cp_name}' not found.")

        if action == ComputePolicyAction.ADD:
            cpm.add_compute_policy_to_vdc(ovdc_id, cp_href)
            # Record telemetry data
            record_user_action(CseOperation.OVDC_COMPUTE_POLICY_ADD)
            return f"Added compute policy '{cp_name}' ({cp_id}) to ovdc " \
                   f"({ovdc_id})"

        if action == ComputePolicyAction.REMOVE:
            # TODO: fix remove_compute_policy by implementing a proper way
            # for calling async methods without having to pass op_ctx
            # outside handlers.
            op_ctx.is_async = True
            response = cpm.remove_vdc_compute_policy_from_vdc(
                ovdc_id, cp_href, force=remove_compute_policy_from_vms)
            # Follow task_href to completion in a different thread and end
            # operation context
            _follow_task(op_ctx, response['task_href'], ovdc_id)
            # Record telemetry data
            record_user_action(CseOperation.OVDC_COMPUTE_POLICY_REMOVE)
            return response

        raise e.BadRequestError("Unsupported compute policy action")

    except Exception as err:
        # Record telemetry data failure
        if action == ComputePolicyAction.ADD:
            record_user_action(CseOperation.OVDC_COMPUTE_POLICY_ADD,
                               status=OperationStatus.FAILED)
        elif action == ComputePolicyAction.REMOVE:
            record_user_action(CseOperation.OVDC_COMPUTE_POLICY_REMOVE,
                               status=OperationStatus.FAILED)
        raise err
Example #30
def _update_ovdc_using_placement_policy_async(
        operation_context: ctx.OperationContext,  # noqa: E501
        task: vcd_task.Task,
        task_href,
        user_href,
        policy_list,
        ovdc_id,
        vdc,
        org_name,
        remove_cp_from_vms_on_disable=False):  # noqa: E501
    """Enable ovdc using placement policies.

    :param ctx.OperationContext operation_context: operation context object
    :param vcd_task.Task task: Task resource to track progress
    :param str task_href: href of the task
    :param str user_href:
    :param List[str] policy_list: The new list of policies associated with
        the ovdc
    :param str ovdc_id:
    :param pyvcloud.vcd.vdc.VDC vdc: VDC object
    :param str org_name: name of the organization to which the vdc
        provides resources
    :param bool remove_cp_from_vms_on_disable: Set to true if placement
        policies need to be removed from the vms before removing from the VDC.
    """
    operation_name = "Update OVDC with placement policies"
    k8s_runtimes_added = ''
    k8s_runtimes_deleted = ''
    try:
        config = server_utils.get_server_runtime_config()
        log_wire = utils.str_to_bool(config.get('service', {}).get('log_wire'))
        cpm = compute_policy_manager.ComputePolicyManager(
            operation_context.sysadmin_client, log_wire=log_wire)
        existing_policies = []
        for cse_policy in \
                compute_policy_manager.list_cse_placement_policies_on_vdc(cpm, ovdc_id):  # noqa: E501
            existing_policies.append(cse_policy['display_name'])

        logger.SERVER_LOGGER.debug(policy_list)
        logger.SERVER_LOGGER.debug(existing_policies)
        policies_to_add = set(policy_list) - set(existing_policies)
        policies_to_delete = set(existing_policies) - set(policy_list)

        # Telemetry for 'vcd cse ovdc enable' command
        # TODO: Update telemetry request to handle 'k8s_runtime' array
        k8s_runtimes_added = ','.join(policies_to_add)
        if k8s_runtimes_added:
            cse_params = {
                RequestKey.K8S_PROVIDER:
                k8s_runtimes_added,
                RequestKey.OVDC_ID:
                ovdc_id,
                RequestKey.ORG_NAME:
                org_name,
                PayloadKey.SOURCE_DESCRIPTION:
                thread_local_data.get_thread_local_data(
                    ThreadLocalData.USER_AGENT)  # noqa: E501
            }
            telemetry_handler.record_user_action_details(
                cse_operation=CseOperation.OVDC_ENABLE,  # noqa: E501
                cse_params=cse_params)

        # Telemetry for 'vcd cse ovdc enable' command
        # TODO: Update telemetry request to handle 'k8s_runtime' array
        k8s_runtimes_deleted = ','.join(policies_to_delete)
        if k8s_runtimes_deleted:
            cse_params = {
                RequestKey.K8S_PROVIDER:
                k8s_runtimes_deleted,
                RequestKey.OVDC_ID:
                ovdc_id,
                RequestKey.ORG_NAME:
                org_name,
                RequestKey.REMOVE_COMPUTE_POLICY_FROM_VMS:
                remove_cp_from_vms_on_disable,  # noqa: E501
                PayloadKey.SOURCE_DESCRIPTION:
                thread_local_data.get_thread_local_data(
                    ThreadLocalData.USER_AGENT)  # noqa: E501
            }
            telemetry_handler.record_user_action_details(
                cse_operation=CseOperation.OVDC_DISABLE,  # noqa: E501
                cse_params=cse_params)

        for cp_name in policies_to_add:
            msg = f"Adding k8s provider {cp_name} to OVDC {vdc.name}"
            logger.SERVER_LOGGER.debug(msg)
            task.update(status=vcd_client.TaskStatus.RUNNING.value,
                        namespace='vcloud.cse',
                        operation=msg,
                        operation_name=operation_name,
                        details='',
                        progress=None,
                        owner_href=vdc.href,
                        owner_name=vdc.name,
                        owner_type=vcd_client.EntityType.VDC.value,
                        user_href=user_href,
                        user_name=operation_context.user.name,
                        task_href=task_href,
                        org_href=operation_context.user.org_href)
            policy = compute_policy_manager.get_cse_vdc_compute_policy(
                cpm, cp_name, is_placement_policy=True)
            cpm.add_compute_policy_to_vdc(vdc_id=ovdc_id,
                                          compute_policy_href=policy['href'])

        for cp_name in policies_to_delete:
            msg = f"Removing k8s provider {RUNTIME_INTERNAL_NAME_TO_DISPLAY_NAME_MAP[cp_name]} from OVDC {ovdc_id}"  # noqa: E501
            logger.SERVER_LOGGER.debug(msg)
            task_resource = \
                task.update(status=vcd_client.TaskStatus.RUNNING.value,
                            namespace='vcloud.cse',
                            operation=msg,
                            operation_name=operation_name,
                            details='',
                            progress=None,
                            owner_href=vdc.href,
                            owner_name=vdc.name,
                            owner_type=vcd_client.EntityType.VDC.value,
                            user_href=user_href,
                            user_name=operation_context.user.name,
                            task_href=task_href,
                            org_href=operation_context.user.org_href)
            policy = compute_policy_manager.get_cse_vdc_compute_policy(
                cpm, cp_name, is_placement_policy=True)  # noqa: E501
            cpm.remove_compute_policy_from_vdc_sync(
                vdc=vdc,
                compute_policy_href=policy['href'],  # noqa: E501
                force=remove_cp_from_vms_on_disable,  # noqa: E501
                is_placement_policy=True,
                task_resource=task_resource)  # noqa: E501
        msg = f"Successfully updated OVDC: {vdc.name}"
        logger.SERVER_LOGGER.debug(msg)
        task.update(status=vcd_client.TaskStatus.SUCCESS.value,
                    namespace='vcloud.cse',
                    operation="Operation success",
                    operation_name=operation_name,
                    details=msg,
                    progress=None,
                    owner_href=vdc.href,
                    owner_name=vdc.name,
                    owner_type=vcd_client.EntityType.VDC.value,
                    user_href=user_href,
                    user_name=operation_context.user.name,
                    task_href=task_href,
                    org_href=operation_context.user.org_href)
        # Record telemetry
        if k8s_runtimes_added:
            telemetry_handler.record_user_action(
                CseOperation.OVDC_ENABLE,
                status=OperationStatus.SUCCESS)  # noqa: E501
        if k8s_runtimes_deleted:
            telemetry_handler.record_user_action(
                CseOperation.OVDC_DISABLE,
                status=OperationStatus.SUCCESS)  # noqa: E501
    except Exception as err:
        # Record telemetry
        if k8s_runtimes_added:
            telemetry_handler.record_user_action(CseOperation.OVDC_ENABLE,
                                                 status=OperationStatus.FAILED)
        if k8s_runtimes_deleted:
            telemetry_handler.record_user_action(CseOperation.OVDC_DISABLE,
                                                 status=OperationStatus.FAILED)
        logger.SERVER_LOGGER.error(err)
        task.update(status=vcd_client.TaskStatus.ERROR.value,
                    namespace='vcloud.cse',
                    operation='Failed to update OVDC',
                    operation_name=operation_name,
                    details=f'Failed with error: {err}',
                    progress=None,
                    owner_href=vdc.href,
                    owner_name=vdc.name,
                    owner_type=vcd_client.EntityType.VDC.value,
                    user_href=user_href,
                    user_name=operation_context.user.name,
                    task_href=task_href,
                    org_href=operation_context.user.org_href,
                    error_message=f"{err}")
    finally:
        if operation_context.sysadmin_client:
            operation_context.end()