Example #1
    def _load_def_schema(self, msg_update_callback=utils.NullPrinter()):
        """Load cluster interface and cluster entity type to global context.

        If defined entity framework is supported by vCD api version, load
        defined entity interface and defined entity type registered during
        server install

        :param utils.ConsoleMessagePrinter msg_update_callback: Callback
            object that writes messages onto console.
        """
        sysadmin_client = None
        try:
            sysadmin_client = vcd_utils.get_sys_admin_client()
            logger_wire = logger.NULL_LOGGER
            if utils.str_to_bool(
                    self.config['service'].get('log_wire', False)):
                logger_wire = logger.SERVER_CLOUDAPI_WIRE_LOGGER

            cloudapi_client = \
                vcd_utils.get_cloudapi_client_from_vcd_client(sysadmin_client,
                                                              logger.SERVER_LOGGER, # noqa: E501
                                                              logger_wire)
            raise_error_if_def_not_supported(cloudapi_client)
            schema_svc = def_schema_svc.DefSchemaService(cloudapi_client)
            defKey = def_utils.DefKey
            keys_map = def_utils.MAP_API_VERSION_TO_KEYS[float(
                sysadmin_client.get_api_version())]  # noqa: E501
            interface_id = def_utils.generate_interface_id(
                vendor=keys_map[defKey.VENDOR],  # noqa: E501
                nss=keys_map[defKey.INTERFACE_NSS],  # noqa: E501
                version=keys_map[defKey.INTERFACE_VERSION])  # noqa: E501
            entity_type_id = def_utils.generate_entity_type_id(
                vendor=keys_map[defKey.VENDOR],  # noqa: E501
                nss=keys_map[defKey.ENTITY_TYPE_NSS],  # noqa: E501
                version=keys_map[defKey.ENTITY_TYPE_VERSION])  # noqa: E501
            self._nativeInterface = schema_svc.get_interface(interface_id)
            self._nativeEntityType = schema_svc.get_entity_type(entity_type_id)
            msg = "Successfully loaded defined entity schema to global context"
            msg_update_callback.general(msg)
            logger.SERVER_LOGGER.debug(msg)
        except cse_exception.DefNotSupportedException:
            msg = "Skipping initialization of defined entity type" \
                  " and defined entity interface"
            msg_update_callback.info(msg)
            logger.SERVER_LOGGER.debug(msg)
        except cse_exception.DefSchemaServiceError as e:
            msg = f"Error while loading defined entity schema: {e.minor_error_code}"  # noqa: E501
            msg_update_callback.error(msg)
            logger.SERVER_LOGGER.debug(msg)
            raise e
        except Exception as e:
            msg = f"Failed to load defined entity schema to global context: {str(e)}"  # noqa: E501
            msg_update_callback.error(msg)
            logger.SERVER_LOGGER.error(msg, exc_info=True)
            raise
        finally:
            if sysadmin_client:
                sysadmin_client.logout()
    def __init__(self, pks_ctx, op_ctx: ctx.OperationContext):
        """Initialize PKS broker.

        :param dict pks_ctx: dictionary that must contain at least the keys
            ['username', 'secret', 'host', 'port', 'uaac_port']; 'proxy' and
            'pks_compute_profile_name' are optional keys. Currently all
            callers of this method use the ovdc cache (subject to change) to
            initialize the PKS broker.
        """
        self.context: ctx.OperationContext = None
        # populates above attributes
        super().__init__(op_ctx)

        if not pks_ctx:
            raise ValueError(
                "PKS context is required to establish connection to PKS")

        self.username = pks_ctx['username']
        self.secret = pks_ctx['secret']
        self.pks_host_uri = f"https://{pks_ctx['host']}:{pks_ctx['port']}"
        self.uaac_uri = f"https://{pks_ctx['host']}:{pks_ctx['uaac_port']}"
        self.proxy_uri = None
        if pks_ctx.get('proxy'):
            self.proxy_uri = f"http://{pks_ctx['proxy']}:80"
        self.compute_profile = pks_ctx.get(PKS_COMPUTE_PROFILE_KEY, None)
        self.nsxt_server = \
            utils.get_pks_cache().get_nsxt_info(pks_ctx.get('vc'))
        self.nsxt_client = None
        self.pks_wire_logger = NULL_LOGGER
        nsxt_wire_logger = NULL_LOGGER
        config = utils.get_server_runtime_config()
        if utils.str_to_bool(config['service'].get('log_wire')):
            nsxt_wire_logger = SERVER_NSXT_WIRE_LOGGER
            self.pks_wire_logger = SERVER_PKS_WIRE_LOGGER
        if self.nsxt_server:
            self.nsxt_client = NSXTClient(
                host=self.nsxt_server.get('host'),
                username=self.nsxt_server.get('username'),
                password=self.nsxt_server.get('password'),
                logger_debug=SERVER_LOGGER,
                logger_wire=nsxt_wire_logger,
                http_proxy=self.nsxt_server.get('proxy'),
                https_proxy=self.nsxt_server.get('proxy'),
                verify_ssl=self.nsxt_server.get('verify'))
        # TODO() Add support in pyvcloud to send metadata values with their
        # types intact.
        verify_ssl = pks_ctx.get('verify')
        self.verify = True
        if isinstance(verify_ssl, bool):
            self.verify = verify_ssl
        elif isinstance(verify_ssl, str):
            self.verify = utils.str_to_bool(verify_ssl)

        self.pks_client = self._get_pks_client(self._get_token())
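Nearly every example on this page routes string-typed configuration values (most often the 'log_wire' flag or an environment variable) through str_to_bool before acting on them. The helper itself never appears in these snippets; the following is only a minimal sketch of the behavior the call sites assume (booleans pass through, 'true'/'True' strings become True, anything else becomes False), not the CSE implementation.

def str_to_bool(value=None):
    """Coerce a bool-or-string flag value to a boolean (illustrative sketch).

    Booleans pass through unchanged; strings compare case-insensitively
    against 'true'; None and anything unrecognized map to False.
    """
    if isinstance(value, bool):
        return value
    if isinstance(value, str):
        return value.strip().lower() == 'true'
    return False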
Example #3
def get_sys_admin_client():
    server_config = get_server_runtime_config()
    if not server_config['vcd']['verify']:
        SERVER_LOGGER.warning("InsecureRequestWarning: Unverified HTTPS "
                              "request is being made. Adding certificate "
                              "verification is strongly advised.")
        requests.packages.urllib3.disable_warnings()
    log_filename = None
    log_wire = str_to_bool(server_config['service'].get('log_wire'))
    if log_wire:
        log_filename = SERVER_DEBUG_WIRELOG_FILEPATH
    client = vcd_client.Client(
        uri=server_config['vcd']['host'],
        api_version=server_config['vcd']['api_version'],
        verify_ssl_certs=server_config['vcd']['verify'],
        log_file=log_filename,
        log_requests=log_wire,
        log_headers=log_wire,
        log_bodies=log_wire)
    credentials = vcd_client.BasicLoginCredentials(
        server_config['vcd']['username'],
        SYSTEM_ORG_NAME,
        server_config['vcd']['password'])
    client.set_credentials(credentials)
    return client
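A typical call site follows the same pattern as Example #1: obtain the sys-admin client, do the work, and always log out in a finally block. A minimal usage sketch (the work done with the client is a placeholder):

sysadmin_client = None
try:
    sysadmin_client = get_sys_admin_client()
    api_version = sysadmin_client.get_api_version()  # placeholder work
finally:
    if sysadmin_client:
        sysadmin_client.logout()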
def list_ovdc(operation_context: ctx.OperationContext) -> List[dict]:
    """List all ovdc and their k8s runtimes.

    :param ctx.OperationContext operation_context: context for the request
    :return: list of dictionaries containing details about the ovdcs
    :rtype: List[dict]
    """
    # NOTE: For CSE 3.0, if the `enable_tkg_plus` flag in the config is set
    # to false, hide TKG+ information by removing it from the result.
    # Record telemetry
    telemetry_handler.record_user_action_details(cse_operation=CseOperation.OVDC_LIST, # noqa: E501
                                                 cse_params={})

    ovdcs = []
    org_vdcs = vcd_utils.get_all_ovdcs(operation_context.client)
    for ovdc in org_vdcs:
        ovdc_name = ovdc.get('name')
        config = utils.get_server_runtime_config()
        log_wire = utils.str_to_bool(config.get('service', {}).get('log_wire'))
        ovdc_id = vcd_utils.extract_id(ovdc.get('id'))
        ovdc_details = asdict(
            get_ovdc_k8s_runtime_details(operation_context.sysadmin_client,
                                         ovdc_id=ovdc_id,
                                         ovdc_name=ovdc_name,
                                         log_wire=log_wire))
        if ClusterEntityKind.TKG_PLUS.value in ovdc_details['k8s_runtime'] \
                and not utils.is_tkg_plus_enabled():  # noqa: E501
            ovdc_details['k8s_runtime'].remove(ClusterEntityKind.TKG_PLUS.value)  # noqa: E501
        # TODO: Find a better way to remove remove_cp_from_vms_on_disable
        del ovdc_details['remove_cp_from_vms_on_disable']
        ovdcs.append(ovdc_details)
    return ovdcs
def get_ovdc(operation_context: ctx.OperationContext, ovdc_id: str) -> dict:
    """Get ovdc info for a particular ovdc.

    :param ctx.OperationContext operation_context: context for the request
    :param str ovdc_id: ID of the ovdc
    :return: dictionary containing the ovdc information
    :rtype: dict
    """
    # NOTE: For CSE 3.0, if the `enable_tkg_plus` flag in the config is set
    # to false, hide TKG+ information by removing it from the result.
    cse_params = {
        RequestKey.OVDC_ID: ovdc_id
    }
    telemetry_handler.record_user_action_details(cse_operation=CseOperation.OVDC_INFO, # noqa: E501
                                                 cse_params=cse_params)
    config = utils.get_server_runtime_config()
    log_wire = utils.str_to_bool(config.get('service', {}).get('log_wire'))
    result = asdict(get_ovdc_k8s_runtime_details(operation_context.sysadmin_client, # noqa: E501
                                                 ovdc_id=ovdc_id,
                                                 log_wire=log_wire))
    # TODO: Find a better way to avoid sending remove_cp_from_vms_on_disable
    # flag
    if ClusterEntityKind.TKG_PLUS.value in result['k8s_runtime'] \
            and not utils.is_tkg_plus_enabled():
        result['k8s_runtime'].remove(ClusterEntityKind.TKG_PLUS.value)
    del result['remove_cp_from_vms_on_disable']
    return result
def ovdc_compute_policy_list(request_data, op_ctx: ctx.OperationContext):
    """Request handler for ovdc compute-policy list operation.

    Required data: ovdc_id

    :return: list of dictionaries describing compute policies on the ovdc.
    """
    required = [RequestKey.OVDC_ID]
    req_utils.validate_payload(request_data, required)

    config = utils.get_server_runtime_config()
    cpm = compute_policy_manager.ComputePolicyManager(
        op_ctx.sysadmin_client,
        log_wire=utils.str_to_bool(config['service'].get('log_wire')))
    compute_policies = []
    for cp in \
            compute_policy_manager.list_cse_sizing_policies_on_vdc(
                cpm,
                request_data[RequestKey.OVDC_ID]):
        policy = {
            'name': cp['display_name'],
            'id': cp['id'],
            'href': cp['href']
        }
        compute_policies.append(policy)
    return compute_policies
    def __init__(self, pks_ctx, tenant_auth_token):
        """Initialize PKS broker.

        :param dict pks_ctx: dictionary that must contain at least the keys
            ['username', 'secret', 'host', 'port', 'uaac_port']; 'proxy' and
            'pks_compute_profile_name' are optional keys. Currently all
            callers of this method use the ovdc cache (subject to change) to
            initialize the PKS broker.
        """
        self.tenant_client = None
        self.client_session = None
        self.tenant_user_name = None
        self.tenant_user_id = None
        self.tenant_org_name = None
        self.tenant_org_href = None
        # populates above attributes
        super().__init__(tenant_auth_token)

        if not pks_ctx:
            raise ValueError(
                "PKS context is required to establish connection to PKS")

        self.username = pks_ctx['username']
        self.secret = pks_ctx['secret']
        self.pks_host_uri = f"https://{pks_ctx['host']}:{pks_ctx['port']}"
        self.uaac_uri = f"https://{pks_ctx['host']}:{pks_ctx['uaac_port']}"
        self.proxy_uri = None
        if pks_ctx.get('proxy'):
            self.proxy_uri = f"http://{pks_ctx['proxy']}:80"
        self.compute_profile = pks_ctx.get(PKS_COMPUTE_PROFILE_KEY, None)
        self.nsxt_server = \
            utils.get_pks_cache().get_nsxt_info(pks_ctx.get('vc'))
        self.nsxt_client = None
        if self.nsxt_server:
            self.nsxt_client = NSXTClient(
                host=self.nsxt_server.get('host'),
                username=self.nsxt_server.get('username'),
                password=self.nsxt_server.get('password'),
                http_proxy=self.nsxt_server.get('proxy'),
                https_proxy=self.nsxt_server.get('proxy'),
                verify_ssl=self.nsxt_server.get('verify'),
                log_requests=True,
                log_headers=True,
                log_body=True)
        # TODO() Add support in pyvcloud to send metadata values with their
        # types intact.
        verify_ssl = pks_ctx.get('verify')
        self.verify = True
        if isinstance(verify_ssl, bool):
            self.verify = verify_ssl
        elif isinstance(verify_ssl, str):
            self.verify = utils.str_to_bool(verify_ssl)

        token = self._get_token()
        self.client_v1 = self._get_pks_client(token, self.VERSION_V1)
        self.client_v1beta = self._get_pks_client(token, self.VERSION_V1BETA)
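Both PKS broker constructors above coerce the 'verify' metadata value, which may arrive as a bool or as a string, with the same three-branch pattern. Pulled out as a standalone helper (an illustrative sketch, not part of the CSE codebase), the pattern reads:

def normalize_verify_flag(verify_ssl, default=True):
    """Return a boolean for a 'verify' value that may be a bool or a string."""
    if isinstance(verify_ssl, bool):
        return verify_ssl
    if isinstance(verify_ssl, str):
        return str_to_bool(verify_ssl)
    return default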
Example #8
def ovdc_list(request_data, tenant_auth_token):
    """Request handler for ovdc list operation.

    :return: List of dictionaries with org VDC k8s provider metadata.
    """
    client, _ = vcd_utils.connect_vcd_user_via_token(tenant_auth_token)
    list_pks_plans = utils.str_to_bool(request_data[RequestKey.LIST_PKS_PLANS])

    return ovdc_utils.get_ovdc_list(client,
                                    list_pks_plans=list_pks_plans,
                                    tenant_auth_token=tenant_auth_token)
Example #9
    def invoke(self, op):
        """Handle ovdc related operations.

        :param CseOperation op: Operation to be performed on the ovdc.

        :return result: result of the operation.

        :rtype: dict
        """
        result = {}

        if op == CseOperation.OVDC_UPDATE:
            ovdc_id = self.req_spec.get(RequestKey.OVDC_ID)
            org_name = self.req_spec.get(RequestKey.ORG_NAME)
            pks_plans = self.req_spec.get(RequestKey.PKS_PLAN_NAME)
            pks_cluster_domain = self.req_spec.get(
                RequestKey.PKS_CLUSTER_DOMAIN)  # noqa: E501
            container_provider = self.req_spec.get(RequestKey.K8S_PROVIDER)

            ctr_prov_ctx = construct_ctr_prov_ctx_from_pks_cache(
                ovdc_id=ovdc_id,
                org_name=org_name,
                pks_plans=pks_plans,
                pks_cluster_domain=pks_cluster_domain,
                container_provider=container_provider)

            if container_provider == K8sProviders.PKS:
                if is_pks_enabled():
                    create_pks_compute_profile(ctr_prov_ctx,
                                               self.tenant_auth_token,
                                               self.req_spec)
                else:
                    raise CseServerError(
                        'CSE is not configured to work with PKS.')

            task = OvdcManager().set_ovdc_container_provider_metadata(
                ovdc_id=ovdc_id,
                container_prov_data=ctr_prov_ctx,
                container_provider=container_provider)

            result = {'task_href': task.get('href')}
        elif op == CseOperation.OVDC_INFO:
            ovdc_id = self.req_spec.get(RequestKey.OVDC_ID)
            result = OvdcManager().get_ovdc_container_provider_metadata(
                ovdc_id=ovdc_id)
        elif op == CseOperation.OVDC_LIST:
            list_pks_plans = str_to_bool(
                self.req_spec.get(RequestKey.LIST_PKS_PLANS))  # noqa: E501
            result = self._list_ovdcs(list_pks_plans=list_pks_plans)

        return result
def ovdc_list(request_data, tenant_auth_token):
    """Request handler for ovdc list operation.

    :return: List of dictionaries with org VDC k8s provider metadata.
    """
    defaults = {RequestKey.LIST_PKS_PLANS: False}
    validated_data = {**defaults, **request_data}

    client, _ = vcd_utils.connect_vcd_user_via_token(tenant_auth_token)
    # TODO check if this is needed
    list_pks_plans = utils.str_to_bool(
        validated_data[RequestKey.LIST_PKS_PLANS])  # noqa: E501

    return ovdc_utils.get_ovdc_list(client,
                                    list_pks_plans=list_pks_plans,
                                    tenant_auth_token=tenant_auth_token)
Example #11
def ovdc_compute_policy_list(request_data,
                             request_context: ctx.RequestContext):
    """Request handler for ovdc compute-policy list operation.

    Required data: ovdc_id

    :return: list of compute policies on the ovdc.
    """
    required = [RequestKey.OVDC_ID]
    req_utils.validate_payload(request_data, required)

    config = utils.get_server_runtime_config()
    cpm = compute_policy_manager.ComputePolicyManager(
        request_context.sysadmin_client,
        log_wire=utils.str_to_bool(config['service'].get('log_wire')))
    return cpm.list_compute_policies_on_vdc(request_data[RequestKey.OVDC_ID])
def connect_vcd_user_via_token(tenant_auth_token):
    server_config = get_server_runtime_config()
    vcd_uri = server_config['vcd']['host']
    version = server_config['vcd']['api_version']
    verify_ssl_certs = server_config['vcd']['verify']
    log_filename = None
    log_wire = str_to_bool(server_config['service'].get('log_wire'))
    if log_wire:
        log_filename = SERVER_DEBUG_WIRELOG_FILEPATH
    client_tenant = Client(uri=vcd_uri,
                           api_version=version,
                           verify_ssl_certs=verify_ssl_certs,
                           log_file=log_filename,
                           log_requests=log_wire,
                           log_headers=log_wire,
                           log_bodies=log_wire)
    session = client_tenant.rehydrate_from_token(tenant_auth_token)
    return (client_tenant, session)
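connect_vcd_user_via_token returns the tenant client together with the rehydrated session; most callers in these examples only keep the client. A hedged usage sketch, assuming tenant_auth_token holds a valid vCD session token:

client_tenant, session = connect_vcd_user_via_token(tenant_auth_token)
try:
    org_resource = client_tenant.get_org()  # e.g. inspect the caller's org
finally:
    client_tenant.logout()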
def ovdc_list(request_data, tenant_auth_token, is_jwt_token):
    """Request handler for ovdc list operation.

    :return: List of dictionaries with org VDC k8s provider metadata.
    """
    defaults = {
        RequestKey.LIST_PKS_PLANS: False
    }
    validated_data = {**defaults, **request_data}

    client = vcd_utils.connect_vcd_user_via_token(
        tenant_auth_token, is_jwt_token)
    # TODO check if this is needed
    list_pks_plans = utils.str_to_bool(validated_data[RequestKey.LIST_PKS_PLANS]) # noqa: E501

    # Record telemetry data
    cse_params = copy.deepcopy(validated_data)
    cse_params[RequestKey.LIST_PKS_PLANS] = list_pks_plans
    record_user_action_details(cse_operation=CseOperation.OVDC_LIST, cse_params=cse_params)  # noqa: E501

    return ovdc_utils.get_ovdc_list(client, list_pks_plans=list_pks_plans,
                                    tenant_auth_token=tenant_auth_token,
                                    is_jwt_token=is_jwt_token)
Example #14
    def _load_template_definition_from_catalog(
        self, msg_update_callback=utils.NullPrinter()):  # noqa: E501
        msg = "Loading k8s template definition from catalog"
        logger.SERVER_LOGGER.info(msg)
        msg_update_callback.general_no_color(msg)

        client = None
        try:
            log_filename = None
            log_wire = \
                utils.str_to_bool(self.config['service'].get('log_wire'))
            if log_wire:
                log_filename = logger.SERVER_DEBUG_WIRELOG_FILEPATH

            client = Client(self.config['vcd']['host'],
                            api_version=self.config['vcd']['api_version'],
                            verify_ssl_certs=self.config['vcd']['verify'],
                            log_file=log_filename,
                            log_requests=log_wire,
                            log_headers=log_wire,
                            log_bodies=log_wire)
            credentials = BasicLoginCredentials(self.config['vcd']['username'],
                                                SYSTEM_ORG_NAME,
                                                self.config['vcd']['password'])
            client.set_credentials(credentials)

            org_name = self.config['broker']['org']
            catalog_name = self.config['broker']['catalog']
            k8_templates = ltm.get_all_k8s_local_template_definition(
                client=client, catalog_name=catalog_name, org_name=org_name)

            if not k8_templates:
                msg = "No valid K8 templates were found in catalog " \
                      f"'{catalog_name}'. Unable to start CSE server."
                msg_update_callback.error(msg)
                logger.SERVER_LOGGER.error(msg)
                sys.exit(1)

            # Check that default k8s template exists in vCD at the correct
            # revision
            default_template_name = \
                self.config['broker']['default_template_name']
            default_template_revision = \
                str(self.config['broker']['default_template_revision'])
            found_default_template = False
            for template in k8_templates:
                if (str(template[LocalTemplateKey.REVISION]) == default_template_revision  # noqa: E501
                        and template[LocalTemplateKey.NAME] == default_template_name):  # noqa: E501
                    found_default_template = True

                msg = f"Found K8 template '{template['name']}' at revision " \
                      f"{template['revision']} in catalog '{catalog_name}'"
                msg_update_callback.general(msg)
                logger.SERVER_LOGGER.info(msg)

            if not found_default_template:
                msg = f"Default template {default_template_name} with " \
                      f"revision {default_template_revision} not found." \
                      " Unable to start CSE server."
                msg_update_callback.error(msg)
                logger.SERVER_LOGGER.error(msg)
                sys.exit(1)

            self.config['broker']['templates'] = k8_templates
        finally:
            if client:
                client.logout()
    def _process_template_compute_policy_compliance(self,
                                                    msg_update_callback=None):
        msg = "Processing compute policy for k8s templates."
        LOGGER.info(msg)
        if msg_update_callback:
            msg_update_callback.general_no_color(msg)

        log_filename = None
        log_wire = str_to_bool(self.config['service'].get('log_wire'))
        if log_wire:
            log_filename = SERVER_DEBUG_WIRELOG_FILEPATH

        org_name = self.config['broker']['org']
        catalog_name = self.config['broker']['catalog']
        api_version = self.config['vcd']['api_version']
        client = None
        try:
            if float(api_version) >= float(
                    ApiVersion.VERSION_32.value):  # noqa: E501
                # TODO this api version 32 client should be removed once
                # vcd can handle cp removal/replacement on version 33
                client = Client(self.config['vcd']['host'],
                                api_version=ApiVersion.VERSION_32.value,
                                verify_ssl_certs=self.config['vcd']['verify'],
                                log_file=log_filename,
                                log_requests=log_wire,
                                log_headers=log_wire,
                                log_bodies=log_wire)
            else:
                client = Client(self.config['vcd']['host'],
                                api_version=api_version,
                                verify_ssl_certs=self.config['vcd']['verify'],
                                log_file=log_filename,
                                log_requests=log_wire,
                                log_headers=log_wire,
                                log_bodies=log_wire)

            credentials = BasicLoginCredentials(self.config['vcd']['username'],
                                                SYSTEM_ORG_NAME,
                                                self.config['vcd']['password'])
            client.set_credentials(credentials)
            cpm = ComputePolicyManager(client)

            try:
                for template in self.config['broker']['templates']:
                    policy_name = template[LocalTemplateKey.COMPUTE_POLICY]
                    catalog_item_name = template[
                        LocalTemplateKey.CATALOG_ITEM_NAME]  # noqa: E501
                    # if policy name is not empty, stamp it on the template
                    if policy_name:
                        policy = cpm.get_policy(policy_name=policy_name)
                        # create the policy if not present in system
                        if not policy:
                            msg = "Creating missing compute policy " \
                                f"'{policy_name}'."
                            if msg_update_callback:
                                msg_update_callback.info(msg)
                            LOGGER.debug(msg)
                            policy = cpm.add_policy(policy_name=policy_name)

                        msg = f"Assigning compute policy '{policy_name}' to " \
                              f"template '{catalog_item_name}'."
                        if msg_update_callback:
                            msg_update_callback.general(msg)
                        LOGGER.debug(msg)
                        cpm.assign_compute_policy_to_vapp_template_vms(
                            compute_policy_href=policy['href'],
                            org_name=org_name,
                            catalog_name=catalog_name,
                            catalog_item_name=catalog_item_name)
                    else:
                        # empty policy name means we should remove policy from
                        # template
                        msg = f"Removing compute policy from template " \
                              f"'{catalog_item_name}'."
                        if msg_update_callback:
                            msg_update_callback.general(msg)
                        LOGGER.debug(msg)

                        cpm.remove_all_compute_policies_from_vapp_template_vms(
                            org_name=org_name,
                            catalog_name=catalog_name,
                            catalog_item_name=catalog_item_name)
            except OperationNotSupportedException:
                msg = "Compute policy not supported by vCD. Skipping " \
                    "assigning/removing it to/from templates."
                if msg_update_callback:
                    msg_update_callback.info(msg)
                LOGGER.debug(msg)
        finally:
            if client:
                client.logout()
def check_cse_installation(config, msg_update_callback=None):
    """Ensure that CSE is installed on vCD according to the config file.

    Checks,
        1. AMQP exchange exists
        2. CSE is registered with vCD,
        3. CSE K8 catalog exists

    :param dict config: config yaml file as a dictionary
    :param utils.ConsoleMessagePrinter msg_update_callback: Callback object
        that writes messages onto console.

    :raises Exception: if CSE is not registered to vCD as an extension, or if
        specified catalog does not exist, or if specified template(s) do not
        exist.
    """
    if msg_update_callback:
        msg_update_callback.info(
            "Validating CSE installation according to config file")
    err_msgs = []
    client = None
    try:
        log_filename = None
        log_wire = str_to_bool(config['service'].get('log_wire'))
        if log_wire:
            log_filename = SERVER_DEBUG_WIRELOG_FILEPATH

        client = Client(config['vcd']['host'],
                        api_version=config['vcd']['api_version'],
                        verify_ssl_certs=config['vcd']['verify'],
                        log_file=log_filename,
                        log_requests=log_wire,
                        log_headers=log_wire,
                        log_bodies=log_wire)
        credentials = BasicLoginCredentials(config['vcd']['username'],
                                            SYSTEM_ORG_NAME,
                                            config['vcd']['password'])
        client.set_credentials(credentials)

        # check that AMQP exchange exists
        amqp = config['amqp']
        credentials = pika.PlainCredentials(amqp['username'], amqp['password'])
        parameters = pika.ConnectionParameters(amqp['host'],
                                               amqp['port'],
                                               amqp['vhost'],
                                               credentials,
                                               ssl=amqp['ssl'],
                                               connection_attempts=3,
                                               retry_delay=2,
                                               socket_timeout=5)
        connection = None
        try:
            connection = pika.BlockingConnection(parameters)
            channel = connection.channel()
            try:
                channel.exchange_declare(exchange=amqp['exchange'],
                                         exchange_type=EXCHANGE_TYPE,
                                         durable=True,
                                         passive=True,
                                         auto_delete=False)
                if msg_update_callback:
                    msg_update_callback.general(
                        f"AMQP exchange '{amqp['exchange']}' exists")
            except pika.exceptions.ChannelClosed:
                msg = f"AMQP exchange '{amqp['exchange']}' does not exist"
                if msg_update_callback:
                    msg_update_callback.error(msg)
                err_msgs.append(msg)
        except Exception:  # TODO() replace raw exception with specific
            msg = f"Could not connect to AMQP exchange '{amqp['exchange']}'"
            if msg_update_callback:
                msg_update_callback.error(msg)
            err_msgs.append(msg)
        finally:
            if connection is not None:
                connection.close()

        # check that CSE is registered to vCD correctly
        ext = APIExtension(client)
        try:
            cse_info = ext.get_extension(CSE_SERVICE_NAME,
                                         namespace=CSE_SERVICE_NAMESPACE)
            rkey_matches = cse_info['routingKey'] == amqp['routing_key']
            exchange_matches = cse_info['exchange'] == amqp['exchange']
            if not rkey_matches or not exchange_matches:
                msg = "CSE is registered as an extension, but the extension " \
                      "settings on vCD are not the same as config settings."
                if not rkey_matches:
                    msg += f"\nvCD-CSE routing key: {cse_info['routingKey']}" \
                           f"\nCSE config routing key: {amqp['routing_key']}"
                if not exchange_matches:
                    msg += f"\nvCD-CSE exchange: {cse_info['exchange']}" \
                           f"\nCSE config exchange: {amqp['exchange']}"
                if msg_update_callback:
                    msg_update_callback.info(msg)
                err_msgs.append(msg)
            if cse_info['enabled'] == 'true':
                if msg_update_callback:
                    msg_update_callback.general(
                        "CSE on vCD is currently enabled")
            else:
                if msg_update_callback:
                    msg_update_callback.info(
                        "CSE on vCD is currently disabled")
        except MissingRecordException:
            msg = "CSE is not registered to vCD"
            if msg_update_callback:
                msg_update_callback.error(msg)
            err_msgs.append(msg)

        # check that catalog exists in vCD
        org_name = config['broker']['org']
        org = get_org(client, org_name=org_name)
        catalog_name = config['broker']['catalog']
        if catalog_exists(org, catalog_name):
            if msg_update_callback:
                msg_update_callback.general(f"Found catalog '{catalog_name}'")
        else:
            msg = f"Catalog '{catalog_name}' not found"
            if msg_update_callback:
                msg_update_callback.error(msg)
            err_msgs.append(msg)
    finally:
        if client:
            client.logout()

    if err_msgs:
        raise Exception(err_msgs)
    if msg_update_callback:
        msg_update_callback.general("CSE installation is valid")
def install_template(template_name,
                     template_revision,
                     config_file_name,
                     force_create,
                     retain_temp_vapp,
                     ssh_key,
                     skip_config_decryption=False,
                     decryption_password=None,
                     msg_update_callback=None):
    """Install a particular template in CSE.

    If template_name and template_revision are wildcarded to '*', all
    templates defined in the remote template cookbook will be installed.

    :param str template_name:
    :param str template_revision:
    :param str config_file_name: config file name.
    :param bool force_create: if True and template already exists in vCD,
        overwrites existing template.
    :param str ssh_key: public ssh key to place into template vApp(s).
    :param bool retain_temp_vapp: if True, the temporary vApp will not be
        destroyed, so the user can ssh into it and debug the vm.
    :param bool skip_config_decryption: do not decrypt the config file.
    :param str decryption_password: password to decrypt the config file.
    :param utils.ConsoleMessagePrinter msg_update_callback: Callback object
        that writes messages onto console.
    """
    configure_install_logger()

    config = get_validated_config(
        config_file_name,
        skip_config_decryption=skip_config_decryption,
        decryption_password=decryption_password,
        msg_update_callback=msg_update_callback)
    populate_vsphere_list(config['vcs'])

    msg = f"Installing template '{template_name}' at revision " \
          f"'{template_revision}' on vCloud Director using config file " \
          f"'{config_file_name}'"
    if msg_update_callback:
        msg_update_callback.info(msg)
    LOGGER.info(msg)

    client = None
    try:
        log_filename = None
        log_wire = str_to_bool(config['service'].get('log_wire'))
        if log_wire:
            log_filename = INSTALL_WIRELOG_FILEPATH

        client = Client(config['vcd']['host'],
                        api_version=config['vcd']['api_version'],
                        verify_ssl_certs=config['vcd']['verify'],
                        log_file=log_filename,
                        log_requests=log_wire,
                        log_headers=log_wire,
                        log_bodies=log_wire)
        credentials = BasicLoginCredentials(config['vcd']['username'],
                                            SYSTEM_ORG_NAME,
                                            config['vcd']['password'])
        client.set_credentials(credentials)
        msg = f"Connected to vCD as system administrator: " \
              f"{config['vcd']['host']}:{config['vcd']['port']}"
        if msg_update_callback:
            msg_update_callback.general(msg)
        LOGGER.info(msg)

        # read remote template cookbook
        rtm = RemoteTemplateManager(
            remote_template_cookbook_url=config['broker']
            ['remote_template_cookbook_url'],  # noqa: E501
            logger=LOGGER,
            msg_update_callback=msg_update_callback)
        remote_template_cookbook = rtm.get_remote_template_cookbook()

        found_template = False
        for template in remote_template_cookbook['templates']:
            template_name_matched = template_name in (
                template[RemoteTemplateKey.NAME], '*')  # noqa: E501
            template_revision_matched = str(template_revision) in (str(
                template[RemoteTemplateKey.REVISION]), '*')  # noqa: E501
            if template_name_matched and template_revision_matched:
                found_template = True
                _install_template(
                    client=client,
                    remote_template_manager=rtm,
                    template=template,
                    org_name=config['broker']['org'],
                    vdc_name=config['broker']['vdc'],
                    catalog_name=config['broker']['catalog'],
                    network_name=config['broker']['network'],
                    ip_allocation_mode=config['broker']['ip_allocation_mode'],
                    storage_profile=config['broker']['storage_profile'],
                    force_update=force_create,
                    retain_temp_vapp=retain_temp_vapp,
                    ssh_key=ssh_key,
                    msg_update_callback=msg_update_callback)

        if not found_template:
            msg = f"Template '{template_name}' at revision " \
                  f"'{template_revision}' not found in remote template " \
                  "cookbook."
            if msg_update_callback:
                msg_update_callback.error(msg)
            LOGGER.error(msg, exc_info=True)
    except Exception:
        if msg_update_callback:
            msg_update_callback.error(
                "Template Installation Error. Check CSE install logs")
        LOGGER.error("Template Installation Error", exc_info=True)
    finally:
        if client is not None:
            client.logout()
Example #18
def get_validated_config(config_file_name,
                         pks_config_file_name=None,
                         skip_config_decryption=False,
                         decryption_password=None,
                         log_wire_file=None,
                         logger_debug=NULL_LOGGER,
                         msg_update_callback=NullPrinter()):
    """Get the config file as a dictionary and check for validity.

    Ensures that all properties exist and all values are the expected type.
    Checks that AMQP connection is available, and vCD/VCs are valid.
    Does not guarantee that CSE has been installed according to this
    config file.

    :param str config_file_name: path to config file.
    :param str pks_config_file_name: path to PKS config file.
    :param bool skip_config_decryption: do not decrypt the config file.
    :param str decryption_password: password to decrypt the config file.
    :param str log_wire_file: file to use for wire-logging pyvcloud requests
        and responses, if wire logging is enabled.
    :param logging.Logger logger_debug: logger to log with.
    :param utils.ConsoleMessagePrinter msg_update_callback: Callback object.

    :return: CSE config

    :rtype: dict

    :raises KeyError: if config file has missing or extra properties.
    :raises TypeError: if the value type for a config file property
        is incorrect.
    :raises container_service_extension.exceptions.AmqpConnectionError: if
        AMQP connection failed (host, password, port, username,
        vhost is invalid).
    :raises pyvcloud.vcd.exceptions.NotAcceptableException: if 'vcd'
        'api_version' is unsupported.
    :raises requests.exceptions.ConnectionError: if 'vcd' 'host' is invalid.
    :raises pyvcloud.vcd.exceptions.VcdException: if 'vcd' 'username' or
        'password' is invalid.
    :raises pyVmomi.vim.fault.InvalidLogin: if 'vcs' 'username' or 'password'
        is invalid.
    """
    check_file_permissions(config_file_name,
                           msg_update_callback=msg_update_callback)
    if skip_config_decryption:
        with open(config_file_name) as config_file:
            config = yaml.safe_load(config_file) or {}
    else:
        msg_update_callback.info(
            f"Decrypting '{config_file_name}'")
        config = yaml.safe_load(
            get_decrypted_file_contents(config_file_name,
                                        decryption_password)) or {}

    msg_update_callback.info(
        f"Validating config file '{config_file_name}'")
    # This allows us to compare top-level config keys and value types
    sample_config = {
        **SAMPLE_AMQP_CONFIG, **SAMPLE_VCD_CONFIG,
        **SAMPLE_VCS_CONFIG, **SAMPLE_SERVICE_CONFIG,
        **SAMPLE_BROKER_CONFIG
    }
    log_wire = str_to_bool(config.get('service', {}).get('log_wire'))
    nsxt_wire_logger = NULL_LOGGER
    if log_wire:
        nsxt_wire_logger = SERVER_NSXT_WIRE_LOGGER
    else:
        log_wire_file = None
    check_keys_and_value_types(config, sample_config, location='config file',
                               msg_update_callback=msg_update_callback)
    _validate_amqp_config(config['amqp'], msg_update_callback)
    _validate_vcd_and_vcs_config(config['vcd'], config['vcs'],
                                 msg_update_callback,
                                 log_file=log_wire_file,
                                 log_wire=log_wire)
    _validate_broker_config(config['broker'], msg_update_callback,
                            logger_debug)
    check_keys_and_value_types(config['service'],
                               SAMPLE_SERVICE_CONFIG['service'],
                               location="config file 'service' section",
                               excluded_keys=['log_wire'],
                               msg_update_callback=msg_update_callback)
    check_keys_and_value_types(config['service']['telemetry'],
                               SAMPLE_SERVICE_CONFIG['service']['telemetry'],
                               location="config file 'service->telemetry' "
                                        "section",
                               msg_update_callback=msg_update_callback)
    msg_update_callback.general(
        f"Config file '{config_file_name}' is valid")
    if pks_config_file_name:
        check_file_permissions(pks_config_file_name,
                               msg_update_callback=msg_update_callback)
        if skip_config_decryption:
            with open(pks_config_file_name) as f:
                pks_config = yaml.safe_load(f) or {}
        else:
            msg_update_callback.info(
                f"Decrypting '{pks_config_file_name}'")
            pks_config = yaml.safe_load(
                get_decrypted_file_contents(pks_config_file_name,
                                            decryption_password)) or {}
        msg_update_callback.info(
            f"Validating PKS config file '{pks_config_file_name}'")
        _validate_pks_config_structure(pks_config, msg_update_callback)
        _validate_pks_config_data_integrity(pks_config,
                                            msg_update_callback,
                                            logger_debug=logger_debug,
                                            logger_wire=nsxt_wire_logger)
        msg_update_callback.general(
            f"PKS Config file '{pks_config_file_name}' is valid")
        config['pks_config'] = pks_config
    else:
        config['pks_config'] = None

    # Store telemetry instance id, url and collector id in config
    store_telemetry_settings(config)

    return config
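A hedged usage sketch of get_validated_config; the config path and password source below are hypothetical placeholders, and NullPrinter is assumed to come from the same utils module used elsewhere in these examples:

import os

config = get_validated_config(
    'config.yaml',                      # hypothetical config file path
    pks_config_file_name=None,
    skip_config_decryption=False,
    decryption_password=os.getenv('CSE_CONFIG_PASSWORD'),
    msg_update_callback=NullPrinter())
amqp_host = config['amqp']['host']      # validated sections are plain dicts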
Example #19
def install_template(template_name,
                     template_revision,
                     config_file_name,
                     force_create,
                     retain_temp_vapp,
                     ssh_key,
                     skip_config_decryption=False,
                     decryption_password=None,
                     msg_update_callback=utils.NullPrinter()):
    """Install a particular template in CSE.

    If template_name and template_revision are wildcarded to '*', all
    templates defined in the remote template cookbook will be installed.

    :param str template_name:
    :param str template_revision:
    :param str config_file_name: config file name.
    :param bool force_create: if True and template already exists in vCD,
        overwrites existing template.
    :param str ssh_key: public ssh key to place into template vApp(s).
    :param bool retain_temp_vapp: if True, the temporary vApp will not be
        destroyed, so the user can ssh into it and debug the vm.
    :param bool skip_config_decryption: do not decrypt the config file.
    :param str decryption_password: password to decrypt the config file.
    :param utils.ConsoleMessagePrinter msg_update_callback: Callback object.
    """
    config = get_validated_config(
        config_file_name,
        skip_config_decryption=skip_config_decryption,
        decryption_password=decryption_password,
        log_wire_file=INSTALL_WIRELOG_FILEPATH,
        logger_debug=INSTALL_LOGGER,
        msg_update_callback=msg_update_callback)

    populate_vsphere_list(config['vcs'])

    msg = f"Installing template '{template_name}' at revision " \
          f"'{template_revision}' on vCloud Director using config file " \
          f"'{config_file_name}'"
    msg_update_callback.info(msg)
    INSTALL_LOGGER.info(msg)

    client = None
    try:
        # Telemetry data construction
        cse_params = {
            PayloadKey.TEMPLATE_NAME: template_name,
            PayloadKey.TEMPLATE_REVISION: template_revision,
            PayloadKey.WAS_DECRYPTION_SKIPPED: bool(skip_config_decryption),
            PayloadKey.WERE_TEMPLATES_FORCE_UPDATED: bool(force_create),
            PayloadKey.WAS_TEMP_VAPP_RETAINED: bool(retain_temp_vapp),
            PayloadKey.WAS_SSH_KEY_SPECIFIED: bool(ssh_key)
        }
        # Record telemetry data
        record_user_action_details(
            cse_operation=CseOperation.TEMPLATE_INSTALL,
            cse_params=cse_params,
            telemetry_settings=config['service']['telemetry'])

        log_filename = None
        log_wire = utils.str_to_bool(config['service'].get('log_wire'))
        if log_wire:
            log_filename = INSTALL_WIRELOG_FILEPATH

        client = Client(config['vcd']['host'],
                        api_version=config['vcd']['api_version'],
                        verify_ssl_certs=config['vcd']['verify'],
                        log_file=log_filename,
                        log_requests=log_wire,
                        log_headers=log_wire,
                        log_bodies=log_wire)
        credentials = BasicLoginCredentials(config['vcd']['username'],
                                            server_constants.SYSTEM_ORG_NAME,
                                            config['vcd']['password'])
        client.set_credentials(credentials)
        msg = f"Connected to vCD as system administrator: " \
              f"{config['vcd']['host']}:{config['vcd']['port']}"
        msg_update_callback.general(msg)
        INSTALL_LOGGER.info(msg)

        # read remote template cookbook
        rtm = RemoteTemplateManager(
            remote_template_cookbook_url=config['broker']
            ['remote_template_cookbook_url'],  # noqa: E501
            logger=INSTALL_LOGGER,
            msg_update_callback=msg_update_callback)
        remote_template_cookbook = rtm.get_remote_template_cookbook()

        found_template = False
        for template in remote_template_cookbook['templates']:
            template_name_matched = template_name in (
                template[server_constants.RemoteTemplateKey.NAME], '*'
            )  # noqa: E501
            template_revision_matched = \
                str(template_revision) in (str(template[server_constants.RemoteTemplateKey.REVISION]), '*') # noqa: E501
            if template_name_matched and template_revision_matched:
                found_template = True
                _install_template(
                    client=client,
                    remote_template_manager=rtm,
                    template=template,
                    org_name=config['broker']['org'],
                    vdc_name=config['broker']['vdc'],
                    catalog_name=config['broker']['catalog'],
                    network_name=config['broker']['network'],
                    ip_allocation_mode=config['broker']['ip_allocation_mode'],
                    storage_profile=config['broker']['storage_profile'],
                    force_update=force_create,
                    retain_temp_vapp=retain_temp_vapp,
                    ssh_key=ssh_key,
                    msg_update_callback=msg_update_callback)

        if not found_template:
            msg = f"Template '{template_name}' at revision " \
                  f"'{template_revision}' not found in remote template " \
                  "cookbook."
            msg_update_callback.error(msg)
            INSTALL_LOGGER.error(msg, exc_info=True)
            raise Exception(msg)

        # Record telemetry data on successful template install
        record_user_action(
            cse_operation=CseOperation.TEMPLATE_INSTALL,
            status=OperationStatus.SUCCESS,
            telemetry_settings=config['service']['telemetry'])  # noqa: E501
    except Exception:
        msg_update_callback.error(
            "Template Installation Error. Check CSE install logs")
        INSTALL_LOGGER.error("Template Installation Error", exc_info=True)

        # Record telemetry data on template install failure
        record_user_action(cse_operation=CseOperation.TEMPLATE_INSTALL,
                           status=OperationStatus.FAILED,
                           telemetry_settings=config['service']['telemetry'])
    finally:
        if client is not None:
            client.logout()
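A hedged invocation sketch for install_template as defined above in Example #19; the wildcard arguments follow the docstring, and the config path is a hypothetical placeholder:

install_template(
    template_name='*',                  # '*' installs every cookbook template
    template_revision='*',
    config_file_name='config.yaml',     # hypothetical config file path
    force_create=False,
    retain_temp_vapp=False,
    ssh_key=None,
    skip_config_decryption=True)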
Example #20
    def _load_template_definition_from_catalog(self,
                                               msg_update_callback=utils.NullPrinter()): # noqa: E501
        # NOTE: If `enable_tkg_plus` in the config file is set to false,
        # CSE server will skip loading the TKG+ template this will prevent
        # users from performing TKG+ related operations.
        msg = "Loading k8s template definition from catalog"
        logger.SERVER_LOGGER.info(msg)
        msg_update_callback.general_no_color(msg)

        client = None
        try:
            log_filename = None
            log_wire = \
                utils.str_to_bool(self.config['service'].get('log_wire'))
            if log_wire:
                log_filename = logger.SERVER_DEBUG_WIRELOG_FILEPATH

            client = Client(self.config['vcd']['host'],
                            api_version=self.config['vcd']['api_version'],
                            verify_ssl_certs=self.config['vcd']['verify'],
                            log_file=log_filename,
                            log_requests=log_wire,
                            log_headers=log_wire,
                            log_bodies=log_wire)
            credentials = BasicLoginCredentials(self.config['vcd']['username'],
                                                server_constants.SYSTEM_ORG_NAME,  # noqa: E501
                                                self.config['vcd']['password'])
            client.set_credentials(credentials)

            is_tkg_plus_enabled = utils.is_tkg_plus_enabled(self.config)
            org_name = self.config['broker']['org']
            catalog_name = self.config['broker']['catalog']
            k8_templates = ltm.get_all_k8s_local_template_definition(
                client=client, catalog_name=catalog_name, org_name=org_name,
                logger_debug=logger.SERVER_LOGGER)

            if not k8_templates:
                msg = "No valid K8 templates were found in catalog " \
                      f"'{catalog_name}'. Unable to start CSE server."
                msg_update_callback.error(msg)
                logger.SERVER_LOGGER.error(msg)
                sys.exit(1)

            # Check that default k8s template exists in vCD at the correct
            # revision
            default_template_name = \
                self.config['broker']['default_template_name']
            default_template_revision = \
                str(self.config['broker']['default_template_revision'])
            found_default_template = False
            # Iterate over a copy because TKG+ templates may be removed below
            for template in list(k8_templates):
                api_version = float(client.get_api_version())
                if api_version >= float(vCDApiVersion.VERSION_35.value) and \
                        template[server_constants.LocalTemplateKey.KIND] == \
                        shared_constants.ClusterEntityKind.TKG_PLUS.value and \
                        not is_tkg_plus_enabled:
                    # TKG+ is not enabled on CSE config. Skip the template and
                    # log the relevant information.
                    msg = "Skipping loading template data for " \
                          f"'{template[server_constants.LocalTemplateKey.NAME]}' as " \
                          "TKG+ is not enabled"  # noqa: E501
                    logger.SERVER_LOGGER.debug(msg)
                    k8_templates.remove(template)
                    continue
                if str(template[server_constants.LocalTemplateKey.REVISION]) == default_template_revision and \
                        template[server_constants.LocalTemplateKey.NAME] == default_template_name: # noqa: E501
                    found_default_template = True

                msg = f"Found K8 template '{template['name']}' at revision " \
                      f"{template['revision']} in catalog '{catalog_name}'"
                msg_update_callback.general(msg)
                logger.SERVER_LOGGER.info(msg)

            if not found_default_template:
                msg = f"Default template {default_template_name} with " \
                      f"revision {default_template_revision} not found." \
                      " Unable to start CSE server."
                msg_update_callback.error(msg)
                logger.SERVER_LOGGER.error(msg)
                sys.exit(1)

            self.config['broker']['templates'] = k8_templates
        finally:
            if client:
                client.logout()
Example #21
    vcd_client.ApiVersion.VERSION_34.value: {
        GroupKey.CLUSTER: {
            CommandNameKey.CREATE: ['sizing_class'],
            CommandNameKey.DELETE: ['k8_runtime', 'cluster_id'],
            CommandNameKey.INFO: ['k8_runtime', 'cluster_id'],
            CommandNameKey.UPGRADE: ['k8_runtime'],
            CommandNameKey.UPGRADE_PLAN: ['k8_runtime'],
            CommandNameKey.CONFIG: ['k8_runtime', 'cluster_id']
        }
    },
    vcd_client.ApiVersion.VERSION_35.value: {
        GroupKey.CLUSTER: {
            CommandNameKey.CREATE: ['cpu', 'memory']
        },
        GroupKey.OVDC: {
            CommandNameKey.ENABLE: [] if str_to_bool(
                os.getenv(cli_constants.ENV_CSE_TKG_PLUS_ENABLED)) else
            ['enable_tkg_plus'],  # noqa: E501
            CommandNameKey.DISABLE: [] if str_to_bool(
                os.getenv(cli_constants.ENV_CSE_TKG_PLUS_ENABLED)) else
            ['disable_tkg_plus']  # noqa: E501
        }
    }
}

UNSUPPORTED_COMMANDS_WITH_SERVER_NOT_RUNNING_BY_VERSION = {
    vcd_client.ApiVersion.VERSION_35.value: [
        GroupKey.VERSION,
        GroupKey.OVDC,
        GroupKey.SYSTEM,
        GroupKey.TEMPLATE,  # noqa: E501
        GroupKey.PKS
Example #22
import os

import requests

import container_service_extension.client.constants as cli_constants
from container_service_extension.exceptions import CseResponseError
from container_service_extension.logger import CLIENT_WIRE_LOGGER
from container_service_extension.logger import NULL_LOGGER
from container_service_extension.minor_error_codes import MinorErrorCode
from container_service_extension.shared_constants import ERROR_DESCRIPTION_KEY
from container_service_extension.shared_constants import ERROR_MINOR_CODE_KEY
from container_service_extension.shared_constants import RESPONSE_MESSAGE_KEY
from container_service_extension.shared_constants import UNKNOWN_ERROR_MESSAGE
from container_service_extension.utils import str_to_bool

wire_logger = NULL_LOGGER
if str_to_bool(os.getenv(cli_constants.ENV_CSE_CLIENT_WIRE_LOGGING)):
    wire_logger = CLIENT_WIRE_LOGGER


def process_response(response):
    """Process the given response dictionary with following keys.

    Log the response if wire logging is enabled.

    If the value of status code is 2xx, return the response content, else
    raise exception with proper error message

    :param requests.models.Response response: object with attributes viz.
        status code and content
        status_code: http status code
        content: response result as string
def convert_cluster(ctx, config_file_name, skip_config_decryption,
                    cluster_name, admin_password, org_name, vdc_name,
                    skip_wait_for_gc):
    if skip_config_decryption:
        decryption_password = None
    else:
        decryption_password = os.getenv('CSE_CONFIG_PASSWORD') or prompt_text(
            PASSWORD_FOR_CONFIG_DECRYPTION_MSG, color='green', hide_input=True)

    try:
        check_python_version()
    except Exception as err:
        click.secho(str(err), fg='red')
        sys.exit(1)

    client = None
    try:
        console_message_printer = ConsoleMessagePrinter()
        config = get_validated_config(
            config_file_name,
            skip_config_decryption=skip_config_decryption,
            decryption_password=decryption_password,
            msg_update_callback=console_message_printer)

        log_filename = None
        log_wire = str_to_bool(config['service'].get('log_wire'))
        if log_wire:
            log_filename = 'cluster_convert_wire.log'

        client = Client(config['vcd']['host'],
                        api_version=config['vcd']['api_version'],
                        verify_ssl_certs=config['vcd']['verify'],
                        log_file=log_filename,
                        log_requests=log_wire,
                        log_headers=log_wire,
                        log_bodies=log_wire)
        credentials = BasicLoginCredentials(config['vcd']['username'],
                                            SYSTEM_ORG_NAME,
                                            config['vcd']['password'])
        client.set_credentials(credentials)
        msg = f"Connected to vCD as system administrator: " \
              f"{config['vcd']['host']}:{config['vcd']['port']}"
        console_message_printer.general(msg)

        cluster_records = get_all_clusters(client=client,
                                           cluster_name=cluster_name,
                                           org_name=org_name,
                                           ovdc_name=vdc_name)

        if len(cluster_records) == 0:
            console_message_printer.info(f"No clusters were found.")
            return

        vms = []
        for cluster in cluster_records:
            console_message_printer.info(
                f"Processing cluster '{cluster['name']}'.")
            vapp_href = cluster['vapp_href']
            vapp = VApp(client, href=vapp_href)

            # this step removes the old 'cse.template' metadata and adds
            # cse.template.name and cse.template.revision metadata
            # using hard-coded values taken from github history
            console_message_printer.info("Processing metadata of cluster.")
            metadata_dict = metadata_to_dict(vapp.get_metadata())
            old_template_name = metadata_dict.get(
                ClusterMetadataKey.BACKWARD_COMPATIBILE_TEMPLATE_NAME
            )  # noqa: E501
            new_template_name = None
            cse_version = metadata_dict.get(ClusterMetadataKey.CSE_VERSION)
            if old_template_name:
                console_message_printer.info(
                    "Determining k8s version on cluster.")
                if 'photon' in old_template_name:
                    new_template_name = 'photon-v2'
                    if cse_version in ('1.0.0',):
                        new_template_name += '_k8s-1.8_weave-2.0.5'
                    elif cse_version in ('1.1.0', '1.2.0', '1.2.1', '1.2.2',
                                         '1.2.3', '1.2.4'):  # noqa: E501
                        new_template_name += '_k8s-1.9_weave-2.3.0'
                    elif cse_version in (
                            '1.2.5',
                            '1.2.6',
                            '1.2.7',
                    ):  # noqa: E501
                        new_template_name += '_k8s-1.10_weave-2.3.0'
                    elif cse_version in ('2.0.0',):
                        new_template_name += '_k8s-1.12_weave-2.3.0'
                elif 'ubuntu' in old_template_name:
                    new_template_name = 'ubuntu-16.04'
                    if cse_version in ('1.0.0',):
                        new_template_name += '_k8s-1.9_weave-2.1.3'
                    elif cse_version in ('1.1.0', '1.2.0', '1.2.1', '1.2.2',
                                         '1.2.3', '1.2.4', '1.2.5', '1.2.6',
                                         '1.2.7'):  # noqa: E501
                        new_template_name += '_k8s-1.10_weave-2.3.0'
                    elif cse_version in ('2.0.0',):
                        new_template_name += '_k8s-1.13_weave-2.3.0'

            if new_template_name:
                console_message_printer.info("Updating metadata of cluster.")
                task = vapp.remove_metadata(
                    ClusterMetadataKey.BACKWARD_COMPATIBILE_TEMPLATE_NAME
                )  # noqa: E501
                client.get_task_monitor().wait_for_success(task)
                new_metadata_to_add = {
                    ClusterMetadataKey.TEMPLATE_NAME: new_template_name,
                    ClusterMetadataKey.TEMPLATE_REVISION: 0
                }
                task = vapp.set_multiple_metadata(new_metadata_to_add)
                client.get_task_monitor().wait_for_success(task)

            # this step uses hard-coded data from the newly updated
            # cse.template.name and cse.template.revision metadata fields as
            # well as github history to add [cse.os, cse.docker.version,
            # cse.kubernetes, cse.kubernetes.version, cse.cni, cse.cni.version]
            # to the clusters
            vapp.reload()
            metadata_dict = metadata_to_dict(vapp.get_metadata())
            template_name = metadata_dict.get(ClusterMetadataKey.TEMPLATE_NAME)
            template_revision = str(
                metadata_dict.get(ClusterMetadataKey.TEMPLATE_REVISION,
                                  '0'))  # noqa: E501

            if template_name:
                k8s_version, docker_version = get_k8s_and_docker_versions(
                    template_name,
                    template_revision=template_revision,
                    cse_version=cse_version)  # noqa: E501
                tokens = template_name.split('_')
                new_metadata = {
                    ClusterMetadataKey.OS: tokens[0],
                    ClusterMetadataKey.DOCKER_VERSION: docker_version,
                    ClusterMetadataKey.KUBERNETES: 'upstream',
                    ClusterMetadataKey.KUBERNETES_VERSION: k8s_version,
                    ClusterMetadataKey.CNI: tokens[2].split('-')[0],
                    ClusterMetadataKey.CNI_VERSION: tokens[2].split('-')[1],
                }
                task = vapp.set_multiple_metadata(new_metadata)
                client.get_task_monitor().wait_for_success(task)

            console_message_printer.general(
                "Finished processing metadata of cluster.")

            reset_admin_pw = False
            vm_resources = vapp.get_all_vms()
            for vm_resource in vm_resources:
                try:
                    vapp.get_admin_password(vm_resource.get('name'))
                except EntityNotFoundException:
                    reset_admin_pw = True
                    break

            if reset_admin_pw:
                try:
                    console_message_printer.info(
                        f"Undeploying the vApp '{cluster['name']}'")
                    task = vapp.undeploy()
                    client.get_task_monitor().wait_for_success(task)
                    console_message_printer.general(
                        "Successfully undeployed the vApp.")
                except Exception as err:
                    console_message_printer.error(str(err))

                for vm_resource in vm_resources:
                    console_message_printer.info(
                        f"Processing vm '{vm_resource.get('name')}'.")
                    vm = VM(client, href=vm_resource.get('href'))
                    vms.append(vm)

                    console_message_printer.info("Updating vm admin password")
                    task = vm.update_guest_customization_section(
                        enabled=True,
                        admin_password_enabled=True,
                        admin_password_auto=not admin_password,
                        admin_password=admin_password,
                    )
                    client.get_task_monitor().wait_for_success(task)
                    console_message_printer.general("Successfully updated vm")

                    console_message_printer.info("Deploying vm.")
                    task = vm.power_on_and_force_recustomization()
                    client.get_task_monitor().wait_for_success(task)
                    console_message_printer.general("Successfully deployed vm")

                console_message_printer.info("Deploying cluster")
                task = vapp.deploy(power_on=True)
                client.get_task_monitor().wait_for_success(task)
                console_message_printer.general(
                    "Successfully deployed cluster")  # noqa: E501

            console_message_printer.general(
                f"Successfully processed cluster '{cluster['name']}'")

        if skip_wait_for_gc:
            return

        while True:
            to_remove = []
            for vm in vms:
                status = vm.get_guest_customization_status()
                if status != 'GC_PENDING':
                    to_remove.append(vm)
            for vm in to_remove:
                vms.remove(vm)
            console_message_printer.info(
                f"Waiting on guest customization to finish on {len(vms)} vms.")
            if vms:
                time.sleep(5)
            else:
                break
    except cryptography.fernet.InvalidToken:
        click.secho(CONFIG_DECRYPTION_ERROR_MSG, fg='red')
    except Exception as err:
        click.secho(str(err), fg='red')
    finally:
        if client:
            client.logout()
def install_cse(config_file_name,
                skip_template_creation,
                force_update,
                ssh_key,
                retain_temp_vapp,
                pks_config_file_name=None,
                skip_config_decryption=False,
                decryption_password=None,
                msg_update_callback=None):
    """Handle logistics for CSE installation.

    Handles decision making for configuring AMQP exchange/settings,
    extension registration, catalog setup, and template creation.

    :param str config_file_name: config file name.
    :param bool skip_template_creation: If True, skip creating the templates.
    :param bool force_update: if True and templates already exist in vCD,
        overwrites existing templates.
    :param str ssh_key: public ssh key to place into template vApp(s).
    :param bool retain_temp_vapp: if True, the temporary vApp will not be
        destroyed, so the user can ssh into it and debug the vm.
    :param str pks_config_file_name: pks config file name.
    :param bool skip_config_decryption: do not decrypt the config file.
    :param str decryption_password: password to decrypt the config file.
    :param utils.ConsoleMessagePrinter msg_update_callback: Callback object
        that writes messages onto console.

    :raises AmqpError: if AMQP exchange could not be created.
    """
    configure_install_logger()

    config = get_validated_config(
        config_file_name,
        pks_config_file_name=pks_config_file_name,
        skip_config_decryption=skip_config_decryption,
        decryption_password=decryption_password,
        msg_update_callback=msg_update_callback)

    populate_vsphere_list(config['vcs'])

    msg = f"Installing CSE on vCloud Director using config file " \
          f"'{config_file_name}'"
    if msg_update_callback:
        msg_update_callback.info(msg)
    LOGGER.info(msg)

    client = None
    try:
        log_filename = None
        log_wire = str_to_bool(config['service'].get('log_wire'))
        if log_wire:
            log_filename = INSTALL_WIRELOG_FILEPATH

        client = Client(config['vcd']['host'],
                        api_version=config['vcd']['api_version'],
                        verify_ssl_certs=config['vcd']['verify'],
                        log_file=log_filename,
                        log_requests=log_wire,
                        log_headers=log_wire,
                        log_bodies=log_wire)
        credentials = BasicLoginCredentials(config['vcd']['username'],
                                            SYSTEM_ORG_NAME,
                                            config['vcd']['password'])
        client.set_credentials(credentials)
        msg = f"Connected to vCD as system administrator: " \
              f"{config['vcd']['host']}:{config['vcd']['port']}"
        if msg_update_callback:
            msg_update_callback.general(msg)
        LOGGER.info(msg)

        # create amqp exchange if it doesn't exist
        amqp = config['amqp']
        _create_amqp_exchange(amqp['exchange'],
                              amqp['host'],
                              amqp['port'],
                              amqp['vhost'],
                              amqp['ssl'],
                              amqp['username'],
                              amqp['password'],
                              msg_update_callback=msg_update_callback)

        # register or update cse on vCD
        _register_cse(client,
                      amqp['routing_key'],
                      amqp['exchange'],
                      msg_update_callback=msg_update_callback)

        # register rights to vCD
        # TODO() should also remove rights when unregistering CSE
        _register_right(client,
                        right_name=CSE_NATIVE_DEPLOY_RIGHT_NAME,
                        description=CSE_NATIVE_DEPLOY_RIGHT_DESCRIPTION,
                        category=CSE_NATIVE_DEPLOY_RIGHT_CATEGORY,
                        bundle_key=CSE_NATIVE_DEPLOY_RIGHT_BUNDLE_KEY,
                        msg_update_callback=msg_update_callback)
        _register_right(client,
                        right_name=CSE_PKS_DEPLOY_RIGHT_NAME,
                        description=CSE_PKS_DEPLOY_RIGHT_DESCRIPTION,
                        category=CSE_PKS_DEPLOY_RIGHT_CATEGORY,
                        bundle_key=CSE_PKS_DEPLOY_RIGHT_BUNDLE_KEY,
                        msg_update_callback=msg_update_callback)

        # set up cse catalog
        org = get_org(client, org_name=config['broker']['org'])
        create_and_share_catalog(org,
                                 config['broker']['catalog'],
                                 catalog_desc='CSE templates',
                                 msg_update_callback=msg_update_callback)

        if skip_template_creation:
            msg = "Skipping creation of templates."
            if msg_update_callback:
                msg_update_callback.info(msg)
            LOGGER.warning(msg)
        else:
            # read remote template cookbook, download all scripts
            rtm = RemoteTemplateManager(
                remote_template_cookbook_url=config['broker']
                ['remote_template_cookbook_url'],  # noqa: E501
                logger=LOGGER,
                msg_update_callback=msg_update_callback)
            remote_template_cookbook = rtm.get_remote_template_cookbook()

            # create all templates defined in cookbook
            for template in remote_template_cookbook['templates']:
                _install_template(
                    client=client,
                    remote_template_manager=rtm,
                    template=template,
                    org_name=config['broker']['org'],
                    vdc_name=config['broker']['vdc'],
                    catalog_name=config['broker']['catalog'],
                    network_name=config['broker']['network'],
                    ip_allocation_mode=config['broker']['ip_allocation_mode'],
                    storage_profile=config['broker']['storage_profile'],
                    force_update=force_update,
                    retain_temp_vapp=retain_temp_vapp,
                    ssh_key=ssh_key,
                    msg_update_callback=msg_update_callback)

        # if it's a PKS setup, setup NSX-T constructs
        if config.get('pks_config'):
            nsxt_servers = config.get('pks_config')['nsxt_servers']
            for nsxt_server in nsxt_servers:
                msg = f"Configuring NSX-T server ({nsxt_server.get('name')})" \
                      " for CSE. Please check install logs for details."
                if msg_update_callback:
                    msg_update_callback.general(msg)
                LOGGER.info(msg)
                nsxt_client = NSXTClient(host=nsxt_server.get('host'),
                                         username=nsxt_server.get('username'),
                                         password=nsxt_server.get('password'),
                                         http_proxy=nsxt_server.get('proxy'),
                                         https_proxy=nsxt_server.get('proxy'),
                                         verify_ssl=nsxt_server.get('verify'),
                                         logger_instance=LOGGER,
                                         log_requests=True,
                                         log_headers=True,
                                         log_body=True)
                setup_nsxt_constructs(
                    nsxt_client=nsxt_client,
                    nodes_ip_block_id=nsxt_server.get('nodes_ip_block_ids'),
                    pods_ip_block_id=nsxt_server.get('pods_ip_block_ids'),
                    ncp_boundary_firewall_section_anchor_id=nsxt_server.get(
                        'distributed_firewall_section_anchor_id')
                )  # noqa: E501

    except Exception:
        if msg_update_callback:
            msg_update_callback.error(
                "CSE Installation Error. Check CSE install logs")
        LOGGER.error("CSE Installation Error", exc_info=True)
        raise  # TODO() need installation relevant exceptions for rollback
    finally:
        if client is not None:
            client.logout()
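
A hypothetical invocation of install_cse; the file name and flag values below are placeholders, not taken from the source:

install_cse(
    config_file_name='config.yaml',   # placeholder path
    skip_template_creation=True,      # skip building k8s templates for now
    force_update=False,
    ssh_key=None,
    retain_temp_vapp=False,
    pks_config_file_name=None,
    skip_config_decryption=True)      # assumes the config file is plain text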
def ovdc_list(request_data, op_ctx: ctx.OperationContext):
    """Request handler for ovdc list operation.

    :return: List of dictionaries with org VDC k8s provider metadata.
    """
    defaults = {RequestKey.LIST_PKS_PLANS: False}
    validated_data = {**defaults, **request_data}

    list_pks_plans = utils.str_to_bool(
        validated_data[RequestKey.LIST_PKS_PLANS])  # noqa: E501

    # Record telemetry data
    cse_params = copy.deepcopy(validated_data)
    cse_params[RequestKey.LIST_PKS_PLANS] = list_pks_plans
    record_user_action_details(cse_operation=CseOperation.OVDC_LIST,
                               cse_params=cse_params)

    if list_pks_plans and not op_ctx.client.is_sysadmin():
        raise e.UnauthorizedRequestError(
            'Operation denied. Enterprise PKS plans visible only '
            'to System Administrators.')

    # Ideally this should be extracted out to ovdc_utils, but the mandatory
    # usage of a sysadmin client alongside a potentially non-sysadmin client
    # means that the function signature would require both the tenant client
    # and the sysadmin client, which is very awkward.
    if op_ctx.client.is_sysadmin():
        org_resource_list = op_ctx.client.get_org_list()
    else:
        org_resource_list = list(op_ctx.client.get_org())
    ovdcs = []
    for org_resource in org_resource_list:
        org = vcd_org.Org(op_ctx.client, resource=org_resource)
        for vdc_sparse in org.list_vdcs():
            ovdc_name = vdc_sparse['name']
            org_name = org.get_name()

            k8s_metadata = ovdc_utils.get_ovdc_k8s_provider_metadata(
                op_ctx.sysadmin_client, ovdc_name=ovdc_name, org_name=org_name)
            k8s_provider = k8s_metadata[K8S_PROVIDER_KEY]
            ovdc_dict = {
                'name': ovdc_name,
                'org': org_name,
                'k8s provider': k8s_provider
            }

            if list_pks_plans:
                pks_plans = ''
                pks_server = ''
                if k8s_provider == K8sProvider.PKS:
                    # vc name for vdc can only be found using typed query
                    q = op_ctx.client.get_typed_query(
                        vcd_client.ResourceType.ADMIN_ORG_VDC.value,
                        query_result_format=vcd_client.QueryResultFormat.
                        RECORDS,  # noqa: E501
                        qfilter=f"name=={ovdc_name};orgName=={org_name}")
                    # should only ever be one element in the generator
                    ovdc_records = list(q.execute())
                    if len(ovdc_records) == 0:
                        raise vcd_e.EntityNotFoundException(
                            f"Org VDC {ovdc_name} not found in org {org_name}")
                    ovdc_record = None
                    for record in ovdc_records:
                        ovdc_record = pyvcd_utils.to_dict(
                            record,
                            resource_type=vcd_client.ResourceType.
                            ADMIN_ORG_VDC.value)  # noqa: E501
                        break

                    vc_to_pks_plans_map = {}
                    pks_contexts = pksbroker_manager.create_pks_context_for_all_accounts_in_org(
                        op_ctx)  # noqa: E501

                    for pks_context in pks_contexts:
                        if pks_context['vc'] in vc_to_pks_plans_map:
                            continue
                        pks_broker = pksbroker.PksBroker(pks_context, op_ctx)
                        plans = pks_broker.list_plans()
                        plan_names = [plan.get('name') for plan in plans]
                        vc_to_pks_plans_map[pks_context['vc']] = \
                            [plan_names, pks_context['host']]

                    pks_plan_and_server_info = vc_to_pks_plans_map.get(
                        ovdc_record['vcName'], [])
                    if len(pks_plan_and_server_info) > 0:
                        pks_plans = pks_plan_and_server_info[0]
                        pks_server = pks_plan_and_server_info[1]

                ovdc_dict['pks api server'] = pks_server
                ovdc_dict['available pks plans'] = pks_plans

            ovdcs.append(ovdc_dict)

    return ovdcs
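
For reference, each dictionary returned by ovdc_list has roughly this shape; the values below are made up, and the two PKS keys are present only when list_pks_plans is truthy:

{
    'name': 'my-ovdc',
    'org': 'my-org',
    'k8s provider': 'ent-pks',
    'pks api server': 'pks.example.com',
    'available pks plans': ['small', 'medium']
}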
def list_template(ctx, config_file_name, skip_config_decryption,
                  display_option):
    """List CSE k8s templates."""
    if skip_config_decryption:
        password = None
    else:
        password = os.getenv('CSE_CONFIG_PASSWORD') or prompt_text(
            PASSWORD_FOR_CONFIG_DECRYPTION_MSG, color='green', hide_input=True)
    try:
        console_message_printer = ConsoleMessagePrinter()
        try:
            check_python_version()
        except Exception as err:
            click.secho(str(err), fg='red')
            sys.exit(1)

        # We don't want to validate the config file, because server startup or
        # installation is not being performed. If values in the config file
        # are missing or bad, an appropriate exception will be raised while
        # accessing or using them.
        if skip_config_decryption:
            with open(config_file_name) as config_file:
                config_dict = yaml.safe_load(config_file) or {}
        else:
            console_message_printer.info(f"Decrypting '{config_file_name}'")
            config_dict = yaml.safe_load(
                get_decrypted_file_contents(config_file_name, password)) or {}

        local_templates = []
        if display_option in (DISPLAY_ALL, DISPLAY_DIFF, DISPLAY_LOCAL):
            client = None
            try:
                # To suppress the warning message that pyvcloud prints if
                # ssl_cert verification is skipped.
                if not config_dict['vcd']['verify']:
                    requests.packages.urllib3.disable_warnings()

                client = Client(config_dict['vcd']['host'],
                                api_version=config_dict['vcd']['api_version'],
                                verify_ssl_certs=config_dict['vcd']['verify'])
                credentials = BasicLoginCredentials(
                    config_dict['vcd']['username'], SYSTEM_ORG_NAME,
                    config_dict['vcd']['password'])
                client.set_credentials(credentials)

                org_name = config_dict['broker']['org']
                catalog_name = config_dict['broker']['catalog']
                local_template_definitions = \
                    get_all_k8s_local_template_definition(
                        client=client,
                        catalog_name=catalog_name,
                        org_name=org_name)

                default_template_name = \
                    config_dict['broker']['default_template_name']
                default_template_revision = \
                    str(config_dict['broker']['default_template_revision'])
                for definition in local_template_definitions:
                    template = {}
                    template['name'] = definition[LocalTemplateKey.NAME]
                    template['revision'] = \
                        definition[LocalTemplateKey.REVISION]
                    template['compute_policy'] = \
                        definition[LocalTemplateKey.COMPUTE_POLICY]
                    template['local'] = True
                    template['remote'] = False
                    if str(definition[LocalTemplateKey.REVISION]
                           ) == default_template_revision and definition[
                               LocalTemplateKey.
                               NAME] == default_template_name:  # noqa: E501
                        template['default'] = True
                    else:
                        template['default'] = False
                    template['deprecated'] = \
                        str_to_bool(definition[LocalTemplateKey.DEPRECATED])
                    template['cpu'] = definition[LocalTemplateKey.CPU]
                    template['memory'] = definition[LocalTemplateKey.MEMORY]
                    template['description'] = \
                        definition[LocalTemplateKey.DESCRIPTION]
                    local_templates.append(template)
            finally:
                if client:
                    client.logout()

        remote_templates = []
        if display_option in (DISPLAY_ALL, DISPLAY_DIFF, DISPLAY_REMOTE):
            rtm = RemoteTemplateManager(
                remote_template_cookbook_url=config_dict['broker']
                ['remote_template_cookbook_url'],  # noqa: E501
                msg_update_callback=ConsoleMessagePrinter())
            remote_template_cookbook = rtm.get_remote_template_cookbook()
            remote_template_definitions = remote_template_cookbook['templates']
            for definition in remote_template_definitions:
                template = {}
                template['name'] = definition[RemoteTemplateKey.NAME]
                template['revision'] = definition[RemoteTemplateKey.REVISION]
                template['compute_policy'] = \
                    definition[RemoteTemplateKey.COMPUTE_POLICY]
                template['local'] = False
                template['remote'] = True
                template['default'] = False
                template['deprecated'] = \
                    str_to_bool(definition[RemoteTemplateKey.DEPRECATED])
                template['cpu'] = definition[RemoteTemplateKey.CPU]
                template['memory'] = definition[RemoteTemplateKey.MEMORY]
                template['description'] = \
                    definition[RemoteTemplateKey.DESCRIPTION]
                remote_templates.append(template)

        result = []
        if display_option == DISPLAY_ALL:
            result = remote_templates
            # If local copy of template exists, update the remote definition
            # with relevant values, else add the local definition to the result
            # list.
            for local_template in local_templates:
                found = False
                for remote_template in remote_templates:
                    if str(local_template[LocalTemplateKey.REVISION]) == str(
                            remote_template[RemoteTemplateKey.REVISION]
                    ) and local_template[
                            LocalTemplateKey.NAME] == remote_template[
                                RemoteTemplateKey.NAME]:  # noqa: E501
                        remote_template['compute_policy'] = \
                            local_template['compute_policy']
                        remote_template['local'] = local_template['local']
                        remote_template['default'] = local_template['default']
                        found = True
                        break
                if not found:
                    result.append(local_template)
        elif display_option == DISPLAY_DIFF:
            for remote_template in remote_templates:
                found = False
                for local_template in local_templates:
                    if str(local_template[LocalTemplateKey.REVISION]) == str(
                            remote_template[RemoteTemplateKey.REVISION]
                    ) and local_template[
                            LocalTemplateKey.NAME] == remote_template[
                                RemoteTemplateKey.NAME]:  # noqa: E501
                        found = True
                        break
                if not found:
                    result.append(remote_template)
        elif display_option == DISPLAY_LOCAL:
            result = local_templates
        elif display_option == DISPLAY_REMOTE:
            result = remote_templates

        stdout(result, ctx, sort_headers=False)
    except cryptography.fernet.InvalidToken:
        click.secho(CONFIG_DECRYPTION_ERROR_MSG, fg='red')
    except Exception as err:
        click.secho(str(err), fg='red')
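
Each template entry assembled above, whether local or remote, carries the same keys; an illustrative row with made-up values:

{
    'name': 'ubuntu-16.04_k8-1.18_weave-2.6.5',  # made-up template name
    'revision': 1,
    'compute_policy': '',
    'local': True,
    'remote': True,
    'default': False,
    'deprecated': False,
    'cpu': 2,
    'memory': 2048,
    'description': 'Sample description'
}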
def _update_ovdc_using_placement_policy_async(operation_context: ctx.OperationContext,  # noqa: E501
                                              task: vcd_task.Task,
                                              task_href,
                                              user_href,
                                              policy_list,
                                              ovdc_id,
                                              vdc,
                                              remove_cp_from_vms_on_disable=False):  # noqa: E501
    """Enable ovdc using placement policies.

    :param ctx.OperationContext operation_context: operation context object
    :param vcd_task.Task task: Task resource to track progress
    :param str task_href: href of the task
    :param str user_href:
    :param List[str] policy_list: The new list of policies associated with
        the ovdc
    :param str ovdc_id:
    :param pyvcloud.vcd.vdc.VDC vdc: VDC object
    :param bool remove_cp_from_vms_on_disable: Set to true if placement
        policies need to be removed from the vms before removing from the VDC.
    """
    operation_name = "Update OVDC with placement policies"
    k8s_runtimes_added = ''
    k8s_runtimes_deleted = ''
    try:
        config = utils.get_server_runtime_config()
        log_wire = utils.str_to_bool(config.get('service', {}).get('log_wire'))
        cpm = compute_policy_manager.ComputePolicyManager(
            operation_context.sysadmin_client, log_wire=log_wire)
        existing_policies = []
        for cse_policy in \
                compute_policy_manager.list_cse_placement_policies_on_vdc(cpm, ovdc_id):  # noqa: E501
            existing_policies.append(cse_policy['display_name'])

        logger.SERVER_LOGGER.debug(policy_list)
        logger.SERVER_LOGGER.debug(existing_policies)
        policies_to_add = set(policy_list) - set(existing_policies)
        policies_to_delete = set(existing_policies) - set(policy_list)

        # Telemetry for 'vcd cse ovdc enable' command
        # TODO: Update telemetry request to handle 'k8s_runtime' array
        k8s_runtimes_added = ','.join(policies_to_add)
        if k8s_runtimes_added:
            cse_params = {
                RequestKey.K8S_PROVIDER: k8s_runtimes_added,
                RequestKey.OVDC_ID: ovdc_id,
            }
            telemetry_handler.record_user_action_details(cse_operation=CseOperation.OVDC_ENABLE, # noqa: E501
                                                         cse_params=cse_params)

        # Telemetry for 'vcd cse ovdc enable' command
        # TODO: Update telemetry request to handle 'k8s_runtime' array
        k8s_runtimes_deleted = ','.join(policies_to_delete)
        if k8s_runtimes_deleted:
            cse_params = {
                RequestKey.K8S_PROVIDER: k8s_runtimes_deleted,
                RequestKey.OVDC_ID: ovdc_id,
                RequestKey.REMOVE_COMPUTE_POLICY_FROM_VMS: remove_cp_from_vms_on_disable # noqa: E501
            }
            telemetry_handler.record_user_action_details(cse_operation=CseOperation.OVDC_DISABLE, # noqa: E501
                                                         cse_params=cse_params)

        for cp_name in policies_to_add:
            msg = f"Adding k8s provider {cp_name} to OVDC {vdc.name}"
            logger.SERVER_LOGGER.debug(msg)
            task.update(status=vcd_client.TaskStatus.RUNNING.value,
                        namespace='vcloud.cse',
                        operation=msg,
                        operation_name=operation_name,
                        details='',
                        progress=None,
                        owner_href=vdc.href,
                        owner_name=vdc.name,
                        owner_type=vcd_client.EntityType.VDC.value,
                        user_href=user_href,
                        user_name=operation_context.user.name,
                        task_href=task_href,
                        org_href=operation_context.user.org_href)
            policy = compute_policy_manager.get_cse_vdc_compute_policy(
                cpm,
                cp_name,
                is_placement_policy=True)
            cpm.add_compute_policy_to_vdc(vdc_id=ovdc_id,
                                          compute_policy_href=policy['href'])

        for cp_name in policies_to_delete:
            msg = f"Removing k8s provider {RUNTIME_INTERNAL_NAME_TO_DISPLAY_NAME_MAP[cp_name]} from OVDC {ovdc_id}"  # noqa: E501
            logger.SERVER_LOGGER.debug(msg)
            task_resource = \
                task.update(status=vcd_client.TaskStatus.RUNNING.value,
                            namespace='vcloud.cse',
                            operation=msg,
                            operation_name=operation_name,
                            details='',
                            progress=None,
                            owner_href=vdc.href,
                            owner_name=vdc.name,
                            owner_type=vcd_client.EntityType.VDC.value,
                            user_href=user_href,
                            user_name=operation_context.user.name,
                            task_href=task_href,
                            org_href=operation_context.user.org_href)
            policy = compute_policy_manager.get_cse_vdc_compute_policy(cpm,
                                                                       cp_name,
                                                                       is_placement_policy=True)  # noqa: E501
            cpm.remove_compute_policy_from_vdc_sync(vdc=vdc,
                                                    compute_policy_href=policy['href'],  # noqa: E501
                                                    force=remove_cp_from_vms_on_disable, # noqa: E501
                                                    is_placement_policy=True,
                                                    task_resource=task_resource) # noqa: E501
        msg = f"Successfully updated OVDC: {vdc.name}"
        logger.SERVER_LOGGER.debug(msg)
        task.update(status=vcd_client.TaskStatus.SUCCESS.value,
                    namespace='vcloud.cse',
                    operation="Operation success",
                    operation_name=operation_name,
                    details=msg,
                    progress=None,
                    owner_href=vdc.href,
                    owner_name=vdc.name,
                    owner_type=vcd_client.EntityType.VDC.value,
                    user_href=user_href,
                    user_name=operation_context.user.name,
                    task_href=task_href,
                    org_href=operation_context.user.org_href)
        # Record telemetry
        if k8s_runtimes_added:
            telemetry_handler.record_user_action(CseOperation.OVDC_ENABLE,
                                                 status=OperationStatus.SUCCESS) # noqa: E501
        if k8s_runtimes_deleted:
            telemetry_handler.record_user_action(CseOperation.OVDC_DISABLE,
                                                 status=OperationStatus.SUCCESS) # noqa: E501
    except Exception as err:
        # Record telemetry
        if k8s_runtimes_added:
            telemetry_handler.record_user_action(CseOperation.OVDC_ENABLE,
                                                 status=OperationStatus.FAILED)
        if k8s_runtimes_deleted:
            telemetry_handler.record_user_action(CseOperation.OVDC_DISABLE,
                                                 status=OperationStatus.FAILED)
        logger.SERVER_LOGGER.error(err)
        task.update(status=vcd_client.TaskStatus.ERROR.value,
                    namespace='vcloud.cse',
                    operation='Failed to update OVDC',
                    operation_name=operation_name,
                    details=f'Failed with error: {err}',
                    progress=None,
                    owner_href=vdc.href,
                    owner_name=vdc.name,
                    owner_type=vcd_client.EntityType.VDC.value,
                    user_href=user_href,
                    user_name=operation_context.user.name,
                    task_href=task_href,
                    org_href=operation_context.user.org_href,
                    error_message=f"{err}")
    finally:
        if operation_context.sysadmin_client:
            operation_context.end()
def ovdc_compute_policy_update(request_data, op_ctx: ctx.OperationContext):
    """Request handler for ovdc compute-policy update operation.

    Required data: ovdc_id, compute_policy_action, compute_policy_name

    :return: Dictionary with task href.
    """
    required = [
        RequestKey.OVDC_ID, RequestKey.COMPUTE_POLICY_ACTION,
        RequestKey.COMPUTE_POLICY_NAME
    ]
    defaults = {
        RequestKey.REMOVE_COMPUTE_POLICY_FROM_VMS: False,
    }
    validated_data = {**defaults, **request_data}
    req_utils.validate_payload(validated_data, required)

    action = validated_data[RequestKey.COMPUTE_POLICY_ACTION]
    cp_name = validated_data[RequestKey.COMPUTE_POLICY_NAME]
    ovdc_id = validated_data[RequestKey.OVDC_ID]
    remove_compute_policy_from_vms = validated_data[
        RequestKey.REMOVE_COMPUTE_POLICY_FROM_VMS]  # noqa: E501
    try:
        config = utils.get_server_runtime_config()
        cpm = compute_policy_manager.ComputePolicyManager(
            op_ctx.sysadmin_client,
            log_wire=utils.str_to_bool(
                config['service'].get('log_wire')))  # noqa: E501
        cp_href = None
        cp_id = None
        if cp_name == SYSTEM_DEFAULT_COMPUTE_POLICY_NAME:
            for _cp in cpm.list_compute_policies_on_vdc(ovdc_id):
                if _cp['name'] == cp_name:
                    cp_href = _cp['href']
                    cp_id = _cp['id']
        else:
            try:
                _cp = cpm.get_vdc_compute_policy(cp_name)
                cp_href = _cp['href']
                cp_id = _cp['id']
            except vcd_e.EntityNotFoundException:
                pass

        if cp_href is None:
            raise e.BadRequestError(f"Compute policy '{cp_name}' not found.")

        if action == ComputePolicyAction.ADD:
            cpm.add_compute_policy_to_vdc(ovdc_id, cp_href)
            # Record telemetry data
            record_user_action(CseOperation.OVDC_COMPUTE_POLICY_ADD)
            return f"Added compute policy '{cp_name}' ({cp_id}) to ovdc " \
                   f"({ovdc_id})"

        if action == ComputePolicyAction.REMOVE:
            # TODO: fix remove_compute_policy by implementing a proper way
            # for calling async methods without having to pass op_ctx
            # outside handlers.
            task_href = cpm.remove_vdc_compute_policy_from_vdc(
                op_ctx,
                ovdc_id,
                cp_href,
                remove_compute_policy_from_vms=remove_compute_policy_from_vms)
            # Record telemetry data
            record_user_action(CseOperation.OVDC_COMPUTE_POLICY_REMOVE)
            return task_href

        raise e.BadRequestError("Unsupported compute policy action")

    except Exception as err:
        # Record telemetry data failure
        if action == ComputePolicyAction.ADD:
            record_user_action(CseOperation.OVDC_COMPUTE_POLICY_ADD,
                               status=OperationStatus.FAILED)
        elif action == ComputePolicyAction.REMOVE:
            record_user_action(CseOperation.OVDC_COMPUTE_POLICY_REMOVE,
                               status=OperationStatus.FAILED)
        raise err
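
A hypothetical payload for this handler; the id and policy name are placeholders, and the action value mirrors the ComputePolicyAction comparison used above:

# op_ctx: an existing ctx.OperationContext instance (assumed available)
request_data = {
    RequestKey.OVDC_ID: 'urn:vcloud:vdc:00000000-0000-0000-0000-000000000000',
    RequestKey.COMPUTE_POLICY_ACTION: ComputePolicyAction.ADD,
    RequestKey.COMPUTE_POLICY_NAME: 'my-compute-policy',
    RequestKey.REMOVE_COMPUTE_POLICY_FROM_VMS: False
}
result = ovdc_compute_policy_update(request_data, op_ctx)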
import os

import requests

from container_service_extension.exceptions import CseResponseError
from container_service_extension.logger import CLIENT_WIRE_LOGGER
from container_service_extension.logger import NULL_LOGGER
from container_service_extension.minor_error_codes import MinorErrorCode
from container_service_extension.shared_constants import ERROR_DESCRIPTION_KEY
from container_service_extension.shared_constants import ERROR_MINOR_CODE_KEY
from container_service_extension.shared_constants import RESPONSE_MESSAGE_KEY
from container_service_extension.shared_constants import UNKNOWN_ERROR_MESSAGE
from container_service_extension.utils import str_to_bool

ENV_CSE_CLIENT_WIRE_LOGGING = 'CSE_CLIENT_WIRE_LOGGING'

wire_logger = NULL_LOGGER
if str_to_bool(os.getenv(ENV_CSE_CLIENT_WIRE_LOGGING)):
    wire_logger = CLIENT_WIRE_LOGGER


def process_response(response):
    """Process the given response dictionary with following keys.

    Log the response if wire logging is enabled.

    If the value of status code is 2xx, return the response content, else
    raise exception with proper error message

    :param requests.models.Response response: object with attributes viz.
        status code and content
        status_code: http status code
        content: response result as string
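
A minimal usage sketch for process_response; the endpoint is a placeholder, and the exception type is inferred from the CseResponseError import above:

response = requests.get('https://cse.example.com/api/cse/templates',
                        headers={'Accept': 'application/json'})
result = process_response(response)  # content on 2xx, CseResponseError otherwise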
Example #30
def install_cse(config_file_name,
                skip_template_creation,
                force_update,
                ssh_key,
                retain_temp_vapp,
                pks_config_file_name=None,
                skip_config_decryption=False,
                decryption_password=None,
                msg_update_callback=utils.NullPrinter()):
    """Handle logistics for CSE installation.

    Handles decision making for configuring AMQP exchange/settings,
    defined entity schema registration for vCD api version >= 35,
    extension registration, catalog setup and template creation.

    Also records telemetry data on installation details.

    :param str config_file_name: config file name.
    :param bool skip_template_creation: If True, skip creating the templates.
    :param bool force_update: if True and templates already exist in vCD,
        overwrites existing templates.
    :param str ssh_key: public ssh key to place into template vApp(s).
    :param bool retain_temp_vapp: if True, the temporary vApp will not be
        destroyed, so the user can ssh into it and debug the vm.
    :param str pks_config_file_name: pks config file name.
    :param bool skip_config_decryption: do not decrypt the config file.
    :param str decryption_password: password to decrypt the config file.
    :param utils.ConsoleMessagePrinter msg_update_callback: Callback object.

    :raises cse_exception.AmqpError: if AMQP exchange could not be created.
    """
    config = get_validated_config(
        config_file_name,
        pks_config_file_name=pks_config_file_name,
        skip_config_decryption=skip_config_decryption,
        decryption_password=decryption_password,
        log_wire_file=INSTALL_WIRELOG_FILEPATH,
        logger_debug=INSTALL_LOGGER,
        msg_update_callback=msg_update_callback)

    populate_vsphere_list(config['vcs'])

    msg = f"Installing CSE on vCloud Director using config file " \
          f"'{config_file_name}'"
    msg_update_callback.info(msg)
    INSTALL_LOGGER.info(msg)

    client = None
    try:
        # Telemetry - Construct telemetry data
        telemetry_data = {
            PayloadKey.WAS_DECRYPTION_SKIPPED:
            bool(skip_config_decryption),  # noqa: E501
            PayloadKey.WAS_PKS_CONFIG_FILE_PROVIDED:
            bool(pks_config_file_name),  # noqa: E501
            PayloadKey.WERE_TEMPLATES_SKIPPED:
            bool(skip_template_creation),  # noqa: E501
            PayloadKey.WERE_TEMPLATES_FORCE_UPDATED:
            bool(force_update),  # noqa: E501
            PayloadKey.WAS_TEMP_VAPP_RETAINED:
            bool(retain_temp_vapp),  # noqa: E501
            PayloadKey.WAS_SSH_KEY_SPECIFIED: bool(ssh_key)  # noqa: E501
        }

        # Telemetry - Record detailed telemetry data on install
        record_user_action_details(
            CseOperation.SERVICE_INSTALL,
            telemetry_data,
            telemetry_settings=config['service']['telemetry'])  # noqa: E501

        log_filename = None
        log_wire = utils.str_to_bool(config['service'].get('log_wire'))
        if log_wire:
            log_filename = INSTALL_WIRELOG_FILEPATH

        client = Client(config['vcd']['host'],
                        api_version=config['vcd']['api_version'],
                        verify_ssl_certs=config['vcd']['verify'],
                        log_file=log_filename,
                        log_requests=log_wire,
                        log_headers=log_wire,
                        log_bodies=log_wire)
        credentials = BasicLoginCredentials(config['vcd']['username'],
                                            server_constants.SYSTEM_ORG_NAME,
                                            config['vcd']['password'])
        client.set_credentials(credentials)
        msg = f"Connected to vCD as system administrator: " \
              f"{config['vcd']['host']}:{config['vcd']['port']}"
        msg_update_callback.general(msg)
        INSTALL_LOGGER.info(msg)

        # create amqp exchange if it doesn't exist
        amqp = config['amqp']
        _create_amqp_exchange(amqp['exchange'],
                              amqp['host'],
                              amqp['port'],
                              amqp['vhost'],
                              amqp['ssl'],
                              amqp['username'],
                              amqp['password'],
                              msg_update_callback=msg_update_callback)

        # register or update cse on vCD
        _register_cse(client,
                      amqp['routing_key'],
                      amqp['exchange'],
                      msg_update_callback=msg_update_callback)

        # register cse def schema on VCD
        # schema should be located at
        # ~/.cse-schema/api-v<API VERSION>/schema.json
        _register_def_schema(client,
                             msg_update_callback=msg_update_callback,
                             log_wire=log_wire)

        # Since we use CSE extension id as our telemetry instance_id, the
        # validated config won't have the instance_id yet. Now that CSE has
        # been registered as an extension, we should update the telemetry
        # config with the correct instance_id
        if config['service']['telemetry']['enable']:
            store_telemetry_settings(config)

        # register rights to vCD
        # TODO() should also remove rights when unregistering CSE
        _register_right(
            client,
            right_name=server_constants.
            CSE_NATIVE_DEPLOY_RIGHT_NAME,  # noqa: E501
            description=server_constants.
            CSE_NATIVE_DEPLOY_RIGHT_DESCRIPTION,  # noqa: E501
            category=server_constants.
            CSE_NATIVE_DEPLOY_RIGHT_CATEGORY,  # noqa: E501
            bundle_key=server_constants.
            CSE_NATIVE_DEPLOY_RIGHT_BUNDLE_KEY,  # noqa: E501
            msg_update_callback=msg_update_callback)
        _register_right(
            client,
            right_name=server_constants.
            CSE_PKS_DEPLOY_RIGHT_NAME,  # noqa: E501
            description=server_constants.
            CSE_PKS_DEPLOY_RIGHT_DESCRIPTION,  # noqa: E501
            category=server_constants.
            CSE_PKS_DEPLOY_RIGHT_CATEGORY,  # noqa: E501
            bundle_key=server_constants.
            CSE_PKS_DEPLOY_RIGHT_BUNDLE_KEY,  # noqa: E501
            msg_update_callback=msg_update_callback)

        # set up placement policies for all types of clusters
        _setup_placement_policies(
            client,
            policy_list=server_constants.
            CLUSTER_PLACEMENT_POLICIES,  # noqa: E501
            msg_update_callback=msg_update_callback,
            log_wire=log_wire)

        # set up cse catalog
        org = vcd_utils.get_org(client, org_name=config['broker']['org'])
        vcd_utils.create_and_share_catalog(
            org,
            config['broker']['catalog'],
            catalog_desc='CSE templates',
            logger=INSTALL_LOGGER,
            msg_update_callback=msg_update_callback)

        if skip_template_creation:
            msg = "Skipping creation of templates."
            msg_update_callback.info(msg)
            INSTALL_LOGGER.warning(msg)
        else:
            # read remote template cookbook, download all scripts
            rtm = RemoteTemplateManager(
                remote_template_cookbook_url=config['broker']
                ['remote_template_cookbook_url'],  # noqa: E501
                logger=INSTALL_LOGGER,
                msg_update_callback=msg_update_callback)
            remote_template_cookbook = rtm.get_remote_template_cookbook()

            # create all templates defined in cookbook
            for template in remote_template_cookbook['templates']:
                # TODO tag created templates with placement policies
                _install_template(
                    client=client,
                    remote_template_manager=rtm,
                    template=template,
                    org_name=config['broker']['org'],
                    vdc_name=config['broker']['vdc'],
                    catalog_name=config['broker']['catalog'],
                    network_name=config['broker']['network'],
                    ip_allocation_mode=config['broker']['ip_allocation_mode'],
                    storage_profile=config['broker']['storage_profile'],
                    force_update=force_update,
                    retain_temp_vapp=retain_temp_vapp,
                    ssh_key=ssh_key,
                    msg_update_callback=msg_update_callback)

        # if it's a PKS setup, setup NSX-T constructs
        if config.get('pks_config'):
            nsxt_servers = config['pks_config']['nsxt_servers']
            wire_logger = NULL_LOGGER
            if log_wire:
                wire_logger = SERVER_NSXT_WIRE_LOGGER

            for nsxt_server in nsxt_servers:
                msg = f"Configuring NSX-T server ({nsxt_server.get('name')})" \
                      " for CSE. Please check install logs for details."
                msg_update_callback.general(msg)
                INSTALL_LOGGER.info(msg)
                nsxt_client = NSXTClient(host=nsxt_server.get('host'),
                                         username=nsxt_server.get('username'),
                                         password=nsxt_server.get('password'),
                                         logger_debug=INSTALL_LOGGER,
                                         logger_wire=wire_logger,
                                         http_proxy=nsxt_server.get('proxy'),
                                         https_proxy=nsxt_server.get('proxy'),
                                         verify_ssl=nsxt_server.get('verify'))
                setup_nsxt_constructs(
                    nsxt_client=nsxt_client,
                    nodes_ip_block_id=nsxt_server.get('nodes_ip_block_ids'),
                    pods_ip_block_id=nsxt_server.get('pods_ip_block_ids'),
                    ncp_boundary_firewall_section_anchor_id=nsxt_server.get(
                        'distributed_firewall_section_anchor_id')
                )  # noqa: E501

        # Telemetry - Record successful install action
        record_user_action(CseOperation.SERVICE_INSTALL,
                           telemetry_settings=config['service']['telemetry'])
    except Exception:
        msg_update_callback.error(
            "CSE Installation Error. Check CSE install logs")
        INSTALL_LOGGER.error("CSE Installation Error", exc_info=True)
        # Telemetry - Record failed install action
        record_user_action(CseOperation.SERVICE_INSTALL,
                           status=OperationStatus.FAILED,
                           telemetry_settings=config['service']['telemetry'])
        raise  # TODO() need installation relevant exceptions for rollback
    finally:
        if client is not None:
            client.logout()