def get_k8s_local_template_definition(client, catalog_name, catalog_item_name,
                                      org=None, org_name=None):
    """Fetch definition of a template.

    Read metadata on a catalog item and construct a dictionary that defines
    the template. If partial data (which indicates a malformed or non k8s
    template) is retrieved from the metadata, None is returned.

    :param pyvcloud.vcd.Client client: A sys admin client to be used to
        retrieve metadata off the catalog item.
    :param str catalog_name: Name of the catalog where the template resides.
    :param str catalog_item_name: Name of the template.
    :param pyvcloud.vcd.Org org: Org object which hosts the catalog.
    :param str org_name: Name of the org that is hosting the catalog. Can be
        provided in lieu param org, however param org takes precedence.

    :return: definition of the template, or None if the catalog item's
        metadata does not describe a complete k8s template.

    :rtype: dict
    """
    if org is None:
        org = get_org(client, org_name=org_name)
    md = org.get_all_metadata_from_catalog_item(catalog_name=catalog_name,
                                                item_name=catalog_item_name)
    try:
        metadata = metadata_to_dict(md)
        return _dict_to_k8s_local_template_definition(metadata)
    except ValueError:
        # Incomplete/malformed metadata => not a CSE k8s template.
        return None
def assign_vdc_sizing_policy_to_vapp_template_vms(self,
                                                  compute_policy_href,
                                                  org_name,
                                                  catalog_name,
                                                  catalog_item_name):
    """Assign the compute policy to vms of given vapp template.

    Note: The VDC sizing policy need not be created by CSE.

    :param str compute_policy_href: compute policy to be removed
    :param str org_name: name of the organization that has the catalog
    :param str catalog_name: name of the catalog
    :param str catalog_item_name: name of the catalog item that has vms

    :return: an object of type vcd_client.TASK XML which represents
        the asynchronous task that is updating virtual application template.

    :rtype: lxml.objectify.ObjectifiedElement
    """
    self._raise_error_if_not_supported()

    catalog_org = vcd_utils.get_org(self._sysadmin_client, org_name=org_name)
    # TODO shift to org.assign_sizing_policy_to_vapp_template_vms
    return catalog_org.assign_compute_policy_to_vapp_template_vms(
        catalog_name=catalog_name,
        catalog_item_name=catalog_item_name,
        compute_policy_href=compute_policy_href)
def get_all_k8s_local_template_definition(client, catalog_name, org=None,
                                          org_name=None):
    """Fetch definitions of all templates in a catalog.

    :param pyvcloud.vcd.Client client: A sys admin client to be used to
        retrieve metadata off the catalog items.
    :param str catalog_name: Name of the catalog where the template resides.
    :param pyvcloud.vcd.Org org: Org object which hosts the catalog.
    :param str org_name: Name of the org that is hosting the catalog. Can be
        provided in lieu param org, however param org takes precedence.

    :return: definition of the templates.

    :rtype: list of dicts
    """
    if not org:
        org = get_org(client, org_name=org_name)

    definitions = []
    # Items whose metadata doesn't form a valid template definition are
    # silently skipped (the helper returns a falsy value for those).
    for entry in org.list_catalog_items(catalog_name):
        definition = get_k8s_local_template_definition(
            client, catalog_name, entry['name'], org=org)
        if definition:
            definitions.append(definition)
    return definitions
def _follow_task(op_ctx: ctx.OperationContext, task_href: str, ovdc_id: str):
    """Track the 'remove ovdc compute policy' task until it terminates.

    Updates the given task to RUNNING and blocks on the task monitor.
    Errors are logged (not raised); the operation context is always closed.
    """
    try:
        task_obj = vcd_task.Task(client=op_ctx.sysadmin_client)
        vcloud_session = op_ctx.sysadmin_client.get_vcloud_session()
        target_vdc = vcd_utils.get_vdc(op_ctx.sysadmin_client, vdc_id=ovdc_id)
        sys_org = vcd_utils.get_org(op_ctx.sysadmin_client)
        acting_user = vcloud_session.get('user')
        acting_user_href = sys_org.get_user(acting_user).get('href')
        msg = "Remove ovdc compute policy"
        # TODO(pyvcloud): Add method to retrieve task from task href
        running_task = task_obj.update(
            status=vcd_task.TaskStatus.RUNNING.value,
            namespace='vcloud.cse',
            operation=msg,
            operation_name=msg,
            details='',
            progress=None,
            owner_href=target_vdc.href,
            owner_name=target_vdc.name,
            owner_type=vcd_client.EntityType.VDC.value,
            user_href=acting_user_href,
            user_name=acting_user,
            org_href=op_ctx.user.org_href,
            task_href=task_href)
        op_ctx.sysadmin_client.get_task_monitor().wait_for_status(running_task)
    except Exception as err:
        logger.SERVER_LOGGER.error(f"{err}")
    finally:
        if op_ctx.sysadmin_client:
            op_ctx.end()
def init_environment(config_filepath=BASE_CONFIG_FILEPATH):
    """Set up module variables according to config dict.

    Reads the test config, downloads all remote template scripts, logs in
    a sys admin client, and populates module-level globals (hrefs, AMQP
    credentials, CLI login command strings) used by the test suite.

    :param str config_filepath: path to the test config yaml file.
    """
    global AMQP_USERNAME, AMQP_PASSWORD, CLIENT, ORG_HREF, VDC_HREF, \
        CATALOG_NAME, TEARDOWN_INSTALLATION, TEARDOWN_CLUSTERS, \
        TEMPLATE_DEFINITIONS, TEST_ALL_TEMPLATES, SYS_ADMIN_LOGIN_CMD, \
        ORG_ADMIN_LOGIN_CMD, VAPP_AUTHOR_LOGIN_CMD, USER_LOGIN_CMD_MAP

    config = testutils.yaml_to_dict(config_filepath)

    # Fetch the remote template cookbook and force-refresh all scripts so
    # the tests always run against the latest definitions.
    rtm = RemoteTemplateManager(
        config['broker']['remote_template_cookbook_url'])
    template_cookbook = rtm.get_remote_template_cookbook()
    TEMPLATE_DEFINITIONS = template_cookbook['templates']
    rtm.download_all_template_scripts(force_overwrite=True)

    # Log in to vCD as system administrator.
    CLIENT = Client(config['vcd']['host'],
                    api_version=config['vcd']['api_version'],
                    verify_ssl_certs=config['vcd']['verify'])
    credentials = BasicLoginCredentials(config['vcd']['username'],
                                        SYSTEM_ORG_NAME,
                                        config['vcd']['password'])
    CLIENT.set_credentials(credentials)

    org = pyvcloud_utils.get_org(CLIENT, org_name=config['broker']['org'])
    vdc = pyvcloud_utils.get_vdc(CLIENT, vdc_name=config['broker']['vdc'],
                                 org=org)
    ORG_HREF = org.href
    VDC_HREF = vdc.href
    CATALOG_NAME = config['broker']['catalog']
    AMQP_USERNAME = config['amqp']['username']
    AMQP_PASSWORD = config['amqp']['password']

    # Pre-built vcd-cli login commands for the three user roles used in
    # tests.
    SYS_ADMIN_LOGIN_CMD = f"login {config['vcd']['host']} system " \
                          f"{config['vcd']['username']} " \
                          f"-iwp {config['vcd']['password']} " \
                          f"-V {config['vcd']['api_version']}"
    ORG_ADMIN_LOGIN_CMD = f"login {config['vcd']['host']} " \
                          f"{config['broker']['org']}" \
                          f" {ORG_ADMIN_NAME} -iwp {ORG_ADMIN_PASSWORD} " \
                          f"-V {config['vcd']['api_version']}"
    VAPP_AUTHOR_LOGIN_CMD = f"login {config['vcd']['host']} " \
                            f"{config['broker']['org']} " \
                            f"{VAPP_AUTHOR_NAME} -iwp {VAPP_AUTHOR_PASSWORD}" \
                            f" -V {config['vcd']['api_version']}"
    USER_LOGIN_CMD_MAP = {
        'sys_admin': SYS_ADMIN_LOGIN_CMD,
        'org_admin': ORG_ADMIN_LOGIN_CMD,
        'vapp_author': VAPP_AUTHOR_LOGIN_CMD
    }

    # Optional 'test' section tunes teardown/coverage behavior; if the
    # section is absent the corresponding globals are left untouched.
    test_config = config.get('test')
    if test_config is not None:
        TEARDOWN_INSTALLATION = test_config.get('teardown_installation', True)
        TEARDOWN_CLUSTERS = test_config.get('teardown_clusters', True)
        TEST_ALL_TEMPLATES = test_config.get('test_all_templates', False)
def update_ovdc(operation_context: ctx.OperationContext,
                ovdc_id: str, ovdc_spec: def_models.Ovdc) -> dict:  # noqa: 501
    """Update ovdc with the updated k8s runtimes list.

    :param ctx.OperationContext operation_context: context for the request
    :param str ovdc_id: id of the ovdc to update
    :param def_models.Ovdc ovdc_spec: Ovdc object having the updated
        k8s runtime list

    :return: dictionary containing the task href for the update operation

    :rtype: dict

    :raises Exception: if TKG+ runtime is requested but not enabled on
        the CSE server.
    """
    # NOTE: For CSE 3.0, if `enable_tkg_plus` flag in config is set to false,
    # Prevent enable/disable of OVDC for TKG+ k8s runtime by throwing an
    # exception.
    # FIX: validate BEFORE creating the task; previously the task was
    # created and set to RUNNING first, so a rejected request left an
    # orphaned task stuck in RUNNING state forever.
    if ClusterEntityKind.TKG_PLUS.value in ovdc_spec.k8s_runtime and \
            not utils.is_tkg_plus_enabled():
        msg = "TKG+ is not enabled on CSE server. Please enable TKG+ in the " \
              "server and try again."
        logger.SERVER_LOGGER.debug(msg)
        raise Exception(msg)

    msg = "Updating OVDC placement policies"
    task = vcd_task.Task(operation_context.sysadmin_client)
    org = vcd_utils.get_org(operation_context.client)
    user_href = org.get_user(operation_context.user.name).get('href')
    vdc = vcd_utils.get_vdc(operation_context.sysadmin_client, vdc_id=ovdc_id,  # noqa: E501
                            is_admin_operation=True)
    logger.SERVER_LOGGER.debug(msg)
    task_resource = task.update(
        status=vcd_client.TaskStatus.RUNNING.value,
        namespace='vcloud.cse',
        operation=msg,
        operation_name='OVDC Update',
        details='',
        progress=None,
        owner_href=vdc.href,
        owner_name=vdc.name,
        owner_type=vcd_client.EntityType.VDC.value,
        user_href=user_href,
        user_name=operation_context.user.name,
        org_href=operation_context.user.org_href,
        task_href=None,
        error_message=None,
        stack_trace=None)
    task_href = task_resource.get('href')
    operation_context.is_async = True
    # NOTE: Telemetry is currently handled in the async function as it is not
    # possible to know the operation (enable/disable) without comparing it to
    # current k8s runtimes.
    policy_list = [RUNTIME_DISPLAY_NAME_TO_INTERNAL_NAME_MAP[p] for p in ovdc_spec.k8s_runtime]  # noqa: E501
    _update_ovdc_using_placement_policy_async(operation_context=operation_context,  # noqa:E501
                                              task=task,
                                              task_href=task_href,
                                              user_href=user_href,
                                              policy_list=policy_list,  # noqa:E501
                                              ovdc_id=ovdc_id,
                                              vdc=vdc,
                                              remove_cp_from_vms_on_disable=ovdc_spec.remove_cp_from_vms_on_disable)  # noqa:E501
    return {'task_href': task_href}
def remove_vdc_compute_policy_from_vdc(self,
                                       request_context: ctx.RequestContext,  # noqa: E501
                                       ovdc_id,
                                       compute_policy_href,
                                       remove_compute_policy_from_vms=False):  # noqa: E501
    """Delete the compute policy from the specified vdc.

    Note: The VDC compute policy need not be created by CSE.

    :param request_context: request context of remove compute policy
        request
    :param str ovdc_id: id of the vdc to assign the policy
    :param compute_policy_href: policy href to remove
    :param bool remove_compute_policy_from_vms: If True, will set affected
        VMs' compute policy to 'System Default'

    :return: dictionary containing 'task_href'.
    """
    # TODO find an efficient way without passing in request context
    target_vdc = vcd_utils.get_vdc(self._sysadmin_client, vdc_id=ovdc_id)
    sys_org = vcd_utils.get_org(self._sysadmin_client)
    sys_org.reload()
    acting_user = self._session.get('user')
    acting_user_href = sys_org.get_user(acting_user).get('href')

    tracking_task = Task(self._sysadmin_client)
    tracking_resource = tracking_task.update(
        status=vcd_client.TaskStatus.RUNNING.value,
        namespace='vcloud.cse',
        operation=f"Removing compute policy (href: {compute_policy_href})"
                  f" from org VDC (vdc id: {ovdc_id})",
        operation_name='Remove org VDC compute policy',
        details='',
        progress=None,
        owner_href=target_vdc.href,
        owner_name=target_vdc.name,
        owner_type=vcd_client.EntityType.VDC.value,
        user_href=acting_user_href,
        user_name=acting_user,
        org_href=sys_org.href)
    task_href = tracking_resource.get('href')

    # Hand the heavy lifting off to the async worker.
    request_context.is_async = True
    self._remove_compute_policy_from_vdc_async(
        request_context=request_context,
        task=tracking_task,
        task_href=task_href,
        user_href=acting_user_href,
        org_href=sys_org.href,
        ovdc_id=ovdc_id,
        compute_policy_href=compute_policy_href,
        remove_compute_policy_from_vms=remove_compute_policy_from_vms)

    return {'task_href': task_href}
def remove_vdc_compute_policy_from_vdc(self,  # noqa: E501
                                       ovdc_id,
                                       compute_policy_href,
                                       force=False):  # noqa: E501
    """Delete the compute policy from the specified vdc.

    :param str ovdc_id: id of the vdc to assign the policy
    :param compute_policy_href: policy href to remove
    :param bool force: If True, will set affected
        VMs' compute policy to 'System Default'

    :return: dictionary containing 'task_href'.
    """
    target_vdc = vcd_utils.get_vdc(self._sysadmin_client, vdc_id=ovdc_id)

    # TODO the following org will be associated with 'System' org.
    # task created should be associated with the corresponding org of the
    # vdc object.
    sys_org = vcd_utils.get_org(self._sysadmin_client)
    sys_org.reload()
    acting_user = self._session.get('user')
    acting_user_href = sys_org.get_user(acting_user).get('href')

    tracking_task = Task(self._sysadmin_client)
    tracking_resource = tracking_task.update(
        status=vcd_client.TaskStatus.RUNNING.value,
        namespace='vcloud.cse',
        operation=f"Removing compute policy (href: {compute_policy_href})"
                  f" from org VDC (vdc id: {ovdc_id})",
        operation_name='Remove org VDC compute policy',
        details='',
        progress=None,
        owner_href=target_vdc.href,
        owner_name=target_vdc.name,
        owner_type=vcd_client.EntityType.VDC.value,
        user_href=acting_user_href,
        user_name=acting_user,
        org_href=sys_org.href)
    task_href = tracking_resource.get('href')

    self._remove_compute_policy_from_vdc_async(
        ovdc_id=ovdc_id,
        compute_policy_href=compute_policy_href,
        task_resource=tracking_resource,
        force=force)

    return {'task_href': task_href}
def remove_compute_policy_from_vdc(self, ovdc_id, compute_policy_href,
                                   remove_compute_policy_from_vms=False):
    """Delete the compute policy from the specified vdc.

    :param str ovdc_id: id of the vdc to assign the policy
    :param compute_policy_href: policy href to remove
    :param bool remove_compute_policy_from_vms: If True, will set affected
        VMs' compute policy to 'System Default'

    :return: dictionary containing 'task_href'.
    """
    target_vdc = pyvcd_utils.get_vdc(self._vcd_client, vdc_id=ovdc_id)

    # TODO is there no better way to get the client href?
    client_org = pyvcd_utils.get_org(self._vcd_client)
    client_org.reload()
    acting_user = self._session.get('user')
    acting_user_href = client_org.get_user(acting_user).get('href')

    tracking_task = Task(self._vcd_client)
    tracking_resource = tracking_task.update(
        status=TaskStatus.RUNNING.value,
        namespace='vcloud.cse',
        operation=f"Removing compute policy (href: {compute_policy_href})"
                  f" from org VDC (vdc id: {ovdc_id})",
        operation_name='Remove org VDC compute policy',
        details='',
        progress=None,
        owner_href=target_vdc.href,
        owner_name=target_vdc.name,
        owner_type=EntityType.VDC.value,
        user_href=acting_user_href,
        user_name=acting_user,
        org_href=client_org.href)
    task_href = tracking_resource.get('href')

    self._remove_compute_policy_from_vdc_async(
        task=tracking_task,
        task_href=task_href,
        user_href=acting_user_href,
        org_href=client_org.href,
        ovdc_id=ovdc_id,
        compute_policy_href=compute_policy_href,
        remove_compute_policy_from_vms=remove_compute_policy_from_vms)

    return {'task_href': task_href}
def _update_task(self, status, message='', error_message=None,
                 stack_trace=''):
    """Update task or create it if it does not exist.

    This function should only be used in the x_async functions, or in the
    6 common broker functions to create the required task.

    When this function is used, it logs in the sys admin client if it is
    not already logged in, but it does not log out. This is because many
    _update_task() calls are used in sequence until the task succeeds or
    fails. Once the task is updated to a success or failure state, then
    the sys admin client should be logged out.

    Another reason for decoupling sys admin logout and this function is
    because if any unknown errors occur during an operation, there should
    be a finally clause that takes care of logging out.
    """
    # Stack traces are only surfaced to system administrators.
    if not self.tenant_client.is_sysadmin():
        stack_trace = ''

    # Lazily create the task object on first use.
    if self.task is None:
        self.task = Task(self.sys_admin_client)

    task_href = (self.task_resource.get('href')
                 if self.task_resource is not None else None)

    tenant_org = vcd_utils.get_org(self.tenant_client)
    user_href = tenant_org.get_user(
        self.client_session.get('user')).get('href')

    self.task_resource = self.task.update(
        status=status.value,
        namespace='vcloud.cse',
        operation=message,
        operation_name='cluster operation',
        details='',
        progress=None,
        owner_href=self.tenant_org_href,
        owner_name=self.tenant_org_name,
        owner_type='application/vnd.vmware.vcloud.org+xml',
        user_href=user_href,
        user_name=self.tenant_user_name,
        org_href=self.tenant_org_href,
        task_href=task_href,
        error_message=error_message,
        stack_trace=stack_trace)
def set_tenant_org_context(self, org_name=None):
    """Set tenant org context if not set in the client.

    :param str org_name: Name of the org. If not set, makes use of the
        logged in org name
    """
    header_key = cli_constants.TKGRequestHeaderKey.X_VMWARE_VCLOUD_TENANT_CONTEXT  # noqa: E501
    # Guard clause: an already-present header wins; never overwrite it.
    if header_key in self._tkg_client.default_headers:
        return
    logger.CLIENT_LOGGER.debug(
        f"Setting client org context with org name {org_name}"
    )  # noqa: E501
    org_resource = vcd_utils.get_org(self._client, org_name=org_name)
    # The org id is the trailing path segment of the org href.
    org_id = org_resource.href.rsplit('/', 1)[-1]
    self._tkg_client.set_default_header(header_key, org_id)
def remove_all_compute_policies_from_vapp_template_vms(
        self, org_name, catalog_name, catalog_item_name):
    """Remove all compute policies from vms of given vapp template.

    :param str org_name: name of the organization that has the catalog.
    :param str catalog_name: name of the catalog.
    :param str catalog_item_name: name of the catalog item that has vms.

    :return: an object of type EntityType.TASK XML which represents
        the asynchronous task that is updating virtual application
        template.

    :rtype: lxml.objectify.ObjectifiedElement
    """
    catalog_org = get_org(self._vcd_client, org_name=org_name)
    return catalog_org.remove_all_compute_policies_from_vapp_template_vms(
        catalog_name, catalog_item_name)
def remove_all_vdc_compute_policies_from_vapp_template_vms(
        self, org_name, catalog_name, catalog_item_name):  # noqa: E501
    """Remove all compute policies from vms of given vapp template.

    :param str org_name: name of the organization that has the catalog.
    :param str catalog_name: name of the catalog.
    :param str catalog_item_name: name of the catalog item that has vms.

    :return: an object of type EntityType.TASK XML which represents
        the asynchronous task that is updating virtual application
        template.

    :rtype: lxml.objectify.ObjectifiedElement
    """
    self._raise_error_if_not_supported()

    catalog_org = vcd_utils.get_org(self._sysadmin_client, org_name=org_name)
    return catalog_org.remove_all_compute_policies_from_vapp_template_vms(
        catalog_name, catalog_item_name)
def assign_compute_policy_to_vapp_template_vms(self,
                                               compute_policy_href,
                                               org_name,
                                               catalog_name,
                                               catalog_item_name):
    """Assign the compute policy to vms of given vapp template.

    :param str compute_policy_href: compute policy to be removed
    :param str org_name: name of the organization that has the catalog
    :param str catalog_name: name of the catalog
    :param str catalog_item_name: name of the catalog item that has vms

    :return: an object of type EntityType.TASK XML which represents
        the asynchronous task that is updating virtual application
        template.

    :rtype: lxml.objectify.ObjectifiedElement
    """
    catalog_org = get_org(self._vcd_client, org_name=org_name)
    return catalog_org.assign_compute_policy_to_vapp_template_vms(
        catalog_name=catalog_name,
        catalog_item_name=catalog_item_name,
        compute_policy_href=compute_policy_href)
def check_cse_installation(config, msg_update_callback=None):
    """Ensure that CSE is installed on vCD according to the config file.

    Checks,
        1. AMQP exchange exists
        2. CSE is registered with vCD,
        3. CSE K8 catalog exists

    Failures are collected into a list and raised together at the end, so a
    single run reports every problem found rather than just the first.

    :param dict config: config yaml file as a dictionary
    :param utils.ConsoleMessagePrinter msg_update_callback: Callback object
        that writes messages onto console.

    :raises Exception: if CSE is not registered to vCD as an extension, or if
        specified catalog does not exist, or if specified template(s) do not
        exist.
    """
    if msg_update_callback:
        msg_update_callback.info(
            "Validating CSE installation according to config file")
    err_msgs = []
    client = None
    try:
        log_filename = None
        log_wire = str_to_bool(config['service'].get('log_wire'))
        if log_wire:
            log_filename = SERVER_DEBUG_WIRELOG_FILEPATH
        client = Client(config['vcd']['host'],
                        api_version=config['vcd']['api_version'],
                        verify_ssl_certs=config['vcd']['verify'],
                        log_file=log_filename,
                        log_requests=log_wire,
                        log_headers=log_wire,
                        log_bodies=log_wire)
        credentials = BasicLoginCredentials(config['vcd']['username'],
                                            SYSTEM_ORG_NAME,
                                            config['vcd']['password'])
        client.set_credentials(credentials)

        # check that AMQP exchange exists
        amqp = config['amqp']
        credentials = pika.PlainCredentials(amqp['username'],
                                            amqp['password'])
        parameters = pika.ConnectionParameters(amqp['host'], amqp['port'],
                                               amqp['vhost'], credentials,
                                               ssl=amqp['ssl'],
                                               connection_attempts=3,
                                               retry_delay=2,
                                               socket_timeout=5)
        connection = None
        try:
            connection = pika.BlockingConnection(parameters)
            channel = connection.channel()
            try:
                # passive=True only checks for existence; it never creates
                # the exchange.
                channel.exchange_declare(exchange=amqp['exchange'],
                                         exchange_type=EXCHANGE_TYPE,
                                         durable=True,
                                         passive=True,
                                         auto_delete=False)
                if msg_update_callback:
                    msg_update_callback.general(
                        f"AMQP exchange '{amqp['exchange']}' exists")
            except pika.exceptions.ChannelClosed:
                # passive declare on a missing exchange closes the channel.
                msg = f"AMQP exchange '{amqp['exchange']}' does not exist"
                if msg_update_callback:
                    msg_update_callback.error(msg)
                err_msgs.append(msg)
        except Exception:  # TODO() replace raw exception with specific
            msg = f"Could not connect to AMQP exchange '{amqp['exchange']}'"
            if msg_update_callback:
                msg_update_callback.error(msg)
            err_msgs.append(msg)
        finally:
            if connection is not None:
                connection.close()

        # check that CSE is registered to vCD correctly
        ext = APIExtension(client)
        try:
            cse_info = ext.get_extension(CSE_SERVICE_NAME,
                                         namespace=CSE_SERVICE_NAMESPACE)
            rkey_matches = cse_info['routingKey'] == amqp['routing_key']
            exchange_matches = cse_info['exchange'] == amqp['exchange']
            if not rkey_matches or not exchange_matches:
                msg = "CSE is registered as an extension, but the extension " \
                      "settings on vCD are not the same as config settings."
                if not rkey_matches:
                    msg += f"\nvCD-CSE routing key: {cse_info['routingKey']}" \
                           f"\nCSE config routing key: {amqp['routing_key']}"
                if not exchange_matches:
                    msg += f"\nvCD-CSE exchange: {cse_info['exchange']}" \
                           f"\nCSE config exchange: {amqp['exchange']}"
                if msg_update_callback:
                    msg_update_callback.info(msg)
                err_msgs.append(msg)
            # 'enabled' is returned as the string 'true'/'false', not a bool.
            if cse_info['enabled'] == 'true':
                if msg_update_callback:
                    msg_update_callback.general(
                        "CSE on vCD is currently enabled")
            else:
                if msg_update_callback:
                    msg_update_callback.info(
                        "CSE on vCD is currently disabled")
        except MissingRecordException:
            msg = "CSE is not registered to vCD"
            if msg_update_callback:
                msg_update_callback.error(msg)
            err_msgs.append(msg)

        # check that catalog exists in vCD
        org_name = config['broker']['org']
        org = get_org(client, org_name=org_name)
        catalog_name = config['broker']['catalog']
        if catalog_exists(org, catalog_name):
            if msg_update_callback:
                msg_update_callback.general(f"Found catalog '{catalog_name}'")
        else:
            msg = f"Catalog '{catalog_name}' not found"
            if msg_update_callback:
                msg_update_callback.error(msg)
            err_msgs.append(msg)
    finally:
        if client:
            client.logout()

    if err_msgs:
        raise Exception(err_msgs)
    if msg_update_callback:
        msg_update_callback.general("CSE installation is valid")
def install_cse(ctx, config_file_name='config.yaml',
                skip_template_creation=True, force_update=False, ssh_key=None,
                retain_temp_vapp=False, msg_update_callback=None):
    """Handle logistics for CSE installation.

    Handles decision making for configuring AMQP exchange/settings,
    extension registration, catalog setup, and template creation.

    :param click.core.Context ctx:
    :param str config_file_name: config file name.
    :param bool skip_template_creation: If True, skip creating the templates.
    :param bool force_update: if True and templates already exist in vCD,
        overwrites existing templates.
    :param str ssh_key: public ssh key to place into template vApp(s).
    :param bool retain_temp_vapp: if True, temporary vApp will not destroyed,
        so the user can ssh into and debug the vm.
    :param utils.ConsoleMessagePrinter msg_update_callback: Callback object
        that writes messages onto console.

    :raises AmqpError: if AMQP exchange could not be created.
    """
    configure_install_logger()

    config = get_validated_config(config_file_name,
                                  msg_update_callback=msg_update_callback)
    populate_vsphere_list(config['vcs'])

    msg = f"Installing CSE on vCloud Director using config file " \
          f"'{config_file_name}'"
    if msg_update_callback:
        msg_update_callback.info(msg)
    LOGGER.info(msg)

    client = None
    try:
        client = Client(config['vcd']['host'],
                        api_version=config['vcd']['api_version'],
                        verify_ssl_certs=config['vcd']['verify'],
                        log_file=INSTALL_WIRELOG_FILEPATH,
                        log_requests=True,
                        log_headers=True,
                        log_bodies=True)
        credentials = BasicLoginCredentials(config['vcd']['username'],
                                            SYSTEM_ORG_NAME,
                                            config['vcd']['password'])
        client.set_credentials(credentials)
        msg = f"Connected to vCD as system administrator: " \
              f"{config['vcd']['host']}:{config['vcd']['port']}"
        if msg_update_callback:
            msg_update_callback.general(msg)
        LOGGER.info(msg)

        # create amqp exchange if it doesn't exist
        amqp = config['amqp']
        _create_amqp_exchange(amqp['exchange'], amqp['host'], amqp['port'],
                              amqp['vhost'], amqp['ssl'], amqp['username'],
                              amqp['password'],
                              msg_update_callback=msg_update_callback)

        # register or update cse on vCD
        _register_cse(client, amqp['routing_key'], amqp['exchange'],
                      msg_update_callback=msg_update_callback)

        # register rights to vCD
        # TODO() should also remove rights when unregistering CSE
        _register_right(client, right_name=CSE_NATIVE_DEPLOY_RIGHT_NAME,
                        description=CSE_NATIVE_DEPLOY_RIGHT_DESCRIPTION,
                        category=CSE_NATIVE_DEPLOY_RIGHT_CATEGORY,
                        bundle_key=CSE_NATIVE_DEPLOY_RIGHT_BUNDLE_KEY,
                        msg_update_callback=msg_update_callback)
        _register_right(client, right_name=CSE_PKS_DEPLOY_RIGHT_NAME,
                        description=CSE_PKS_DEPLOY_RIGHT_DESCRIPTION,
                        category=CSE_PKS_DEPLOY_RIGHT_CATEGORY,
                        bundle_key=CSE_PKS_DEPLOY_RIGHT_BUNDLE_KEY,
                        msg_update_callback=msg_update_callback)

        org_name = config['broker']['org']
        catalog_name = config['broker']['catalog']

        # set up cse catalog
        org = get_org(client, org_name=org_name)
        create_and_share_catalog(org, catalog_name,
                                 catalog_desc='CSE templates',
                                 msg_update_callback=msg_update_callback)

        if skip_template_creation:
            msg = "Skipping creation of templates."
            if msg_update_callback:
                msg_update_callback.info(msg)
            LOGGER.warning(msg)
        else:
            # read remote template cookbook, download all scripts
            rtm = RemoteTemplateManager(
                remote_template_cookbook_url=config['broker']['remote_template_cookbook_url'],  # noqa: E501
                logger=LOGGER, msg_update_callback=ConsoleMessagePrinter())
            remote_template_cookbook = rtm.get_remote_template_cookbook()

            # create all templates defined in cookbook
            for template in remote_template_cookbook['templates']:
                rtm.download_template_scripts(
                    template_name=template[RemoteTemplateKey.NAME],
                    revision=template[RemoteTemplateKey.REVISION],
                    force_overwrite=force_update)
                catalog_item_name = get_revisioned_template_name(
                    template[RemoteTemplateKey.NAME],
                    template[RemoteTemplateKey.REVISION])
                build_params = {
                    'template_name': template[RemoteTemplateKey.NAME],
                    'template_revision': template[RemoteTemplateKey.REVISION],
                    'source_ova_name': template[RemoteTemplateKey.SOURCE_OVA_NAME],  # noqa: E501
                    'source_ova_href': template[RemoteTemplateKey.SOURCE_OVA_HREF],  # noqa: E501
                    'source_ova_sha256': template[RemoteTemplateKey.SOURCE_OVA_SHA256],  # noqa: E501
                    'org_name': org_name,
                    'vdc_name': config['broker']['vdc'],
                    'catalog_name': catalog_name,
                    'catalog_item_name': catalog_item_name,
                    'catalog_item_description': template[RemoteTemplateKey.DESCRIPTION],  # noqa: E501
                    'temp_vapp_name': template[RemoteTemplateKey.NAME] + '_temp',  # noqa: E501
                    'cpu': template[RemoteTemplateKey.CPU],
                    'memory': template[RemoteTemplateKey.MEMORY],
                    'network_name': config['broker']['network'],
                    'ip_allocation_mode': config['broker']['ip_allocation_mode'],  # noqa: E501
                    'storage_profile': config['broker']['storage_profile']
                }
                builder = TemplateBuilder(
                    client, client, build_params, ssh_key=ssh_key,
                    logger=LOGGER,
                    msg_update_callback=ConsoleMessagePrinter())
                builder.build(force_recreate=force_update,
                              retain_temp_vapp=retain_temp_vapp)

                # remote definition is a super set of local definition,
                # barring the key 'catalog_item_name'
                template_definition = dict(template)
                template_definition['catalog_item_name'] = catalog_item_name
                save_k8s_local_template_definition_as_metadata(
                    client=client, catalog_name=catalog_name,
                    catalog_item_name=catalog_item_name,
                    template_definition=template_definition,
                    org_name=org_name)

        # if it's a PKS setup, setup NSX-T constructs
        if config.get('pks_config'):
            nsxt_servers = config.get('pks_config')['nsxt_servers']
            for nsxt_server in nsxt_servers:
                msg = f"Configuring NSX-T server ({nsxt_server.get('name')})" \
                      " for CSE. Please check install logs for details."
                if msg_update_callback:
                    msg_update_callback.general(msg)
                LOGGER.info(msg)
                nsxt_client = NSXTClient(
                    host=nsxt_server.get('host'),
                    username=nsxt_server.get('username'),
                    password=nsxt_server.get('password'),
                    http_proxy=nsxt_server.get('proxy'),
                    https_proxy=nsxt_server.get('proxy'),
                    verify_ssl=nsxt_server.get('verify'),
                    logger_instance=LOGGER,
                    log_requests=True,
                    log_headers=True,
                    log_body=True)
                setup_nsxt_constructs(
                    nsxt_client=nsxt_client,
                    nodes_ip_block_id=nsxt_server.get('nodes_ip_block_ids'),
                    pods_ip_block_id=nsxt_server.get('pods_ip_block_ids'),
                    ncp_boundary_firewall_section_anchor_id=nsxt_server.get(
                        'distributed_firewall_section_anchor_id')
                )  # noqa: E501
    except Exception:
        if msg_update_callback:
            msg_update_callback.error(
                "CSE Installation Error. Check CSE install logs")
        LOGGER.error("CSE Installation Error", exc_info=True)
        raise  # TODO() need installation relevant exceptions for rollback
    finally:
        if client is not None:
            client.logout()
def get_all_k8s_local_template_definition(client, catalog_name, org=None,
                                          org_name=None):
    """Fetch all templates in a catalog.

    A template is a catalog item that has the LocalTemplateKey.NAME and
    LocalTemplateKey.REVISION metadata keys.

    Catalog items missing any of the pre-2.6 metadata keys are skipped;
    metadata keys introduced in 2.5.1+ are back-filled in the returned dicts
    (derived from the template name and known version tables) so every
    returned dict carries the full LocalTemplateKey set.

    :param pyvcloud.vcd.Client client: A sys admin client to be used to
        retrieve metadata off the catalog items.
    :param str catalog_name: Name of the catalog where the template resides.
    :param pyvcloud.vcd.Org org: Org object which hosts the catalog.
    :param str org_name: Name of the org that is hosting the catalog. Can be
        provided in lieu param org, however param org takes precedence.

    :return: list of dictionaries containing template data

    :rtype: list of dicts

    :raises ValueError: if a recognized template still lacks some
        LocalTemplateKey entries after back-filling.
    """
    if not org:
        org = get_org(client, org_name=org_name)
    catalog_item_names = [
        entry['name'] for entry in org.list_catalog_items(catalog_name)
    ]
    templates = []
    for item_name in catalog_item_names:
        md = org.get_all_metadata_from_catalog_item(catalog_name=catalog_name,
                                                    item_name=item_name)
        metadata_dict = metadata_to_dict(md)

        # make sure all pre-2.6 template metadata exists on catalog item
        old_metadata_keys = {
            LocalTemplateKey.CATALOG_ITEM_NAME,
            LocalTemplateKey.COMPUTE_POLICY,
            LocalTemplateKey.CPU,
            LocalTemplateKey.DEPRECATED,
            LocalTemplateKey.DESCRIPTION,
            LocalTemplateKey.MEMORY,
            LocalTemplateKey.NAME,
            LocalTemplateKey.REVISION,
        }
        # if catalog item doesn't have the old metadata keys, CSE should
        # not recognize it as a template
        if not metadata_dict.keys() >= old_metadata_keys:
            continue

        # non-string metadata is written to the dictionary as a string
        # 'upgrade_from' should be converted to an array if it is a string
        # 'upgrade_from' should be converted to [] if it is ['']
        if LocalTemplateKey.UPGRADE_FROM in metadata_dict:
            if isinstance(metadata_dict[LocalTemplateKey.UPGRADE_FROM], str):  # noqa: E501
                metadata_dict[LocalTemplateKey.UPGRADE_FROM] = \
                    ast.literal_eval(
                        metadata_dict[LocalTemplateKey.UPGRADE_FROM])  # noqa: E501
            if metadata_dict[LocalTemplateKey.UPGRADE_FROM] == ['']:
                metadata_dict[LocalTemplateKey.UPGRADE_FROM] = []

        # if 2.5.1+ template metadata is missing, add them to the dict
        template_name = metadata_dict[LocalTemplateKey.NAME]
        template_revision = str(
            metadata_dict.get(LocalTemplateKey.REVISION, '0'))  # noqa: E501
        k8s_version, docker_version = get_k8s_and_docker_versions(
            template_name, template_revision=template_revision)  # noqa: E501
        # Template names follow an '<os>_<k8s>-<ver>_<cni>-<ver>' pattern;
        # the tokens are used to back-fill missing metadata.
        tokens = template_name.split('_')
        if LocalTemplateKey.OS not in metadata_dict:
            metadata_dict[LocalTemplateKey.OS] = tokens[0]
        if LocalTemplateKey.DOCKER_VERSION not in metadata_dict:
            metadata_dict[LocalTemplateKey.DOCKER_VERSION] = docker_version
        if LocalTemplateKey.KUBERNETES not in metadata_dict:
            metadata_dict[LocalTemplateKey.KUBERNETES] = 'upstream'
        if LocalTemplateKey.KUBERNETES_VERSION not in metadata_dict:
            metadata_dict[LocalTemplateKey.KUBERNETES_VERSION] = k8s_version
        if LocalTemplateKey.CNI not in metadata_dict:
            metadata_dict[LocalTemplateKey.CNI] = tokens[2].split('-')[0]
        if LocalTemplateKey.CNI_VERSION not in metadata_dict:
            metadata_dict[LocalTemplateKey.CNI_VERSION] = tokens[2].split('-')[
                1]  # noqa: E501
        if LocalTemplateKey.UPGRADE_FROM not in metadata_dict:
            metadata_dict[LocalTemplateKey.UPGRADE_FROM] = []

        # final check that all keys in LocalTemplateKey exist in the template
        # should never fail, but useful to double check dev work
        missing_metadata = set(LocalTemplateKey) - metadata_dict.keys()
        num_missing_metadata = len(missing_metadata)
        if num_missing_metadata > 0:
            raise ValueError(f"Template '{template_name}' missing "
                             f"{num_missing_metadata} metadata: "
                             f"{missing_metadata}")

        templates.append(metadata_dict)

    return templates
def _create_cluster_async(self, *args,
                          org_name, ovdc_name, cluster_name, cluster_id,
                          template_name, template_revision, num_workers,
                          network_name, num_cpu, mb_memory,
                          storage_profile_name, ssh_key_filepath, enable_nfs,
                          rollback):
    """Create a cluster vApp and its nodes, reporting progress via task.

    Steps: create the cluster vApp, stamp CSE metadata on it, add the
    master node, initialize the cluster, add worker nodes, join them, and
    optionally add an NFS node. Progress and final status are reported
    through self._update_task; this method does not propagate exceptions
    to the caller (failures are recorded on the task instead).

    On known cluster-creation errors, the partially-created cluster is
    deleted when rollback is True.

    NOTE(review): *args is accepted but unused — presumably required by
    the async-task dispatch machinery; confirm against the caller.
    """
    org = vcd_utils.get_org(self.tenant_client, org_name=org_name)
    vdc = vcd_utils.get_vdc(
        self.tenant_client, vdc_name=ovdc_name, org=org)

    LOGGER.debug(f"About to create cluster {cluster_name} on {ovdc_name}"
                 f" with {num_workers} worker nodes, "
                 f"storage profile={storage_profile_name}")
    try:
        self._update_task(
            TaskStatus.RUNNING,
            message=f"Creating cluster vApp {cluster_name}({cluster_id})")
        try:
            vapp_resource = \
                vdc.create_vapp(cluster_name,
                                description=f"cluster {cluster_name}",
                                network=network_name,
                                fence_mode='bridged')
        except Exception as e:
            msg = f"Error while creating vApp: {e}"
            LOGGER.debug(str(e))
            raise ClusterOperationError(msg)
        # wait for the vApp creation task before touching the vApp
        self.tenant_client.get_task_monitor().wait_for_status(vapp_resource.Tasks.Task[0])  # noqa: E501

        template = get_template(template_name, template_revision)

        # stamp identifying metadata onto the vApp so the cluster can be
        # discovered later
        tags = {
            ClusterMetadataKey.CLUSTER_ID: cluster_id,
            ClusterMetadataKey.CSE_VERSION: pkg_resources.require('container-service-extension')[0].version,  # noqa: E501
            ClusterMetadataKey.TEMPLATE_NAME: template[LocalTemplateKey.NAME],  # noqa: E501
            ClusterMetadataKey.TEMPLATE_REVISION: template[LocalTemplateKey.REVISION]  # noqa: E501
        }
        vapp = VApp(self.tenant_client, href=vapp_resource.get('href'))
        task = vapp.set_multiple_metadata(tags)
        self.tenant_client.get_task_monitor().wait_for_status(task)

        self._update_task(
            TaskStatus.RUNNING,
            message=f"Creating master node for "
                    f"{cluster_name} ({cluster_id})")
        vapp.reload()
        server_config = utils.get_server_runtime_config()
        catalog_name = server_config['broker']['catalog']
        try:
            add_nodes(client=self.tenant_client,
                      num_nodes=1,
                      node_type=NodeType.MASTER,
                      org=org,
                      vdc=vdc,
                      vapp=vapp,
                      catalog_name=catalog_name,
                      template=template,
                      network_name=network_name,
                      num_cpu=num_cpu,
                      memory_in_mb=mb_memory,
                      storage_profile=storage_profile_name,
                      ssh_key_filepath=ssh_key_filepath)
        except Exception as e:
            raise MasterNodeCreationError("Error adding master node:",
                                          str(e))

        self._update_task(
            TaskStatus.RUNNING,
            message=f"Initializing cluster {cluster_name} ({cluster_id})")
        vapp.reload()
        init_cluster(vapp, template[LocalTemplateKey.NAME],
                     template[LocalTemplateKey.REVISION])
        # record the master node IP on the vApp for later lookups
        master_ip = get_master_ip(vapp)
        task = vapp.set_metadata('GENERAL', 'READWRITE', 'cse.master.ip',
                                 master_ip)
        self.tenant_client.get_task_monitor().wait_for_status(task)

        self._update_task(
            TaskStatus.RUNNING,
            message=f"Creating {num_workers} node(s) for "
                    f"{cluster_name}({cluster_id})")
        try:
            add_nodes(client=self.tenant_client,
                      num_nodes=num_workers,
                      node_type=NodeType.WORKER,
                      org=org,
                      vdc=vdc,
                      vapp=vapp,
                      catalog_name=catalog_name,
                      template=template,
                      network_name=network_name,
                      num_cpu=num_cpu,
                      memory_in_mb=mb_memory,
                      storage_profile=storage_profile_name,
                      ssh_key_filepath=ssh_key_filepath)
        except Exception as e:
            raise WorkerNodeCreationError("Error creating worker node:",
                                          str(e))

        self._update_task(
            TaskStatus.RUNNING,
            message=f"Adding {num_workers} node(s) to "
                    f"{cluster_name}({cluster_id})")
        vapp.reload()
        join_cluster(vapp, template[LocalTemplateKey.NAME],
                     template[LocalTemplateKey.REVISION])

        if enable_nfs:
            self._update_task(
                TaskStatus.RUNNING,
                message=f"Creating NFS node for "
                        f"{cluster_name} ({cluster_id})")
            try:
                add_nodes(client=self.tenant_client,
                          num_nodes=1,
                          node_type=NodeType.NFS,
                          org=org,
                          vdc=vdc,
                          vapp=vapp,
                          catalog_name=catalog_name,
                          template=template,
                          network_name=network_name,
                          num_cpu=num_cpu,
                          memory_in_mb=mb_memory,
                          storage_profile=storage_profile_name,
                          ssh_key_filepath=ssh_key_filepath)
            except Exception as e:
                raise NFSNodeCreationError("Error creating NFS node:",
                                           str(e))

        self._update_task(
            TaskStatus.SUCCESS,
            message=f"Created cluster {cluster_name} ({cluster_id})")
    except (MasterNodeCreationError, WorkerNodeCreationError,
            NFSNodeCreationError, ClusterJoiningError,
            ClusterInitializationError, ClusterOperationError) as e:
        # known creation failures: optionally roll back the partial cluster
        if rollback:
            msg = f"Error creating cluster {cluster_name}. " \
                  f"Deleting cluster (rollback=True)"
            self._update_task(TaskStatus.RUNNING, message=msg)
            LOGGER.info(msg)
            try:
                cluster = get_cluster(self.tenant_client,
                                      cluster_name,
                                      cluster_id=cluster_id,
                                      org_name=org_name,
                                      ovdc_name=ovdc_name)
                self._delete_cluster(cluster_name=cluster_name,
                                     cluster_vdc_href=cluster['vdc_href'])
            except Exception:
                # best-effort rollback: log and continue to error reporting
                LOGGER.error(f"Failed to delete cluster {cluster_name}",
                             exc_info=True)
        LOGGER.error(f"Error creating cluster {cluster_name}",
                     exc_info=True)
        error_obj = error_to_json(e)
        stack_trace = ''.join(error_obj[ERROR_MESSAGE_KEY][ERROR_STACKTRACE_KEY])  # noqa: E501
        self._update_task(
            TaskStatus.ERROR,
            error_message=error_obj[ERROR_MESSAGE_KEY][ERROR_DESCRIPTION_KEY],  # noqa: E501
            stack_trace=stack_trace)
        # raising an exception here prints a stacktrace to server console
    except Exception as e:
        LOGGER.error(f"Unknown error creating cluster {cluster_name}",
                     exc_info=True)
        error_obj = error_to_json(e)
        stack_trace = ''.join(error_obj[ERROR_MESSAGE_KEY][ERROR_STACKTRACE_KEY])  # noqa: E501
        self._update_task(
            TaskStatus.ERROR,
            error_message=error_obj[ERROR_MESSAGE_KEY][ERROR_DESCRIPTION_KEY],  # noqa: E501
            stack_trace=stack_trace)
    finally:
        self.logout_sys_admin_client()
def _create_nodes_async(self, *args,
                        cluster_name, cluster_vdc_href, cluster_vapp_href,
                        cluster_id, template_name, template_revision,
                        num_workers, network_name, num_cpu, mb_memory,
                        storage_profile_name, ssh_key_filepath, enable_nfs,
                        rollback):
    """Add worker (or NFS) nodes to an existing cluster vApp.

    Creates num_workers nodes from the given template and, for worker
    nodes, joins them to the cluster. Progress and final status are
    reported through self._update_task; exceptions are not propagated to
    the caller (failures are recorded on the task instead).

    On NodeCreationError, the newly created nodes are deleted when
    rollback is True.

    NOTE(review): *args is accepted but unused — presumably required by
    the async-task dispatch machinery; confirm against the caller.
    """
    org = vcd_utils.get_org(self.tenant_client)
    vdc = VDC(self.tenant_client, href=cluster_vdc_href)
    vapp = VApp(self.tenant_client, href=cluster_vapp_href)
    template = get_template(name=template_name, revision=template_revision)
    msg = f"Creating {num_workers} node(s) from template " \
          f"'{template_name}' (revision {template_revision}) and " \
          f"adding to {cluster_name} ({cluster_id})"
    LOGGER.debug(msg)
    try:
        self._update_task(TaskStatus.RUNNING, message=msg)
        # enable_nfs selects the node flavor; NFS nodes never join the
        # kubernetes cluster below
        node_type = NodeType.WORKER
        if enable_nfs:
            node_type = NodeType.NFS
        server_config = utils.get_server_runtime_config()
        catalog_name = server_config['broker']['catalog']
        new_nodes = add_nodes(client=self.tenant_client,
                              num_nodes=num_workers,
                              node_type=node_type,
                              org=org,
                              vdc=vdc,
                              vapp=vapp,
                              catalog_name=catalog_name,
                              template=template,
                              network_name=network_name,
                              num_cpu=num_cpu,
                              memory_in_mb=mb_memory,
                              storage_profile=storage_profile_name,
                              ssh_key_filepath=ssh_key_filepath)

        if node_type == NodeType.NFS:
            self._update_task(
                TaskStatus.SUCCESS,
                message=f"Created {num_workers} node(s) for "
                        f"{cluster_name}({cluster_id})")
        elif node_type == NodeType.WORKER:
            self._update_task(
                TaskStatus.RUNNING,
                message=f"Adding {num_workers} node(s) to cluster "
                        f"{cluster_name}({cluster_id})")
            target_nodes = []
            for spec in new_nodes['specs']:
                target_nodes.append(spec['target_vm_name'])
            vapp.reload()
            join_cluster(vapp, template[LocalTemplateKey.NAME],
                         template[LocalTemplateKey.REVISION], target_nodes)
            self._update_task(
                TaskStatus.SUCCESS,
                message=f"Added {num_workers} node(s) to cluster "
                        f"{cluster_name}({cluster_id})")
    except NodeCreationError as e:
        # known node-creation failure: optionally delete the new nodes
        if rollback:
            msg = f"Error adding nodes to {cluster_name} {cluster_id}." \
                  f" Deleting nodes: {e.node_names} (rollback=True)"
            self._update_task(TaskStatus.RUNNING, message=msg)
            LOGGER.info(msg)
            try:
                self._delete_nodes(cluster_name=cluster_name,
                                   cluster_vapp_href=cluster_vapp_href,
                                   node_names_list=e.node_names)
            except Exception:
                # best-effort rollback: log and continue to error reporting
                LOGGER.error(f"Failed to delete nodes {e.node_names} "
                             f"from cluster {cluster_name}",
                             exc_info=True)
        LOGGER.error(f"Error adding nodes to {cluster_name}",
                     exc_info=True)
        error_obj = error_to_json(e)
        LOGGER.error(str(e), exc_info=True)
        stack_trace = ''.join(error_obj[ERROR_MESSAGE_KEY][ERROR_STACKTRACE_KEY])  # noqa: E501
        self._update_task(
            TaskStatus.ERROR,
            error_message=error_obj[ERROR_MESSAGE_KEY][ERROR_DESCRIPTION_KEY],  # noqa: E501
            stack_trace=stack_trace)
        # raising an exception here prints a stacktrace to server console
    except Exception as e:
        error_obj = error_to_json(e)
        LOGGER.error(str(e), exc_info=True)
        stack_trace = ''.join(error_obj[ERROR_MESSAGE_KEY][ERROR_STACKTRACE_KEY])  # noqa: E501
        self._update_task(
            TaskStatus.ERROR,
            error_message=error_obj[ERROR_MESSAGE_KEY][ERROR_DESCRIPTION_KEY],  # noqa: E501
            stack_trace=stack_trace)
    finally:
        self.logout_sys_admin_client()
def __init__(self, client, sys_admin_client, build_params, org=None,
             vdc=None, ssh_key=None, logger=NULL_LOGGER,
             msg_update_callback=NullPrinter(), log_wire=False):
    """Initialize the template builder from build parameters.

    Populates all builder fields from build_params (or from the supplied
    org/vdc objects, which take precedence and save vCD round-trips), then
    marks the builder valid only if every required field is present.

    :param pyvcloud.vcd.Client client:
    :param pyvcloud.vcd.Client sys_admin_client:
    :param dict build_params:
    :param pyvcloud.vcd.org.Org org: specific org to use. Will override
        the org_name specified in build_params, can be used to save few
        vCD calls to create the Org object.
    :param pyvcloud.vcd.vdc.VDC vdc: specific vdc to use. Will override
        the vdc_name specified in build_params, can be used to save few
        vCD calls to create the Vdc object.
    :param str ssh_key: public ssh key to place into the template vApp(s).
    :param logging.Logger logger: logger object.
    :param utils.ConsoleMessagePrinter msg_update_callback: Callback
        object.
    """
    self._is_valid = False

    self.client = client
    self.sys_admin_client = sys_admin_client
    self.ssh_key = ssh_key
    self.logger = logger
    self.msg_update_callback = msg_update_callback

    # without both clients there is nothing to validate or build
    if self.client is None or self.sys_admin_client is None:
        return

    param = build_params.get

    # validate and populate required fields
    self.template_name = param(TemplateBuildKey.TEMPLATE_NAME)
    self.template_revision = param(TemplateBuildKey.TEMPLATE_REVISION)
    self.ova_name = param(TemplateBuildKey.SOURCE_OVA_NAME)
    self.ova_href = param(TemplateBuildKey.SOURCE_OVA_HREF)
    self.ova_sha256 = param(TemplateBuildKey.SOURCE_OVA_SHA256)

    if org:
        self.org = org
        self.org_name = org.get_name()
    else:
        self.org_name = param(TemplateBuildKey.ORG_NAME)
        self.org = get_org(self.client, org_name=self.org_name)

    if vdc:
        self.vdc = vdc
        self.vdc.get_resource()  # to make sure vdc.resource is populated
        self.vdc_name = vdc.name
    else:
        self.vdc_name = param(TemplateBuildKey.VDC_NAME)
        self.vdc = get_vdc(self.client, vdc_name=self.vdc_name,
                           org=self.org)

    self.catalog_name = param(TemplateBuildKey.CATALOG_NAME)
    self.catalog_item_name = param(TemplateBuildKey.CATALOG_ITEM_NAME)
    self.catalog_item_description = \
        param(TemplateBuildKey.CATALOG_ITEM_DESCRIPTION)
    self.temp_vapp_name = param(TemplateBuildKey.TEMP_VAPP_NAME)
    self.temp_vm_name = param(TemplateBuildKey.TEMP_VM_NAME)
    self.cpu = param(TemplateBuildKey.CPU)
    self.memory = param(TemplateBuildKey.MEMORY)
    self.network_name = param(TemplateBuildKey.NETWORK_NAME)
    self.ip_allocation_mode = param(TemplateBuildKey.IP_ALLOCATION_MODE)
    self.storage_profile = param(TemplateBuildKey.STORAGE_PROFILE)
    self.cse_placement_policy = param(TemplateBuildKey.CSE_PLACEMENT_POLICY)
    self.log_wire = log_wire

    # builder is usable only when every required field is truthy;
    # cse_placement_policy and log_wire are intentionally optional
    required_fields = (
        self.template_name, self.template_revision,
        self.ova_name, self.ova_href, self.ova_sha256,
        self.org, self.org_name, self.vdc, self.vdc_name,
        self.catalog_name, self.catalog_item_name,
        self.catalog_item_description, self.temp_vapp_name,
        self.temp_vm_name, self.cpu, self.memory,
        self.network_name, self.ip_allocation_mode, self.storage_profile,
    )
    if all(required_fields):
        self._is_valid = True
def remove_compute_policy_from_vdc_sync(self, vdc, compute_policy_href,
                                        force=False,
                                        is_placement_policy=False,
                                        task_resource=None):
    """Remove compute policy from vdc.

    This method makes use of an umbrella task which can be used for
    tracking progress. If the umbrella task is not specified, it is
    created.

    :param pyvcloud.vcd.vdc.VDC vdc: VDC object
    :param str compute_policy_href: href of the compute policy to remove
    :param bool force: Force remove compute policy from vms in the VDC
        as well
    :param bool is_placement_policy: True if the policy being removed is a
        placement policy, False if it is a sizing policy
    :param lxml.objectify.Element task_resource: Task resource for the
        umbrella task
    """
    user_name = self._session.get('user')
    task = Task(self._sysadmin_client)
    task_href = None
    is_umbrella_task = task_resource is not None
    # Create a task if not umbrella task
    if not is_umbrella_task:
        # TODO the following org will be associated with 'System' org.
        # task created should be associated with the corresponding org of
        # the vdc object.
        org = vcd_utils.get_org(self._sysadmin_client)
        org.reload()
        user_href = org.get_user(user_name).get('href')
        org_href = org.href
        task_resource = task.update(
            status=vcd_client.TaskStatus.RUNNING.value,
            namespace='vcloud.cse',
            operation=f"Removing compute policy (href: {compute_policy_href})"  # noqa: E501
                      f" from org VDC (vdc id: {vdc.name})",
            operation_name='Remove org VDC compute policy',
            details='',
            progress=None,
            owner_href=vdc.href,
            owner_name=vdc.name,
            owner_type=vcd_client.EntityType.VDC.value,
            user_href=user_href,
            user_name=user_name,
            org_href=org.href)
    else:
        # reuse identity/href info from the caller-supplied umbrella task
        user_href = task_resource.User.get('href')
        org_href = task_resource.Organization.get('href')
        task_href = task_resource.get('href')
    try:
        # remove the compute policy from VMs if force is True
        if force:
            compute_policy_id = retrieve_compute_policy_id_from_href(
                compute_policy_href)  # noqa: E501
            vdc_id = vcd_utils.extract_id(vdc.get_resource().get('id'))
            vapps = vcd_utils.get_all_vapps_in_ovdc(
                client=self._sysadmin_client,
                ovdc_id=vdc_id)
            target_vms = []
            # href of the 'System Default' policy; VMs stripped of a sizing
            # policy are re-assigned to it
            system_default_href = None
            operation_msg = None
            for cp_dict in self.list_compute_policies_on_vdc(vdc_id):
                if cp_dict['name'] == _SYSTEM_DEFAULT_COMPUTE_POLICY:
                    system_default_href = cp_dict['href']
                    break
            # collect only the VMs that actually carry the policy being
            # removed
            if is_placement_policy:
                for vapp in vapps:
                    target_vms += \
                        [vm for vm in vapp.get_all_vms()
                         if self._get_vm_placement_policy_id(vm) == compute_policy_id]  # noqa: E501
                vm_names = [vm.get('name') for vm in target_vms]
                operation_msg = f"Removing placement policy from " \
                                f"{len(vm_names)} VMs. " \
                                f"Affected VMs: {vm_names}"
            else:
                for vapp in vapps:
                    target_vms += \
                        [vm for vm in vapp.get_all_vms()
                         if self._get_vm_sizing_policy_id(vm) == compute_policy_id]  # noqa: E501
                vm_names = [vm.get('name') for vm in target_vms]
                operation_msg = "Setting sizing policy to " \
                                f"'{_SYSTEM_DEFAULT_COMPUTE_POLICY}' on " \
                                f"{len(vm_names)} VMs. " \
                                f"Affected VMs: {vm_names}"

            task.update(status=vcd_client.TaskStatus.RUNNING.value,
                        namespace='vcloud.cse',
                        operation=operation_msg,
                        operation_name='Remove org VDC compute policy',
                        details='',
                        progress=None,
                        owner_href=vdc.href,
                        owner_name=vdc.name,
                        owner_type=vcd_client.EntityType.VDC.value,
                        user_href=user_href,
                        user_name=user_name,
                        task_href=task_href,
                        org_href=org_href)

            task_monitor = self._sysadmin_client.get_task_monitor()
            for vm_resource in target_vms:
                vm = VM(self._sysadmin_client,
                        href=vm_resource.get('href'))
                _task = None
                operation_msg = None
                if is_placement_policy:
                    # if the VM has no sizing policy of its own, assign the
                    # system default before stripping the placement policy
                    if hasattr(vm_resource, 'ComputePolicy') and \
                            not hasattr(vm_resource.ComputePolicy, 'VmSizingPolicy'):  # noqa: E501
                        # Updating sizing policy for the VM
                        _task = vm.update_compute_policy(
                            compute_policy_href=system_default_href)
                        operation_msg = \
                            "Setting compute policy to " \
                            f"'{_SYSTEM_DEFAULT_COMPUTE_POLICY}' "\
                            f"on VM '{vm_resource.get('name')}'"
                        task.update(
                            status=vcd_client.TaskStatus.RUNNING.value,
                            namespace='vcloud.cse',
                            operation=operation_msg,
                            operation_name=f'Setting sizing policy to {_SYSTEM_DEFAULT_COMPUTE_POLICY}',  # noqa: E501
                            details='',
                            progress=None,
                            owner_href=vdc.href,
                            owner_name=vdc.name,
                            owner_type=vcd_client.EntityType.VDC.value,
                            user_href=user_href,
                            user_name=user_name,
                            task_href=task_href,
                            org_href=org_href)
                        task_monitor.wait_for_success(_task)
                    _task = vm.remove_placement_policy()
                    operation_msg = "Removing placement policy on VM " \
                                    f"'{vm_resource.get('name')}'"
                    task.update(
                        status=vcd_client.TaskStatus.RUNNING.value,
                        namespace='vcloud.cse',
                        operation=operation_msg,
                        operation_name='Remove org VDC compute policy',
                        details='',
                        progress=None,
                        owner_href=vdc.href,
                        owner_name=vdc.name,
                        owner_type=vcd_client.EntityType.VDC.value,
                        user_href=user_href,
                        user_name=user_name,
                        task_href=task_href,
                        org_href=org_href)
                    task_monitor.wait_for_success(_task)
                else:
                    # sizing policy removal == re-assigning system default
                    _task = vm.update_compute_policy(
                        compute_policy_href=system_default_href)
                    operation_msg = "Setting sizing policy to " \
                                    f"'{_SYSTEM_DEFAULT_COMPUTE_POLICY}' "\
                                    f"on VM '{vm_resource.get('name')}'"
                    task.update(
                        status=vcd_client.TaskStatus.RUNNING.value,
                        namespace='vcloud.cse',
                        operation=operation_msg,
                        operation_name='Remove org VDC compute policy',
                        details='',
                        progress=None,
                        owner_href=vdc.href,
                        owner_name=vdc.name,
                        owner_type=vcd_client.EntityType.VDC.value,
                        user_href=user_href,
                        user_name=user_name,
                        task_href=task_href,
                        org_href=org_href)
                    task_monitor.wait_for_success(_task)

        # umbrella tasks are finalized by the caller, so keep RUNNING here
        final_status = vcd_client.TaskStatus.RUNNING.value \
            if is_umbrella_task else vcd_client.TaskStatus.SUCCESS.value
        task.update(status=final_status,
                    namespace='vcloud.cse',
                    operation=f"Removing compute policy (href:"
                              f"{compute_policy_href}) from org VDC '{vdc.name}'",  # noqa: E501
                    operation_name='Remove org VDC compute policy',
                    details='',
                    progress=None,
                    owner_href=vdc.href,
                    owner_name=vdc.name,
                    owner_type=vcd_client.EntityType.VDC.value,
                    user_href=user_href,
                    user_name=user_name,
                    task_href=task_href,
                    org_href=org_href)
        vdc.remove_compute_policy(compute_policy_href)
    except Exception as err:
        logger.SERVER_LOGGER.error(err, exc_info=True)
        # Set task to error if not an umbrella task
        if not is_umbrella_task:
            msg = 'Failed to remove compute policy: ' \
                  f'{compute_policy_href} from the OVDC: {vdc.name}'
            task.update(status=vcd_client.TaskStatus.ERROR.value,
                        namespace='vcloud.cse',
                        operation=msg,
                        operation_name='Remove org VDC compute policy',
                        details='',
                        progress=None,
                        owner_href=vdc.href,
                        owner_name=vdc.name,
                        owner_type=vcd_client.EntityType.VDC.value,
                        user_href=user_href,
                        user_name=self._session.get('user'),
                        task_href=task_href,
                        org_href=org_href,
                        error_message=f"{err}",
                        stack_trace='')
        raise err
def install_cse(config_file_name, skip_template_creation, force_update,
                ssh_key, retain_temp_vapp, pks_config_file_name=None,
                skip_config_decryption=False, decryption_password=None,
                msg_update_callback=utils.NullPrinter()):
    """Handle logistics for CSE installation.

    Handles decision making for configuring AMQP exchange/settings,
    defined entity schema registration for vCD api version >= 35,
    extension registration, catalog setup and template creation.

    Also records telemetry data on installation details.

    :param str config_file_name: config file name.
    :param bool skip_template_creation: If True, skip creating the
        templates.
    :param bool force_update: if True and templates already exist in vCD,
        overwrites existing templates.
    :param str ssh_key: public ssh key to place into template vApp(s).
    :param bool retain_temp_vapp: if True, temporary vApp will not
        destroyed, so the user can ssh into and debug the vm.
    :param str pks_config_file_name: pks config file name.
    :param bool skip_config_decryption: do not decrypt the config file.
    :param str decryption_password: password to decrypt the config file.
    :param utils.ConsoleMessagePrinter msg_update_callback: Callback
        object.

    :raises cse_exception.AmqpError: if AMQP exchange could not be
        created.
    """
    config = get_validated_config(
        config_file_name,
        pks_config_file_name=pks_config_file_name,
        skip_config_decryption=skip_config_decryption,
        decryption_password=decryption_password,
        log_wire_file=INSTALL_WIRELOG_FILEPATH,
        logger_debug=INSTALL_LOGGER,
        msg_update_callback=msg_update_callback)

    populate_vsphere_list(config['vcs'])

    msg = f"Installing CSE on vCloud Director using config file " \
          f"'{config_file_name}'"
    msg_update_callback.info(msg)
    INSTALL_LOGGER.info(msg)

    client = None
    try:
        # Telemetry - Construct telemetry data
        telemetry_data = {
            PayloadKey.WAS_DECRYPTION_SKIPPED: bool(skip_config_decryption),  # noqa: E501
            PayloadKey.WAS_PKS_CONFIG_FILE_PROVIDED: bool(pks_config_file_name),  # noqa: E501
            PayloadKey.WERE_TEMPLATES_SKIPPED: bool(skip_template_creation),  # noqa: E501
            PayloadKey.WERE_TEMPLATES_FORCE_UPDATED: bool(force_update),  # noqa: E501
            PayloadKey.WAS_TEMP_VAPP_RETAINED: bool(retain_temp_vapp),  # noqa: E501
            PayloadKey.WAS_SSH_KEY_SPECIFIED: bool(ssh_key)  # noqa: E501
        }

        # Telemetry - Record detailed telemetry data on install
        record_user_action_details(
            CseOperation.SERVICE_INSTALL,
            telemetry_data,
            telemetry_settings=config['service']['telemetry'])  # noqa: E501

        log_filename = None
        log_wire = utils.str_to_bool(config['service'].get('log_wire'))
        if log_wire:
            log_filename = INSTALL_WIRELOG_FILEPATH

        client = Client(config['vcd']['host'],
                        api_version=config['vcd']['api_version'],
                        verify_ssl_certs=config['vcd']['verify'],
                        log_file=log_filename,
                        log_requests=log_wire,
                        log_headers=log_wire,
                        log_bodies=log_wire)
        credentials = BasicLoginCredentials(config['vcd']['username'],
                                            server_constants.SYSTEM_ORG_NAME,  # noqa: E501
                                            config['vcd']['password'])
        client.set_credentials(credentials)
        msg = f"Connected to vCD as system administrator: " \
              f"{config['vcd']['host']}:{config['vcd']['port']}"
        msg_update_callback.general(msg)
        INSTALL_LOGGER.info(msg)

        # create amqp exchange if it doesn't exist
        amqp = config['amqp']
        _create_amqp_exchange(amqp['exchange'], amqp['host'], amqp['port'],
                              amqp['vhost'], amqp['ssl'], amqp['username'],
                              amqp['password'],
                              msg_update_callback=msg_update_callback)

        # register or update cse on vCD
        _register_cse(client, amqp['routing_key'], amqp['exchange'],
                      msg_update_callback=msg_update_callback)

        # register cse def schema on VCD
        # schema should be located at
        # ~/.cse-schema/api-v<API VERSION>/schema.json
        _register_def_schema(client,
                             msg_update_callback=msg_update_callback,
                             log_wire=log_wire)

        # Since we use CSE extension id as our telemetry instance_id, the
        # validated config won't have the instance_id yet. Now that CSE has
        # been registered as an extension, we should update the telemetry
        # config with the correct instance_id
        if config['service']['telemetry']['enable']:
            store_telemetry_settings(config)

        # register rights to vCD
        # TODO() should also remove rights when unregistering CSE
        _register_right(
            client,
            right_name=server_constants.CSE_NATIVE_DEPLOY_RIGHT_NAME,  # noqa: E501
            description=server_constants.CSE_NATIVE_DEPLOY_RIGHT_DESCRIPTION,  # noqa: E501
            category=server_constants.CSE_NATIVE_DEPLOY_RIGHT_CATEGORY,  # noqa: E501
            bundle_key=server_constants.CSE_NATIVE_DEPLOY_RIGHT_BUNDLE_KEY,  # noqa: E501
            msg_update_callback=msg_update_callback)
        _register_right(
            client,
            right_name=server_constants.CSE_PKS_DEPLOY_RIGHT_NAME,  # noqa: E501
            description=server_constants.CSE_PKS_DEPLOY_RIGHT_DESCRIPTION,  # noqa: E501
            category=server_constants.CSE_PKS_DEPLOY_RIGHT_CATEGORY,  # noqa: E501
            bundle_key=server_constants.CSE_PKS_DEPLOY_RIGHT_BUNDLE_KEY,  # noqa: E501
            msg_update_callback=msg_update_callback)

        # set up placement policies for all types of clusters
        _setup_placement_policies(
            client,
            policy_list=server_constants.CLUSTER_PLACEMENT_POLICIES,  # noqa: E501
            msg_update_callback=msg_update_callback,
            log_wire=log_wire)

        # set up cse catalog
        org = vcd_utils.get_org(client, org_name=config['broker']['org'])
        vcd_utils.create_and_share_catalog(
            org, config['broker']['catalog'], catalog_desc='CSE templates',
            logger=INSTALL_LOGGER, msg_update_callback=msg_update_callback)

        if skip_template_creation:
            msg = "Skipping creation of templates."
            msg_update_callback.info(msg)
            INSTALL_LOGGER.warning(msg)
        else:
            # read remote template cookbook, download all scripts
            rtm = RemoteTemplateManager(
                remote_template_cookbook_url=config['broker']['remote_template_cookbook_url'],  # noqa: E501
                logger=INSTALL_LOGGER,
                msg_update_callback=msg_update_callback)
            remote_template_cookbook = rtm.get_remote_template_cookbook()

            # create all templates defined in cookbook
            for template in remote_template_cookbook['templates']:
                # TODO tag created templates with placement policies
                _install_template(
                    client=client,
                    remote_template_manager=rtm,
                    template=template,
                    org_name=config['broker']['org'],
                    vdc_name=config['broker']['vdc'],
                    catalog_name=config['broker']['catalog'],
                    network_name=config['broker']['network'],
                    ip_allocation_mode=config['broker']['ip_allocation_mode'],  # noqa: E501
                    storage_profile=config['broker']['storage_profile'],
                    force_update=force_update,
                    retain_temp_vapp=retain_temp_vapp,
                    ssh_key=ssh_key,
                    msg_update_callback=msg_update_callback)

        # if it's a PKS setup, setup NSX-T constructs
        if config.get('pks_config'):
            nsxt_servers = config['pks_config']['nsxt_servers']
            wire_logger = NULL_LOGGER
            if log_wire:
                wire_logger = SERVER_NSXT_WIRE_LOGGER

            for nsxt_server in nsxt_servers:
                msg = f"Configuring NSX-T server ({nsxt_server.get('name')})" \
                      " for CSE. Please check install logs for details."
                msg_update_callback.general(msg)
                INSTALL_LOGGER.info(msg)
                nsxt_client = NSXTClient(
                    host=nsxt_server.get('host'),
                    username=nsxt_server.get('username'),
                    password=nsxt_server.get('password'),
                    logger_debug=INSTALL_LOGGER,
                    logger_wire=wire_logger,
                    http_proxy=nsxt_server.get('proxy'),
                    https_proxy=nsxt_server.get('proxy'),
                    verify_ssl=nsxt_server.get('verify'))
                setup_nsxt_constructs(
                    nsxt_client=nsxt_client,
                    nodes_ip_block_id=nsxt_server.get('nodes_ip_block_ids'),
                    pods_ip_block_id=nsxt_server.get('pods_ip_block_ids'),
                    ncp_boundary_firewall_section_anchor_id=nsxt_server.get('distributed_firewall_section_anchor_id'))  # noqa: E501

        # Telemetry - Record successful install action
        record_user_action(CseOperation.SERVICE_INSTALL,
                           telemetry_settings=config['service']['telemetry'])
    except Exception:
        msg_update_callback.error(
            "CSE Installation Error. Check CSE install logs")
        INSTALL_LOGGER.error("CSE Installation Error", exc_info=True)
        # Telemetry - Record failed install action
        record_user_action(CseOperation.SERVICE_INSTALL,
                           status=OperationStatus.FAILED,
                           telemetry_settings=config['service']['telemetry'])
        raise  # TODO() need installation relevant exceptions for rollback
    finally:
        if client is not None:
            client.logout()
def __init__(self, client, sys_admin_client, build_params, org=None,
             vdc=None, ssh_key=None, logger=None, msg_update_callback=None):
    """Initialize the template builder from build parameters.

    Populates all builder fields from build_params (or from the supplied
    org/vdc objects, which take precedence and save vCD round-trips), then
    marks the builder valid only if every required field is present.

    :param pyvcloud.vcd.Client client:
    :param pyvcloud.vcd.Client sys_admin_client:
    :param dict build_params:
    :param pyvcloud.vcd.org.Org org: specific org to use. Will override
        the org_name specified in build_params, can be used to save few
        vCD calls to create the Org object.
    :param pyvcloud.vcd.vdc.VDC vdc: specific vdc to use. Will override
        the vdc_name specified in build_params, can be used to save few
        vCD calls to create the Vdc object.
    :param str ssh_key: public ssh key to place into the template vApp(s).
    :param logging.Logger logger: optional logger to log with.
    :param utils.ConsoleMessagePrinter msg_update_callback: Callback
        object that writes messages onto console.
    """
    self._is_valid = False

    self.client = client
    self.sys_admin_client = sys_admin_client
    self.ssh_key = ssh_key
    self.logger = logger
    self.msg_update_callback = msg_update_callback

    # without both clients there is nothing to validate or build
    if self.client is None or self.sys_admin_client is None:
        return

    param = build_params.get

    # validate and populate required fields
    self.template_name = param('template_name')
    self.template_revision = param('template_revision')
    self.ova_name = param('source_ova_name')
    self.ova_href = param('source_ova_href')
    self.ova_sha256 = param('source_ova_sha256')

    if org:
        self.org = org
        self.org_name = org.get_name()
    else:
        self.org_name = param('org_name')
        self.org = get_org(self.client, org_name=self.org_name)

    if vdc:
        self.vdc = vdc
        self.vdc.get_resource()  # to make sure vdc.resource is populated
        self.vdc_name = vdc.name
    else:
        self.vdc_name = param('vdc_name')
        self.vdc = get_vdc(self.client, vdc_name=self.vdc_name,
                           org=self.org)

    self.catalog_name = param('catalog_name')
    self.catalog_item_name = param('catalog_item_name')
    self.catalog_item_description = param('catalog_item_description')
    self.temp_vapp_name = param('temp_vapp_name')
    self.cpu = param('cpu')
    self.memory = param('memory')
    self.network_name = param('network_name')
    self.ip_allocation_mode = param('ip_allocation_mode')
    self.storage_profile = param('storage_profile')

    # builder is usable only when every required field is truthy
    required_fields = (
        self.template_name, self.template_revision,
        self.ova_name, self.ova_href, self.ova_sha256,
        self.org, self.org_name, self.vdc, self.vdc_name,
        self.catalog_name, self.catalog_item_name,
        self.catalog_item_description, self.temp_vapp_name,
        self.cpu, self.memory, self.network_name,
        self.ip_allocation_mode, self.storage_profile,
    )
    if all(required_fields):
        self._is_valid = True
def init_environment(config_filepath=BASE_CONFIG_FILEPATH):
    """Set up module variables according to config dict.

    Reads the test config, downloads remote template definitions, creates
    a logged-in sys-admin CLIENT, and populates the module-level globals
    (hrefs, credentials, login command strings, template definitions) used
    by the test suite. When config['test']['test_all_templates'] is False,
    TEMPLATE_DEFINITIONS is filtered down to the templates listed in
    config['test']['test_templates'] (format: "name:revision,...").

    :param str config_filepath:
    """
    global AMQP_USERNAME, AMQP_PASSWORD, CLIENT, ORG_HREF, VDC_HREF, \
        CATALOG_NAME, TEARDOWN_INSTALLATION, TEARDOWN_CLUSTERS, \
        TEMPLATE_DEFINITIONS, TEST_ALL_TEMPLATES, SYS_ADMIN_LOGIN_CMD, \
        ORG_ADMIN_LOGIN_CMD, VAPP_AUTHOR_LOGIN_CMD, USERNAME_TO_LOGIN_CMD, \
        USERNAME_TO_CLUSTER_NAME

    config = testutils.yaml_to_dict(config_filepath)

    rtm = \
        RemoteTemplateManager(config['broker']['remote_template_cookbook_url'])  # noqa: E501
    template_cookbook = rtm.get_remote_template_cookbook()
    TEMPLATE_DEFINITIONS = template_cookbook['templates']
    rtm.download_all_template_scripts(force_overwrite=True)

    CLIENT = Client(config['vcd']['host'],
                    api_version=config['vcd']['api_version'],
                    verify_ssl_certs=config['vcd']['verify'])
    credentials = BasicLoginCredentials(config['vcd']['username'],
                                        SYSTEM_ORG_NAME,
                                        config['vcd']['password'])
    CLIENT.set_credentials(credentials)

    org = pyvcloud_utils.get_org(CLIENT, org_name=config['broker']['org'])
    vdc = pyvcloud_utils.get_vdc(CLIENT, vdc_name=config['broker']['vdc'],
                                 org=org)
    ORG_HREF = org.href
    VDC_HREF = vdc.href
    CATALOG_NAME = config['broker']['catalog']
    AMQP_USERNAME = config['amqp']['username']
    AMQP_PASSWORD = config['amqp']['password']

    # vcd-cli login command strings for the three test personas
    SYS_ADMIN_LOGIN_CMD = f"login {config['vcd']['host']} system " \
                          f"{config['vcd']['username']} " \
                          f"-iwp {config['vcd']['password']} " \
                          f"-V {config['vcd']['api_version']}"
    ORG_ADMIN_LOGIN_CMD = f"login {config['vcd']['host']} " \
                          f"{config['broker']['org']}" \
                          f" {ORG_ADMIN_NAME} -iwp {ORG_ADMIN_PASSWORD} " \
                          f"-V {config['vcd']['api_version']}"
    VAPP_AUTHOR_LOGIN_CMD = f"login {config['vcd']['host']} " \
                            f"{config['broker']['org']} " \
                            f"{VAPP_AUTHOR_NAME} -iwp {VAPP_AUTHOR_PASSWORD}" \
                            f" -V {config['vcd']['api_version']}"

    USERNAME_TO_LOGIN_CMD = {
        'sys_admin': SYS_ADMIN_LOGIN_CMD,
        'org_admin': ORG_ADMIN_LOGIN_CMD,
        'vapp_author': VAPP_AUTHOR_LOGIN_CMD
    }
    USERNAME_TO_CLUSTER_NAME = {
        'sys_admin': SYS_ADMIN_TEST_CLUSTER_NAME,
        'org_admin': ORG_ADMIN_TEST_CLUSTER_NAME,
        'vapp_author': VAPP_AUTHOR_TEST_CLUSTER_NAME
    }

    test_config = config.get('test')
    if test_config is not None:
        TEARDOWN_INSTALLATION = test_config.get('teardown_installation',
                                                True)
        TEARDOWN_CLUSTERS = test_config.get('teardown_clusters', True)
        TEST_ALL_TEMPLATES = test_config.get('test_all_templates', False)
        if not TEST_ALL_TEMPLATES:
            # restrict TEMPLATE_DEFINITIONS to the explicitly requested
            # "name:revision" entries
            specified_templates_str = test_config.get('test_templates', "")
            specified_templates = specified_templates_str.split(",")
            specified_templates_def = []
            for template in specified_templates:
                tokens = template.split(":")
                # ToDo: log missing/bad specified templates
                if len(tokens) == 2:
                    template_name = tokens[0]
                    template_revision = tokens[1]
                    for template_def in TEMPLATE_DEFINITIONS:
                        if (template_name, int(template_revision)) == (template_def['name'], int(template_def['revision'])):  # noqa: E501
                            specified_templates_def.append(template_def)
                            break
            TEMPLATE_DEFINITIONS = specified_templates_def
def get_all_k8s_local_template_definition(client, catalog_name, org=None,
                                          org_name=None,
                                          logger_debug=logger.NULL_LOGGER):
    """Fetch all CSE k8s templates in a catalog.

    A CSE k8s template is a catalog item that has all the necessary metadata
    stamped onto it. If only partial metadata is present on a catalog item,
    that catalog item will be disqualified from the result.

    :param pyvcloud.vcd.Client client: A sys admin client to be used to
        retrieve metadata off the catalog items.
    :param str catalog_name: Name of the catalog where the template resides.
    :param pyvcloud.vcd.Org org: Org object which hosts the catalog.
    :param str org_name: Name of the org that is hosting the catalog. Can be
        provided in lieu of param org, however param org takes precedence.
    :param logging.Logger logger_debug: logger used to record catalog items
        skipped due to partial CSE metadata.

    :return: list of dictionaries containing template data

    :rtype: list of dicts
    """
    if not org:
        org = get_org(client, org_name=org_name)

    # The full set of metadata keys a valid CSE template must carry.
    # Loop-invariant, so computed once (previously rebuilt per catalog item).
    expected_metadata_keys = {entry.value for entry in LocalTemplateKey}

    catalog_item_names = [
        entry['name'] for entry in org.list_catalog_items(catalog_name)
    ]
    templates = []
    for item_name in catalog_item_names:
        md = org.get_all_metadata_from_catalog_item(catalog_name=catalog_name,
                                                    item_name=item_name)
        metadata_dict = metadata_to_dict(md)

        # if catalog item doesn't have all the required metadata keys,
        # CSE should not recognize it as a template
        missing_metadata_keys = expected_metadata_keys - metadata_dict.keys()
        num_missing_metadata_keys = len(missing_metadata_keys)
        if num_missing_metadata_keys == len(expected_metadata_keys):
            # This catalog item has no CSE related metadata, so skip it.
            continue
        if num_missing_metadata_keys > 0:
            # This catalog item has partial CSE metadata, so skip it but also
            # log relevant information.
            msg = f"Catalog item '{item_name}' missing " \
                  f"{num_missing_metadata_keys} metadata: " \
                  f"{missing_metadata_keys}"
            logger_debug.debug(msg)
            continue

        # non-string metadata is written to the dictionary as a string
        # when 'upgrade_from' metadata is empty, vcd returns it as: "['']"
        # when 'upgrade_from' metadata is not empty, vcd returns it as an array
        # coerce "['']" to the more usable empty array []
        if isinstance(metadata_dict[LocalTemplateKey.UPGRADE_FROM], str):
            metadata_dict[LocalTemplateKey.UPGRADE_FROM] = ast.literal_eval(
                metadata_dict[LocalTemplateKey.UPGRADE_FROM])
        if metadata_dict[LocalTemplateKey.UPGRADE_FROM] == ['']:
            metadata_dict[LocalTemplateKey.UPGRADE_FROM] = []

        templates.append(metadata_dict)

    return templates
def install_cse(config_file_name, skip_template_creation, force_update,
                ssh_key, retain_temp_vapp, pks_config_file_name=None,
                skip_config_decryption=False, decryption_password=None,
                msg_update_callback=None):
    """Handle logistics for CSE installation.

    Handles decision making for configuring AMQP exchange/settings,
    extension registration, catalog setup, and template creation.

    Order of operations: validate config -> connect to vCD as sys admin ->
    create AMQP exchange -> register CSE extension and rights -> create and
    share the CSE catalog -> (optionally) build templates -> (optionally)
    configure NSX-T for PKS. Any failure logs and re-raises; the client is
    always logged out in the finally block.

    :param str config_file_name: config file name.
    :param bool skip_template_creation: If True, skip creating the templates.
    :param bool force_update: if True and templates already exist in vCD,
        overwrites existing templates.
    :param str ssh_key: public ssh key to place into template vApp(s).
    :param bool retain_temp_vapp: if True, temporary vApp will not destroyed,
        so the user can ssh into and debug the vm.
    :param str pks_config_file_name: pks config file name.
    :param bool skip_config_decryption: do not decrypt the config file.
    :param str decryption_password: password to decrypt the config file.
    :param utils.ConsoleMessagePrinter msg_update_callback: Callback object
        that writes messages onto console.

    :raises AmqpError: if AMQP exchange could not be created.
    """
    configure_install_logger()

    # Validates (and optionally decrypts) the config before any vCD calls.
    config = get_validated_config(
        config_file_name, pks_config_file_name=pks_config_file_name,
        skip_config_decryption=skip_config_decryption,
        decryption_password=decryption_password,
        msg_update_callback=msg_update_callback)

    populate_vsphere_list(config['vcs'])

    msg = f"Installing CSE on vCloud Director using config file " \
          f"'{config_file_name}'"
    if msg_update_callback:
        msg_update_callback.info(msg)
    LOGGER.info(msg)

    client = None
    try:
        # Wire-level request/response logging only when service.log_wire
        # is truthy in the config.
        log_filename = None
        log_wire = str_to_bool(config['service'].get('log_wire'))
        if log_wire:
            log_filename = INSTALL_WIRELOG_FILEPATH

        client = Client(config['vcd']['host'],
                        api_version=config['vcd']['api_version'],
                        verify_ssl_certs=config['vcd']['verify'],
                        log_file=log_filename,
                        log_requests=log_wire,
                        log_headers=log_wire,
                        log_bodies=log_wire)
        credentials = BasicLoginCredentials(config['vcd']['username'],
                                            SYSTEM_ORG_NAME,
                                            config['vcd']['password'])
        client.set_credentials(credentials)
        msg = f"Connected to vCD as system administrator: " \
              f"{config['vcd']['host']}:{config['vcd']['port']}"
        if msg_update_callback:
            msg_update_callback.general(msg)
        LOGGER.info(msg)

        # create amqp exchange if it doesn't exist
        amqp = config['amqp']
        _create_amqp_exchange(amqp['exchange'], amqp['host'], amqp['port'],
                              amqp['vhost'], amqp['ssl'], amqp['username'],
                              amqp['password'],
                              msg_update_callback=msg_update_callback)

        # register or update cse on vCD
        _register_cse(client, amqp['routing_key'], amqp['exchange'],
                      msg_update_callback=msg_update_callback)

        # register rights to vCD
        # TODO() should also remove rights when unregistering CSE
        _register_right(client, right_name=CSE_NATIVE_DEPLOY_RIGHT_NAME,
                        description=CSE_NATIVE_DEPLOY_RIGHT_DESCRIPTION,
                        category=CSE_NATIVE_DEPLOY_RIGHT_CATEGORY,
                        bundle_key=CSE_NATIVE_DEPLOY_RIGHT_BUNDLE_KEY,
                        msg_update_callback=msg_update_callback)
        _register_right(client, right_name=CSE_PKS_DEPLOY_RIGHT_NAME,
                        description=CSE_PKS_DEPLOY_RIGHT_DESCRIPTION,
                        category=CSE_PKS_DEPLOY_RIGHT_CATEGORY,
                        bundle_key=CSE_PKS_DEPLOY_RIGHT_BUNDLE_KEY,
                        msg_update_callback=msg_update_callback)

        # set up cse catalog
        org = get_org(client, org_name=config['broker']['org'])
        create_and_share_catalog(org, config['broker']['catalog'],
                                 catalog_desc='CSE templates',
                                 msg_update_callback=msg_update_callback)

        if skip_template_creation:
            msg = "Skipping creation of templates."
            if msg_update_callback:
                msg_update_callback.info(msg)
            LOGGER.warning(msg)
        else:
            # read remote template cookbook, download all scripts
            rtm = RemoteTemplateManager(
                remote_template_cookbook_url=config['broker']['remote_template_cookbook_url'],  # noqa: E501
                logger=LOGGER, msg_update_callback=msg_update_callback)
            remote_template_cookbook = rtm.get_remote_template_cookbook()

            # create all templates defined in cookbook
            for template in remote_template_cookbook['templates']:
                _install_template(
                    client=client,
                    remote_template_manager=rtm,
                    template=template,
                    org_name=config['broker']['org'],
                    vdc_name=config['broker']['vdc'],
                    catalog_name=config['broker']['catalog'],
                    network_name=config['broker']['network'],
                    ip_allocation_mode=config['broker']['ip_allocation_mode'],
                    storage_profile=config['broker']['storage_profile'],
                    force_update=force_update,
                    retain_temp_vapp=retain_temp_vapp,
                    ssh_key=ssh_key,
                    msg_update_callback=msg_update_callback)

        # if it's a PKS setup, setup NSX-T constructs
        if config.get('pks_config'):
            nsxt_servers = config.get('pks_config')['nsxt_servers']
            for nsxt_server in nsxt_servers:
                msg = f"Configuring NSX-T server ({nsxt_server.get('name')})" \
                      " for CSE. Please check install logs for details."
                if msg_update_callback:
                    msg_update_callback.general(msg)
                LOGGER.info(msg)
                # Same proxy value is used for both http and https.
                nsxt_client = NSXTClient(host=nsxt_server.get('host'),
                                         username=nsxt_server.get('username'),
                                         password=nsxt_server.get('password'),
                                         http_proxy=nsxt_server.get('proxy'),
                                         https_proxy=nsxt_server.get('proxy'),
                                         verify_ssl=nsxt_server.get('verify'),
                                         logger_instance=LOGGER,
                                         log_requests=True,
                                         log_headers=True,
                                         log_body=True)
                # NOTE(review): plural config keys ('nodes_ip_block_ids',
                # 'pods_ip_block_ids') are passed to singular-named params —
                # confirm against setup_nsxt_constructs' expected types.
                setup_nsxt_constructs(
                    nsxt_client=nsxt_client,
                    nodes_ip_block_id=nsxt_server.get('nodes_ip_block_ids'),
                    pods_ip_block_id=nsxt_server.get('pods_ip_block_ids'),
                    ncp_boundary_firewall_section_anchor_id=nsxt_server.get(
                        'distributed_firewall_section_anchor_id')
                )
    except Exception:
        # Re-raise after logging so the CLI surfaces the failure.
        if msg_update_callback:
            msg_update_callback.error(
                "CSE Installation Error. Check CSE install logs")
        LOGGER.error("CSE Installation Error", exc_info=True)
        raise  # TODO() need installation relevant exceptions for rollback
    finally:
        if client is not None:
            client.logout()