def get_file_from_nodes(config, vapp, password, file_name, nodes, check_tools=True):
    """Download ``file_name`` from the guest OS of every given node as root.

    A system administrator client is opened for the duration of the
    operation and always logged out afterwards.

    :return: list of per-node download results, in node order.
    """
    results = []
    admin_client = None
    try:
        admin_client = get_sys_admin_client()
        for entry in nodes:
            node_name = entry.get('name')
            LOGGER.debug(f"getting file from node {node_name}")
            vsphere = get_vsphere(admin_client, vapp, vm_name=node_name,
                                  logger=LOGGER)
            vsphere.connect()
            target_vm = vsphere.get_vm_by_moid(vapp.get_vm_moid(node_name))
            if check_tools:
                # Block until guest tools are up before touching the guest FS.
                vsphere.wait_until_tools_ready(
                    target_vm, sleep=5,
                    callback=wait_for_tools_ready_callback)
                wait_until_ready_to_exec(vsphere, target_vm, password)
            results.append(
                vsphere.download_file_from_guest(target_vm, 'root',
                                                 password, file_name))
    finally:
        if admin_client:
            admin_client.logout()
    return results
def _load_placement_policy_details(self, msg_update_callback=utils.NullPrinter()):  # noqa: E501
    """Load kubernetes runtime placement policy hrefs into server config.

    Looks up the CSE VDC placement policy for each supported cluster
    runtime and stores a display-name -> policy-href map under
    ``self.config['placement_policy_hrefs']``. Runtimes without a
    registered policy are silently skipped.

    :param utils.ConsoleMessagePrinter msg_update_callback: callback used
        to surface progress/error messages.

    :raises Exception: re-raises any failure after logging it.
    """
    msg = "Loading kubernetes runtime placement policies."
    logger.SERVER_LOGGER.info(msg)
    msg_update_callback.general(msg)
    sysadmin_client = None
    try:
        sysadmin_client = vcd_utils.get_sys_admin_client()
        # Placement policies require a minimum pVDC compute policy api
        # version; on older vCD just log and skip.
        if float(sysadmin_client.get_api_version()) < compute_policy_manager.GLOBAL_PVDC_COMPUTE_POLICY_MIN_VERSION:  # noqa: E501
            msg = "Placement policies for kubernetes runtimes not " \
                  " supported in api version " \
                  f"{sysadmin_client.get_api_version()}"  # noqa: E501
            logger.SERVER_LOGGER.debug(msg)
            msg_update_callback.info(msg)
            return
        placement_policy_name_to_href = {}
        cpm = compute_policy_manager.ComputePolicyManager(
            sysadmin_client,
            log_wire=self.config['service'].get('log_wire'))
        for runtime_policy in shared_constants.CLUSTER_RUNTIME_PLACEMENT_POLICIES:  # noqa: E501
            k8_runtime = shared_constants.RUNTIME_INTERNAL_NAME_TO_DISPLAY_NAME_MAP[runtime_policy]  # noqa: E501
            try:
                placement_policy_name_to_href[k8_runtime] = \
                    compute_policy_manager.get_cse_vdc_compute_policy(
                        cpm,
                        runtime_policy,
                        is_placement_policy=True)['href']
            except EntityNotFoundException:
                # Missing policy for a runtime is not fatal; skip it.
                pass
        self.config['placement_policy_hrefs'] = placement_policy_name_to_href  # noqa: E501
    except Exception as e:
        msg = f"Failed to load placement policies to server runtime configuration: {str(e)}"  # noqa: E501
        msg_update_callback.error(msg)
        logger.SERVER_LOGGER.error(msg)
        raise
    finally:
        # Fix: the original never logged out this client, leaking the
        # sys admin session (every sibling method does log out).
        if sysadmin_client:
            sysadmin_client.logout()
def execute_script_in_nodes(vapp, node_names, script, check_tools=True, wait=True):
    """Run ``script`` as root inside the guest OS of each named node.

    :param pyvcloud.vcd.vapp.VApp vapp: vApp containing the node VMs.
    :param list node_names: names of the VMs to execute the script on.
    :param str script: script text to run in the guest.
    :param bool check_tools: if True, wait for guest tools and exec
        readiness before running the script.
    :param bool wait: if True, block for completion and capture
        stdout/stderr; if False, fire-and-forget (empty output strings).

    :return: list of per-node results; when ``wait`` is True each entry is
        the tuple returned by ``execute_script_in_guest`` (index 1/2 carry
        stdout/stderr payloads), otherwise a one-element list with the
        ``execute_program_in_guest`` return value.
    """
    all_results = []
    sys_admin_client = None
    try:
        sys_admin_client = vcd_utils.get_sys_admin_client()
        for node_name in node_names:
            LOGGER.debug(f"will try to execute script on {node_name}:\n"
                         f"{script}")
            vs = vs_utils.get_vsphere(sys_admin_client, vapp,
                                      vm_name=node_name, logger=LOGGER)
            vs.connect()
            moid = vapp.get_vm_moid(node_name)
            vm = vs.get_vm_by_moid(moid)
            # root password is read from the vApp's admin password for the VM
            password = vapp.get_admin_password(node_name)
            if check_tools:
                LOGGER.debug(f"waiting for tools on {node_name}")
                vs.wait_until_tools_ready(
                    vm, sleep=5, callback=_wait_for_tools_ready_callback)
                _wait_until_ready_to_exec(vs, vm, password)
            LOGGER.debug(f"about to execute script on {node_name} "
                         f"(vm={vm}), wait={wait}")
            if wait:
                result = \
                    vs.execute_script_in_guest(
                        vm, 'root', password, script,
                        target_file=None,
                        wait_for_completion=True,
                        wait_time=10,
                        get_output=True,
                        delete_script=True,
                        callback=_wait_for_guest_execution_callback)
                result_stdout = result[1].content.decode()
                result_stderr = result[2].content.decode()
            else:
                # Fire-and-forget: no output is collected.
                result = [
                    vs.execute_program_in_guest(vm, 'root', password, script,
                                                wait_for_completion=False,
                                                get_output=False)
                ]
                result_stdout = ''
                result_stderr = ''
            LOGGER.debug(result[0])
            LOGGER.debug(result_stderr)
            LOGGER.debug(result_stdout)
            all_results.append(result)
    finally:
        if sys_admin_client:
            sys_admin_client.logout()
    return all_results
def _process_template_compute_policy_compliance(self, msg_update_callback=utils.NullPrinter()):  # noqa: E501
    """Reconcile compute policies on k8s template vApp VMs.

    For each template in the broker config: if the template declares a
    compute policy name, ensure that policy exists (creating it if
    missing) and stamp it on the template's vApp VMs; if the policy name
    is empty, strip all compute policies from those VMs. Silently skips
    everything when the vCD instance does not support compute policies.

    :param utils.ConsoleMessagePrinter msg_update_callback: callback used
        to surface progress messages.
    """
    msg = "Processing compute policy for k8s templates."
    logger.SERVER_LOGGER.info(msg)
    msg_update_callback.general_no_color(msg)

    org_name = self.config['broker']['org']
    catalog_name = self.config['broker']['catalog']
    sysadmin_client = None
    try:
        sysadmin_client = vcd_utils.get_sys_admin_client()
        cpm = compute_policy_manager.ComputePolicyManager(sysadmin_client, log_wire=self.config['service'].get('log_wire'))  # noqa: E501
        for template in self.config['broker']['templates']:
            policy_name = template[LocalTemplateKey.COMPUTE_POLICY]
            catalog_item_name = template[LocalTemplateKey.CATALOG_ITEM_NAME]  # noqa: E501
            # if policy name is not empty, stamp it on the template
            if policy_name:
                try:
                    policy = cpm.get_policy(policy_name=policy_name)
                except EntityNotFoundException:
                    # create the policy if it does not exist
                    msg = f"Creating missing compute policy " \
                          f"'{policy_name}'."
                    msg_update_callback.info(msg)
                    logger.SERVER_LOGGER.debug(msg)
                    policy = cpm.add_policy(policy_name=policy_name)

                msg = f"Assigning compute policy '{policy_name}' to " \
                      f"template '{catalog_item_name}'."
                msg_update_callback.general(msg)
                logger.SERVER_LOGGER.debug(msg)
                cpm.assign_compute_policy_to_vapp_template_vms(
                    compute_policy_href=policy['href'],
                    org_name=org_name,
                    catalog_name=catalog_name,
                    catalog_item_name=catalog_item_name)
            else:
                # empty policy name means we should remove policy from
                # template
                msg = f"Removing compute policy from template " \
                      f"'{catalog_item_name}'."
                msg_update_callback.general(msg)
                logger.SERVER_LOGGER.debug(msg)

                cpm.remove_all_compute_policies_from_vapp_template_vms(
                    org_name=org_name,
                    catalog_name=catalog_name,
                    catalog_item_name=catalog_item_name)
    except OperationNotSupportedException:
        # Compute policies unsupported on this vCD: treated as a no-op.
        msg = "Compute policy not supported by vCD. Skipping " \
              "assigning/removing it to/from templates."
        msg_update_callback.info(msg)
        logger.SERVER_LOGGER.debug(msg)
    finally:
        if sysadmin_client is not None:
            sysadmin_client.logout()
def _load_def_schema(self, msg_update_callback=utils.NullPrinter()):
    """Load cluster interface and cluster entity type to global context.

    If defined entity framework is supported by vCD api version, load
    defined entity interface and defined entity type registered during
    server install.

    :param utils.ConsoleMessagePrinter msg_update_callback: callback used
        to surface progress/error messages.

    :raises cse_exception.DefSchemaServiceError: on schema service errors.
    :raises Exception: re-raises any other failure after logging it.
    """
    sysadmin_client = None
    try:
        sysadmin_client = vcd_utils.get_sys_admin_client()
        logger_wire = logger.NULL_LOGGER
        # Fix: the original wrapped str_to_bool in a second, redundant
        # str_to_bool call; one conversion of the config value suffices.
        if utils.str_to_bool(
                self.config['service'].get('log_wire', False)):
            logger_wire = logger.SERVER_CLOUDAPI_WIRE_LOGGER
        cloudapi_client = \
            vcd_utils.get_cloudapi_client_from_vcd_client(sysadmin_client,
                                                          logger.SERVER_LOGGER,  # noqa: E501
                                                          logger_wire)
        raise_error_if_def_not_supported(cloudapi_client)
        schema_svc = def_schema_svc.DefSchemaService(cloudapi_client)
        defKey = def_utils.DefKey
        keys_map = def_utils.MAP_API_VERSION_TO_KEYS[float(
            sysadmin_client.get_api_version())]  # noqa: E501
        interface_id = def_utils.generate_interface_id(
            vendor=keys_map[defKey.VENDOR],
            nss=keys_map[defKey.INTERFACE_NSS],
            version=keys_map[defKey.INTERFACE_VERSION])
        entity_type_id = def_utils.generate_entity_type_id(
            vendor=keys_map[defKey.VENDOR],
            nss=keys_map[defKey.ENTITY_TYPE_NSS],
            version=keys_map[defKey.ENTITY_TYPE_VERSION])
        self._nativeInterface = schema_svc.get_interface(interface_id)
        self._nativeEntityType = schema_svc.get_entity_type(entity_type_id)
        msg = "Successfully loaded defined entity schema to global context"
        msg_update_callback.general(msg)
        logger.SERVER_LOGGER.debug(msg)
    except cse_exception.DefNotSupportedException:
        # Older api versions simply don't get defined entity support.
        msg = "Skipping initialization of defined entity type" \
              " and defined entity interface"
        msg_update_callback.info(msg)
        logger.SERVER_LOGGER.debug(msg)
    except cse_exception.DefSchemaServiceError as e:
        msg = f"Error while loading defined entity schema: {e.minor_error_code}"  # noqa: E501
        msg_update_callback.error(msg)
        logger.SERVER_LOGGER.debug(msg)
        # Fix: bare raise preserves the original traceback ('raise e' did not).
        raise
    except Exception as e:
        msg = f"Failed to load defined entity schema to global context: {str(e)}"  # noqa: E501
        # Fix: the original passed the exception object instead of the
        # formatted msg to the callback and logger.
        msg_update_callback.error(msg)
        logger.SERVER_LOGGER.error(msg)
        raise
    finally:
        if sysadmin_client:
            sysadmin_client.logout()
def get_ovdc_k8s_provider_metadata(org_name=None, ovdc_name=None, ovdc_id=None, include_credentials=False, include_nsxt_info=False):
    """Get k8s provider metadata for an org VDC.

    Reads the VDC's metadata; when the provider is PKS, also copies the
    PKS-specific metadata keys and (optionally) PKS credentials and NSX-T
    info from the PKS cache.

    :param str org_name:
    :param str ovdc_name:
    :param str ovdc_id:
    :param bool include_credentials: add PKS account credentials to result.
    :param bool include_nsxt_info: add NSX-T server info to result.

    :return: Dictionary with k8s provider metadata

    :rtype: Dict
    """
    client = None
    try:
        client = vcd_utils.get_sys_admin_client()
        ovdc = vcd_utils.get_vdc(client=client, vdc_name=ovdc_name,
                                 vdc_id=ovdc_id, org_name=org_name,
                                 is_admin_operation=True)
        all_metadata = pyvcd_utils.metadata_to_dict(ovdc.get_all_metadata())
        # Missing provider metadata defaults to K8sProvider.NONE.
        k8s_provider = all_metadata.get(K8S_PROVIDER_KEY, K8sProvider.NONE)
        result = {K8S_PROVIDER_KEY: k8s_provider}
        if k8s_provider == K8sProvider.PKS:
            result.update({k: all_metadata[k] for k in PksCache.get_pks_keys()})  # noqa: E501
            # Plans are stored comma-separated; expose them as a list.
            result[PKS_PLANS_KEY] = result[PKS_PLANS_KEY].split(',')

            # Get the credentials from PksCache
            if include_credentials or include_nsxt_info:
                pks_cache = utils.get_pks_cache()
                pvdc_info = \
                    pks_cache.get_pvdc_info(vcd_utils.get_pvdc_id(ovdc))
                if include_credentials:
                    # noqa: E501 TODO in case only ovdc_id is provided, we need a way to get org_name
                    pks_info = \
                        pks_cache.get_pks_account_info(org_name, pvdc_info.vc)
                    result.update(pks_info.credentials._asdict())
                if include_nsxt_info:
                    nsxt_info = pks_cache.get_nsxt_info(pvdc_info.vc)
                    result['nsxt'] = nsxt_info
        return result
    finally:
        if client is not None:
            client.logout()
def get_all_ovdc_with_metadata():
    """Query all admin org VDC records along with k8s provider metadata.

    :return: typed-query result records including the SYSTEM-domain
        ``k8s_provider`` metadata field.
    """
    sys_client = None
    try:
        sys_client = vcd_utils.get_sys_admin_client()
        query = sys_client.get_typed_query(
            vcd_client.ResourceType.ADMIN_ORG_VDC.value,
            query_result_format=vcd_client.QueryResultFormat.RECORDS,
            fields='metadata@SYSTEM:k8s_provider')
        return query.execute()
    finally:
        if sys_client is not None:
            sys_client.logout()
def _construct_pks_compute_profile_name(vdc_id):
    """Construct pks compute profile name.

    :param str vdc_id: UUID of the vdc in vcd

    :return: pks compute profile name

    :rtype: str
    """
    sys_client = None
    try:
        sys_client = get_sys_admin_client()
        # Profile name embeds both the VDC id and its display name.
        vdc = get_vdc(client=sys_client, vdc_id=vdc_id)
        profile_name = f"cp--{vdc_id}--{vdc.name}"
        return profile_name
    finally:
        if sys_client:
            sys_client.logout()
def decorator_wrapper(*args, **kwargs):
    """Invoke the wrapped ``func`` after an optional authorization check.

    When 'enforce_authorization' is enabled in the server config, the
    caller's session must hold ``required_rights``; otherwise the call is
    rejected with an exception.
    """
    admin_client = None
    try:
        authorized = True
        config = get_server_runtime_config()
        if config['service']['enforce_authorization']:
            admin_client = get_sys_admin_client()
            # args[0] is the broker instance ('self' of the wrapped method)
            session = args[0].client_session
            authorized = _is_authorized(admin_client, session,
                                        required_rights)
        if not authorized:
            raise Exception(
                'Access Forbidden. Missing required rights.')
        return func(*args, **kwargs)
    finally:
        if admin_client is not None:
            admin_client.logout()
def construct_ctr_prov_ctx_from_pks_cache(ovdc_id, org_name, pks_plans, pks_cluster_domain, container_provider):
    """Build a container-provider context dict for the given ovdc.

    For non-PKS providers the context only records the provider key. For
    PKS, the context is assembled from the PKS cache (account, pvdc and
    NSX-T info) plus a freshly constructed compute profile name.

    :param str ovdc_id: UUID of the org VDC.
    :param str org_name: name of the org owning the VDC.
    :param pks_plans: PKS plans to record in the context.
    :param str pks_cluster_domain: cluster domain for PKS clusters.
    :param container_provider: provider identifier (e.g. K8sProviders.PKS).

    :return: container provider context dictionary.

    :raises CseServerError: if PKS is requested but not configured, or the
        VDC is not backed by PKS-managed vSphere resources.
    """
    client = None
    try:
        ctr_prov_context = {}
        ctr_prov_context[K8S_PROVIDER_KEY] = container_provider
        if container_provider == K8sProviders.PKS:
            if not is_pks_enabled():
                raise CseServerError('CSE is not configured to work with PKS.')
            client = get_sys_admin_client()
            ovdc = get_vdc(client=client, vdc_id=ovdc_id,
                           is_admin_operation=True)
            pks_cache = get_pks_cache()
            pvdc_id = get_pvdc_id(ovdc)
            pvdc_info = pks_cache.get_pvdc_info(pvdc_id)
            # A pvdc unknown to the PKS cache cannot host PKS clusters.
            if not pvdc_info:
                LOGGER.debug(f"pvdc '{pvdc_id}' is not backed "
                             f"by PKS-managed-vSphere resources")
                raise CseServerError(f"VDC '{ovdc.get_resource().get('name')}'"
                                     " is not eligible to provide resources"
                                     " for PKS clusters.")
            pks_account_info = pks_cache.get_pks_account_info(
                org_name, pvdc_info.vc)
            nsxt_info = pks_cache.get_nsxt_info(pvdc_info.vc)
            pks_compute_profile_name = \
                _construct_pks_compute_profile_name(ovdc_id)
            ctr_prov_context = construct_pks_context(
                pks_account_info=pks_account_info,
                pvdc_info=pvdc_info,
                nsxt_info=nsxt_info,
                pks_compute_profile_name=pks_compute_profile_name,
                pks_plans=pks_plans,
                pks_cluster_domain=pks_cluster_domain,
                credentials_required=True)
        return ctr_prov_context
    finally:
        if client:
            client.logout()
def set_ovdc_container_provider_metadata(self, ovdc_id, container_prov_data=None, container_provider=None):
    """Set the container provider metadata of given ovdc.

    For non-PKS providers, existing PKS metadata keys are removed from the
    VDC; for PKS, the provider data (minus secrets) is merged into the
    metadata written to the VDC.

    :param str ovdc_id: UUID of the org VDC whose metadata is updated.
    :param dict container_prov_data: container provider context details
    :param str container_provider: name of container provider for which
        the ovdc is being enabled to deploy k8 clusters on.

    :return: task resource from setting the metadata on the VDC.
    """
    client = None
    try:
        client = get_sys_admin_client()
        ovdc = get_vdc(client, vdc_id=ovdc_id)
        ovdc_name = ovdc.get_resource().get('name')

        metadata = {}
        # No provider given defaults to K8sProviders.NONE.
        metadata[K8S_PROVIDER_KEY] = container_provider or \
            K8sProviders.NONE

        if container_provider != K8sProviders.PKS:
            LOGGER.debug(f"Remove existing metadata for ovdc:{ovdc_name}")
            self._remove_metadata_from_ovdc(ovdc, PksCache.get_pks_keys())

            LOGGER.debug(f"Updated metadata for {container_provider}:"
                         f"{metadata}")
        else:
            # Never persist secrets or NSX-T details in VDC metadata.
            container_prov_data.pop('username')
            container_prov_data.pop('secret')
            container_prov_data.pop('nsxt')
            metadata.update(container_prov_data)

        # set ovdc metadata into Vcd
        LOGGER.debug(f"On ovdc:{ovdc_name}, setting metadata:{metadata}")
        return ovdc.set_multiple_metadata(metadata, MetadataDomain.SYSTEM,
                                          MetadataVisibility.PRIVATE)
    finally:
        if client:
            client.logout()
def run(self, msg_update_callback=utils.NullPrinter()):
    """Start the CSE server and block until it is stopped.

    Sequence: verify vCD version compatibility; set up the MQTT extension
    (when the MQTT protocol is in use); load vSphere info, defined entity
    schema, templates and placement policies; optionally verify the CSE
    installation and build the PKS cache; start the message consumer and
    watchdog threads; then loop until stopped (state change or ctrl+c).

    :param utils.ConsoleMessagePrinter msg_update_callback: callback used
        to surface progress messages to the console.
    """
    sysadmin_client = None
    try:
        sysadmin_client = vcd_utils.get_sys_admin_client()
        verify_version_compatibility(sysadmin_client,
                                     self.config['vcd']['api_version'],
                                     utils.should_use_mqtt_protocol(self.config))  # noqa: E501
    except Exception as err:
        logger.SERVER_LOGGER.info(err)
        raise
    finally:
        if sysadmin_client:
            sysadmin_client.logout()

    if utils.should_use_mqtt_protocol(self.config):
        # Store/setup MQTT extension, api filter, and token info
        try:
            sysadmin_client = vcd_utils.get_sys_admin_client()
            mqtt_ext_manager = MQTTExtensionManager(sysadmin_client)
            ext_info = mqtt_ext_manager.get_extension_info(
                ext_name=server_constants.CSE_SERVICE_NAME,
                ext_version=server_constants.MQTT_EXTENSION_VERSION,
                ext_vendor=server_constants.MQTT_EXTENSION_VENDOR)
            ext_urn_id = ext_info[MQTTExtKey.EXT_URN_ID]
            ext_uuid = mqtt_ext_manager.get_extension_uuid(ext_urn_id)
            api_filters_status = mqtt_ext_manager.check_api_filters_setup(
                ext_uuid, configure_cse.API_FILTER_PATTERNS)
            if not api_filters_status:
                msg = 'MQTT Api filter is not set up'
                logger.SERVER_LOGGER.error(msg)
                raise cse_exception.MQTTExtensionError(msg)

            token_info = mqtt_ext_manager.create_extension_token(
                token_name=server_constants.MQTT_TOKEN_NAME,
                ext_urn_id=ext_urn_id)

            # Cache extension + token details in the runtime config.
            self.config['mqtt'].update(ext_info)
            self.config['mqtt'].update(token_info)
            self.config['mqtt'][MQTTExtKey.EXT_UUID] = ext_uuid
        except Exception as err:
            msg = f'MQTT extension setup error: {err}'
            logger.SERVER_LOGGER.error(msg)
            raise err
        finally:
            if sysadmin_client:
                sysadmin_client.logout()

    populate_vsphere_list(self.config['vcs'])

    # Load def entity-type and interface
    self._load_def_schema(msg_update_callback=msg_update_callback)

    # Read k8s catalog definition from catalog item metadata and append
    # the same to to server run-time config
    self._load_template_definition_from_catalog(
        msg_update_callback=msg_update_callback)

    self._load_placement_policy_details(
        msg_update_callback=msg_update_callback)

    if float(self.config['vcd']['api_version']) < float(vCDApiVersion.VERSION_35.value):  # noqa: E501
        # Read templates rules from config and update template deinfition
        # in server run-time config
        self._process_template_rules(
            msg_update_callback=msg_update_callback)

        # Make sure that all vms in templates are compliant with the
        # compute policy specified in template definition (can be affected
        # by rules).
        self._process_template_compute_policy_compliance(
            msg_update_callback=msg_update_callback)
    else:
        msg = "Template rules are not supported by CSE for vCD api " \
              "version 35.0 or above. Skipping template rule processing."
        msg_update_callback.info(msg)
        logger.SERVER_LOGGER.debug(msg)

    if self.should_check_config:
        configure_cse.check_cse_installation(
            self.config, msg_update_callback=msg_update_callback)

    if self.config.get('pks_config'):
        pks_config = self.config.get('pks_config')
        self.pks_cache = PksCache(
            pks_servers=pks_config.get('pks_api_servers', []),
            pks_accounts=pks_config.get('pks_accounts', []),
            pvdcs=pks_config.get('pvdcs', []),
            orgs=pks_config.get('orgs', []),
            nsxt_servers=pks_config.get('nsxt_servers', []))

    num_processors = self.config['service']['processors']
    try:
        self.consumer = MessageConsumer(self.config, num_processors)
        name = server_constants.MESSAGE_CONSUMER_THREAD
        consumer_thread = Thread(name=name, target=consumer_thread_run,
                                 args=(self.consumer, ))
        consumer_thread.daemon = True
        consumer_thread.start()
        self.consumer_thread = consumer_thread
        msg = f"Started thread '{name}' ({consumer_thread.ident})"
        msg_update_callback.general(msg)
        logger.SERVER_LOGGER.info(msg)
    except KeyboardInterrupt:
        if self.consumer:
            self.consumer.stop()
        interrupt_msg = f"\nKeyboard interrupt when starting thread " \
                        f"'{name}'"
        logger.SERVER_LOGGER.debug(interrupt_msg)
        raise Exception(interrupt_msg)
    except Exception:
        if self.consumer:
            self.consumer.stop()
        logger.SERVER_LOGGER.error(traceback.format_exc())

    # Updating state to Running before starting watchdog because watchdog
    # exits when server is not Running
    self._state = ServerState.RUNNING

    # Start consumer watchdog
    name = server_constants.WATCHDOG_THREAD
    consumer_watchdog = Thread(name=name,
                               target=watchdog_thread_run,
                               args=(self, num_processors))
    consumer_watchdog.daemon = True
    consumer_watchdog.start()
    self._consumer_watchdog = consumer_watchdog
    msg = f"Started thread '{name}' ({consumer_watchdog.ident})"
    msg_update_callback.general(msg)
    logger.SERVER_LOGGER.info(msg)

    message = f"Container Service Extension for vCloud Director" \
              f"\nServer running using config file: {self.config_file}" \
              f"\nLog files: {logger.SERVER_INFO_LOG_FILEPATH}, " \
              f"{logger.SERVER_DEBUG_LOG_FILEPATH}" \
              f"\nwaiting for requests (ctrl+c to close)"

    signal.signal(signal.SIGINT, signal_handler)
    msg_update_callback.general_no_color(message)
    logger.SERVER_LOGGER.info(message)

    # Record telemetry on user action and details of operation.
    cse_params = {
        PayloadKey.WAS_DECRYPTION_SKIPPED: bool(self.skip_config_decryption),  # noqa: E501
        PayloadKey.WAS_PKS_CONFIG_FILE_PROVIDED: bool(self.pks_config_file),  # noqa: E501
        PayloadKey.WAS_INSTALLATION_CHECK_SKIPPED: bool(self.should_check_config)  # noqa: E501
    }
    record_user_action_details(cse_operation=CseOperation.SERVICE_RUN,
                               cse_params=cse_params)
    record_user_action(cse_operation=CseOperation.SERVICE_RUN)

    # Main loop: idle until a stop is requested and all requests drain.
    while True:
        try:
            time.sleep(1)
            if self._state == ServerState.STOPPING and \
                    self.active_requests_count() == 0:
                break
        except KeyboardInterrupt:
            break
        except Exception:
            msg_update_callback.general_no_color(
                traceback.format_exc())
            logger.SERVER_LOGGER.error(traceback.format_exc())
            sys.exit(1)

    logger.SERVER_LOGGER.info("Stop detected")
    logger.SERVER_LOGGER.info("Closing connections...")
    self._state = ServerState.STOPPING
    try:
        self.consumer.stop()
    except Exception:
        logger.SERVER_LOGGER.error(traceback.format_exc())

    self._state = ServerState.STOPPED
    logger.SERVER_LOGGER.info("Done")
def _connect_sys_admin(self):
    """Open a system administrator client and cache it on this instance."""
    admin_client = get_sys_admin_client()
    self.sys_admin_client = admin_client
def add_nodes(client, num_nodes, node_type, org, vdc, vapp, catalog_name, template, network_name, num_cpu=None, memory_in_mb=None, storage_profile=None, ssh_key=None):
    """Add ``num_nodes`` VMs of ``node_type`` to a cluster vApp.

    Clones the template's source VM into the vApp, then for each new VM
    sets cpu/memory (falling back to template defaults), powers it on, and
    for NFS nodes runs the NFSD setup script in the guest.

    :param client: vCD client used for vApp/VM operations.
    :param int num_nodes: number of VMs to add; returns None when < 1.
    :param node_type: node role; NFS nodes get extra guest customization.
    :param org: pyvcloud Org of the calling user.
    :param vdc: pyvcloud VDC used to resolve the storage profile.
    :param vapp: target vApp receiving the VMs.
    :param str catalog_name: catalog holding the template.
    :param dict template: template definition (LocalTemplateKey keys).
    :param str network_name: vApp network for the new VMs.
    :param num_cpu: cpu count override; template default when falsy.
    :param memory_in_mb: memory override; template default when falsy.
    :param storage_profile: storage profile name, resolved via the vdc.
    :param str ssh_key: public key injected via guest customization.

    :return: dict with the last 'task' and the VM 'specs' used.

    :raises NodeCreationError: wrapping any failure, with the list of
        target VM names that were being created.
    """
    specs = []
    try:
        if num_nodes < 1:
            return None

        # DEV NOTE: With api v33.0 and onwards, get_catalog operation will fail
        # for non admin users of an an org which is not hosting the catalog,
        # even if the catalog is explicitly shared with the org in question.
        # This happens because for api v 33.0 and onwards, the Org XML no
        # longer returns the href to catalogs accessible to the org, and typed
        # queries hide the catalog link from non admin users.
        # As a workaround, we will use a sys admin client to get the href and
        # pass it forward. Do note that the catalog itself can still be
        # accessed by these non admin users, just that they can't find by the
        # href on their own.
        sys_admin_client = None
        try:
            sys_admin_client = vcd_utils.get_sys_admin_client()
            org_name = org.get_name()
            org_resource = sys_admin_client.get_org_by_name(org_name)
            org_sa = Org(sys_admin_client, resource=org_resource)
            catalog_item = org_sa.get_catalog_item(
                catalog_name, template[LocalTemplateKey.CATALOG_ITEM_NAME])
            catalog_item_href = catalog_item.Entity.get('href')
        finally:
            if sys_admin_client:
                sys_admin_client.logout()

        source_vapp = VApp(client, href=catalog_item_href)
        source_vm = source_vapp.get_all_vms()[0].get('name')
        if storage_profile is not None:
            storage_profile = vdc.get_storage_profile(storage_profile)

        # Inject the ssh key during post-customization, if one was given.
        cust_script = None
        if ssh_key is not None:
            cust_script = \
                "#!/usr/bin/env bash\n" \
                "if [ x$1=x\"postcustomization\" ];\n" \
                "then\n" \
                "mkdir -p /root/.ssh\n" \
                f"echo '{ssh_key}' >> /root/.ssh/authorized_keys\n" \
                "chmod -R go-rwx /root/.ssh\n" \
                "fi"

        for n in range(num_nodes):
            # Pick a random name that is not already used by a VM in the vApp
            # (get_vm raising means the name is free).
            name = None
            while True:
                name = f"{node_type}-{''.join(random.choices(string.ascii_lowercase + string.digits, k=4))}"  # noqa: E501
                try:
                    vapp.get_vm(name)
                except Exception:
                    break
            spec = {
                'source_vm_name': source_vm,
                'vapp': source_vapp.resource,
                'target_vm_name': name,
                'hostname': name,
                'password_auto': True,
                'network': network_name,
                'ip_allocation_mode': 'pool'
            }
            if cust_script is not None:
                spec['cust_script'] = cust_script
            if storage_profile is not None:
                spec['storage_profile'] = storage_profile
            specs.append(spec)

        task = vapp.add_vms(specs, power_on=False)
        client.get_task_monitor().wait_for_status(task)
        vapp.reload()

        if not num_cpu:
            num_cpu = template[LocalTemplateKey.CPU]
        if not memory_in_mb:
            memory_in_mb = template[LocalTemplateKey.MEMORY]
        for spec in specs:
            vm_name = spec['target_vm_name']
            vm_resource = vapp.get_vm(vm_name)
            vm = VM(client, resource=vm_resource)

            task = vm.modify_cpu(num_cpu)
            client.get_task_monitor().wait_for_status(task)

            task = vm.modify_memory(memory_in_mb)
            client.get_task_monitor().wait_for_status(task)

            task = vm.power_on()
            client.get_task_monitor().wait_for_status(task)
            vapp.reload()

            if node_type == NodeType.NFS:
                LOGGER.debug(f"Enabling NFS server on {vm_name}")
                script_filepath = get_local_script_filepath(
                    template[LocalTemplateKey.NAME],
                    template[LocalTemplateKey.REVISION],
                    ScriptFile.NFSD)
                script = utils.read_data_file(script_filepath, logger=LOGGER)
                exec_results = execute_script_in_nodes(vapp=vapp,
                                                       node_names=[vm_name],
                                                       script=script)
                errors = _get_script_execution_errors(exec_results)
                if errors:
                    raise ScriptExecutionError(
                        f"VM customization script execution failed on node "
                        f"{vm_name}:{errors}")
    except Exception as e:
        # TODO: get details of the exception to determine cause of failure,
        # e.g. not enough resources available.
        node_list = [entry.get('target_vm_name') for entry in specs]
        raise NodeCreationError(node_list, str(e))

    vapp.reload()
    return {'task': task, 'specs': specs}
def get_ovdc_container_provider_metadata(self, ovdc_name=None, ovdc_id=None, org_name=None, credentials_required=False, nsxt_info_required=False): """Get metadata of given ovdc, pertaining to the container provider. :param str ovdc_name: name of the ovdc :param str ovdc_id: UUID of ovdc :param str org_name: specific org to use if @org is not given. If None, uses currently logged-in org from @client. :param bool credentials_required: Decides if output metadata should include credentials or not. :return: metadata of the ovdc :rtype: dict :raises EntityNotFoundException: if the ovdc could not be found. """ # Get pvdc and pks information from oVdc metadata client = None try: client = get_sys_admin_client() ovdc = get_vdc(client=client, vdc_name=ovdc_name, vdc_id=ovdc_id, org_name=org_name, is_admin_operation=True) all_metadata = metadata_to_dict(ovdc.get_all_metadata()) if K8S_PROVIDER_KEY not in all_metadata: container_provider = K8sProviders.NONE else: container_provider = all_metadata[K8S_PROVIDER_KEY] ctr_prov_details = {} if container_provider == K8sProviders.PKS: # Filter out container provider metadata into a dict ctr_prov_details = { metadata_key: all_metadata[metadata_key] for metadata_key in PksCache.get_pks_keys() } # Get the credentials from PksCache pvdc_id = get_pvdc_id(ovdc) pks_cache = get_pks_cache() pvdc_info = pks_cache.get_pvdc_info(pvdc_id) ctr_prov_details[PKS_PLANS_KEY] = \ ctr_prov_details[PKS_PLANS_KEY].split(',') if credentials_required: pks_info = pks_cache.get_pks_account_info( org_name, pvdc_info.vc) ctr_prov_details.update(pks_info.credentials._asdict()) if nsxt_info_required: nsxt_info = pks_cache.get_nsxt_info(pvdc_info.vc) ctr_prov_details['nsxt'] = nsxt_info ctr_prov_details[K8S_PROVIDER_KEY] = container_provider return ctr_prov_details finally: if client: client.logout()
def execute_script_in_nodes(config, vapp, password, script, nodes, check_tools=True, wait=True):
    """Run ``script`` as root in the guest OS of each given node.

    Passwords embedded in ``chpasswd`` lines are masked before the script
    text is written to the debug log.

    :param config: server configuration (unused directly here).
    :param pyvcloud.vcd.vapp.VApp vapp: vApp containing the node VMs.
    :param str password: root password for the guest VMs.
    :param str script: script text to run in the guest.
    :param list nodes: node dicts; each node's 'name' identifies its VM.
    :param bool check_tools: if True, wait for guest tools and exec
        readiness before running the script.
    :param bool wait: if True, block for completion and capture
        stdout/stderr; if False, fire-and-forget (empty output strings).

    :return: list of per-node results; when ``wait`` is True each entry is
        the tuple returned by ``execute_script_in_guest`` (index 1/2 carry
        stdout/stderr payloads), otherwise a one-element list with the
        ``execute_program_in_guest`` return value.
    """
    all_results = []
    sys_admin_client = None
    try:
        sys_admin_client = get_sys_admin_client()
        for node in nodes:
            # Mask password material in chpasswd commands before logging.
            if 'chpasswd' in script:
                p = re.compile(':.*\"')
                debug_script = p.sub(':***\"', script)
            else:
                debug_script = script
            LOGGER.debug(f"will try to execute script on {node.get('name')}:\n"
                         f"{debug_script}")
            vs = get_vsphere(sys_admin_client, vapp,
                             vm_name=node.get('name'), logger=LOGGER)
            vs.connect()
            moid = vapp.get_vm_moid(node.get('name'))
            vm = vs.get_vm_by_moid(moid)
            if check_tools:
                LOGGER.debug(f"waiting for tools on {node.get('name')}")
                vs.wait_until_tools_ready(
                    vm, sleep=5, callback=wait_for_tools_ready_callback)
                wait_until_ready_to_exec(vs, vm, password)
            LOGGER.debug(f"about to execute script on {node.get('name')} "
                         f"(vm={vm}), wait={wait}")
            if wait:
                result = vs.execute_script_in_guest(
                    vm, 'root', password, script,
                    target_file=None,
                    wait_for_completion=True,
                    wait_time=10,
                    get_output=True,
                    delete_script=True,
                    callback=wait_for_guest_execution_callback)
                result_stdout = result[1].content.decode()
                result_stderr = result[2].content.decode()
            else:
                # Fire-and-forget: no output is collected.
                result = [
                    vs.execute_program_in_guest(vm, 'root', password, script,
                                                wait_for_completion=False,
                                                get_output=False)
                ]
                result_stdout = ''
                result_stderr = ''
            LOGGER.debug(result[0])
            LOGGER.debug(result_stderr)
            LOGGER.debug(result_stdout)
            all_results.append(result)
    finally:
        if sys_admin_client:
            sys_admin_client.logout()
    return all_results
def run(self, msg_update_callback=utils.NullPrinter()):
    """Start the CSE server (AMQP consumers) and block until stopped.

    Sequence: validate config; verify vCD version compatibility; load
    vSphere info, defined entity schema and templates; optionally verify
    the CSE installation and build the PKS cache; start one AMQP
    MessageConsumer thread per configured listener; then loop until
    stopped (state change or ctrl+c) and shut consumers down.

    :param utils.ConsoleMessagePrinter msg_update_callback: callback used
        to surface progress messages to the console.
    """
    self.config = get_validated_config(
        self.config_file,
        pks_config_file_name=self.pks_config_file,
        skip_config_decryption=self.skip_config_decryption,
        decryption_password=self.decryption_password,
        log_wire_file=logger.SERVER_DEBUG_WIRELOG_FILEPATH,
        logger_debug=logger.SERVER_LOGGER,
        msg_update_callback=msg_update_callback)

    sysadmin_client = None
    try:
        sysadmin_client = vcd_utils.get_sys_admin_client()
        verify_version_compatibility(sysadmin_client,
                                     self.config['vcd']['api_version'])
    except Exception as err:
        logger.SERVER_LOGGER.info(err)
        raise
    finally:
        if sysadmin_client:
            sysadmin_client.logout()

    populate_vsphere_list(self.config['vcs'])

    # Load def entity-type and interface
    self._load_def_schema(msg_update_callback=msg_update_callback)

    # Read k8s catalog definition from catalog item metadata and append
    # the same to to server run-time config
    self._load_template_definition_from_catalog(
        msg_update_callback=msg_update_callback)

    if float(self.config['vcd']['api_version']) < float(
            vCDApiVersion.VERSION_35.value):  # noqa: E501
        # Read templates rules from config and update template deinfition
        # in server run-time config
        self._process_template_rules(
            msg_update_callback=msg_update_callback)

        # Make sure that all vms in templates are compliant with the
        # compute policy specified in template definition (can be affected
        # by rules).
        self._process_template_compute_policy_compliance(
            msg_update_callback=msg_update_callback)
    else:
        msg = "Template rules are not supported by CSE for vCD api " \
              "version 35.0 or above. Skipping template rule processing."
        msg_update_callback.info(msg)
        logger.SERVER_LOGGER.debug(msg)

    if self.should_check_config:
        configure_cse.check_cse_installation(
            self.config, msg_update_callback=msg_update_callback)

    if self.config.get('pks_config'):
        pks_config = self.config.get('pks_config')
        self.pks_cache = PksCache(
            pks_servers=pks_config.get('pks_api_servers', []),
            pks_accounts=pks_config.get('pks_accounts', []),
            pvdcs=pks_config.get('pvdcs', []),
            orgs=pks_config.get('orgs', []),
            nsxt_servers=pks_config.get('nsxt_servers', []))

    # Spin up one AMQP consumer thread per configured listener.
    amqp = self.config['amqp']
    num_consumers = self.config['service']['listeners']
    for n in range(num_consumers):
        try:
            c = MessageConsumer(amqp['host'], amqp['port'], amqp['ssl'],
                                amqp['vhost'], amqp['username'],
                                amqp['password'], amqp['exchange'],
                                amqp['routing_key'])
            name = 'MessageConsumer-%s' % n
            t = Thread(name=name, target=consumer_thread, args=(c, ))
            t.daemon = True
            t.start()
            msg = f"Started thread '{name} ({t.ident})'"
            msg_update_callback.general(msg)
            logger.SERVER_LOGGER.info(msg)
            self.threads.append(t)
            self.consumers.append(c)
            time.sleep(0.25)
        except KeyboardInterrupt:
            break
        except Exception:
            logger.SERVER_LOGGER.error(traceback.format_exc())

    logger.SERVER_LOGGER.info(
        f"Number of threads started: {len(self.threads)}")  # noqa: E501

    self._state = ServerState.RUNNING

    message = f"Container Service Extension for vCloud Director" \
              f"\nServer running using config file: {self.config_file}" \
              f"\nLog files: {logger.SERVER_INFO_LOG_FILEPATH}, " \
              f"{logger.SERVER_DEBUG_LOG_FILEPATH}" \
              f"\nwaiting for requests (ctrl+c to close)"

    signal.signal(signal.SIGINT, signal_handler)
    msg_update_callback.general_no_color(message)
    logger.SERVER_LOGGER.info(message)

    # Record telemetry on user action and details of operation.
    cse_params = {
        PayloadKey.WAS_DECRYPTION_SKIPPED: bool(self.skip_config_decryption),  # noqa: E501
        PayloadKey.WAS_PKS_CONFIG_FILE_PROVIDED: bool(self.pks_config_file),  # noqa: E501
        PayloadKey.WAS_INSTALLATION_CHECK_SKIPPED: bool(self.should_check_config)  # noqa: E501
    }
    record_user_action_details(cse_operation=CseOperation.SERVICE_RUN,
                               cse_params=cse_params)
    record_user_action(cse_operation=CseOperation.SERVICE_RUN)

    # Main loop: idle until a stop is requested and all requests drain.
    while True:
        try:
            time.sleep(1)
            if self._state == ServerState.STOPPING and \
                    self.active_requests_count() == 0:
                break
        except KeyboardInterrupt:
            break
        except Exception:
            msg_update_callback.general_no_color(traceback.format_exc())
            logger.SERVER_LOGGER.error(traceback.format_exc())
            sys.exit(1)

    logger.SERVER_LOGGER.info("Stop detected")
    logger.SERVER_LOGGER.info("Closing connections...")
    for c in self.consumers:
        try:
            c.stop()
        except Exception:
            logger.SERVER_LOGGER.error(traceback.format_exc())

    self._state = ServerState.STOPPED
    logger.SERVER_LOGGER.info("Done")
def sys_admin_client(self):
    """Return the cached system admin client, creating it on first access."""
    client = self._sys_admin_client
    if client is None:
        client = vcd_utils.get_sys_admin_client()
        self._sys_admin_client = client
    return client