def test_0100_vapp_metadata(self):
    """Test the methods related to metadata manipulation in vapp.py.

    This test passes if all the metadata operations are successful.
    """
    logger = Environment.get_default_logger()
    vapp_name = TestVApp._empty_vapp_name
    vapp = Environment.get_vapp_in_test_vdc(
        client=TestVApp._client, vapp_name=vapp_name)

    # add new metadata
    # BUG FIX: continuation strings lacked the f prefix, so the
    # {placeholders} were logged literally instead of interpolated.
    logger.debug(f'Adding metadata [key={TestVApp._metadata_key},'
                 f'value={TestVApp._metadata_value}] to vApp:'
                 f'{vapp_name}')
    task = vapp.set_metadata(
        domain=MetadataDomain.GENERAL.value,
        visibility=MetadataVisibility.READ_WRITE,
        key=TestVApp._metadata_key,
        value=TestVApp._metadata_value)
    result = TestVApp._client.get_task_monitor().wait_for_success(task)
    self.assertEqual(result.get('status'), TaskStatus.SUCCESS.value)

    # retrieve metadata
    logger.debug(f'Retrieving metadata with key='
                 f'{TestVApp._metadata_key} from vApp:{vapp_name}.')
    entries = metadata_to_dict(vapp.get_metadata())
    self.assertTrue(
        TestVApp._metadata_key in entries,
        f'Should have been able to retrieve metadata entry with '
        f'key={TestVApp._metadata_key}.')

    # update metadata value as org admin
    logger.debug(f'Updating metadata on vApp:{vapp_name} with key='
                 f'{TestVApp._metadata_key} to value='
                 f'{TestVApp._metadata_new_value}.')
    task = vapp.set_metadata(
        domain=MetadataDomain.GENERAL.value,
        visibility=MetadataVisibility.READ_WRITE,
        key=TestVApp._metadata_key,
        value=TestVApp._metadata_new_value)
    TestVApp._client.get_task_monitor().wait_for_success(task)
    entries = metadata_to_dict(vapp.get_metadata())
    self.assertEqual(TestVApp._metadata_new_value,
                     entries[TestVApp._metadata_key])

    # remove metadata entry
    logger.debug(f'Removing metadata with '
                 f'key={TestVApp._metadata_key} from vApp:{vapp_name}.')
    task = vapp.remove_metadata(key=TestVApp._metadata_key)
    result = TestVApp._client.get_task_monitor().wait_for_success(task)
    self.assertEqual(result.get('status'), TaskStatus.SUCCESS.value)
def test_0100_vapp_metadata(self):
    """Test the methods related to metadata manipulation in vapp.py.

    This test passes if all the metadata operations are successful.
    """
    logger = Environment.get_default_logger()
    vapp_name = TestVApp._empty_vapp_name
    vapp = Environment.get_vapp_in_test_vdc(
        client=TestVApp._client, vapp_name=vapp_name)

    # add new metadata
    # BUG FIX: continuation strings lacked the f prefix, so the
    # {placeholders} were logged literally instead of interpolated.
    logger.debug(f'Adding metadata [key={TestVApp._metadata_key},'
                 f'value={TestVApp._metadata_value}] to vApp:'
                 f'{vapp_name}')
    task = vapp.set_metadata(
        domain=MetadataDomain.GENERAL.value,
        visibility=MetadataVisibility.READ_WRITE,
        key=TestVApp._metadata_key,
        value=TestVApp._metadata_value)
    result = TestVApp._client.get_task_monitor().wait_for_success(task)
    self.assertEqual(result.get('status'), TaskStatus.SUCCESS.value)

    # retrieve metadata
    logger.debug(f'Retrieving metadata with key='
                 f'{TestVApp._metadata_key} from vApp:{vapp_name}.')
    entries = metadata_to_dict(vapp.get_metadata())
    self.assertTrue(
        TestVApp._metadata_key in entries,
        f'Should have been able to retrieve metadata entry with '
        f'key={TestVApp._metadata_key}.')

    # update metadata value as org admin
    logger.debug(f'Updating metadata on vApp:{vapp_name} with key='
                 f'{TestVApp._metadata_key} to value='
                 f'{TestVApp._metadata_new_value}.')
    task = vapp.set_metadata(
        domain=MetadataDomain.GENERAL.value,
        visibility=MetadataVisibility.READ_WRITE,
        key=TestVApp._metadata_key,
        value=TestVApp._metadata_new_value)
    TestVApp._client.get_task_monitor().wait_for_success(task)
    entries = metadata_to_dict(vapp.get_metadata())
    self.assertEqual(TestVApp._metadata_new_value,
                     entries[TestVApp._metadata_key])

    # remove metadata entry
    logger.debug(f'Removing metadata with '
                 f'key={TestVApp._metadata_key} from vApp:{vapp_name}.')
    task = vapp.remove_metadata(key=TestVApp._metadata_key)
    result = TestVApp._client.get_task_monitor().wait_for_success(task)
    self.assertEqual(result.get('status'), TaskStatus.SUCCESS.value)
def _remove_metadata_from_ovdc(ovdc: VDC, keys=None):
    """Remove the given metadata keys from an org VDC's SYSTEM domain.

    Keys not currently present on the VDC are silently skipped.

    :param VDC ovdc: org VDC to strip metadata from.
    :param list keys: metadata key names to remove; defaults to none.
    """
    existing = pyvcd_utils.metadata_to_dict(ovdc.get_all_metadata())
    for key in (keys or []):
        if key in existing:
            ovdc.remove_metadata(key, domain=vcd_client.MetadataDomain.SYSTEM)
def get_k8s_local_template_definition(client, catalog_name, catalog_item_name,
                                      org=None, org_name=None):
    """Fetch definition of a template.

    Read metadata on a catalog item and construct a dictionary that
    defines the template. If partial data (which indicates a malformed
    or non k8s template) is retrieved from the metadata, None is
    returned.

    :param pyvcloud.vcd.Client client: A sys admin client to be used to
        retrieve metadata off the catalog item.
    :param str catalog_name: Name of the catalog where the template resides.
    :param str catalog_item_name: Name of the template.
    :param pyvcloud.vcd.Org org: Org object which hosts the catalog.
    :param str org_name: Name of the org that is hosting the catalog. Can be
        provided in lieu param org, however param org takes precedence.

    :return: definition of the template, or None if the catalog item's
        metadata does not describe a valid k8s template.

    :rtype: dict (or None)
    """
    if org is None:
        org = get_org(client, org_name=org_name)
    md = org.get_all_metadata_from_catalog_item(catalog_name=catalog_name,
                                                item_name=catalog_item_name)
    try:
        metadata = metadata_to_dict(md)
        # _dict_to_k8s_local_template_definition raises ValueError on
        # partial/malformed metadata; treat that as "not a k8s template".
        return _dict_to_k8s_local_template_definition(metadata)
    except ValueError:
        return None
def get_ovdc_container_provider_metadata(self, ovdc_name=None, ovdc_id=None, org_name=None, credentials_required=False, nsxt_info_required=False): """Get metadata of given ovdc, pertaining to the container provider. :param str ovdc_name: name of the ovdc :param str ovdc_id: UUID of ovdc :param str org_name: specific org to use if @org is not given. If None, uses currently logged-in org from @client. :param bool credentials_required: Decides if output metadata should include credentials or not. :return: metadata of the ovdc :rtype: dict :raises EntityNotFoundException: if the ovdc could not be found. """ # Get pvdc and pks information from oVdc metadata ovdc = self.get_ovdc(ovdc_name, ovdc_id, org_name) all_metadata = utils.metadata_to_dict(ovdc.get_all_metadata()) if CONTAINER_PROVIDER_KEY not in all_metadata: container_provider = CtrProvType.NONE.value else: container_provider = \ all_metadata[CONTAINER_PROVIDER_KEY] ctr_prov_details = {} if container_provider == CtrProvType.PKS.value: # Filter out container provider metadata into a dict ctr_prov_details = { metadata_key: all_metadata[metadata_key] for metadata_key in PksCache.get_pks_keys() } # Get the credentials from PksCache pvdc_id = self.get_pvdc_id(ovdc) pvdc_info = self.pks_cache.get_pvdc_info(pvdc_id) ctr_prov_details[PKS_PLANS] = \ ctr_prov_details[PKS_PLANS].split(',') if credentials_required: pks_info = self.pks_cache.get_pks_account_info( org_name, pvdc_info.vc) ctr_prov_details.update(pks_info.credentials._asdict()) if nsxt_info_required: nsxt_info = self.pks_cache.get_nsxt_info(pvdc_info.vc) ctr_prov_details['nsxt'] = nsxt_info ctr_prov_details[CONTAINER_PROVIDER_KEY] = container_provider return ctr_prov_details
def get_ovdc_k8s_provider_metadata(org_name=None, ovdc_name=None,
                                   ovdc_id=None, include_credentials=False,
                                   include_nsxt_info=False):
    """Get k8s provider metadata for an org VDC.

    Opens a short-lived sys admin client and always logs it out on exit.

    :param str org_name:
    :param str ovdc_name:
    :param str ovdc_id:
    :param bool include_credentials: if True, include PKS account
        credentials in the result (PKS-backed VDCs only).
    :param bool include_nsxt_info: if True, include NSX-T information in
        the result (PKS-backed VDCs only).

    :return: Dictionary with k8s provider metadata

    :rtype: Dict
    """
    client = None
    try:
        client = vcd_utils.get_sys_admin_client()
        ovdc = vcd_utils.get_vdc(client=client, vdc_name=ovdc_name,
                                 vdc_id=ovdc_id, org_name=org_name,
                                 is_admin_operation=True)
        all_metadata = pyvcd_utils.metadata_to_dict(ovdc.get_all_metadata())
        k8s_provider = all_metadata.get(K8S_PROVIDER_KEY, K8sProvider.NONE)
        result = {K8S_PROVIDER_KEY: k8s_provider}
        if k8s_provider == K8sProvider.PKS:
            result.update(
                {k: all_metadata[k] for k in PksCache.get_pks_keys()})  # noqa: E501
            # PKS plans are stored as a comma separated string; expose as
            # a list.
            result[PKS_PLANS_KEY] = result[PKS_PLANS_KEY].split(',')

            # Get the credentials from PksCache
            if include_credentials or include_nsxt_info:
                pks_cache = utils.get_pks_cache()
                pvdc_info = \
                    pks_cache.get_pvdc_info(vcd_utils.get_pvdc_id(ovdc))
                if include_credentials:
                    # noqa: E501 TODO in case only ovdc_id is provided, we need a way to get org_name
                    pks_info = \
                        pks_cache.get_pks_account_info(org_name, pvdc_info.vc)
                    result.update(pks_info.credentials._asdict())
                if include_nsxt_info:
                    nsxt_info = pks_cache.get_nsxt_info(pvdc_info.vc)
                    result['nsxt'] = nsxt_info
        return result
    finally:
        if client is not None:
            client.logout()
def get_ovdc_container_provider_metadata(self, ovdc_name, ovdc_id=None, org_name=None): """Get metadata of given ovdc, pertaining to the container provider. :param str ovdc_name: name of the ovdc :param str org_name: specific org to use if @org is not given. If None, uses currently logged-in org from @client. :return: metadata of the ovdc :rtype: dict :raises EntityNotFoundException: if the ovdc could not be found. """ # Get pvdc and pks information from pvdc cache if ovdc_id is None: ovdc = get_vdc(self.client, ovdc_name, org_name=org_name, is_admin_operation=True) else: # TODO() - Implement this in pyvcloud ovdc = self._get_vdc_by_id(ovdc_id) all_metadata = utils.metadata_to_dict(ovdc.get_all_metadata()) if 'container_provider' not in all_metadata: container_provider = None else: container_provider = \ all_metadata['container_provider'] if container_provider == 'pks': # Filter out container provider metadata into a dict metadata = { metadata_key: all_metadata[metadata_key] for metadata_key in self.__ovdc_metadata_keys } pvdc_element = ovdc.resource.ProviderVdcReference pvdc_id = pvdc_element.get('id') pvdc_info = self.pvdc_cache.get_pvdc_info(pvdc_id) pks_info = self.pvdc_cache.get_pks_info(ovdc.name, pvdc_info['vc']) # Get ovdc metadata from vcd; copy the credentials from pvdc cache metadata['rp_path'] = metadata['rp_path'].split(',') metadata['pks_plans'] = metadata['pks_plans'].split(',') metadata['username'] = pks_info['username'] metadata['secret'] = pks_info['secret'] else: metadata = {'container_provider': container_provider} return metadata
def test_0390_update_metadata(self):
    """Update an existing metadata entry's value and read it back.

    Sets the test metadata key to a new value as org admin and asserts
    the updated value is returned by get_metadata.
    """
    # update metadata value as org admin
    vm = VM(TestVM._sys_admin_client,
            href=TestVM._test_vapp_first_vm_href)
    task = vm.set_metadata(domain=MetadataDomain.GENERAL.value,
                           visibility=MetadataVisibility.READ_WRITE,
                           key=TestVM._metadata_key,
                           value=TestVM._metadata_new_value)
    # CONSISTENCY FIX: wait on the client that created the task
    # (sys admin); the original mixed TestVM._client here, and an
    # org-user session may not be able to resolve a sys-admin task.
    TestVM._sys_admin_client.get_task_monitor().wait_for_success(task)
    entries = metadata_to_dict(vm.get_metadata())
    self.assertEqual(TestVM._metadata_new_value,
                     entries[TestVM._metadata_key])
def get_ovdc_k8s_provider_metadata(sysadmin_client: vcd_client.Client,
                                   org_name=None, ovdc_name=None,
                                   ovdc_id=None, include_credentials=False,
                                   include_nsxt_info=False):
    """Get k8s provider metadata for an org VDC.

    :param vcd_client.Client sysadmin_client: client logged in to the
        system org; an error is raised for non-system clients.
    :param str org_name:
    :param str ovdc_name:
    :param str ovdc_id:
    :param bool include_credentials: if True, include PKS account
        credentials in the result (PKS-backed VDCs only).
    :param bool include_nsxt_info: if True, include NSX-T information in
        the result (PKS-backed VDCs only).

    :return: Dictionary with k8s provider metadata

    :rtype: dict
    """
    # Fail fast if the caller did not supply a system-org client.
    vcd_utils.raise_error_if_user_not_from_system_org(sysadmin_client)
    ovdc = vcd_utils.get_vdc(client=sysadmin_client, vdc_name=ovdc_name,
                             vdc_id=ovdc_id, org_name=org_name,
                             is_admin_operation=True)
    all_metadata = pyvcd_utils.metadata_to_dict(ovdc.get_all_metadata())
    k8s_provider = all_metadata.get(K8S_PROVIDER_KEY, K8sProvider.NONE)
    result = {K8S_PROVIDER_KEY: k8s_provider}
    if k8s_provider == K8sProvider.PKS:
        result.update({k: all_metadata[k] for k in PksCache.get_pks_keys()})  # noqa: E501
        # PKS plans are stored as a comma separated string; expose as list.
        result[PKS_PLANS_KEY] = result[PKS_PLANS_KEY].split(',')

        # Get the credentials from PksCache
        if include_credentials or include_nsxt_info:
            pks_cache = server_utils.get_pks_cache()
            pvdc_info = pks_cache.get_pvdc_info(
                vcd_utils.get_pvdc_id(sysadmin_client, ovdc))
            if include_credentials:
                # noqa: E501 TODO in case only ovdc_id is provided, we need a way to get org_name
                pks_info = pks_cache.get_pks_account_info(
                    org_name, pvdc_info.vc)
                result.update(pks_info.credentials._asdict())
            if include_nsxt_info:
                nsxt_info = pks_cache.get_nsxt_info(pvdc_info.vc)
                result['nsxt'] = nsxt_info
    return result
def read_all_tkgm_template(client, org_name, catalog_name,
                           logger=NULL_LOGGER,
                           msg_update_callback=utils.NullPrinter()):
    """Read all TKGm templates from a catalog.

    Scans every item of the catalog and collects those whose metadata
    marks them as TKGm templates (kind == TKG_M). If a catalog item was
    renamed directly in VCD, its `name` metadata field is re-stamped to
    match the item's current name.

    :param pyvcloud.vcd.client.Client client:
    :param str org_name: name of the org hosting the catalog.
    :param str catalog_name:
    :param logging.Logger logger:
    :param utils.NullPrinter msg_update_callback:

    :return: list of dicts, one per TKGm template, keyed by
        TKGmTemplateKey values.

    :rtype: list
    """
    org = vcd_utils.get_org(client, org_name=org_name)
    catalog_item_names = [
        entry['name'] for entry in org.list_catalog_items(catalog_name)
    ]
    templates = []
    for item_name in catalog_item_names:
        md = org.get_all_metadata_from_catalog_item(catalog_name=catalog_name,
                                                    item_name=item_name)
        metadata_dict = metadata_to_dict(md)
        filtered_dict = {}
        # NOTE(review): this lookup uses the enum member itself as the key
        # while the loop below uses entry.value — presumably TKGmTemplateKey
        # is a str-valued enum so both hash alike; confirm.
        kind = metadata_dict.get(server_constants.TKGmTemplateKey.KIND)
        if kind == shared_constants.ClusterEntityKind.TKG_M.value:
            msg = f"Found catalog item `{item_name}`."
            logger.debug(msg)
            msg_update_callback.general(msg)
            for entry in server_constants.TKGmTemplateKey:
                key = entry.value
                value = metadata_dict.get(key, '')
                # If catalog item has been renamed directly in VCD,
                # update the `name` metadata field.
                if entry == server_constants.TKGmTemplateKey.NAME and value != item_name:  # noqa: E501
                    msg = f"Template `{value}` has been " \
                          "renamed outside of CSE. Attempting to fix."
                    logger.debug(msg)
                    msg_update_callback.general(msg)
                    org.set_metadata_on_catalog_item(catalog_name=catalog_name,
                                                     item_name=item_name,
                                                     key=key,
                                                     value=item_name)
                    value = item_name
                filtered_dict[key] = value
            msg = f"Template `{item_name}` successfully loaded."
            logger.debug(msg)
            msg_update_callback.general(msg)
            templates.append(filtered_dict)
    return templates
def get_all_k8s_local_template_definition(client, catalog_name, org=None,
                                          org_name=None,
                                          logger_debug=logger.NULL_LOGGER):
    """Fetch all CSE k8s templates in a catalog.

    A CSE k8s template is a catalog item that has all the necessary metadata
    stamped onto it. If only partial metadata is present on a catalog item,
    that catalog item will be disqualified from the result.

    :param pyvcloud.vcd.Client client: A sys admin client to be used to
        retrieve metadata off the catalog items.
    :param str catalog_name: Name of the catalog where the template resides.
    :param pyvcloud.vcd.Org org: Org object which hosts the catalog.
    :param str org_name: Name of the org that is hosting the catalog. Can be
        provided in lieu of param org, however param org takes precedence.
    :param logging.Logger logger_debug: logger used to record skipped
        catalog items.

    :return: list of dictionaries containing template data

    :rtype: list of dicts
    """
    if not org:
        org = get_org(client, org_name=org_name)
    catalog_item_names = [
        entry['name'] for entry in org.list_catalog_items(catalog_name)
    ]
    templates = []
    for item_name in catalog_item_names:
        md = org.get_all_metadata_from_catalog_item(catalog_name=catalog_name,
                                                    item_name=item_name)
        metadata_dict = metadata_to_dict(md)

        # if catalog item doesn't have all the required metadata keys,
        # CSE should not recognize it as a template
        expected_metadata_keys = \
            set([entry.value for entry in LocalTemplateKey])
        missing_metadata_keys = expected_metadata_keys - metadata_dict.keys()
        num_missing_metadata_keys = len(missing_metadata_keys)
        if num_missing_metadata_keys == len(expected_metadata_keys):
            # This catalog item has no CSE related metadata, so skip it.
            continue
        if num_missing_metadata_keys > 0:
            # This catalog item has partial CSE metadata, so skip it but also
            # log relevant information.
            msg = f"Catalog item '{item_name}' missing " \
                  f"{num_missing_metadata_keys} metadata: " \
                  f"{missing_metadata_keys}"  # noqa: F841
            logger_debug.debug(msg)
            continue

        # non-string metadata is written to the dictionary as a string
        # when 'upgrade_from' metadata is empty, vcd returns it as: "['']"
        # when 'upgrade_from' metadata is not empty, vcd returns it as an array
        # coerce "['']" to the more usable empty array []
        if isinstance(metadata_dict[LocalTemplateKey.UPGRADE_FROM], str):
            metadata_dict[LocalTemplateKey.UPGRADE_FROM] = ast.literal_eval(
                metadata_dict[LocalTemplateKey.UPGRADE_FROM])  # noqa: E501
        if metadata_dict[LocalTemplateKey.UPGRADE_FROM] == ['']:
            metadata_dict[LocalTemplateKey.UPGRADE_FROM] = []

        templates.append(metadata_dict)

    return templates
def get_ovdc_container_provider_metadata(self, ovdc_name=None, ovdc_id=None, org_name=None, credentials_required=False, nsxt_info_required=False): """Get metadata of given ovdc, pertaining to the container provider. :param str ovdc_name: name of the ovdc :param str ovdc_id: UUID of ovdc :param str org_name: specific org to use if @org is not given. If None, uses currently logged-in org from @client. :param bool credentials_required: Decides if output metadata should include credentials or not. :return: metadata of the ovdc :rtype: dict :raises EntityNotFoundException: if the ovdc could not be found. """ # Get pvdc and pks information from oVdc metadata client = None try: client = get_sys_admin_client() ovdc = get_vdc(client=client, vdc_name=ovdc_name, vdc_id=ovdc_id, org_name=org_name, is_admin_operation=True) all_metadata = metadata_to_dict(ovdc.get_all_metadata()) if K8S_PROVIDER_KEY not in all_metadata: container_provider = K8sProviders.NONE else: container_provider = all_metadata[K8S_PROVIDER_KEY] ctr_prov_details = {} if container_provider == K8sProviders.PKS: # Filter out container provider metadata into a dict ctr_prov_details = { metadata_key: all_metadata[metadata_key] for metadata_key in PksCache.get_pks_keys() } # Get the credentials from PksCache pvdc_id = get_pvdc_id(ovdc) pks_cache = get_pks_cache() pvdc_info = pks_cache.get_pvdc_info(pvdc_id) ctr_prov_details[PKS_PLANS_KEY] = \ ctr_prov_details[PKS_PLANS_KEY].split(',') if credentials_required: pks_info = pks_cache.get_pks_account_info( org_name, pvdc_info.vc) ctr_prov_details.update(pks_info.credentials._asdict()) if nsxt_info_required: nsxt_info = pks_cache.get_nsxt_info(pvdc_info.vc) ctr_prov_details['nsxt'] = nsxt_info ctr_prov_details[K8S_PROVIDER_KEY] = container_provider return ctr_prov_details finally: if client: client.logout()
def get_all_k8s_local_template_definition(
        client, catalog_name, org=None, org_name=None, legacy_mode=False,
        logger_debug=logger.NULL_LOGGER,
        msg_update_callback=utils.NullPrinter()):  # noqa: E501
    """Fetch all CSE k8s templates in a catalog.

    A CSE k8s template is a catalog item that has all the necessary metadata
    stamped onto it. If only partial metadata is present on a catalog item,
    that catalog item will be disqualified from the result.

    :param pyvcloud.vcd.Client client: A sys admin client to be used to
        retrieve metadata off the catalog items.
    :param str catalog_name: Name of the catalog where the template resides.
    :param pyvcloud.vcd.Org org: Org object which hosts the catalog.
    :param str org_name: Name of the org that is hosting the catalog. Can be
        provided in lieu of param org, however param org takes precedence.
    :param bool legacy_mode: True, if CSE is running in legacy mode
    :param logging.Logger logger_debug:
    :param utils.NullPrinter msg_update_callback:

    :return: list of dictionaries containing template data

    :rtype: list of dicts
    """
    if not org:
        org = get_org(client, org_name=org_name)
    catalog_item_names = [
        entry['name'] for entry in org.list_catalog_items(catalog_name)
    ]
    templates = []
    # Select the right Key enum based on legacy_mode flag
    localTemplateKey = LocalTemplateKey
    if legacy_mode:
        # if template is loaded in legacy mode, make sure to avoid the keys
        # min_cse_version and max_cse_version
        localTemplateKey = LegacyLocalTemplateKey
    for item_name in catalog_item_names:
        md = org.get_all_metadata_from_catalog_item(catalog_name=catalog_name,
                                                    item_name=item_name)
        metadata_dict = metadata_to_dict(md)

        # if catalog item doesn't have all the required metadata keys,
        # CSE should not recognize it as a template
        expected_metadata_keys = \
            set([entry.value for entry in localTemplateKey])
        missing_metadata_keys = expected_metadata_keys - metadata_dict.keys()
        num_missing_metadata_keys = len(missing_metadata_keys)
        if num_missing_metadata_keys == len(expected_metadata_keys):
            # This catalog item has no CSE related metadata, so skip it.
            continue
        if num_missing_metadata_keys > 0:
            # This catalog item has partial CSE metadata, so skip it but also
            # log relevant information.
            msg = f"Catalog item '{item_name}' missing " \
                  f"{num_missing_metadata_keys} metadata: " \
                  f"{missing_metadata_keys}"  # noqa: F841
            logger_debug.debug(msg)
            msg_update_callback.info(msg)
            continue
        if not legacy_mode:
            # Do not load the template in non-legacy_mode if
            # min_cse_version and max_cse_version are not present
            # in the metadata_dict
            curr_cse_version = server_utils.get_installed_cse_version()
            valid_cse_versions = semantic_version.SimpleSpec(
                f">={metadata_dict[localTemplateKey.MIN_CSE_VERSION]},"
                f"<={metadata_dict[localTemplateKey.MAX_CSE_VERSION]}")
            if not valid_cse_versions.match(curr_cse_version):
                # Template exists but is outside this CSE version's
                # supported range; report and skip.
                template_name = \
                    metadata_dict.get(localTemplateKey.NAME, "Unknown")
                template_revision = \
                    metadata_dict.get(localTemplateKey.REVISION, "Unknown")
                msg = f"Template '{template_name}' at " \
                      f"revision '{template_revision}' exists but is " \
                      f"not valid for CSE {curr_cse_version}"
                logger_debug.debug(msg)
                msg_update_callback.info(msg)
                continue

        # non-string metadata is written to the dictionary as a string
        # when 'upgrade_from' metadata is empty, vcd returns it as: "['']"
        # when 'upgrade_from' metadata is not empty, vcd returns it as an array
        # coerce "['']" to the more usable empty array []
        if isinstance(metadata_dict[localTemplateKey.UPGRADE_FROM], str):
            metadata_dict[localTemplateKey.UPGRADE_FROM] = \
                ast.literal_eval(metadata_dict[localTemplateKey.UPGRADE_FROM])
        if metadata_dict[localTemplateKey.UPGRADE_FROM] == ['']:
            metadata_dict[localTemplateKey.UPGRADE_FROM] = []

        templates.append(metadata_dict)

    return templates
def test_0380_get_meadata(self):
    """Retrieve metadata from the first VM and assert it is non-empty."""
    # NOTE(review): method name has a typo ('meadata'); kept as-is to
    # preserve the test id in existing reports and run configurations.
    first_vm = VM(TestVM._sys_admin_client,
                  href=TestVM._test_vapp_first_vm_href)
    metadata_entries = metadata_to_dict(first_vm.get_metadata())
    self.assertTrue(len(metadata_entries) > 0)
def get_all_k8s_local_template_definition(client, catalog_name, org=None,
                                          org_name=None):
    """Fetch all templates in a catalog.

    A template is a catalog item that has the LocalTemplateKey.NAME and
    LocalTemplateKey.REVISION metadata keys.

    :param pyvcloud.vcd.Client client: A sys admin client to be used to
        retrieve metadata off the catalog items.
    :param str catalog_name: Name of the catalog where the template resides.
    :param pyvcloud.vcd.Org org: Org object which hosts the catalog.
    :param str org_name: Name of the org that is hosting the catalog. Can be
        provided in lieu param org, however param org takes precedence.

    :return: list of dictionaries containing template data

    :rtype: list of dicts
    """
    if not org:
        org = get_org(client, org_name=org_name)
    catalog_item_names = [
        entry['name'] for entry in org.list_catalog_items(catalog_name)
    ]
    templates = []
    for item_name in catalog_item_names:
        md = org.get_all_metadata_from_catalog_item(catalog_name=catalog_name,
                                                    item_name=item_name)
        metadata_dict = metadata_to_dict(md)

        # make sure all pre-2.6 template metadata exists on catalog item
        old_metadata_keys = {
            LocalTemplateKey.CATALOG_ITEM_NAME,
            LocalTemplateKey.COMPUTE_POLICY,
            LocalTemplateKey.CPU,
            LocalTemplateKey.DEPRECATED,
            LocalTemplateKey.DESCRIPTION,
            LocalTemplateKey.MEMORY,
            LocalTemplateKey.NAME,
            LocalTemplateKey.REVISION,
        }
        # if catalog item doesn't have the old metadata keys, CSE should
        # not recognize it as a template
        if not metadata_dict.keys() >= old_metadata_keys:
            continue

        # non-string metadata is written to the dictionary as a string
        # 'upgrade_from' should be converted to an array if it is a string
        # 'upgrade_from' should be converted to [] if it is ['']
        if LocalTemplateKey.UPGRADE_FROM in metadata_dict:
            if isinstance(metadata_dict[LocalTemplateKey.UPGRADE_FROM], str):  # noqa: E501
                metadata_dict[
                    LocalTemplateKey.UPGRADE_FROM] = ast.literal_eval(
                        metadata_dict[
                            LocalTemplateKey.UPGRADE_FROM])  # noqa: E501
            if metadata_dict[LocalTemplateKey.UPGRADE_FROM] == ['']:
                metadata_dict[LocalTemplateKey.UPGRADE_FROM] = []

        # if 2.5.1+ template metadata is missing, add them to the dict
        template_name = metadata_dict[LocalTemplateKey.NAME]
        template_revision = str(
            metadata_dict.get(LocalTemplateKey.REVISION, '0'))  # noqa: E501
        k8s_version, docker_version = get_k8s_and_docker_versions(
            template_name, template_revision=template_revision)  # noqa: E501
        # NOTE(review): assumes template names look like
        # <os>_<k8s-part>_<cni>-<cni-version> so tokens[0] is the OS and
        # tokens[2] is the cni segment — TODO confirm against the
        # template naming convention.
        tokens = template_name.split('_')
        if LocalTemplateKey.OS not in metadata_dict:
            metadata_dict[LocalTemplateKey.OS] = tokens[0]
        if LocalTemplateKey.DOCKER_VERSION not in metadata_dict:
            metadata_dict[LocalTemplateKey.DOCKER_VERSION] = docker_version
        if LocalTemplateKey.KUBERNETES not in metadata_dict:
            metadata_dict[LocalTemplateKey.KUBERNETES] = 'upstream'
        if LocalTemplateKey.KUBERNETES_VERSION not in metadata_dict:
            metadata_dict[LocalTemplateKey.KUBERNETES_VERSION] = k8s_version
        if LocalTemplateKey.CNI not in metadata_dict:
            metadata_dict[LocalTemplateKey.CNI] = tokens[2].split('-')[0]
        if LocalTemplateKey.CNI_VERSION not in metadata_dict:
            metadata_dict[LocalTemplateKey.CNI_VERSION] = tokens[2].split('-')[
                1]  # noqa: E501
        if LocalTemplateKey.UPGRADE_FROM not in metadata_dict:
            metadata_dict[LocalTemplateKey.UPGRADE_FROM] = []

        # final check that all keys in LocalTemplateKey exist in the template
        # should never fail, but useful to double check dev work
        missing_metadata = set(LocalTemplateKey) - metadata_dict.keys()
        num_missing_metadata = len(missing_metadata)
        if num_missing_metadata > 0:
            raise ValueError(f"Template '{template_name}' missing "
                             f"{num_missing_metadata} metadata: "
                             f"{missing_metadata}")

        templates.append(metadata_dict)

    return templates
def _remove_metadata(self, ovdc, keys=None):
    """Remove the given metadata keys from the ovdc's SYSTEM domain.

    Keys not currently present on the ovdc are silently skipped.

    :param pyvcloud.vcd.vdc.VDC ovdc: org VDC to strip metadata from.
    :param list keys: metadata key names to remove; defaults to no keys.
    """
    # BUG FIX: the default was keys=[] — a mutable default argument is
    # shared across calls; use None and normalize instead.
    if keys is None:
        keys = []
    metadata = utils.metadata_to_dict(ovdc.get_all_metadata())
    for k in keys:
        if k in metadata:
            ovdc.remove_metadata(k, domain=MetadataDomain.SYSTEM)
def convert_cluster(ctx, config_file_name, skip_config_decryption,
                    cluster_name, admin_password, org_name, vdc_name,
                    skip_wait_for_gc):
    """Convert pre-CSE-2.5 clusters to the current metadata layout.

    Re-stamps cluster vApp metadata (template name/revision, os, docker,
    kubernetes, cni versions) based on hard-coded values from github
    history, optionally resets VM admin passwords, and waits for guest
    customization to finish unless skip_wait_for_gc is set.
    """
    if skip_config_decryption:
        decryption_password = None
    else:
        decryption_password = os.getenv('CSE_CONFIG_PASSWORD') or prompt_text(
            PASSWORD_FOR_CONFIG_DECRYPTION_MSG, color='green',
            hide_input=True)

    try:
        check_python_version()
    except Exception as err:
        click.secho(str(err), fg='red')
        sys.exit(1)

    client = None
    try:
        console_message_printer = ConsoleMessagePrinter()
        config = get_validated_config(
            config_file_name, skip_config_decryption=skip_config_decryption,
            decryption_password=decryption_password,
            msg_update_callback=console_message_printer)

        log_filename = None
        log_wire = str_to_bool(config['service'].get('log_wire'))
        if log_wire:
            log_filename = 'cluster_convert_wire.log'

        client = Client(config['vcd']['host'],
                        api_version=config['vcd']['api_version'],
                        verify_ssl_certs=config['vcd']['verify'],
                        log_file=log_filename,
                        log_requests=log_wire,
                        log_headers=log_wire,
                        log_bodies=log_wire)
        credentials = BasicLoginCredentials(config['vcd']['username'],
                                            SYSTEM_ORG_NAME,
                                            config['vcd']['password'])
        client.set_credentials(credentials)
        msg = f"Connected to vCD as system administrator: " \
              f"{config['vcd']['host']}:{config['vcd']['port']}"
        console_message_printer.general(msg)

        cluster_records = get_all_clusters(client=client,
                                           cluster_name=cluster_name,
                                           org_name=org_name,
                                           ovdc_name=vdc_name)
        if len(cluster_records) == 0:
            console_message_printer.info("No clusters were found.")
            return

        vms = []
        for cluster in cluster_records:
            console_message_printer.info(
                f"Processing cluster '{cluster['name']}'.")
            vapp_href = cluster['vapp_href']
            vapp = VApp(client, href=vapp_href)

            # this step removes the old 'cse.template' metadata and adds
            # cse.template.name and cse.template.revision metadata
            # using hard-coded values taken from github history
            console_message_printer.info("Processing metadata of cluster.")
            metadata_dict = metadata_to_dict(vapp.get_metadata())
            old_template_name = metadata_dict.get(
                ClusterMetadataKey.BACKWARD_COMPATIBILE_TEMPLATE_NAME)  # noqa: E501
            new_template_name = None
            cse_version = metadata_dict.get(ClusterMetadataKey.CSE_VERSION)
            if old_template_name:
                console_message_printer.info(
                    "Determining k8s version on cluster.")
                # BUG FIX: the original used `cse_version in ('1.0.0')` —
                # ('1.0.0') is just a str, so `in` did a substring test.
                # One-element tuples need a trailing comma.
                if 'photon' in old_template_name:
                    new_template_name = 'photon-v2'
                    if cse_version in ('1.0.0',):
                        new_template_name += '_k8s-1.8_weave-2.0.5'
                    elif cse_version in ('1.1.0', '1.2.0', '1.2.1', '1.2.2', '1.2.3', '1.2.4'):  # noqa: E501
                        new_template_name += '_k8s-1.9_weave-2.3.0'
                    elif cse_version in ('1.2.5', '1.2.6', '1.2.7',):  # noqa: E501
                        new_template_name += '_k8s-1.10_weave-2.3.0'
                    elif cse_version in ('2.0.0',):
                        new_template_name += '_k8s-1.12_weave-2.3.0'
                elif 'ubuntu' in old_template_name:
                    new_template_name = 'ubuntu-16.04'
                    if cse_version in ('1.0.0',):
                        new_template_name += '_k8s-1.9_weave-2.1.3'
                    elif cse_version in ('1.1.0', '1.2.0', '1.2.1', '1.2.2', '1.2.3', '1.2.4', '1.2.5', '1.2.6', '1.2.7'):  # noqa: E501
                        new_template_name += '_k8s-1.10_weave-2.3.0'
                    elif cse_version in ('2.0.0',):
                        new_template_name += '_k8s-1.13_weave-2.3.0'

            if new_template_name:
                console_message_printer.info("Updating metadata of cluster.")
                task = vapp.remove_metadata(
                    ClusterMetadataKey.BACKWARD_COMPATIBILE_TEMPLATE_NAME)  # noqa: E501
                client.get_task_monitor().wait_for_success(task)
                new_metadata_to_add = {
                    ClusterMetadataKey.TEMPLATE_NAME: new_template_name,
                    ClusterMetadataKey.TEMPLATE_REVISION: 0
                }
                task = vapp.set_multiple_metadata(new_metadata_to_add)
                client.get_task_monitor().wait_for_success(task)

            # this step uses hard-coded data from the newly updated
            # cse.template.name and cse.template.revision metadata fields as
            # well as github history to add [cse.os, cse.docker.version,
            # cse.kubernetes, cse.kubernetes.version, cse.cni,
            # cse.cni.version] to the clusters
            vapp.reload()
            metadata_dict = metadata_to_dict(vapp.get_metadata())
            template_name = metadata_dict.get(ClusterMetadataKey.TEMPLATE_NAME)
            template_revision = str(
                metadata_dict.get(ClusterMetadataKey.TEMPLATE_REVISION, '0'))  # noqa: E501

            if template_name:
                k8s_version, docker_version = get_k8s_and_docker_versions(
                    template_name, template_revision=template_revision,
                    cse_version=cse_version)  # noqa: E501
                tokens = template_name.split('_')
                new_metadata = {
                    ClusterMetadataKey.OS: tokens[0],
                    ClusterMetadataKey.DOCKER_VERSION: docker_version,
                    ClusterMetadataKey.KUBERNETES: 'upstream',
                    ClusterMetadataKey.KUBERNETES_VERSION: k8s_version,
                    ClusterMetadataKey.CNI: tokens[2].split('-')[0],
                    ClusterMetadataKey.CNI_VERSION: tokens[2].split('-')[1],
                }
                task = vapp.set_multiple_metadata(new_metadata)
                client.get_task_monitor().wait_for_success(task)

            console_message_printer.general(
                "Finished processing metadata of cluster.")

            # A VM without a retrievable admin password needs a reset.
            reset_admin_pw = False
            vm_resources = vapp.get_all_vms()
            for vm_resource in vm_resources:
                try:
                    vapp.get_admin_password(vm_resource.get('name'))
                except EntityNotFoundException:
                    reset_admin_pw = True
                    break

            if reset_admin_pw:
                try:
                    console_message_printer.info(
                        f"Undeploying the vApp '{cluster['name']}'")
                    task = vapp.undeploy()
                    client.get_task_monitor().wait_for_success(task)
                    console_message_printer.general(
                        "Successfully undeployed the vApp.")
                except Exception as err:
                    # best effort: vApp may already be undeployed
                    console_message_printer.error(str(err))

                for vm_resource in vm_resources:
                    console_message_printer.info(
                        f"Processing vm '{vm_resource.get('name')}'.")
                    vm = VM(client, href=vm_resource.get('href'))
                    vms.append(vm)

                    console_message_printer.info("Updating vm admin password")
                    task = vm.update_guest_customization_section(
                        enabled=True,
                        admin_password_enabled=True,
                        admin_password_auto=not admin_password,
                        admin_password=admin_password,
                    )
                    client.get_task_monitor().wait_for_success(task)
                    console_message_printer.general("Successfully updated vm")

                    console_message_printer.info("Deploying vm.")
                    task = vm.power_on_and_force_recustomization()
                    client.get_task_monitor().wait_for_success(task)
                    console_message_printer.general("Successfully deployed vm")

                console_message_printer.info("Deploying cluster")
                task = vapp.deploy(power_on=True)
                client.get_task_monitor().wait_for_success(task)
                console_message_printer.general(
                    "Successfully deployed cluster")  # noqa: E501

            console_message_printer.general(
                f"Successfully processed cluster '{cluster['name']}'")

        if skip_wait_for_gc:
            return

        # Poll guest customization status until every touched VM is done.
        while True:
            to_remove = []
            for vm in vms:
                status = vm.get_guest_customization_status()
                if status != 'GC_PENDING':
                    to_remove.append(vm)
            for vm in to_remove:
                vms.remove(vm)
            console_message_printer.info(
                f"Waiting on guest customization to finish on {len(vms)} vms.")
            if not len(vms) == 0:
                time.sleep(5)
            else:
                break
    except cryptography.fernet.InvalidToken:
        click.secho(CONFIG_DECRYPTION_ERROR_MSG, fg='red')
    except Exception as err:
        click.secho(str(err), fg='red')
    finally:
        if client:
            client.logout()
def convert_cluster(ctx, config_file_name, cluster_name, password, org_name, vdc_name, skip_wait_for_gc): try: check_python_version() except Exception as err: click.secho(str(err), fg='red') sys.exit(1) client = None try: console_message_printer = ConsoleMessagePrinter() config = get_validated_config( config_file_name, msg_update_callback=console_message_printer) log_filename = None log_wire = str_to_bool(config['service'].get('log_wire')) if log_wire: log_filename = 'cluster_convert_wire.log' client = Client(config['vcd']['host'], api_version=config['vcd']['api_version'], verify_ssl_certs=config['vcd']['verify'], log_file=log_filename, log_requests=log_wire, log_headers=log_wire, log_bodies=log_wire) credentials = BasicLoginCredentials(config['vcd']['username'], SYSTEM_ORG_NAME, config['vcd']['password']) client.set_credentials(credentials) msg = f"Connected to vCD as system administrator: " \ f"{config['vcd']['host']}:{config['vcd']['port']}" console_message_printer.general(msg) cluster_records = get_all_clusters(client=client, cluster_name=cluster_name, org_name=org_name, ovdc_name=vdc_name) if len(cluster_records) == 0: console_message_printer.info(f"No clusters were found.") return vms = [] for cluster in cluster_records: console_message_printer.info( f"Processing cluster '{cluster['name']}'.") vapp_href = cluster['vapp_href'] vapp = VApp(client, href=vapp_href) console_message_printer.info("Processing metadata of cluster.") metadata = metadata_to_dict(vapp.get_metadata()) old_template_name = None new_template_name = None if ClusterMetadataKey.BACKWARD_COMPATIBILE_TEMPLATE_NAME in metadata: # noqa: E501 old_template_name = metadata.pop(ClusterMetadataKey.BACKWARD_COMPATIBILE_TEMPLATE_NAME) # noqa: E501 version = metadata.get(ClusterMetadataKey.CSE_VERSION) if old_template_name: console_message_printer.info( "Determining k8s version on cluster.") if 'photon' in old_template_name: new_template_name = 'photon-v2' if '1.0.0' in version: new_template_name += 
'_k8s-1.8_weave-2.0.5' elif any(ver in version for ver in ('1.1.0', '1.2.0', '1.2.1', '1.2.2', '1.2.3', '1.2.4',)): # noqa: E501 new_template_name += '_k8s-1.9_weave-2.3.0' elif any(ver in version for ver in ('1.2.5', '1.2.6', '1.2.7',)): # noqa: E501 new_template_name += '_k8s-1.10_weave-2.3.0' elif '2.0.0' in version: new_template_name += '_k8s-1.12_weave-2.3.0' elif 'ubuntu' in old_template_name: new_template_name = 'ubuntu-16.04' if '1.0.0' in version: new_template_name += '_k8s-1.9_weave-2.1.3' elif any(ver in version for ver in ('1.1.0', '1.2.0', '1.2.1', '1.2.2', '1.2.3', '1.2.4', '1.2.5', '1.2.6', '1.2.7')): # noqa: E501 new_template_name += '_k8s-1.10_weave-2.3.0' elif '2.0.0' in version: new_template_name += '_k8s-1.13_weave-2.3.0' if new_template_name: console_message_printer.info("Updating metadata of cluster.") task = vapp.remove_metadata(ClusterMetadataKey.BACKWARD_COMPATIBILE_TEMPLATE_NAME) # noqa: E501 client.get_task_monitor().wait_for_success(task) new_metadata_to_add = { ClusterMetadataKey.TEMPLATE_NAME: new_template_name, ClusterMetadataKey.TEMPLATE_REVISION: 0 } task = vapp.set_multiple_metadata(new_metadata_to_add) client.get_task_monitor().wait_for_success(task) console_message_printer.general( "Finished processing metadata of cluster.") try: console_message_printer.info( f"Undeploying the vApp '{cluster['name']}'") task = vapp.undeploy() client.get_task_monitor().wait_for_success(task) console_message_printer.general( "Successfully undeployed the vApp.") except Exception as err: console_message_printer.error(str(err)) vm_resources = vapp.get_all_vms() for vm_resource in vm_resources: console_message_printer.info( f"Processing vm '{vm_resource.get('name')}'.") vm = VM(client, href=vm_resource.get('href')) vms.append(vm) console_message_printer.info("Updating vm admin password.") task = vm.update_guest_customization_section( enabled=True, admin_password_enabled=True, admin_password_auto=not password, admin_password=password, ) 
client.get_task_monitor().wait_for_success(task) console_message_printer.general("Successfully updated vm .") console_message_printer.info("Deploying vm.") task = vm.power_on_and_force_recustomization() client.get_task_monitor().wait_for_success(task) console_message_printer.general("Successfully deployed vm.") console_message_printer.info("Deploying cluster") task = vapp.deploy(power_on=True) client.get_task_monitor().wait_for_success(task) console_message_printer.general("Successfully deployed cluster.") console_message_printer.general( f"Successfully processed cluster '{cluster['name']}'.") if skip_wait_for_gc: return while True: for vm in vms: status = vm.get_guest_customization_status() if status != 'GC_PENDING': vms.remove(vm) console_message_printer.info( f"Waiting on guest customization to finish on {len(vms)} vms.") if not len(vms) == 0: time.sleep(5) else: break except Exception as err: click.secho(str(err), fg='red') finally: if client: client.logout()