def test_1002_deploy_vm(self):
    """Deploy the configured VM and assert the deploy task succeeds."""
    cfg = self.config['vcd']
    org = Org(self.client, resource=self.client.get_org())
    vdc_record = org.get_vdc(cfg['vdc'])
    vdc = VDC(self.client, href=vdc_record.get('href'))
    assert cfg['vdc'] == vdc.get_resource().get('name')
    vapp_resource = vdc.get_vapp(cfg['vapp'])
    assert vapp_resource.get('name') == cfg['vapp']
    vapp = VApp(self.client, resource=vapp_resource)
    vm = VM(self.client, resource=vapp.get_vm(cfg['vm']))
    result = vm.deploy()
    # Wait until the deploy task reaches a terminal state, then require
    # that the terminal state is SUCCESS.
    task = self.client.get_task_monitor().wait_for_status(
        task=result,
        timeout=60,
        poll_frequency=2,
        fail_on_statuses=None,
        expected_target_statuses=[TaskStatus.SUCCESS, TaskStatus.ABORTED,
                                  TaskStatus.ERROR, TaskStatus.CANCELED],
        callback=None)
    assert task.get('status') == TaskStatus.SUCCESS.value
def add_vm(ctx, name, source_vapp, source_vm, catalog, target_vm, hostname,
           network, ip_allocation_mode, storage_profile, password_auto,
           accept_all_eulas):
    """CLI handler: clone a VM from a vApp or a catalog item into vApp *name*.

    The clone source is the vApp named *source_vapp* when *catalog* is None,
    otherwise the catalog item of the same name inside *catalog*.
    """
    try:
        restore_session(ctx, vdc_required=True)
        client = ctx.obj['client']
        org = Org(client, ctx.obj['profiles'].get('org_href'))
        vdc = VDC(client, href=ctx.obj['profiles'].get('vdc_href'))
        # Resolve the source vApp either directly from the VDC or via a
        # catalog item's backing entity.
        if catalog is None:
            source_vapp_resource = vdc.get_vapp(source_vapp)
        else:
            catalog_item = org.get_catalog_item(catalog, source_vapp)
            source_vapp_resource = client.get_resource(
                catalog_item.Entity.get('href'))
        assert source_vapp_resource is not None
        vapp = VApp(client, resource=vdc.get_vapp(name))
        # Build the add_vms() spec only from options the user supplied.
        spec = {'source_vm_name': source_vm, 'vapp': source_vapp_resource}
        if target_vm is not None:
            spec['target_vm_name'] = target_vm
        if hostname is not None:
            spec['hostname'] = hostname
        if network is not None:
            spec['network'] = network
            spec['ip_allocation_mode'] = ip_allocation_mode
        if storage_profile is not None:
            spec['storage_profile'] = vdc.get_storage_profile(storage_profile)
        if password_auto is not None:
            spec['password_auto'] = password_auto
        task = vapp.add_vms([spec], all_eulas_accepted=accept_all_eulas)
        stdout(task, ctx)
    except Exception as e:
        stderr(e, ctx)
def get_cluster_info(self, data):
    """Return cluster metadata together with per-node details.

    Common broker function that validates *data* for the 'cluster info'
    operation and returns cluster/node metadata as a dictionary.

    Required data: cluster_name
    Optional data and default values: org_name=None, ovdc_name=None
    """
    utils.ensure_keys_in_dict([RequestKey.CLUSTER_NAME], data,
                              dict_name='data')
    # Apply defaults, then let caller-supplied values win.
    validated = {RequestKey.ORG_NAME: None, RequestKey.OVDC_NAME: None}
    validated.update(data)
    cluster = get_cluster(self.tenant_client,
                          validated[RequestKey.CLUSTER_NAME],
                          org_name=validated[RequestKey.ORG_NAME],
                          ovdc_name=validated[RequestKey.OVDC_NAME])
    cluster[K8S_PROVIDER_KEY] = K8sProvider.NATIVE
    vapp = VApp(self.tenant_client, href=cluster['vapp_href'])
    # Bucket the vApp's VMs into master/worker/nfs lists by name prefix.
    for vm in vapp.get_all_vms():
        name = vm.get('name')
        node_info = {'name': name, 'ipAddress': ''}
        try:
            node_info['ipAddress'] = vapp.get_primary_ip(name)
        except Exception:
            # Best effort: a node without a reachable IP is still listed.
            LOGGER.debug(f"Unable to get ip address of node {name}")
        if name.startswith(NodeType.MASTER):
            cluster.get('master_nodes').append(node_info)
        elif name.startswith(NodeType.WORKER):
            cluster.get('nodes').append(node_info)
        elif name.startswith(NodeType.NFS):
            cluster.get('nfs_nodes').append(node_info)
    return cluster
def deploy(ctx, name, vm_names, power_on, force_customization):
    """CLI handler: deploy a vApp, or specific VMs inside it.

    With no *vm_names* the whole vApp is deployed; otherwise each named VM
    is deployed individually.
    """
    try:
        restore_session(ctx, vdc_required=True)
        client = ctx.obj['client']
        vdc_href = ctx.obj['profiles'].get('vdc_href')
        vdc = VDC(client, href=vdc_href)
        vapp_resource = vdc.get_vapp(name)
        vapp = VApp(client, resource=vapp_resource)
        # NOTE(review): these two overrides discard whatever value the user
        # passed — any non-None power_on becomes False and any non-None
        # force_customization becomes True. Whether that is intended depends
        # on how the click options are declared (not visible here); the
        # conditions look like they may have been meant to read `is None`.
        # TODO: confirm against the option declarations.
        if power_on is not None:
            power_on = False
        if force_customization is not None:
            force_customization = True
        if len(vm_names) == 0:
            # No VM names given: deploy the vApp as a whole.
            task = vapp.deploy(power_on=power_on)
            stdout(task, ctx)
        else:
            # Deploy each requested VM; one task is reported per VM.
            for vm_name in vm_names:
                vm = VM(client, href=vapp.get_vm(vm_name).get('href'))
                vm.reload()
                task = vm.deploy(power_on=power_on,
                                 force_customization=force_customization)
                stdout(task, ctx)
    except Exception as e:
        stderr(e, ctx)
def test_0002_modify_memory(self):
    """Resize the VM's memory and verify the new value is reported back."""
    cfg = self.config['vcd']
    org = Org(self.client, resource=self.client.get_org())
    vdc = VDC(self.client, resource=org.get_vdc(cfg['vdc']))
    assert cfg['vdc'] == vdc.get_resource().get('name')
    vapp = VApp(self.client, resource=vdc.get_vapp(cfg['vapp']))
    vm = VM(self.client, resource=vapp.get_vm(cfg['vm']))
    task = vm.modify_memory(cfg['memory'])
    # Block until the modify task terminates; require SUCCESS.
    task = self.client.get_task_monitor().wait_for_status(
        task=task,
        timeout=60,
        poll_frequency=2,
        fail_on_statuses=None,
        expected_target_statuses=[TaskStatus.SUCCESS, TaskStatus.ABORTED,
                                  TaskStatus.ERROR, TaskStatus.CANCELED],
        callback=None)
    assert task.get('status') == TaskStatus.SUCCESS.value
    # Re-fetch the VM and confirm the change actually took effect.
    vm.reload()
    assert vm.get_memory() == cfg['memory']
def test_1006_snapshot_create(self):
    """Create a VM snapshot (no memory, no quiesce) and expect SUCCESS."""
    cfg = self.config['vcd']
    org = Org(self.client, resource=self.client.get_org())
    vdc = VDC(self.client, resource=org.get_vdc(cfg['vdc']))
    assert cfg['vdc'] == vdc.get_resource().get('name')
    vapp_resource = vdc.get_vapp(cfg['vapp'])
    assert vapp_resource.get('name') == cfg['vapp']
    vapp = VApp(self.client, resource=vapp_resource)
    vm_resource = vapp.get_vm(cfg['vm'])
    assert vm_resource.get('name') == cfg['vm']
    vm = VM(self.client, resource=vm_resource)
    task = vm.snapshot_create(memory=False, quiesce=False)
    # Snapshot creation can be slow, hence the longer 120s timeout.
    task = self.client.get_task_monitor().wait_for_status(
        task=task,
        timeout=120,
        poll_frequency=2,
        fail_on_statuses=None,
        expected_target_statuses=[TaskStatus.SUCCESS, TaskStatus.ABORTED,
                                  TaskStatus.ERROR, TaskStatus.CANCELED],
        callback=None)
    assert task.get('status') == TaskStatus.SUCCESS.value
def delete_nodes_thread(self):
    """Worker thread: remove the requested nodes from the cluster.

    Sequence: drain/delete the nodes from the Kubernetes cluster, undeploy
    each VM, then delete the VMs from the vApp. Progress is reported via
    update_task(); any failure flips the task to ERROR.
    """
    LOGGER.debug('about to delete nodes from cluster with name: %s',
                 self.cluster_name)
    try:
        vapp = VApp(self.client_tenant, href=self.cluster['vapp_href'])
        template = self.get_template()
        self.update_task(
            TaskStatus.RUNNING,
            message='Deleting %s node(s) from %s(%s)' %
                    (len(self.body['nodes']), self.cluster_name,
                     self.cluster_id))
        # Remove the nodes from the k8s cluster itself first (kubectl-level
        # removal), honoring the caller-supplied 'force' flag.
        delete_nodes_from_cluster(self.config, vapp, template,
                                  self.body['nodes'], self.body['force'])
        self.update_task(
            TaskStatus.RUNNING,
            message='Undeploying %s node(s) for %s(%s)' %
                    (len(self.body['nodes']), self.cluster_name,
                     self.cluster_id))
        for vm_name in self.body['nodes']:
            vm = VM(self.client_tenant, resource=vapp.get_vm(vm_name))
            try:
                task = vm.undeploy()
                self.client_tenant.get_task_monitor().wait_for_status(task)
            except Exception as e:
                # Best effort: a VM that fails to undeploy is still deleted
                # by the delete_vms() call below.
                LOGGER.warning('couldn\'t undeploy VM %s' % vm_name)
        self.update_task(
            TaskStatus.RUNNING,
            message='Deleting %s VM(s) for %s(%s)' %
                    (len(self.body['nodes']), self.cluster_name,
                     self.cluster_id))
        task = vapp.delete_vms(self.body['nodes'])
        self.client_tenant.get_task_monitor().wait_for_status(task)
        self.update_task(
            TaskStatus.SUCCESS,
            message='Deleted %s node(s) to cluster %s(%s)' %
                    (len(self.body['nodes']), self.cluster_name,
                     self.cluster_id))
    except Exception as e:
        LOGGER.error(traceback.format_exc())
        self.update_task(TaskStatus.ERROR, error_message=str(e))
def get_vapp(self):
    """Return a VApp object for the vApp named by the 'vapp_name' param."""
    name = self.params.get('vapp_name')
    return VApp(self.client, name=name, resource=self.vdc.get_vapp(name))
def add_nodes(qty, template, node_type, config, client, org, vdc, vapp, body):
    """Clone *qty* VMs from the template's catalog vApp into *vapp*.

    Builds one add_vms() spec per node (random 4-char suffix names, retried
    until unused), optionally reconfigures CPU/memory before power-on,
    resets the root password, and enables NFS on NFS-type nodes.

    :return: dict with the final 'task' and the list of 'specs', or None
        when qty < 1.
    """
    if qty < 1:
        return None
    specs = []
    catalog_item = org.get_catalog_item(config['broker']['catalog'],
                                        template['catalog_item'])
    source_vapp = VApp(client, href=catalog_item.Entity.get('href'))
    source_vm = source_vapp.get_all_vms()[0].get('name')
    storage_profile = None
    if 'storage_profile' in body and body['storage_profile'] is not None:
        storage_profile = vdc.get_storage_profile(body['storage_profile'])
    # Guest customization script: runs only during post-customization phase.
    cust_script_init = \
"""#!/usr/bin/env bash
if [ x$1=x"postcustomization" ];
then
""" # NOQA
    cust_script_common = ''
    cust_script_end = \
"""
fi
""" # NOQA
    if 'ssh_key' in body and body['ssh_key'] is not None:
        cust_script_common += \
"""
mkdir -p /root/.ssh
echo '{ssh_key}' >> /root/.ssh/authorized_keys
chmod -R go-rwx /root/.ssh
""".format(ssh_key=body['ssh_key'])  # NOQA
    # FIX: was `cust_script_common is ''` — identity comparison against a
    # string literal is implementation-dependent and wrong in principle;
    # test for emptiness instead.
    if not cust_script_common:
        cust_script = None
    else:
        cust_script = cust_script_init + cust_script_common + cust_script_end
    for n in range(qty):
        # Pick a random node name and retry until it is not already taken
        # in the vApp (get_vm raises when the name is free).
        name = None
        while True:
            name = '%s-%s' % (node_type, ''.join(
                random.choices(string.ascii_lowercase + string.digits, k=4)))
            try:
                vapp.get_vm(name)
            except Exception:
                break
        spec = {
            'source_vm_name': source_vm,
            'vapp': source_vapp.resource,
            'target_vm_name': name,
            'hostname': name,
            'network': body['network'],
            'ip_allocation_mode': 'pool'
        }
        if cust_script is not None:
            spec['cust_script'] = cust_script
        if storage_profile is not None:
            spec['storage_profile'] = storage_profile
        specs.append(spec)
    # If CPU or memory overrides were requested, create the VMs powered off,
    # reconfigure them, then power them on individually.
    reconfigure_hw = \
        ('cpu' in body and body['cpu'] is not None) or \
        ('memory' in body and body['memory'] is not None)
    task = vapp.add_vms(specs, power_on=not reconfigure_hw)
    # TODO(get details of the exception like not enough resources avail)
    client.get_task_monitor().wait_for_status(task)
    if reconfigure_hw:
        vapp.reload()
        for spec in specs:
            vm_resource = vapp.get_vm(spec['target_vm_name'])
            if 'cpu' in body and body['cpu'] is not None:
                vm = VM(client, resource=vm_resource)
                task = vm.modify_cpu(body['cpu'])
                client.get_task_monitor().wait_for_status(task)
            if 'memory' in body and body['memory'] is not None:
                vm = VM(client, resource=vm_resource)
                task = vm.modify_memory(body['memory'])
                client.get_task_monitor().wait_for_status(task)
            vm = VM(client, resource=vm_resource)
            task = vm.power_on()
            client.get_task_monitor().wait_for_status(task)
    # 'password' is the template's current admin password, used to log in;
    # the chpasswd command then sets root's password to the template's
    # admin_password value. NOTE(review): the asymmetry (login password vs
    # template['admin_password']) is preserved from the original — confirm
    # both are intended to be the same credential.
    password = source_vapp.get_admin_password(source_vm)
    vapp.reload()
    for spec in specs:
        vm_resource = vapp.get_vm(spec['target_vm_name'])
        command = '/bin/echo "root:{password}" | chpasswd'.format(
            password=template['admin_password'])
        nodes = [vm_resource]
        execute_script_in_nodes(config, vapp, password, command, nodes,
                                check_tools=True, wait=False)
        if node_type == TYPE_NFS:
            LOGGER.debug('Enabling NFS server on %s' % spec['target_vm_name'])
            script = get_data_file('nfsd-%s.sh' % template['name'])
            execute_script_in_nodes(config, vapp, template['admin_password'],
                                    script, nodes)
    return {'task': task, 'specs': specs}
def __init__(self, **kwargs):
    """Initialize the base resource, then cache the target vApp wrapper."""
    super(VappVM, self).__init__(**kwargs)
    self.vapp = VApp(self.client, resource=self.get_target_resource())
# Script setup: authenticate against vCD from a JSON config, resolve the
# org/VDC/vApp named in the config, and attach each configured service's
# backing VM resource onto its service dict.
config = json.load(f)
client = Client(config['url'])
client.set_highest_supported_version()
client.set_credentials(
    BasicLoginCredentials(config['user'], config['org'], config['password']))
print("Fetching Org...")
org = Org(client, resource=client.get_org())
print("Fetching VDC...")
vdc = VDC(client, resource=org.get_vdc(config['vdc']))
print("Fetching vApp...")
vapp_resource = vdc.get_vapp(config['vapp'])
vapp = VApp(client, resource=vapp_resource)
print("Validating VMs...")
vms = vapp.get_all_vms()
names = map(lambda vm: vm.get('name'), vms)
names = list(names)
services = config['services']
# Each service entry names a VM; raises ValueError if the VM is missing
# from the vApp (names.index), which acts as validation.
for service in services:
    name = service['vm']
    index = names.index(name)
    service['resource'] = vms[index]


# NOTE(review): definition truncated in this view — body not visible here.
def health_check_tcp(service):
def create_nodes_thread(self):
    """Worker thread: add worker or NFS nodes to an existing cluster.

    Creates the VMs via add_nodes(); worker nodes are additionally joined
    to the Kubernetes cluster. Task status is driven through _update_task;
    NodeCreationError is re-raised after being recorded, other exceptions
    are only recorded. The sys-admin client is always disconnected.
    """
    LOGGER.debug(f"About to add nodes to cluster with name: "
                 f"{self.cluster_name}")
    try:
        server_config = get_server_runtime_config()
        org_resource = self.tenant_client.get_org()
        org = Org(self.tenant_client, resource=org_resource)
        vdc = VDC(self.tenant_client, href=self.cluster['vdc_href'])
        vapp = VApp(self.tenant_client, href=self.cluster['vapp_href'])
        template = self._get_template()
        self._update_task(
            TaskStatus.RUNNING,
            message=f"Creating {self.req_spec.get(RequestKey.NUM_WORKERS)}"
                    f" node(s) for {self.cluster_name}({self.cluster_id})")
        # NFS flag switches the node type for the whole request.
        node_type = NodeType.WORKER
        if self.req_spec.get(RequestKey.ENABLE_NFS):
            node_type = NodeType.NFS
        new_nodes = add_nodes(self.req_spec.get(RequestKey.NUM_WORKERS),
                              template, node_type, server_config,
                              self.tenant_client, org, vdc, vapp,
                              self.req_spec)
        if node_type == NodeType.NFS:
            # NFS nodes do not join the k8s cluster; done after creation.
            self._update_task(
                TaskStatus.SUCCESS,
                message=f"Created "
                        f"{self.req_spec.get(RequestKey.NUM_WORKERS)} "
                        f"node(s) for "
                        f"{self.cluster_name}({self.cluster_id})")
        elif node_type == NodeType.WORKER:
            # Workers must additionally be joined to the cluster.
            self._update_task(
                TaskStatus.RUNNING,
                message=f"Adding "
                        f"{self.req_spec.get(RequestKey.NUM_WORKERS)} "
                        f"node(s) to cluster "
                        f"{self.cluster_name}({self.cluster_id})")
            target_nodes = []
            for spec in new_nodes['specs']:
                target_nodes.append(spec['target_vm_name'])
            vapp.reload()
            join_cluster(server_config, vapp, template, target_nodes)
            self._update_task(
                TaskStatus.SUCCESS,
                message=f"Added "
                        f"{self.req_spec.get(RequestKey.NUM_WORKERS)} "
                        f"node(s) to cluster "
                        f"{self.cluster_name}({self.cluster_id})")
    except NodeCreationError as e:
        error_obj = error_to_json(e)
        LOGGER.error(traceback.format_exc())
        stack_trace = \
            ''.join(error_obj[ERROR_MESSAGE_KEY][ERROR_STACKTRACE_KEY])
        self._update_task(
            TaskStatus.ERROR,
            error_message=error_obj[ERROR_MESSAGE_KEY][ERROR_DESCRIPTION_KEY],  # noqa: E501
            stack_trace=stack_trace)
        # Re-raise so the caller can react to node-creation failures.
        raise
    except Exception as e:
        error_obj = error_to_json(e)
        LOGGER.error(traceback.format_exc())
        stack_trace = \
            ''.join(error_obj[ERROR_MESSAGE_KEY][ERROR_STACKTRACE_KEY])
        self._update_task(
            TaskStatus.ERROR,
            error_message=error_obj[ERROR_MESSAGE_KEY][ERROR_DESCRIPTION_KEY],  # noqa: E501
            stack_trace=stack_trace)
    finally:
        self._disconnect_sys_admin()
def add_vm(self):
    """Create the target VM in the vApp if it does not already exist.

    Builds a recompose spec from module params, optionally injects OVF
    product-section properties from the source VM, and posts a
    RecomposeVAppParams request.

    :return: dict with 'changed', and 'msg' or 'warnings'.
    """
    params = self.params
    source_vapp_resource = self.get_source_resource()
    target_vm_name = params.get('target_vm_name')
    source_vm_name = params.get('source_vm_name')
    hostname = params.get('hostname')
    vmpassword = params.get('vmpassword')
    vmpassword_auto = params.get('vmpassword_auto')
    vmpassword_reset = params.get('vmpassword_reset')
    network = params.get('network')
    all_eulas_accepted = params.get('all_eulas_accepted')
    power_on = params.get('power_on')
    ip_allocation_mode = params.get('ip_allocation_mode')
    cust_script = params.get('cust_script')
    storage_profile = params.get('storage_profile')
    properties = params.get('properties')
    response = dict()
    response['changed'] = False
    try:
        self.get_vm()
    except EntityNotFoundException:
        spec = {
            'source_vm_name': source_vm_name,
            'vapp': source_vapp_resource,
            'target_vm_name': target_vm_name,
            'hostname': hostname,
            'password': vmpassword,
            'password_auto': vmpassword_auto,
            'password_reset': vmpassword_reset,
            'ip_allocation_mode': ip_allocation_mode,
            'network': network,
            'cust_script': cust_script
        }
        if storage_profile != '':
            spec['storage_profile'] = self.get_storage_profile(
                storage_profile)
        # Drop all unset/falsy options before building the sourced item.
        spec = {k: v for k, v in spec.items() if v}
        source_vm = self.vapp.to_sourced_item(spec)
        # Check the source vm if we need to inject OVF properties.
        source_vapp = VApp(self.client, resource=source_vapp_resource)
        vm = source_vapp.get_vm(source_vm_name)
        productsection = vm.find('ovf:ProductSection', NSMAP)
        if productsection is not None:
            for prop in productsection.iterfind('ovf:Property', NSMAP):
                if properties and \
                        prop.get('{' + NSMAP['ovf'] + '}key') in properties:
                    val = prop.find('ovf:Value', NSMAP)
                    # FIX: was `if val:` — lxml element truthiness means
                    # "has children", so an existing childless ovf:Value
                    # was never removed and a duplicate got appended.
                    # Explicit None check is the documented lxml idiom.
                    if val is not None:
                        prop.remove(val)
                    val = E_OVF.Value()
                    val.set('{' + NSMAP['ovf'] + '}value',
                            properties[prop.get('{' + NSMAP['ovf'] + '}key')])
                    prop.append(val)
            source_vm.InstantiationParams.append(productsection)
            # Force guest customization so injected properties take effect.
            source_vm.VmGeneralParams.NeedsCustomization = \
                E.NeedsCustomization('true')
        params = E.RecomposeVAppParams(
            deploy='true', powerOn='true' if power_on else 'false')
        params.append(source_vm)
        if all_eulas_accepted is not None:
            params.append(E.AllEULAsAccepted(all_eulas_accepted))
        add_vms_task = self.client.post_linked_resource(
            self.get_target_resource(), RelationType.RECOMPOSE,
            EntityType.RECOMPOSE_VAPP_PARAMS.value, params)
        self.execute_task(add_vms_task)
        response['msg'] = 'Vapp VM {} has been created.'.format(
            target_vm_name)
        response['changed'] = True
    else:
        response['warnings'] = 'Vapp VM {} is already present.'.format(
            target_vm_name)
    return response
def _create_cluster_async(self, *args, org_name, ovdc_name, cluster_name,
                          cluster_id, template_name, template_revision,
                          num_workers, network_name, num_cpu, mb_memory,
                          storage_profile_name, ssh_key_filepath, enable_nfs,
                          rollback):
    """Async task body: create a full CSE cluster.

    Steps: create the cluster vApp, tag it with cluster metadata, add a
    master node, init the cluster, add workers and join them, optionally
    add an NFS node. Known cluster errors trigger rollback (cluster delete)
    when *rollback* is True; all errors end the task in ERROR state.
    """
    org = vcd_utils.get_org(self.tenant_client, org_name=org_name)
    vdc = vcd_utils.get_vdc(
        self.tenant_client, vdc_name=ovdc_name, org=org)
    LOGGER.debug(f"About to create cluster {cluster_name} on {ovdc_name}"
                 f" with {num_workers} worker nodes, "
                 f"storage profile={storage_profile_name}")
    try:
        self._update_task(
            TaskStatus.RUNNING,
            message=f"Creating cluster vApp {cluster_name}({cluster_id})")
        try:
            vapp_resource = \
                vdc.create_vapp(cluster_name,
                                description=f"cluster {cluster_name}",
                                network=network_name,
                                fence_mode='bridged')
        except Exception as e:
            msg = f"Error while creating vApp: {e}"
            LOGGER.debug(str(e))
            raise ClusterOperationError(msg)
        self.tenant_client.get_task_monitor().wait_for_status(vapp_resource.Tasks.Task[0])  # noqa: E501
        template = get_template(template_name, template_revision)
        # Metadata tags identify this vApp as a CSE cluster.
        tags = {
            ClusterMetadataKey.CLUSTER_ID: cluster_id,
            ClusterMetadataKey.CSE_VERSION: pkg_resources.require('container-service-extension')[0].version,  # noqa: E501
            ClusterMetadataKey.TEMPLATE_NAME: template[LocalTemplateKey.NAME],  # noqa: E501
            ClusterMetadataKey.TEMPLATE_REVISION: template[LocalTemplateKey.REVISION]  # noqa: E501
        }
        vapp = VApp(self.tenant_client, href=vapp_resource.get('href'))
        task = vapp.set_multiple_metadata(tags)
        self.tenant_client.get_task_monitor().wait_for_status(task)
        self._update_task(
            TaskStatus.RUNNING,
            message=f"Creating master node for "
                    f"{cluster_name} ({cluster_id})")
        vapp.reload()
        server_config = utils.get_server_runtime_config()
        catalog_name = server_config['broker']['catalog']
        try:
            add_nodes(client=self.tenant_client,
                      num_nodes=1,
                      node_type=NodeType.MASTER,
                      org=org,
                      vdc=vdc,
                      vapp=vapp,
                      catalog_name=catalog_name,
                      template=template,
                      network_name=network_name,
                      num_cpu=num_cpu,
                      memory_in_mb=mb_memory,
                      storage_profile=storage_profile_name,
                      ssh_key_filepath=ssh_key_filepath)
        except Exception as e:
            raise MasterNodeCreationError("Error adding master node:",
                                          str(e))
        self._update_task(
            TaskStatus.RUNNING,
            message=f"Initializing cluster {cluster_name} ({cluster_id})")
        vapp.reload()
        init_cluster(vapp, template[LocalTemplateKey.NAME],
                     template[LocalTemplateKey.REVISION])
        # Record the master IP in vApp metadata for later lookups.
        master_ip = get_master_ip(vapp)
        task = vapp.set_metadata('GENERAL', 'READWRITE', 'cse.master.ip',
                                 master_ip)
        self.tenant_client.get_task_monitor().wait_for_status(task)
        self._update_task(
            TaskStatus.RUNNING,
            message=f"Creating {num_workers} node(s) for "
                    f"{cluster_name}({cluster_id})")
        try:
            add_nodes(client=self.tenant_client,
                      num_nodes=num_workers,
                      node_type=NodeType.WORKER,
                      org=org,
                      vdc=vdc,
                      vapp=vapp,
                      catalog_name=catalog_name,
                      template=template,
                      network_name=network_name,
                      num_cpu=num_cpu,
                      memory_in_mb=mb_memory,
                      storage_profile=storage_profile_name,
                      ssh_key_filepath=ssh_key_filepath)
        except Exception as e:
            raise WorkerNodeCreationError("Error creating worker node:",
                                          str(e))
        self._update_task(
            TaskStatus.RUNNING,
            message=f"Adding {num_workers} node(s) to "
                    f"{cluster_name}({cluster_id})")
        vapp.reload()
        join_cluster(vapp, template[LocalTemplateKey.NAME],
                     template[LocalTemplateKey.REVISION])
        if enable_nfs:
            self._update_task(
                TaskStatus.RUNNING,
                message=f"Creating NFS node for "
                        f"{cluster_name} ({cluster_id})")
            try:
                add_nodes(client=self.tenant_client,
                          num_nodes=1,
                          node_type=NodeType.NFS,
                          org=org,
                          vdc=vdc,
                          vapp=vapp,
                          catalog_name=catalog_name,
                          template=template,
                          network_name=network_name,
                          num_cpu=num_cpu,
                          memory_in_mb=mb_memory,
                          storage_profile=storage_profile_name,
                          ssh_key_filepath=ssh_key_filepath)
            except Exception as e:
                raise NFSNodeCreationError("Error creating NFS node:",
                                           str(e))
        self._update_task(
            TaskStatus.SUCCESS,
            message=f"Created cluster {cluster_name} ({cluster_id})")
    except (MasterNodeCreationError, WorkerNodeCreationError,
            NFSNodeCreationError, ClusterJoiningError,
            ClusterInitializationError, ClusterOperationError) as e:
        # Known cluster-creation failures: optionally roll back by deleting
        # the partially-created cluster.
        if rollback:
            msg = f"Error creating cluster {cluster_name}. " \
                  f"Deleting cluster (rollback=True)"
            self._update_task(TaskStatus.RUNNING, message=msg)
            LOGGER.info(msg)
            try:
                cluster = get_cluster(self.tenant_client,
                                      cluster_name,
                                      cluster_id=cluster_id,
                                      org_name=org_name,
                                      ovdc_name=ovdc_name)
                self._delete_cluster(cluster_name=cluster_name,
                                     cluster_vdc_href=cluster['vdc_href'])
            except Exception:
                LOGGER.error(f"Failed to delete cluster {cluster_name}",
                             exc_info=True)
        LOGGER.error(f"Error creating cluster {cluster_name}",
                     exc_info=True)
        error_obj = error_to_json(e)
        stack_trace = ''.join(error_obj[ERROR_MESSAGE_KEY][ERROR_STACKTRACE_KEY])  # noqa: E501
        self._update_task(
            TaskStatus.ERROR,
            error_message=error_obj[ERROR_MESSAGE_KEY][ERROR_DESCRIPTION_KEY],  # noqa: E501
            stack_trace=stack_trace)
        # raising an exception here prints a stacktrace to server console
    except Exception as e:
        LOGGER.error(f"Unknown error creating cluster {cluster_name}",
                     exc_info=True)
        error_obj = error_to_json(e)
        stack_trace = ''.join(error_obj[ERROR_MESSAGE_KEY][ERROR_STACKTRACE_KEY])  # noqa: E501
        self._update_task(
            TaskStatus.ERROR,
            error_message=error_obj[ERROR_MESSAGE_KEY][ERROR_DESCRIPTION_KEY],  # noqa: E501
            stack_trace=stack_trace)
    finally:
        self.logout_sys_admin_client()
def add_nodes(qty, template, node_type, config, client, org, vdc, vapp,
              req_spec):
    """Clone *qty* node VMs from the template catalog item into *vapp*.

    Any failure is wrapped in NodeCreationError carrying the names of the
    nodes that were being created.

    :return: dict with the final 'task' and the list of 'specs', or None
        when qty < 1.
    """
    try:
        if qty < 1:
            return None
        specs = []
        catalog_item = org.get_catalog_item(config['broker']['catalog'],
                                            template['catalog_item_name'])
        source_vapp = VApp(client, href=catalog_item.Entity.get('href'))
        source_vm = source_vapp.get_all_vms()[0].get('name')
        storage_profile = req_spec.get(RequestKey.STORAGE_PROFILE_NAME)
        if storage_profile is not None:
            storage_profile = vdc.get_storage_profile(storage_profile)
        cust_script_common = ''
        # Guest customization wrapper: run only in postcustomization phase.
        cust_script_init = \
"""
#!/usr/bin/env bash
if [ x$1=x"postcustomization" ];
then
""" # noqa: E128
        cust_script_end = \
"""
fi
""" # noqa: E128
        # NOTE(review): despite the name, this value is interpolated as the
        # key material written to authorized_keys — confirm whether it holds
        # a path or the key contents.
        ssh_key_filepath = req_spec.get(RequestKey.SSH_KEY_FILEPATH)
        if ssh_key_filepath is not None:
            cust_script_common += \
f"""
mkdir -p /root/.ssh
echo '{ssh_key_filepath}' >> /root/.ssh/authorized_keys
chmod -R go-rwx /root/.ssh
"""  # noqa
        if cust_script_common == '':
            cust_script = None
        else:
            cust_script = cust_script_init + cust_script_common + \
                cust_script_end
        for n in range(qty):
            # Random 4-char suffix; retry until the name is unused in the
            # vApp (get_vm raising means the name is free).
            name = None
            while True:
                name = f"{node_type}-{''.join(random.choices(string.ascii_lowercase + string.digits, k=4))}"  # noqa: E501
                try:
                    vapp.get_vm(name)
                except Exception:
                    break
            spec = {
                'source_vm_name': source_vm,
                'vapp': source_vapp.resource,
                'target_vm_name': name,
                'hostname': name,
                'password_auto': True,
                'network': req_spec.get(RequestKey.NETWORK_NAME),
                'ip_allocation_mode': 'pool'
            }
            if cust_script is not None:
                spec['cust_script'] = cust_script
            if storage_profile is not None:
                spec['storage_profile'] = storage_profile
            specs.append(spec)
        num_cpu = req_spec.get(RequestKey.NUM_CPU)
        mb_memory = req_spec.get(RequestKey.MB_MEMORY)
        # CPU/memory overrides require creating powered-off, reconfiguring,
        # then powering on each VM individually.
        configure_hw = bool(num_cpu or mb_memory)
        task = vapp.add_vms(specs, power_on=not configure_hw)
        # TODO(get details of the exception like not enough resources avail)
        client.get_task_monitor().wait_for_status(task)
        vapp.reload()
        if configure_hw:
            for spec in specs:
                vm_resource = vapp.get_vm(spec['target_vm_name'])
                if num_cpu:
                    vm = VM(client, resource=vm_resource)
                    task = vm.modify_cpu(num_cpu)
                    client.get_task_monitor().wait_for_status(task)
                if mb_memory:
                    vm = VM(client, resource=vm_resource)
                    task = vm.modify_memory(mb_memory)
                    client.get_task_monitor().wait_for_status(task)
                vm = VM(client, resource=vm_resource)
                task = vm.power_on()
                client.get_task_monitor().wait_for_status(task)
            vapp.reload()
        # NOTE(review): 'spec' here is the variable leaked from the last
        # loop iteration, i.e. the password of the last-created node is used
        # to log in to every node below — confirm all clones share it.
        password = vapp.get_admin_password(spec['target_vm_name'])
        for spec in specs:
            vm_resource = vapp.get_vm(spec['target_vm_name'])
            command = \
                f"/bin/echo \"root:{template['admin_password']}\" | chpasswd"
            nodes = [vm_resource]
            execute_script_in_nodes(config, vapp, password, command, nodes,
                                    check_tools=True, wait=False)
            if node_type == NodeType.NFS:
                LOGGER.debug(
                    f"enabling NFS server on {spec['target_vm_name']}")
                script_filepath = get_local_script_filepath(
                    template['name'], template['revision'], ScriptFile.NFSD)
                script = read_data_file(script_filepath, logger=LOGGER)
                exec_results = execute_script_in_nodes(
                    config, vapp, template['admin_password'], script, nodes)
                errors = get_script_execution_errors(exec_results)
                if errors:
                    raise ScriptExecutionError(
                        f"Script execution failed on node "
                        f"{spec['target_vm_name']}:{errors}")
    except Exception as e:
        # Report which node names were in flight when the failure occurred.
        node_list = [entry.get('target_vm_name') for entry in specs]
        raise NodeCreationError(node_list, str(e))
    return {'task': task, 'specs': specs}
def test_0070_install_update(config, blank_cust_scripts, unregister_cse):
    """Tests installation option: '--update'.

    Tests that installation:
    - registers cse (when answering yes to prompt),
    - creates all templates correctly,
    - customizes temp vapps correctly.

    command: cse install --config cse_test_config.yaml
        --ssh-key ~/.ssh/id_rsa.pub --update --no-capture
    required files: cse_test_config.yaml, ~/.ssh/id_rsa.pub,
        ubuntu/photon init/cust scripts
    expected: cse registered, ubuntu/photon ovas exist,
        temp vapps exist, templates exist.
    """
    env.prepare_customization_scripts()
    # 'y\ny' answers the two interactive install prompts affirmatively.
    result = env.CLI_RUNNER.invoke(cli,
                                   ['install',
                                    '--config', env.ACTIVE_CONFIG_FILEPATH,
                                    '--ssh-key', env.SSH_KEY_FILEPATH,
                                    '--update',
                                    '--no-capture'],
                                   input='y\ny',
                                   catch_exceptions=False)
    assert result.exit_code == 0
    vdc = VDC(env.CLIENT, href=env.VDC_HREF)
    # check that cse was registered correctly
    is_cse_registered = env.is_cse_registered()
    assert is_cse_registered, \
        'CSE is not registered as an extension when it should be.'
    if is_cse_registered:
        assert env.is_cse_registration_valid(config['amqp']['routing_key'],
                                             config['amqp']['exchange']), \
            'CSE is registered as an extension, but the extension settings ' \
            'on vCD are not the same as config settings.'
    # ssh into vms to check for installed software
    ssh_client = paramiko.SSHClient()
    ssh_client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    # check that ova files and temp vapps exist
    for template_config in config['broker']['templates']:
        assert env.catalog_item_exists(template_config['source_ova_name']), \
            'Source ova files do not exist when they should.'
        temp_vapp_name = template_config['temp_vapp']
        try:
            vapp_resource = vdc.get_vapp(temp_vapp_name)
        except EntityNotFoundException:
            # --no-capture means the temp vApp must be left behind.
            assert False, 'vApp does not exist when it should (--no-capture)'
        vapp = VApp(env.CLIENT, resource=vapp_resource)
        ip = vapp.get_primary_ip(temp_vapp_name)
        try:
            ssh_client.connect(ip, username='******')
            # run different commands depending on OS
            if 'photon' in temp_vapp_name:
                # Photon: verify every kubernetes* package named in the
                # customization script shows up in `rpm -qa` output.
                script = utils.get_data_file(env.STATIC_PHOTON_CUST_SCRIPT)
                pattern = r'(kubernetes\S*)'
                packages = re.findall(pattern, script)
                stdin, stdout, stderr = ssh_client.exec_command("rpm -qa")
                installed = [line.strip('.x86_64\n') for line in stdout]
                for package in packages:
                    assert package in installed, \
                        f"{package} not found in Photon VM"
            elif 'ubuntu' in temp_vapp_name:
                # Ubuntu: verify pinned name=version packages from the
                # customization script appear in dpkg's installed list.
                script = utils.get_data_file(env.STATIC_UBUNTU_CUST_SCRIPT)
                pattern = r'((kubernetes|docker\S*|kubelet|kubeadm|kubectl)\S*=\S*)'  # noqa
                packages = [tup[0] for tup in re.findall(pattern, script)]
                cmd = "dpkg -l | grep '^ii' | awk '{print $2\"=\"$3}'"
                stdin, stdout, stderr = ssh_client.exec_command(cmd)
                installed = [line.strip() for line in stdout]
                for package in packages:
                    assert package in installed, \
                        f"{package} not found in Ubuntu VM"
        finally:
            ssh_client.close()
# Report fragment: print org-network rows, then enumerate all vApps in the
# VDC and tabulate their VMs' NIC details.
# NOTE(review): this chunk references names defined outside this view
# (o, vdc, orgnets, orgnettable, vdc_instance, client) and is truncated
# mid-statement at the end.
for k in range(len(orgnets)):
    orgnettable.add_row([
        o.get('name'), vdc['name'], orgnets[k].attrib['name'],
        orgnets[k].attrib['href']
    ])
print(orgnettable)

# Retrieve all vApps from vCD -------------------------------------------------------------
vapps_list = vdc_instance.list_resources()
for vapp in vapps_list:
    # Exclude VM Templates from Catalogs
    # There're two types vApp+xml or vAppTemplate+xml
    if vapp.get('type').split('.')[-1] == 'vApp+xml':
        # print("\nFetching vAPP {}".format(vapp['name']))
        vapp_resource = vdc_instance.get_vapp(vapp['name'])
        vapp_instance = VApp(client, resource=vapp_resource)
        vapptable = PrettyTable([
            "VAPP name", "VM name", "VM href", "Connection", "MAC", "IP",
            "Primary"
        ])
        # NOTE(review): the next line is a bare string expression — a no-op
        # statement, likely a leftover; kept as-is.
        'application/vnd.vmware.vcloud.vAppTemplate+xml'
        vms = vapp_resource.xpath(
            '//vcloud:VApp/vcloud:Children/vcloud:Vm', namespaces=NSMAP)
        for vm in vms:
            vm_instance = VM(client, resource=vm)
            items = vm.xpath('//ovf:VirtualHardwareSection/ovf:Item',
def _create_nodes_async(self, *args, cluster_name, cluster_vdc_href,
                        cluster_vapp_href, cluster_id, template_name,
                        template_revision, num_workers, network_name,
                        num_cpu, mb_memory, storage_profile_name,
                        ssh_key_filepath, enable_nfs, rollback):
    """Async task body: add worker or NFS nodes to an existing cluster.

    Workers are joined to the Kubernetes cluster after creation; NFS nodes
    are not. On NodeCreationError the newly created nodes are deleted when
    *rollback* is True. The sys-admin client is always logged out.
    """
    org = vcd_utils.get_org(self.tenant_client)
    vdc = VDC(self.tenant_client, href=cluster_vdc_href)
    vapp = VApp(self.tenant_client, href=cluster_vapp_href)
    template = get_template(name=template_name, revision=template_revision)
    msg = f"Creating {num_workers} node(s) from template " \
          f"'{template_name}' (revision {template_revision}) and " \
          f"adding to {cluster_name} ({cluster_id})"
    LOGGER.debug(msg)
    try:
        self._update_task(TaskStatus.RUNNING, message=msg)
        # The NFS flag switches the node type for the whole request.
        node_type = NodeType.WORKER
        if enable_nfs:
            node_type = NodeType.NFS
        server_config = utils.get_server_runtime_config()
        catalog_name = server_config['broker']['catalog']
        new_nodes = add_nodes(client=self.tenant_client,
                              num_nodes=num_workers,
                              node_type=node_type,
                              org=org,
                              vdc=vdc,
                              vapp=vapp,
                              catalog_name=catalog_name,
                              template=template,
                              network_name=network_name,
                              num_cpu=num_cpu,
                              memory_in_mb=mb_memory,
                              storage_profile=storage_profile_name,
                              ssh_key_filepath=ssh_key_filepath)
        if node_type == NodeType.NFS:
            # NFS nodes do not join the k8s cluster; done after creation.
            self._update_task(
                TaskStatus.SUCCESS,
                message=f"Created {num_workers} node(s) for "
                        f"{cluster_name}({cluster_id})")
        elif node_type == NodeType.WORKER:
            self._update_task(
                TaskStatus.RUNNING,
                message=f"Adding {num_workers} node(s) to cluster "
                        f"{cluster_name}({cluster_id})")
            target_nodes = []
            for spec in new_nodes['specs']:
                target_nodes.append(spec['target_vm_name'])
            vapp.reload()
            join_cluster(vapp, template[LocalTemplateKey.NAME],
                         template[LocalTemplateKey.REVISION], target_nodes)
            self._update_task(
                TaskStatus.SUCCESS,
                message=f"Added {num_workers} node(s) to cluster "
                        f"{cluster_name}({cluster_id})")
    except NodeCreationError as e:
        # Known failure: optionally delete the nodes that were in flight.
        if rollback:
            msg = f"Error adding nodes to {cluster_name} {cluster_id}." \
                  f" Deleting nodes: {e.node_names} (rollback=True)"
            self._update_task(TaskStatus.RUNNING, message=msg)
            LOGGER.info(msg)
            try:
                self._delete_nodes(cluster_name=cluster_name,
                                   cluster_vapp_href=cluster_vapp_href,
                                   node_names_list=e.node_names)
            except Exception:
                LOGGER.error(f"Failed to delete nodes {e.node_names} "
                             f"from cluster {cluster_name}", exc_info=True)
        LOGGER.error(f"Error adding nodes to {cluster_name}",
                     exc_info=True)
        error_obj = error_to_json(e)
        LOGGER.error(str(e), exc_info=True)
        stack_trace = ''.join(error_obj[ERROR_MESSAGE_KEY][ERROR_STACKTRACE_KEY])  # noqa: E501
        self._update_task(
            TaskStatus.ERROR,
            error_message=error_obj[ERROR_MESSAGE_KEY][ERROR_DESCRIPTION_KEY],  # noqa: E501
            stack_trace=stack_trace)
        # raising an exception here prints a stacktrace to server console
    except Exception as e:
        error_obj = error_to_json(e)
        LOGGER.error(str(e), exc_info=True)
        stack_trace = ''.join(error_obj[ERROR_MESSAGE_KEY][ERROR_STACKTRACE_KEY])  # noqa: E501
        self._update_task(
            TaskStatus.ERROR,
            error_message=error_obj[ERROR_MESSAGE_KEY][ERROR_DESCRIPTION_KEY],  # noqa: E501
            stack_trace=stack_trace)
    finally:
        self.logout_sys_admin_client()
def create_cluster_thread(self):
    """Create a Kubernetes cluster as a vApp in the tenant's org VDC.

    Runs on a worker thread. Phases, each reported via self._update_task:
    duplicate-name check, vApp creation, metadata tagging, master node
    creation + cluster init, optional worker node creation + join, and
    optional NFS node creation. Known cluster errors are re-raised after
    the task is marked ERROR; unexpected errors only mark the task ERROR.
    """
    network_name = self.req_spec.get(RequestKey.NETWORK_NAME)
    try:
        # Refuse to create a second cluster with the same name.
        clusters = load_from_metadata(self.tenant_client,
                                      name=self.cluster_name)
        if len(clusters) != 0:
            raise ClusterAlreadyExistsError(f"Cluster {self.cluster_name} "
                                            "already exists.")

        org_resource = self.tenant_client.get_org_by_name(
            self.req_spec.get(RequestKey.ORG_NAME))
        org = Org(self.tenant_client, resource=org_resource)
        vdc_resource = org.get_vdc(self.req_spec.get(RequestKey.OVDC_NAME))
        vdc = VDC(self.tenant_client, resource=vdc_resource)
        template = self._get_template()

        # Phase 1: create the (initially empty) cluster vApp.
        self._update_task(
            TaskStatus.RUNNING,
            message=f"Creating cluster vApp {self.cluster_name}"
                    f"({self.cluster_id})")
        try:
            vapp_resource = vdc.create_vapp(
                self.cluster_name,
                description=f"cluster {self.cluster_name}",
                network=network_name,
                fence_mode='bridged')
        except Exception as e:
            raise ClusterOperationError(
                "Error while creating vApp:", str(e))
        self.tenant_client.get_task_monitor().wait_for_status(
            vapp_resource.Tasks.Task[0])

        # Phase 2: tag the vApp with CSE metadata so it can later be
        # rediscovered by load_from_metadata.
        tags = {}
        tags['cse.cluster.id'] = self.cluster_id
        tags['cse.version'] = pkg_resources.require(
            'container-service-extension')[0].version
        tags['cse.template'] = template['name']
        vapp = VApp(self.tenant_client, href=vapp_resource.get('href'))
        for k, v in tags.items():
            task = vapp.set_metadata('GENERAL', 'READWRITE', k, v)
            self.tenant_client.get_task_monitor().wait_for_status(task)

        # Phase 3: master node, then initialize the cluster on it.
        self._update_task(
            TaskStatus.RUNNING,
            message=f"Creating master node for {self.cluster_name}"
                    f"({self.cluster_id})")
        vapp.reload()
        server_config = get_server_runtime_config()
        try:
            add_nodes(1, template, NodeType.MASTER, server_config,
                      self.tenant_client, org, vdc, vapp, self.req_spec)
        except Exception as e:
            raise MasterNodeCreationError(
                "Error while adding master node:", str(e))

        self._update_task(
            TaskStatus.RUNNING,
            message=f"Initializing cluster {self.cluster_name}"
                    f"({self.cluster_id})")
        vapp.reload()
        init_cluster(server_config, vapp, template)
        # Record the master IP in metadata so worker/NFS nodes can join.
        master_ip = get_master_ip(server_config, vapp, template)
        task = vapp.set_metadata('GENERAL', 'READWRITE', 'cse.master.ip',
                                 master_ip)
        self.tenant_client.get_task_monitor().wait_for_status(task)

        # Phase 4 (optional): worker nodes, created then joined.
        if self.req_spec.get(RequestKey.NUM_WORKERS) > 0:
            self._update_task(
                TaskStatus.RUNNING,
                message=f"Creating "
                        f"{self.req_spec.get(RequestKey.NUM_WORKERS)} "
                        f"node(s) for "
                        f"{self.cluster_name}({self.cluster_id})")
            try:
                add_nodes(self.req_spec.get(RequestKey.NUM_WORKERS),
                          template, NodeType.WORKER, server_config,
                          self.tenant_client, org, vdc, vapp, self.req_spec)
            except Exception as e:
                raise WorkerNodeCreationError(
                    "Error while creating worker node:", str(e))

            self._update_task(
                TaskStatus.RUNNING,
                message=f"Adding "
                        f"{self.req_spec.get(RequestKey.NUM_WORKERS)} "
                        f"node(s) to "
                        f"{self.cluster_name}({self.cluster_id})")
            vapp.reload()
            join_cluster(server_config, vapp, template)

        # Phase 5 (optional): NFS node.
        if self.req_spec.get(RequestKey.ENABLE_NFS):
            self._update_task(
                TaskStatus.RUNNING,
                message=f"Creating NFS node for {self.cluster_name}"
                        f"({self.cluster_id})")
            try:
                add_nodes(1, template, NodeType.NFS, server_config,
                          self.tenant_client, org, vdc, vapp, self.req_spec)
            except Exception as e:
                raise NFSNodeCreationError(
                    "Error while creating NFS node:", str(e))

        self._update_task(
            TaskStatus.SUCCESS,
            message=f"Created cluster {self.cluster_name}"
                    f"({self.cluster_id})")
    except (MasterNodeCreationError, WorkerNodeCreationError,
            NFSNodeCreationError, ClusterJoiningError,
            ClusterInitializationError, ClusterOperationError) as e:
        # Known cluster errors: record then re-raise to the caller.
        LOGGER.error(traceback.format_exc())
        error_obj = error_to_json(e)
        stack_trace = \
            ''.join(error_obj[ERROR_MESSAGE_KEY][ERROR_STACKTRACE_KEY])
        self._update_task(
            TaskStatus.ERROR,
            error_message=error_obj[ERROR_MESSAGE_KEY]
            [ERROR_DESCRIPTION_KEY],
            stack_trace=stack_trace)
        raise e
    except Exception as e:
        # Unexpected errors: record on the task only; not re-raised.
        LOGGER.error(traceback.format_exc())
        error_obj = error_to_json(e)
        stack_trace = \
            ''.join(error_obj[ERROR_MESSAGE_KEY][ERROR_STACKTRACE_KEY])
        self._update_task(
            TaskStatus.ERROR,
            error_message=error_obj[ERROR_MESSAGE_KEY][ERROR_DESCRIPTION_KEY],  # noqa: E501
            stack_trace=stack_trace)
    finally:
        self._disconnect_sys_admin()
def create(client): print("=============== __LOG__Create_VDC =======================\n\n") vdc_name = "ACME_PAYG" vapp_name = "test2" org_resource = client.get_org() org = Org(client, resource=org_resource) print("Org name: ", org.get_name()) print("Vdc name: ", vdc_name) try: vdc_resource = org.get_vdc(vdc_name) vdc = VDC(client, name=vdc_name, resource=vdc_resource) vapp_resource = vdc.get_vapp(vapp_name) vapp = VApp(client, name=vapp_name, resource=vapp_resource) print("vapp : ", vapp) catalog_item = org.get_catalog_item('ACME', 'tinyova') source_vapp_resource = client.get_resource( catalog_item.Entity.get('href')) print("source_vapp_resource: ", source_vapp_resource) spec = { 'source_vm_name': 'Tiny Linux template', 'vapp': source_vapp_resource } storage_profiles = [{ 'name': 'Performance', 'enabled': True, 'units': 'MB', 'limit': 0, 'default': True }] spec['target_vm_name'] = 'ubuntu_pcp_11' spec['hostname'] = 'ubuntu' spec['network'] = 'global' spec['ip_allocation_mode'] = 'dhcp' #spec['storage_profile'] = storage_profiles vms = [spec] result = vapp.add_vms(vms) print("result: ", result) #task = client.get_task_monitor().wait_for_status( # task=result, # timeout=60, # poll_frequency=2, # fail_on_statuses=None, # expected_target_statuses=[ # TaskStatus.SUCCESS, # TaskStatus.ABORTED, # TaskStatus.ERROR, # TaskStatus.CANCELED], # callback=None) #st = task.get('status') #if st == TaskStatus.SUCCESS.value: # message = 'status : {0} '.format(st) # logging.info(message) #else: # print("st : ", st) # raise Exception(task) print("=============================================\n\n") return True except Exception as e: error_message = '__ERROR_ [create_vdc] failed for vdc {0} '.format( vdc_name) logging.warn(error_message, e) return False
def __init__(self, **kwargs): super(VappVMNIC, self).__init__(**kwargs) vapp_resource = self.get_resource() self.vapp = VApp(self.client, resource=vapp_resource) self.nic_mapping = defaultdict(list)
def create_template(ctx, config, client, org, vdc_resource, catalog,
                    no_capture, template):
    """Build (and optionally capture) a CSE vApp template from a source OVA.

    Steps: locate or upload the source OVA, wait for the catalog item to
    resolve, instantiate a temporary vApp with the init script, customize
    the guest over vSphere tools with the cust script, then capture the
    vApp as a template and optionally delete the temporary vApp.

    :param ctx: click context (its obj is re-initialized here).
    :param config: CSE server config dict ('broker' section is used).
    :param template: template definition dict (names, cpu/mem, cleanup flag).
    :return: None if the source OVA cannot be found/uploaded.
    """
    ctx.obj = {}
    ctx.obj['client'] = client
    # Find the source OVA in the catalog; upload it if missing (EAFP).
    try:
        source_ova_item = org.get_catalog_item(config['broker']['catalog'],
                                               template['source_ova_name'])
    except Exception:
        source_ova_item = upload_source_ova(config, client, org, template)
    click.secho('Find source ova \'%s\': %s' %
                (template['source_ova_name'],
                 bool_to_msg(source_ova_item is not None)))
    if source_ova_item is None:
        return None
    item_id = source_ova_item.get('id')
    # Poll every 5s until vCD marks the uploaded item RESOLVED.
    # `flag` tracks whether the "Waiting..." banner has been printed.
    flag = False
    while True:
        q = client.get_typed_query(
            'adminCatalogItem',
            query_result_format=QueryResultFormat.ID_RECORDS,
            qfilter='id==%s' % item_id)
        records = list(q.execute())
        if records[0].get('status') == 'RESOLVED':
            if flag:
                click.secho('done', fg='blue')
            break
        else:
            if flag:
                click.secho('.', nl=False, fg='green')
            else:
                click.secho('Waiting for upload to complete...',
                            nl=False, fg='green')
                flag = True
            time.sleep(5)
    vdc = VDC(client, resource=vdc_resource)
    # Reuse the temp vApp if it already exists; otherwise build it.
    try:
        vapp_resource = vdc.get_vapp(template['temp_vapp'])
    except Exception:
        vapp_resource = None
    if vapp_resource is None:
        click.secho('Creating vApp template \'%s\'' % template['temp_vapp'],
                    fg='green')
        init_script = get_data_file('init-%s.sh' % template['name'])
        vapp_resource = vdc.instantiate_vapp(
            template['temp_vapp'],
            catalog.get('name'),
            template['source_ova_name'],
            network=config['broker']['network'],
            fence_mode='bridged',
            ip_allocation_mode=config['broker']['ip_allocation_mode'],
            deploy=True,
            power_on=True,
            memory=template['mem'],
            cpu=template['cpu'],
            password=None,
            cust_script=init_script,
            accept_all_eulas=True,
            vm_name=template['temp_vapp'],
            hostname=template['temp_vapp'],
            storage_profile=config['broker']['storage_profile'])
        stdout(vapp_resource.Tasks.Task[0], ctx)
        vapp = VApp(client, resource=vapp_resource)
        vapp.reload()
        # Customize the guest via vSphere (guest-tools script execution).
        vs = get_vsphere(config, vapp, template['temp_vapp'])
        vs.connect()
        moid = vapp.get_vm_moid(template['temp_vapp'])
        vm = vs.get_vm_by_moid(moid)
        vs.wait_until_tools_ready(vm,
                                  sleep=5,
                                  callback=wait_for_tools_ready_callback)
        click.secho('Customizing vApp template \'%s\'' %
                    template['temp_vapp'], fg='green')
        vapp.reload()
        password_auto = vapp.get_admin_password(template['temp_vapp'])
        cust_script = get_data_file('cust-%s.sh' % template['name'])
        result = vs.execute_script_in_guest(
            vm, 'root', password_auto, cust_script,
            target_file=None,
            wait_for_completion=True,
            wait_time=10,
            get_output=True,
            delete_script=True,
            callback=wait_for_guest_execution_callback)
        click.secho('Result: %s' % result, fg='green')
        # result is (exit_code, stdout_response, stderr_response).
        result_stdout = result[1].content.decode()
        result_stderr = result[2].content.decode()
        click.secho('stderr:')
        if len(result_stderr) > 0:
            click.secho(result_stderr, err=True)
        click.secho('stdout:')
        if len(result_stdout) > 0:
            click.secho(result_stdout, err=False)
        if result[0] != 0:
            raise Exception('Failed customizing VM')
    if not no_capture:
        # Capture the temp vApp as the reusable template, then optionally
        # delete the temp vApp.
        capture_as_template(ctx, config, vapp_resource, org, catalog,
                            template)
        if template['cleanup']:
            click.secho('Deleting vApp template \'%s\' ' %
                        template['temp_vapp'], fg='green')
            vdc.reload()
            task = vdc.delete_vapp(template['temp_vapp'], force=True)
            stdout(task, ctx)
def get_vapp(ctx, vapp_name): client = ctx.obj['client'] vdc_href = ctx.obj['profiles'].get('vdc_href') vdc = VDC(client, href=vdc_href) return VApp(client, resource=vdc.get_vapp(vapp_name))
def add_nodes(client, num_nodes, node_type, org, vdc, vapp, catalog_name,
              template, network_name, num_cpu=None, memory_in_mb=None,
              storage_profile=None, ssh_key_filepath=None):
    """Clone VMs from the template catalog item into the cluster vApp.

    VMs are added powered off, resized (cpu/memory from args or template
    defaults), powered on one by one; NFS nodes additionally get the NFS
    server script executed. Any failure is wrapped in NodeCreationError
    carrying the names of all requested nodes.

    :return: dict with 'task' (last task) and 'specs', or None if
        num_nodes < 1.
    :raises NodeCreationError: on any failure during provisioning.
    """
    specs = []
    try:
        if num_nodes < 1:
            return None
        # The first VM of the catalog item's vApp is the clone source.
        catalog_item = org.get_catalog_item(
            catalog_name, template[LocalTemplateKey.CATALOG_ITEM_NAME])
        source_vapp = VApp(client, href=catalog_item.Entity.get('href'))
        source_vm = source_vapp.get_all_vms()[0].get('name')
        if storage_profile is not None:
            storage_profile = vdc.get_storage_profile(storage_profile)
        # Optional post-customization script that installs the SSH key.
        # NOTE(review): despite the name, ssh_key_filepath is embedded
        # directly as the authorized key content — confirm callers pass
        # key material, not a path.
        cust_script = None
        if ssh_key_filepath is not None:
            cust_script = \
                "#!/usr/bin/env bash\n" \
                "if [ x$1=x\"postcustomization\" ];\n" \
                "then\n" \
                "mkdir -p /root/.ssh\n" \
                f"echo '{ssh_key_filepath}' >> /root/.ssh/authorized_keys\n" \
                "chmod -R go-rwx /root/.ssh\n" \
                "fi"
        for n in range(num_nodes):
            # Pick a random node name that is not already used in the
            # vApp (get_vm raising means the name is free).
            name = None
            while True:
                name = f"{node_type}-{''.join(random.choices(string.ascii_lowercase + string.digits, k=4))}"  # noqa: E501
                try:
                    vapp.get_vm(name)
                except Exception:
                    break
            spec = {
                'source_vm_name': source_vm,
                'vapp': source_vapp.resource,
                'target_vm_name': name,
                'hostname': name,
                'password_auto': True,
                'network': network_name,
                'ip_allocation_mode': 'pool'
            }
            if cust_script is not None:
                spec['cust_script'] = cust_script
            if storage_profile is not None:
                spec['storage_profile'] = storage_profile
            specs.append(spec)
        # Add all VMs powered off so cpu/memory can be adjusted first.
        task = vapp.add_vms(specs, power_on=False)
        client.get_task_monitor().wait_for_status(task)
        vapp.reload()
        if not num_cpu:
            num_cpu = template[LocalTemplateKey.CPU]
        if not memory_in_mb:
            memory_in_mb = template[LocalTemplateKey.MEMORY]
        for spec in specs:
            vm_name = spec['target_vm_name']
            vm_resource = vapp.get_vm(vm_name)
            vm = VM(client, resource=vm_resource)
            task = vm.modify_cpu(num_cpu)
            client.get_task_monitor().wait_for_status(task)
            task = vm.modify_memory(memory_in_mb)
            client.get_task_monitor().wait_for_status(task)
            task = vm.power_on()
            client.get_task_monitor().wait_for_status(task)
            vapp.reload()
            if node_type == NodeType.NFS:
                # NFS nodes need the nfsd setup script run in the guest.
                LOGGER.debug(f"Enabling NFS server on {vm_name}")
                script_filepath = get_local_script_filepath(
                    template[LocalTemplateKey.NAME],
                    template[LocalTemplateKey.REVISION], ScriptFile.NFSD)
                script = utils.read_data_file(script_filepath, logger=LOGGER)
                exec_results = execute_script_in_nodes(
                    vapp=vapp, node_names=[vm_name], script=script)
                errors = _get_script_execution_errors(exec_results)
                if errors:
                    raise ScriptExecutionError(
                        f"VM customization script execution failed on node "
                        f"{vm_name}:{errors}")
    except Exception as e:
        # TODO: get details of the exception to determine cause of failure,
        # e.g. not enough resources available.
        node_list = [entry.get('target_vm_name') for entry in specs]
        raise NodeCreationError(node_list, str(e))
    vapp.reload()
    return {'task': task, 'specs': specs}
def test_0000_setup(self):
    """Load configuration and create a click runner to invoke CLI.

    Also provisions the shared fixtures used by the remaining tests:
    a test vApp with network, an empty vApp, an independent disk, a
    vmtools-enabled vApp from an uploaded template, a plain template
    vApp, and a datastore id discovered via the sys admin client.
    """
    logger = Environment.get_default_logger()
    VmTest._config = Environment.get_config()
    VmTest._logger = logger
    VmTest._client = Environment.get_client_in_default_org(
        CommonRoles.ORGANIZATION_ADMINISTRATOR)
    VmTest._media_resource = Environment.get_test_media_resource()
    VmTest._runner = CliRunner()
    default_org = VmTest._config['vcd']['default_org_name']
    VmTest._login(self)
    # Point the CLI runner at the default org and ovdc.
    VmTest._runner.invoke(org, ['use', default_org])
    default_ovdc = VmTest._config['vcd']['default_ovdc_name']
    VmTest._default_ovdc = default_ovdc
    VmTest._runner.invoke(vdc, ['use', VmTest._default_ovdc])
    VmTest._test_vdc = Environment.get_test_vdc(VmTest._client)
    VmTest._test_vapp = Environment.get_test_vapp_with_network(
        VmTest._client)
    VmTest._test_old_vapp_href = VmTest._test_vapp.get_resource().get(
        'href')
    self.assertIsNotNone(VmTest._test_old_vapp_href)
    logger.debug("Old vapp href is : " + VmTest._test_old_vapp_href)
    VmTest._test_vm = VM(VmTest._client, href=VmTest._test_vapp.get_vm(
        VAppConstants.vm1_name).get('href'))
    self.assertIsNotNone(
        VmTest._test_vapp.get_vm(VAppConstants.vm1_name).get('href'))
    logger.debug(
        "Old vapp VM href is : " +
        VmTest._test_vapp.get_vm(VAppConstants.vm1_name).get('href'))
    vdc1 = Environment.get_test_vdc(VmTest._client)
    logger.debug('Creating empty vApp.')
    VmTest._empty_vapp_href = \
        create_empty_vapp(client=VmTest._client,
                          vdc=vdc1,
                          name=VmTest._empty_vapp_name,
                          description=VmTest._empty_vapp_description)
    self.assertIsNotNone(VmTest._empty_vapp_href)
    logger.debug("Empty vapp href is: " + VmTest._empty_vapp_href)
    # Create independent disk
    VmTest._idisk_id = create_independent_disk(
        client=VmTest._client,
        vdc=vdc1,
        name=self._idisk_name,
        size=self._idisk_size,
        description=self._idisk_description)
    self.assertIsNotNone(VmTest._idisk_id)
    logger.debug("Independent disk id is: " + VmTest._idisk_id)
    # Upload template with vm tools.
    catalog_author_client = Environment.get_client_in_default_org(
        CommonRoles.CATALOG_AUTHOR)
    org_admin_client = Environment.get_client_in_default_org(
        CommonRoles.ORGANIZATION_ADMINISTRATOR)
    org1 = Environment.get_test_org(org_admin_client)
    catalog_name = Environment.get_config()['vcd']['default_catalog_name']
    catalog_items = org1.list_catalog_items(catalog_name)
    config = Environment.get_config()
    template_name = config['vcd']['default_template_vmtools_file_name']
    # Reuse the template if it is already present in the catalog.
    catalog_item_flag = False
    for item in catalog_items:
        if item.get('name').lower() == template_name.lower():
            logger.debug('Reusing existing template ' + template_name)
            catalog_item_flag = True
            break
    if not catalog_item_flag:
        logger.debug('Uploading template ' + template_name +
                     ' to catalog ' + catalog_name + '.')
        org1.upload_ovf(catalog_name=catalog_name, file_name=template_name)
        # wait for the template import to finish in vCD.
        catalog_item = org1.get_catalog_item(name=catalog_name,
                                             item_name=template_name)
        template = catalog_author_client.get_resource(
            catalog_item.Entity.get('href'))
        catalog_author_client.get_task_monitor().wait_for_success(
            task=template.Tasks.Task[0])
        # Fix: corrected typo "comleted" -> "completed" in the log message.
        logger.debug("Template upload completed for: " + template_name)
    # Create Vapp with template of vmware tools
    logger.debug('Creating vApp ' + VmTest._test_vapp_vmtools_name)
    VmTest._test_vapp_vmtools_href = create_customized_vapp_from_template(
        client=VmTest._client,
        vdc=vdc1,
        name=VmTest._test_vapp_vmtools_name,
        catalog_name=catalog_name,
        template_name=template_name)
    self.assertIsNotNone(VmTest._test_vapp_vmtools_href)
    logger.debug("vmtools vapp href is: " + VmTest._test_vapp_vmtools_href)
    vapp = VApp(VmTest._client, href=VmTest._test_vapp_vmtools_href)
    VmTest._test_vapp_vmtools = vapp
    vm_resource = vapp.get_vm(VmTest._test_vapp_vmtools_vm_name)
    VmTest._test_vapp_vmtools_vm_href = vm_resource.get('href')
    self.assertIsNotNone(VmTest._test_vapp_vmtools_vm_href)
    temp_name = config['vcd']['default_template_file_name']
    VmTest._test_vapp_href = create_customized_vapp_from_template(
        client=VmTest._client,
        vdc=vdc1,
        name=VmTest._vapp_name,
        catalog_name=catalog_name,
        template_name=temp_name)
    self.assertIsNotNone(VmTest._test_vapp_href)
    # Discover a datastore id from the system extension references.
    VmTest._sys_admin_client = Environment.get_sys_admin_client()
    resource = VmTest._sys_admin_client.get_extension()
    result = VmTest._sys_admin_client.get_linked_resource(
        resource, RelationType.DOWN, EntityType.DATASTORE_REFERENCES.value)
    if hasattr(result, '{' + NSMAP['vcloud'] + '}Reference'):
        for reference in result['{' + NSMAP['vcloud'] + '}Reference']:
            datastore_id = reference.get('id')
            # id is urn-style; the fourth colon-separated field is the uuid.
            VmTest._datastore_id = datastore_id.split(':')[3]
            break
    self.assertIsNotNone(VmTest._datastore_id)
def test_0000_setup(self):
    """Setup the vms required for the other tests in this module.

    Create a vApp with just one vm as per the configuration stated above.
    This test passes if the vApp and vm hrefs are not None.
    """
    logger = Environment.get_default_logger()
    TestVM._client = Environment.get_client_in_default_org(
        TestVM._test_runner_role)
    TestVM._sys_admin_client = Environment.get_sys_admin_client()
    vdc = Environment.get_test_vdc(TestVM._client)
    TestVM._media_resource = Environment.get_test_media_resource()
    logger.debug('Creating vApp ' + TestVM._test_vapp_name + '.')
    TestVM._test_vapp_href = create_customized_vapp_from_template(
        client=TestVM._client,
        vdc=vdc,
        name=TestVM._test_vapp_name,
        catalog_name=Environment.get_default_catalog_name(),
        template_name=Environment.get_default_template_name(),
        memory_size=TestVM._test_vapp_first_vm_memory_size,
        num_cpu=TestVM._test_vapp_first_vm_num_cpu,
        disk_size=TestVM._test_vapp_first_vm_first_disk_size,
        vm_name=TestVM._test_vapp_first_vm_name,
        nw_adapter_type=TestVM._test_vapp_first_vm_network_adapter_type)
    self.assertIsNotNone(TestVM._test_vapp_href)
    vapp = VApp(TestVM._client, href=TestVM._test_vapp_href)
    TestVM._test_vapp = vapp
    vm_resource = vapp.get_vm(TestVM._test_vapp_first_vm_name)
    TestVM._test_vapp_first_vm_href = vm_resource.get('href')
    self.assertIsNotNone(TestVM._test_vapp_first_vm_href)
    logger.debug('Creating empty vApp.')
    TestVM._empty_vapp_href = \
        create_empty_vapp(client=TestVM._client,
                          vdc=vdc,
                          name=TestVM._empty_vapp_name,
                          description=TestVM._empty_vapp_description)
    TestVM._empty_vapp_owner_name = Environment. \
        get_username_for_role_in_test_org(TestVM._test_runner_role)
    # Create independent disk
    TestVM._idisk = vdc.create_disk(name=self._idisk_name,
                                    size=self._idisk_size,
                                    description=self._idisk_description)
    # Upload template with vm tools.
    catalog_author_client = Environment.get_client_in_default_org(
        CommonRoles.CATALOG_AUTHOR)
    org_admin_client = Environment.get_client_in_default_org(
        CommonRoles.ORGANIZATION_ADMINISTRATOR)
    org = Environment.get_test_org(org_admin_client)
    catalog_name = Environment.get_config()['vcd']['default_catalog_name']
    catalog_items = org.list_catalog_items(catalog_name)
    template_name = Environment.get_config(
    )['vcd']['default_template_vmtools_file_name']
    # Reuse the template if it is already present in the catalog.
    catalog_item_flag = False
    for item in catalog_items:
        if item.get('name').lower() == template_name.lower():
            logger.debug('Reusing existing template ' + template_name)
            catalog_item_flag = True
            break
    if not catalog_item_flag:
        logger.debug('Uploading template ' + template_name +
                     ' to catalog ' + catalog_name + '.')
        org.upload_ovf(catalog_name=catalog_name, file_name=template_name)
        # wait for the template import to finish in vCD.
        catalog_item = org.get_catalog_item(name=catalog_name,
                                            item_name=template_name)
        template = catalog_author_client.get_resource(
            catalog_item.Entity.get('href'))
        catalog_author_client.get_task_monitor().wait_for_success(
            task=template.Tasks.Task[0])
    # Create Vapp with template of vmware tools
    logger.debug('Creating vApp ' + TestVM._test_vapp_vmtools_name + '.')
    TestVM._test_vapp_vmtools_href = create_customized_vapp_from_template(
        client=TestVM._client,
        vdc=vdc,
        name=TestVM._test_vapp_vmtools_name,
        catalog_name=catalog_name,
        template_name=template_name)
    # Fix: the original asserted TestVM._test_vapp_href (already checked
    # above) instead of the href just assigned for the vmtools vApp.
    self.assertIsNotNone(TestVM._test_vapp_vmtools_href)
    vapp = VApp(TestVM._client, href=TestVM._test_vapp_vmtools_href)
    TestVM._test_vapp_vmtools = vapp
    vm_resource = vapp.get_vm(TestVM._test_vapp_vmtools_vm_name)
    TestVM._test_vapp_vmtools_vm_href = vm_resource.get('href')
    self.assertIsNotNone(TestVM._test_vapp_vmtools_vm_href)
def create_cluster_thread(self):
    """Create a Kubernetes cluster vApp from self.body (legacy variant).

    Runs on a worker thread. Phases, each reported via self.update_task:
    duplicate check, vApp creation, metadata tagging, master node
    creation + init, optional worker nodes + join. Any failure marks the
    task ERROR with the exception message; nothing is re-raised.
    """
    network_name = self.body['network']
    try:
        # Refuse to create a second cluster with the same name.
        clusters = load_from_metadata(self.client_tenant,
                                      name=self.cluster_name)
        if len(clusters) != 0:
            raise Exception('Cluster already exists.')
        org_resource = self.client_tenant.get_org()
        org = Org(self.client_tenant, resource=org_resource)
        vdc_resource = org.get_vdc(self.body['vdc'])
        vdc = VDC(self.client_tenant, resource=vdc_resource)
        template = self.get_template()
        self.update_task(TaskStatus.RUNNING,
                         self.op,
                         message='Creating cluster vApp %s(%s)' %
                         (self.cluster_name, self.cluster_id))
        vapp_resource = vdc.create_vapp(self.cluster_name,
                                        description='cluster %s' %
                                        self.cluster_name,
                                        network=network_name,
                                        fence_mode='bridged')
        # Wait for the vApp creation task; all terminal statuses are
        # accepted by the monitor, then success is checked via assert.
        t = self.client_tenant.get_task_monitor().wait_for_status(
            task=vapp_resource.Tasks.Task[0],
            timeout=60,
            poll_frequency=2,
            fail_on_status=None,
            expected_target_statuses=[
                TaskStatus.SUCCESS, TaskStatus.ABORTED, TaskStatus.ERROR,
                TaskStatus.CANCELED
            ],
            callback=None)
        assert t.get('status').lower() == TaskStatus.SUCCESS.value
        # Tag the vApp with CSE metadata so load_from_metadata can find it.
        tags = {}
        tags['cse.cluster.id'] = self.cluster_id
        tags['cse.version'] = pkg_resources.require(
            'container-service-extension')[0].version
        tags['cse.template'] = template['name']
        vapp = VApp(self.client_tenant, href=vapp_resource.get('href'))
        for k, v in tags.items():
            t = vapp.set_metadata('GENERAL', 'READWRITE', k, v)
            self.client_tenant.get_task_monitor().\
                wait_for_status(
                    task=t,
                    timeout=600,
                    poll_frequency=5,
                    fail_on_status=None,
                    expected_target_statuses=[TaskStatus.SUCCESS],
                    callback=None)
        # Master node creation, then cluster initialization on it.
        self.update_task(TaskStatus.RUNNING,
                         self.op,
                         message='Creating master node for %s(%s)' %
                         (self.cluster_name, self.cluster_id))
        vapp.reload()
        add_nodes(1, template, TYPE_MASTER, self.config,
                  self.client_tenant, org, vdc, vapp, self.body, wait=True)
        self.update_task(TaskStatus.RUNNING,
                         self.op,
                         message='Initializing cluster %s(%s)' %
                         (self.cluster_name, self.cluster_id))
        vapp.reload()
        init_cluster(self.config, vapp, template)
        # Record the master IP so worker nodes can join.
        master_ip = get_master_ip(self.config, vapp, template)
        t = vapp.set_metadata('GENERAL', 'READWRITE', 'cse.master.ip',
                              master_ip)
        self.client_tenant.get_task_monitor().\
            wait_for_status(
                task=t,
                timeout=600,
                poll_frequency=5,
                fail_on_status=None,
                expected_target_statuses=[TaskStatus.SUCCESS],
                callback=None)
        # Optional worker nodes: create then join.
        if self.body['node_count'] > 0:
            self.update_task(TaskStatus.RUNNING,
                             self.op,
                             message='Creating %s node(s) for %s(%s)' %
                             (self.body['node_count'], self.cluster_name,
                              self.cluster_id))
            add_nodes(self.body['node_count'], template, TYPE_NODE,
                      self.config, self.client_tenant, org, vdc, vapp,
                      self.body, wait=True)
            self.update_task(TaskStatus.RUNNING,
                             self.op,
                             message='Adding %s node(s) to %s(%s)' %
                             (self.body['node_count'], self.cluster_name,
                              self.cluster_id))
            vapp.reload()
            join_cluster(self.config, vapp, template)
        self.update_task(TaskStatus.SUCCESS,
                         self.op,
                         message='Created cluster %s(%s)' %
                         (self.cluster_name, self.cluster_id))
    except Exception as e:
        # Best-effort: record the failure on the task, do not re-raise.
        LOGGER.error(traceback.format_exc())
        self.update_task(TaskStatus.ERROR,
                         self.op,
                         error_message=str(e))
def add_nodes(client, num_nodes, node_type, org, vdc, vapp, catalog_name,
              template, network_name, num_cpu=None, memory_in_mb=None,
              storage_profile=None, ssh_key=None):
    """Clone VMs from the template catalog item into the cluster vApp.

    Same flow as the sibling add_nodes variant, but resolves the catalog
    item href through a temporary sys admin client (see DEV NOTE below).
    VMs are added powered off, resized, powered on one by one; NFS nodes
    additionally get the NFS server script executed.

    :return: dict with 'task' (last task) and 'specs', or None if
        num_nodes < 1.
    :raises NodeCreationError: on any failure during provisioning.
    """
    specs = []
    try:
        if num_nodes < 1:
            return None
        # DEV NOTE: With api v33.0 and onwards, get_catalog operation will fail
        # for non admin users of an an org which is not hosting the catalog,
        # even if the catalog is explicitly shared with the org in question.
        # This happens because for api v 33.0 and onwards, the Org XML no
        # longer returns the href to catalogs accessible to the org, and typed
        # queries hide the catalog link from non admin users.
        # As a workaround, we will use a sys admin client to get the href and
        # pass it forward. Do note that the catalog itself can still be
        # accessed by these non admin users, just that they can't find by the
        # href on their own.
        sys_admin_client = None
        try:
            sys_admin_client = vcd_utils.get_sys_admin_client()
            org_name = org.get_name()
            org_resource = sys_admin_client.get_org_by_name(org_name)
            org_sa = Org(sys_admin_client, resource=org_resource)
            catalog_item = org_sa.get_catalog_item(
                catalog_name, template[LocalTemplateKey.CATALOG_ITEM_NAME])
            catalog_item_href = catalog_item.Entity.get('href')
        finally:
            # Always release the borrowed sys admin session.
            if sys_admin_client:
                sys_admin_client.logout()

        # The first VM of the catalog item's vApp is the clone source.
        source_vapp = VApp(client, href=catalog_item_href)
        source_vm = source_vapp.get_all_vms()[0].get('name')
        if storage_profile is not None:
            storage_profile = vdc.get_storage_profile(storage_profile)

        # Optional post-customization script installing the SSH key.
        cust_script = None
        if ssh_key is not None:
            cust_script = \
                "#!/usr/bin/env bash\n" \
                "if [ x$1=x\"postcustomization\" ];\n" \
                "then\n" \
                "mkdir -p /root/.ssh\n" \
                f"echo '{ssh_key}' >> /root/.ssh/authorized_keys\n" \
                "chmod -R go-rwx /root/.ssh\n" \
                "fi"
        for n in range(num_nodes):
            # Pick a random node name not already used in the vApp
            # (get_vm raising means the name is free).
            name = None
            while True:
                name = f"{node_type}-{''.join(random.choices(string.ascii_lowercase + string.digits, k=4))}"  # noqa: E501
                try:
                    vapp.get_vm(name)
                except Exception:
                    break
            spec = {
                'source_vm_name': source_vm,
                'vapp': source_vapp.resource,
                'target_vm_name': name,
                'hostname': name,
                'password_auto': True,
                'network': network_name,
                'ip_allocation_mode': 'pool'
            }
            if cust_script is not None:
                spec['cust_script'] = cust_script
            if storage_profile is not None:
                spec['storage_profile'] = storage_profile
            specs.append(spec)
        # Add all VMs powered off so cpu/memory can be adjusted first.
        task = vapp.add_vms(specs, power_on=False)
        client.get_task_monitor().wait_for_status(task)
        vapp.reload()
        if not num_cpu:
            num_cpu = template[LocalTemplateKey.CPU]
        if not memory_in_mb:
            memory_in_mb = template[LocalTemplateKey.MEMORY]
        for spec in specs:
            vm_name = spec['target_vm_name']
            vm_resource = vapp.get_vm(vm_name)
            vm = VM(client, resource=vm_resource)
            task = vm.modify_cpu(num_cpu)
            client.get_task_monitor().wait_for_status(task)
            task = vm.modify_memory(memory_in_mb)
            client.get_task_monitor().wait_for_status(task)
            task = vm.power_on()
            client.get_task_monitor().wait_for_status(task)
            vapp.reload()
            if node_type == NodeType.NFS:
                # NFS nodes need the nfsd setup script run in the guest.
                LOGGER.debug(f"Enabling NFS server on {vm_name}")
                script_filepath = get_local_script_filepath(
                    template[LocalTemplateKey.NAME],
                    template[LocalTemplateKey.REVISION], ScriptFile.NFSD)
                script = utils.read_data_file(script_filepath, logger=LOGGER)
                exec_results = execute_script_in_nodes(
                    vapp=vapp, node_names=[vm_name], script=script)
                errors = _get_script_execution_errors(exec_results)
                if errors:
                    raise ScriptExecutionError(
                        f"VM customization script execution failed on node "
                        f"{vm_name}:{errors}")
    except Exception as e:
        # TODO: get details of the exception to determine cause of failure,
        # e.g. not enough resources available.
        node_list = [entry.get('target_vm_name') for entry in specs]
        raise NodeCreationError(node_list, str(e))
    vapp.reload()
    return {'task': task, 'specs': specs}
def __init__(self, **kwargs): super(VappVMDisk, self).__init__(**kwargs) self.org = self.get_org() vapp_resource = self.get_resource() self.vapp = VApp(self.client, resource=vapp_resource)
def add_nodes(qty, template, node_type, config, client, org, vdc, vapp,
              body, wait=True):
    """Clone *qty* node VMs from the broker catalog template into *vapp*.

    Builds a post-customization script (root password, optional SSH key),
    adds all VMs in one call, and — when cpu_count/memory overrides are
    present in *body* — adds them powered off, reconfigures hardware, then
    powers each VM on individually. When wait=True, task failures are
    turned into exceptions with the best available detail.

    :return: dict with 'task' (last task) and 'specs', or None if qty < 1.
    """
    if qty < 1:
        return None
    specs = []
    # The first VM of the catalog item's vApp is the clone source.
    catalog_item = org.get_catalog_item(config['broker']['catalog'],
                                        template['catalog_item'])
    source_vapp = VApp(client, href=catalog_item.Entity.get('href'))
    source_vm = source_vapp.get_all_vms()[0].get('name')
    storage_profile = None
    if 'storage_profile' in body and body['storage_profile'] is not None:
        storage_profile = vdc.get_storage_profile(body['storage_profile'])
    # Guest customization script, assembled from three parts: the
    # postcustomization guard, root password (+ optional SSH key), and
    # the closing 'fi'.
    cust_script_init = \
"""#!/usr/bin/env bash
if [ x$1=x"postcustomization" ];
then
""" # NOQA
    cust_script_common = \
"""
echo "root:{password}" | chpasswd
""".format(password=template['admin_password']) # NOQA
    if 'ssh_key' in body:
        cust_script_common += \
"""
mkdir -p /root/.ssh
echo '{ssh_key}' >> /root/.ssh/authorized_keys
chmod -R go-rwx /root/.ssh
""".format(ssh_key=body['ssh_key']) # NOQA
    cust_script_end = \
"""
fi
""" # NOQA
    cust_script = cust_script_init
    cust_script += cust_script_common
    cust_script += cust_script_end
    for n in range(qty):
        # Pick a random node name not already used in the vApp
        # (get_vm raising means the name is free).
        name = None
        while True:
            name = '%s-%s' % (node_type, ''.join(
                random.choices(string.ascii_lowercase + string.digits,
                               k=4)))
            try:
                vapp.get_vm(name)
            except Exception:
                break
        spec = {
            'source_vm_name': source_vm,
            'vapp': source_vapp.resource,
            'target_vm_name': name,
            'hostname': name,
            'network': body['network'],
            'ip_allocation_mode': 'pool',
            'cust_script': cust_script
        }
        if storage_profile is not None:
            spec['storage_profile'] = storage_profile
        specs.append(spec)
    # Hardware overrides require adding the VMs powered off.
    if ('cpu_count' in body and body['cpu_count'] is not None) or (
            'memory' in body and body['memory'] is not None):
        reconfigure_hw = True
    else:
        reconfigure_hw = False
    task = vapp.add_vms(specs, power_on=not reconfigure_hw)
    if wait:
        task = client.get_task_monitor().wait_for_status(
            task=task,
            timeout=600,
            poll_frequency=5,
            fail_on_status=None,
            expected_target_statuses=[
                TaskStatus.SUCCESS, TaskStatus.ABORTED, TaskStatus.ERROR,
                TaskStatus.CANCELED
            ],
            callback=None)
        if task.get('status').lower() != TaskStatus.SUCCESS.value:
            # Surface the most specific failure detail available.
            task_resource = client.get_resource(task.get('href'))
            if hasattr(task_resource, 'taskDetails'):
                raise Exception(task_resource.get('taskDetails'))
            elif hasattr(task_resource, 'Details'):
                raise Exception(task_resource.Details.text)
            else:
                raise Exception('Couldn\'t add node(s).')
    if wait and reconfigure_hw:
        # Apply cpu/memory overrides per VM, then power each one on.
        vapp.reload()
        for spec in specs:
            vm_resource = vapp.get_vm(spec['target_vm_name'])
            if 'cpu_count' in body and body['cpu_count'] is not None:
                vm = VM(client, resource=vm_resource)
                task = vm.modify_cpu(body['cpu_count'])
                task = client.get_task_monitor().wait_for_status(
                    task=task,
                    timeout=600,
                    poll_frequency=5,
                    fail_on_status=None,
                    expected_target_statuses=[
                        TaskStatus.SUCCESS, TaskStatus.ABORTED,
                        TaskStatus.ERROR, TaskStatus.CANCELED
                    ],
                    callback=None)
            if 'memory' in body and body['memory'] is not None:
                vm = VM(client, resource=vm_resource)
                task = vm.modify_memory(body['memory'])
                task = client.get_task_monitor().wait_for_status(
                    task=task,
                    timeout=600,
                    poll_frequency=5,
                    fail_on_status=None,
                    expected_target_statuses=[
                        TaskStatus.SUCCESS, TaskStatus.ABORTED,
                        TaskStatus.ERROR, TaskStatus.CANCELED
                    ],
                    callback=None)
            vm = VM(client, resource=vm_resource)
            task = vm.power_on()
            # NOTE: `wait` is always True inside this branch; the check is
            # kept as-is from the original.
            if wait:
                task = client.get_task_monitor().wait_for_status(
                    task=task,
                    timeout=600,
                    poll_frequency=5,
                    fail_on_status=None,
                    expected_target_statuses=[
                        TaskStatus.SUCCESS, TaskStatus.ABORTED,
                        TaskStatus.ERROR, TaskStatus.CANCELED
                    ],
                    callback=None)
                if task.get('status').lower() != TaskStatus.SUCCESS.value:
                    task_resource = client.get_resource(task.get('href'))
                    if hasattr(task_resource, 'taskDetails'):
                        raise Exception(task_resource.get('taskDetails'))
                    elif hasattr(task_resource, 'Details'):
                        raise Exception(task_resource.Details.text)
                    else:
                        raise Exception('Couldn\'t add node(s).')
    return {'task': task, 'specs': specs}