def set_vnc_extraconfig(content, vm, enabled, ip, port, password):
    """Apply VNC remote-display settings to *vm* through extraConfig options.

    When *enabled* is false, all ``remotedisplay.vnc.*`` keys are cleared
    (set to empty strings). The VM is only reconfigured if at least one
    value actually differs from the current configuration.

    Returns a result dict with ``changed``/``failed`` flags, an error
    ``msg`` on failure, and refreshed ``instance`` facts on success.
    """
    result = dict(changed=False, failed=False)

    prefix = "remotedisplay.vnc."
    # Start from "clear everything", then fill in real values when enabling.
    desired = {prefix + name: "" for name in ('enabled', 'ip', 'port', 'password')}
    if enabled:
        desired[prefix + "enabled"] = "true"
        desired[prefix + "password"] = str(password).strip()
        desired[prefix + "ip"] = str(ip).strip()
        desired[prefix + "port"] = str(port).strip()

    current = get_vnc_extraconfig(vm)

    def _differs(full_key, wanted):
        # current_values is keyed without the "remotedisplay.vnc." prefix.
        short_key = full_key.replace(prefix, "")
        have = current.get(short_key, "")
        # 'enabled' is compared case-insensitively ("True" == "true").
        if short_key == "enabled":
            return have.lower() != wanted.lower()
        return have != wanted

    # Nothing to do when every desired value already matches.
    if not any(_differs(key, value) for key, value in desired.items()):
        return result

    # Reconfigure the VM with the new extraConfig option values.
    spec = vim.vm.ConfigSpec()
    spec.extraConfig = []
    for key, value in desired.items():
        option = vim.option.OptionValue()
        option.key = key
        option.value = value
        spec.extraConfig.append(option)

    task = vm.ReconfigVM_Task(spec)
    try:
        wait_for_task(task)
    except TaskError as task_err:
        result['failed'] = True
        result['msg'] = to_native(task_err)
    if task.info.state == 'error':
        result['failed'] = True
        result['msg'] = task.info.error.msg
    else:
        result['changed'] = True
        result['instance'] = gather_vm_facts(content, vm)
    return result
def deploy(self):
    """Power on the deployed VM if requested and return facts about it.

    Optionally waits for the guest to report an IP address, failing the
    module on timeout. Falls back to a plain facts gather when powering
    on produced no facts.
    """
    facts = {}
    if self.params['power_on']:
        facts = set_vm_power_state(self.content, self.entity, 'poweredon', force=False)
        if self.params['wait_for_ip_address']:
            ip_facts = wait_for_vm_ip(self.content, self.entity)
            if not ip_facts:
                self.module.fail_json(msg='Waiting for IP address timed out')
    # Nothing collected above (e.g. power_on not requested): gather directly.
    if not facts:
        facts.update(gather_vm_facts(self.content, self.entity))
    return facts
def get_new_vm_info(self, vm):
    """Verify the cloned VM exists in the destination vCenter and summarize it.

    Looks the VM up by name in the destination vCenter, fails the module
    when it is absent, and otherwise returns a dict describing the VM's
    placement (host, datastore, folder) and power status.
    """
    info = {}
    vm_obj = find_vm_by_name(content=self.destination_content, vm_name=vm)
    if vm_obj is None:
        self.module.fail_json(msg="Newly cloned VM is not found in the destination VCenter")
    else:
        vm_facts = gather_vm_facts(self.destination_content, vm_obj)
        info = {
            'vm_name': vm,
            'vcenter': self.destination_vcenter,
            'host': vm_facts['hw_esxi_host'],
            'datastore': vm_facts['hw_datastores'],
            'vm_folder': vm_facts['hw_folder'],
            'power_on': vm_facts['hw_power_status'],
        }
    return info
def deploy(self):
    """Finalize the OVF deployment: inject the OVF environment, power on.

    Honors the ``inject_ovf_env``, ``power_on``, ``wait`` and
    ``wait_for_ip_address`` module params. Returns facts about the VM,
    gathering them explicitly when nothing was collected while waiting.
    """
    facts = {}
    if self.params['inject_ovf_env']:
        self.inject_ovf_env()
    if self.params['power_on']:
        power_task = self.entity.PowerOn()
        if self.params['wait']:
            wait_for_task(power_task)
            if self.params['wait_for_ip_address']:
                ip_facts = wait_for_vm_ip(self.content, self.entity)
                if not ip_facts:
                    self.module.fail_json(
                        msg='Waiting for IP address timed out')
                facts.update(ip_facts)
    # No facts collected above: gather them directly.
    if not facts:
        facts.update(gather_vm_facts(self.content, self.entity))
    return facts
def get_lease(self):
    """Build an OVF import spec and obtain an HTTP NFC lease for the import.

    Resolves placement objects, assembles CreateImportSpecParams from the
    module params, validates the resulting import spec (failing on errors
    and, when ``fail_on_spec_warnings`` is set, on warnings too), handles
    the duplicate-name and check-mode short circuits, then starts the
    ImportVApp operation and waits for the lease to become ready.

    Returns a tuple ``(lease, import_spec)``. May exit the module early
    via ``fail_json``/``exit_json``.
    """
    datastore, datacenter, resource_pool, network_mappings = self.get_objects()
    params = {
        'diskProvisioning': self.params['disk_provisioning'],
    }
    if self.params['name']:
        params['entityName'] = self.params['name']
    if network_mappings:
        params['networkMapping'] = network_mappings
    if self.params['deployment_option']:
        params['deploymentOption'] = self.params['deployment_option']
    if self.params['properties']:
        params['propertyMapping'] = []
        for key, value in self.params['properties'].items():
            property_mapping = vim.KeyValue()
            property_mapping.key = key
            # Booleans are stringified; other values are passed through as-is.
            property_mapping.value = str(value) if isinstance(value, bool) else value
            params['propertyMapping'].append(property_mapping)
    if self.params['folder']:
        folder = self.content.searchIndex.FindByInventoryPath(self.params['folder'])
        if not folder:
            self.module.fail_json(msg="Unable to find the specified folder %(folder)s" % self.params)
    else:
        # Default placement: the datacenter's VM folder.
        folder = datacenter.vmFolder
    spec_params = vim.OvfManager.CreateImportSpecParams(**params)
    ovf_descriptor = self.get_ovf_descriptor()
    self.import_spec = self.content.ovfManager.CreateImportSpec(
        ovf_descriptor,
        resource_pool,
        datastore,
        spec_params
    )
    errors = [to_native(e.msg) for e in getattr(self.import_spec, 'error', [])]
    if self.params['fail_on_spec_warnings']:
        # Treat validation warnings as fatal when requested.
        errors.extend(
            (to_native(w.msg) for w in getattr(self.import_spec, 'warning', []))
        )
    if errors:
        self.module.fail_json(
            msg='Failure validating OVF import spec: %s' % '. '.join(errors)
        )
    for warning in getattr(self.import_spec, 'warning', []):
        self.module.warn('Problem validating OVF import spec: %s' % to_native(warning.msg))
    name = self.params.get('name')
    if not self.params['allow_duplicates']:
        # Use the name from the generated spec for the duplicate check.
        name = self.import_spec.importSpec.configSpec.name
        match = find_vm_by_name(self.content, name, folder=folder)
        if match:
            # A VM with that name already exists: report unchanged and stop.
            self.module.exit_json(instance=gather_vm_facts(self.content, match), changed=False)
    if self.module.check_mode:
        self.module.exit_json(changed=True, instance={'hw_name': name})
    try:
        self.lease = resource_pool.ImportVApp(
            self.import_spec.importSpec,
            folder
        )
    except vmodl.fault.SystemError as e:
        self.module.fail_json(
            msg='Failed to start import: %s' % to_native(e.msg)
        )
    # Poll until vSphere marks the NFC lease ready for upload.
    while self.lease.state != vim.HttpNfcLease.State.ready:
        time.sleep(0.1)
    self.entity = self.lease.info.entity
    return self.lease, self.import_spec
def gather_facts(self, vm):
    """Collect and return facts for *vm* using the cached service content."""
    vm_facts = gather_vm_facts(self.content, vm)
    return vm_facts
def main():
    """Entry point: manage the power state of a virtual machine.

    Depending on the params this either changes the power state right
    away, schedules the change as a vCenter scheduled task
    (``scheduled_at``), or answers a pending VM question, then exits the
    module with the accumulated result.
    """
    import time  # used only for the answer-question polling loop below

    argument_spec = vmware_argument_spec()
    argument_spec.update(
        state=dict(type='str', default='present',
                   choices=['present', 'powered-off', 'powered-on', 'reboot-guest',
                            'restarted', 'shutdown-guest', 'suspended']),
        name=dict(type='str'),
        name_match=dict(type='str', choices=['first', 'last'], default='first'),
        uuid=dict(type='str'),
        moid=dict(type='str'),
        use_instance_uuid=dict(type='bool', default=False),
        folder=dict(type='str'),
        force=dict(type='bool', default=False),
        scheduled_at=dict(type='str'),
        schedule_task_name=dict(),
        schedule_task_description=dict(),
        schedule_task_enabled=dict(type='bool', default=True),
        state_change_timeout=dict(type='int', default=0),
        answer=dict(type='list', elements='dict',
                    options=dict(
                        question=dict(type='str', required=True),
                        response=dict(type='str', required=True))))
    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=False,
        mutually_exclusive=[['name', 'uuid', 'moid'], ['scheduled_at', 'answer']],
    )

    result = dict(changed=False)

    pyv = PyVmomi(module)
    # Check if the VM exists before continuing.
    vm = pyv.get_vm()
    if not vm:
        # Guard clause: nothing to do for a non-existing VM. Avoid shadowing
        # the builtin `id` while building the identifier for the message.
        vm_id = module.params.get('uuid') or module.params.get('moid') or module.params.get('name')
        module.fail_json(
            msg="Unable to set power state for non-existing virtual machine : '%s'" % vm_id)

    scheduled_at = module.params.get('scheduled_at', None)
    if scheduled_at:
        # Scheduled tasks exist only on vCenter, not on a standalone ESXi host.
        if not pyv.is_vcenter():
            module.fail_json(
                msg="Scheduling task requires vCenter, hostname %s "
                    "is an ESXi server." % module.params.get('hostname'))
        # Map module states to the vSphere method invoked by the scheduled task.
        powerstate = {
            'present': vim.VirtualMachine.PowerOn,
            'powered-off': vim.VirtualMachine.PowerOff,
            'powered-on': vim.VirtualMachine.PowerOn,
            'reboot-guest': vim.VirtualMachine.RebootGuest,
            'restarted': vim.VirtualMachine.Reset,
            'shutdown-guest': vim.VirtualMachine.ShutdownGuest,
            'suspended': vim.VirtualMachine.Suspend,
        }
        dt = None
        try:
            dt = datetime.strptime(scheduled_at, '%d/%m/%Y %H:%M')
        except ValueError as e:
            # Fixed: the two concatenated fragments previously ran together
            # ("object,please") because the space was missing.
            module.fail_json(
                msg="Failed to convert given date and time string to Python datetime object, "
                    "please specify string in 'dd/mm/yyyy hh:mm' format: %s" % to_native(e))
        schedule_task_spec = vim.scheduler.ScheduledTaskSpec()
        schedule_task_name = module.params['schedule_task_name'] or 'task_%s' % str(randint(10000, 99999))
        schedule_task_desc = module.params['schedule_task_description']
        if schedule_task_desc is None:
            schedule_task_desc = 'Schedule task for vm %s for ' \
                                 'operation %s at %s' % (vm.name, module.params['state'], scheduled_at)
        schedule_task_spec.name = schedule_task_name
        schedule_task_spec.description = schedule_task_desc
        schedule_task_spec.scheduler = vim.scheduler.OnceTaskScheduler()
        schedule_task_spec.scheduler.runAt = dt
        schedule_task_spec.action = vim.action.MethodAction()
        schedule_task_spec.action.name = powerstate[module.params['state']]
        schedule_task_spec.enabled = module.params['schedule_task_enabled']
        try:
            pyv.content.scheduledTaskManager.CreateScheduledTask(vm, schedule_task_spec)
            # Creation is async: the task is registered, so report changed.
            module.exit_json(changed=True)
        except vim.fault.InvalidName as e:
            module.fail_json(
                msg="Failed to create scheduled task %s for %s : %s" % (
                    module.params.get('state'), vm.name, to_native(e.msg)))
        except vim.fault.DuplicateName as e:
            # An identical task already exists: idempotent no-op.
            module.exit_json(changed=False, details=to_native(e.msg))
        except vmodl.fault.InvalidArgument as e:
            module.fail_json(
                msg="Failed to create scheduled task %s as specifications "
                    "given are invalid: %s" % (module.params.get('state'), to_native(e.msg)))
    else:
        # Check if the virtual machine is locked by a pending question.
        if check_answer_question_status(vm) and module.params['answer']:
            try:
                responses = make_answer_response(vm, module.params['answer'])
                answer_question(vm, responses)
            except Exception as e:
                module.fail_json(msg="%s" % e)
            # Wait until the VM is unlocked; sleep between polls instead of
            # the previous hot busy-wait loop.
            while check_answer_question_status(vm) is not False:
                time.sleep(1)
            result['changed'] = True
            result['instance'] = gather_vm_facts(pyv.content, vm)
        else:
            result = set_vm_power_state(
                pyv.content, vm, module.params['state'],
                module.params['force'],
                module.params['state_change_timeout'],
                module.params['answer'])
            result['answer'] = module.params['answer']

    if result.get('failed') is True:
        module.fail_json(**result)
    module.exit_json(**result)