Example No. 1
 def enter_maintenance_mode(host, si):
     try:
         task = host['obj'].EnterMaintenanceMode_Task(
             timeout=300, evacuatePoweredOffVms=False)
         VMwareUtils.wait_for_task(task, si)
     except Exception as e:
         LOG.exception("Caught exception %s" % e)
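The VMwareUtils.wait_for_task helper used throughout these examples is defined elsewhere in the project. A minimal polling sketch of such a helper, assuming only the standard pyVmomi task interface (task.info.state and task.info.error), might look like this; the si argument is kept solely to match the call sites above:

import time

from pyVmomi import vim


def wait_for_task(task, si, poll_interval=2):
    # Hypothetical stand-in for VMwareUtils.wait_for_task: poll until the
    # vCenter task leaves the queued/running states, then raise on failure.
    while task.info.state in (vim.TaskInfo.State.queued,
                              vim.TaskInfo.State.running):
        time.sleep(poll_interval)
    if task.info.state == vim.TaskInfo.State.error:
        raise task.info.error  # pyVmomi faults are Exception subclasses
    return task.info.result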
Example No. 2
 def configure_network_infrastructure(self):
     """
     Wrapper method to configure the DVS and port groups
     """
     cluster = VMwareUtils.get_cluster(self.content, self.dc['hostFolder'],
                                       self.vc['cluster_moid'])
     all_hosts = VMwareUtils.get_all_hosts(self.content, cluster)
     self.network_adapter.configure_dvs_portgroup(self.input_json,
                                                  all_hosts)
Example No. 3
 def __init__(self, input_json):
     self.input_json = input_json
     vc = self.input_json.get('vcenter_configuration')
     self.si = VMwareUtils.get_vcenter_session(vc['ip_address'], vc['port'],
                                               vc['username'],
                                               vc['password'])
     self.content = self.si.RetrieveContent()
     self.dc = VMwareUtils.get_data_center(self.content, vc['datacenter'])
     self.vc = vc
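VMwareUtils.get_vcenter_session is also defined outside these snippets; it presumably wraps pyVmomi's SmartConnect. A rough, hypothetical equivalent (parameter handling assumed, not taken from the project) could be:

import ssl

from pyVim.connect import SmartConnect


def get_vcenter_session(ip_address, port, username, password):
    # Hypothetical sketch: connect to vCenter and return a ServiceInstance.
    # Certificate verification is disabled here only because many lab
    # vCenters use self-signed certificates; adjust to your environment.
    context = ssl._create_unverified_context()
    return SmartConnect(host=ip_address, port=int(port),
                        user=username, pwd=password, sslContext=context)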
Example No. 4
 def destroy_failed_commissioned_vapps(host, si):
     vms = host['obj'].vm
     LOG.info("Destroying OVSvApps to enter maintenance mode")
     for vm in vms:
         if const.OVS_VAPP_IDENTIFIER in vm.config.annotation:
             if vm.runtime.powerState != 'poweredOff':
                 shutdown_task = vm.PowerOff()
                 VMwareUtils.wait_for_task(shutdown_task, si)
                 destroy_task = vm.Destroy()
                 VMwareUtils.wait_for_task(destroy_task, si)
Example No. 5
 def move_host_back_to_cluster(si, host, cluster, prep_folder, err=False):
     if err:
         OVSvAppUtil.destroy_failed_commissioned_vapps(host, si)
         OVSvAppUtil.enter_maintenance_mode(host, si)
     prep_folder_name = prep_folder.name
     move_task = cluster['obj'].MoveInto_Task([host['obj']])
     VMwareUtils.wait_for_task(move_task, si)
     LOG.debug("Successfully moved host '%s' to cluster '%s'" %
               (host['name'], cluster['name']))
     if not prep_folder.childEntity:
         del_task = prep_folder.Destroy_Task()
         VMwareUtils.wait_for_task(del_task, si)
         LOG.debug("Deleted folder '%s'" % prep_folder_name)
Example No. 6
 def move_hosts_in_to_folder(si, hosts):
     host_map = {}
     for host in hosts:
         folder = host['folder']
         host_map.setdefault(folder, [])
         host_map[folder].append(host)
     for folder in host_map.keys():
         host_objs = [host['obj'] for host in host_map.get(folder)]
         task = folder.MoveIntoFolder_Task(host_objs)
         VMwareUtils.wait_for_task(task, si)
         LOG.debug("Successfully moved the hosts in folder '%s'" %
                   folder.name)
     for host in hosts:
         task = host['obj'].ExitMaintenanceMode_Task(timeout=1800)
         VMwareUtils.wait_for_task(task, si)
         LOG.debug("Host '%s' is out of maintenance mode" % host['name'])
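move_hosts_in_to_folder expects host dicts that carry the managed object ('obj'), its display 'name', and the destination 'folder'. Assuming that layout (it is implied by the code above, not documented), such entries could be assembled like this:

def build_host_entries(host_objs, target_folder):
    # Hypothetical helper: shape raw vim.HostSystem objects into the dicts
    # the snippets above index with 'obj', 'name' and 'folder'.
    return [{'obj': host, 'name': host.name, 'folder': target_folder}
            for host in host_objs]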
Example No. 7
 def get_ovsvapps(content, vm_folder, cluster):
     ovsvapps = dict()
     all_vms = VMwareUtils.get_all_vms(content, cluster['resourcePool'])
     for vm in all_vms:
         if vm['name'].startswith(const.OVS_VAPP_PREFIX):
             if const.OVS_VAPP_IDENTIFIER in vm['config.annotation']:
                 ovsvapps[vm['runtime.host']] = vm
     return ovsvapps
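VMwareUtils.get_all_vms apparently returns one dict per VM, keyed by vSphere property paths such as 'name', 'config.annotation' and 'runtime.host'. The real helper most likely uses a PropertyCollector for efficiency; a simpler (slower) container-view sketch that produces the same shape, under that assumption, is:

from pyVmomi import vim


def get_all_vms(content, resource_pool):
    # Hypothetical sketch: enumerate VMs under a resource pool and expose
    # the properties that get_ovsvapps reads.
    view = content.viewManager.CreateContainerView(
        resource_pool, [vim.VirtualMachine], True)
    try:
        return [{'obj': vm,
                 'name': vm.name,
                 'config.annotation': vm.config.annotation if vm.config else '',
                 'runtime.host': vm.runtime.host}
                for vm in view.view]
    finally:
        view.DestroyView()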
Example No. 8
def move_host_back_to_cluster(inputs):
    host_name = inputs.get('host_name')
    err = inputs.get('status') == constants.HOST_COMM_FAILURE
    si = VMwareUtils.get_vcenter_session(inputs['vcenter_host'],
                                         inputs['vcenter_https_port'],
                                         inputs['vcenter_username'],
                                         inputs['vcenter_password'])

    host_view = VMwareUtils.get_view_ref(si.content, si.content.rootFolder,
                                         [vim.HostSystem])
    host_prop = ['name', 'vm']
    host_refs = VMwareUtils.collect_properties(si.content, host_view,
                                               vim.HostSystem, host_prop, True)
    host = None
    for host_ref in host_refs:
        if host_ref['name'] == host_name:
            host = host_ref
            break
    if not host:
        raise OVSvAppException(
            "Couldn't find the commissioned host '{}'".format(host_name))
    datacenter = OVSvAppUtil.get_host_parent(host['obj'], vim.Datacenter)
    prep_folder = OVSvAppUtil.get_host_parent(host['obj'], vim.Folder)
    if prep_folder.name != 'host':
        cluster_id = prep_folder.name
        cluster = VMwareUtils.get_cluster(si.content, datacenter.hostFolder,
                                          cluster_id)
        if not cluster:
            raise OVSvAppException(
                _("Couldn't find the Cluster from the prep "
                  "folder name !"))
        OVSvAppUtil.move_host_back_to_cluster(si, host, cluster, prep_folder,
                                              err)
        if not err:
            vm_obj = get_ovsvapp_from_host(host)
            OVSvAppUtil.disable_ha_on_ovsvapp(si, vm_obj, cluster, host)
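OVSvAppUtil.get_host_parent is used here to find both the owning datacenter and the 'prep' folder of a host. A plausible sketch, assuming it simply walks the inventory tree upwards until it meets the requested managed object type, is:

from pyVmomi import vim


def get_host_parent(entity, parent_type):
    # Hypothetical sketch: climb entity.parent links until an object of
    # parent_type (e.g. vim.Folder or vim.Datacenter) is reached.
    parent = entity.parent
    while parent is not None and not isinstance(parent, parent_type):
        parent = parent.parent
    return parent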
Example No. 9
 def invoke_ovsvapp_installer(self):
     try:
         ValidateInputs(self.input_json).validate_inputs()
         is_new_hosts = False
         ovsvapp_result = dict()
         cluster = VMwareUtils.get_cluster(self.content,
                                           self.dc['hostFolder'],
                                           self.vc['cluster_moid'])
         vapps = OVSvAppUtil.get_ovsvapps(self.content, self.dc['vmFolder'],
                                          cluster)
         vapp_hosts = vapps.keys()
         if not (len(cluster['host']) == len(vapp_hosts)):
             if vapp_hosts:
                 is_new_hosts = True
                 LOG.info("Found existing OVSvApps in cluster. OVSvApp "
                          "installer will commission the new hosts.")
             else:
                 LOG.info("No existing OVSvApps found in cluster. OVSvApp "
                          "installer will proceed for fresh activation.")
             session = dict()
             session['si'] = self.si
             session['content'] = self.content
             ovsvapp_result = VappInstaller(self.input_json).run_installer(
                 session, self.dc, cluster, vapp_hosts, is_new_hosts)
             if not ovsvapp_result:
                 raise exception.OVSvAppException(
                     _("Couldn't find any valid host for OVSvApp "
                       "installation"))
         return ovsvapp_result
     except exception.OVSvAppException as ex:
         if not is_new_hosts:
             LOG.info("Invoking cleanup script due to the error that "
                      "occurred previously.")
             Cleanup(self.input_json).unimport_cluster()
         raise ex
     except exception.OVSvAppValidationError as ev:
         LOG.exception(ev)
         raise ev
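Read together with Examples No. 2 and No. 3, __init__, configure_network_infrastructure and invoke_ovsvapp_installer appear to be methods of one installer class. A hypothetical driver (the class name OVSvAppInstallerRunner and the input file name are placeholders, not taken from the project) might look like:

import json

with open('ovsvapp_input.json') as f:
    input_json = json.load(f)

runner = OVSvAppInstallerRunner(input_json)  # placeholder class name
runner.configure_network_infrastructure()
result = runner.invoke_ovsvapp_installer()
print(result)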
Example No. 10
 def get_active_hosts(content, vm_folder, vapp_hosts, cluster):
     """
     Collect the active hosts in the cluster, skipping any host that
     already runs an OVSvApp.
     @return: List of active hosts.
     """
     hosts = []
     all_hosts = VMwareUtils.get_all_hosts(content, cluster)
     host_power_state = {'poweredOn': 'poweredOn',
                         'poweredOff': 'poweredOff',
                         'standBy': 'standBy',
                         'unknown': 'unknown',
                         'maintenanceMode': 'Maintenance Mode'}
     for host in all_hosts:
         if host['obj'] in vapp_hosts:
             continue
         host['cluster'] = cluster
         host_name = host['name']
         power_state = host['summary.runtime.powerState']
         maintenance_mode = host['summary.runtime.inMaintenanceMode']
         host_api_version = host['config.product.apiVersion']
         is_new_host = False
         if vapp_hosts:
             is_new_host = True
             if not maintenance_mode:
                 LOG.error("Cannot add host '%s' which is not in "
                           "maintenance mode" % host['name'])
                 continue
             else:
                 maintenance_mode = False
         if power_state == 'poweredOn' and not maintenance_mode:
             if host_api_version >= const.MIN_SUPPORTED_VERSION:
                 shared_storage = VMwareUtils.get_shared_datastore(
                     host, is_new_host)
                 if shared_storage:
                     host['shared_storage'] = shared_storage
                     hosts.append(host)
                 else:
                     msg = get_status(322, status='failed',
                                      host=host['name'])
                     LOG.error(msg)
                     raise exception.OVSvAppException(msg)
             else:
                 LOG.error("Esxi host '%s' version is '%s' which is"
                           " not supported. Minimum supported "
                           "version is '%s' . Excluding this host "
                           "from OVSvApp installation" %
                           (host_name, host_api_version,
                            const.MIN_SUPPORTED_VERSION))
                 msg = get_status(309, status='failed',
                                  host=host['name'])
                 raise exception.OVSvAppException(msg)
         else:
             if power_state == 'poweredOn':
                 if maintenance_mode:
                     state = host_power_state['maintenanceMode']
             else:
                 state = host_power_state[power_state]
             LOG.warn("Esxi host '%s' is in '%s' state. Excluding this "
                      "host from OVSvApp installation" %
                      (host_name, state))
     return hosts
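get_active_hosts reads vSphere property paths ('summary.runtime.powerState', 'summary.runtime.inMaintenanceMode', 'config.product.apiVersion') from the host dicts built by VMwareUtils.get_all_hosts. Given only a raw vim.HostSystem object, the same values can be read directly from the managed object, as in this sketch:

def summarize_host(host_obj):
    # Read, straight from a vim.HostSystem, the properties that
    # get_active_hosts inspects.
    runtime = host_obj.summary.runtime
    return {'obj': host_obj,
            'name': host_obj.name,
            'summary.runtime.powerState': runtime.powerState,
            'summary.runtime.inMaintenanceMode': runtime.inMaintenanceMode,
            'config.product.apiVersion': host_obj.config.product.apiVersion}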
Example No. 11
 def disable_ha_on_ovsvapp(si, vm, cluster, host):
     try:
         ovs_vm_name = vm.name
         cluster_spec_ex = vim.cluster.ConfigSpecEx()
         if cluster['configuration.dasConfig.enabled']:
             settings = []
             config_spec = vim.cluster.DasVmConfigSpec()
             config_spec.operation = \
                 vim.option.ArrayUpdateSpec.Operation.add
             config_info = vim.cluster.DasVmConfigInfo()
             config_info.key = vm
             config_info.powerOffOnIsolation = False
             config_info.restartPriority = \
                 vim.cluster.DasVmConfigInfo.Priority.disabled
             vm_settings = vim.cluster.DasVmSettings()
             vm_settings.restartPriority = \
                 vim.cluster.DasVmSettings.RestartPriority.disabled
             monitor = vim.cluster.VmToolsMonitoringSettings()
             monitor.vmMonitoring = \
                 vim.cluster.DasConfigInfo.VmMonitoringState. \
                 vmMonitoringDisabled
             monitor.clusterSettings = False
             vm_settings.vmToolsMonitoringSettings = monitor
             config_info.dasSettings = vm_settings
             config_spec.info = config_info
             settings.append(config_spec)
             cluster_spec_ex.dasVmConfigSpec = settings
         else:
             LOG.warn("HA is not enabled on cluster %s . Couldn't disable "
                      "HA for %s" % (cluster['name'], ovs_vm_name))
         if cluster['configuration.drsConfig.enabled']:
             drs_config_spec = vim.cluster.DrsVmConfigSpec()
             drs_config_spec.operation = \
                 vim.option.ArrayUpdateSpec.Operation.add
             drs_vm_config_info = vim.cluster.DrsVmConfigInfo()
             drs_vm_config_info.key = vm
             drs_vm_config_info.enabled = False
             drs_vm_config_info.behavior = \
                 vim.cluster.DrsConfigInfo.DrsBehavior.manual
             drs_config_spec.info = drs_vm_config_info
             cluster_spec_ex.drsVmConfigSpec = [drs_config_spec]
             host_group_spec = vim.cluster.GroupSpec()
             host_group = vim.cluster.HostGroup()
             vm_group_spec = vim.cluster.GroupSpec()
             vm_group = vim.cluster.VmGroup()
             host_group_spec.operation = 'add'
             host_group.host = [host['obj']]
             vm_group_spec.operation = 'add'
             vm_group.vm = [vm]
             host_group.name = host['name']
             host_group_spec.info = host_group
             vm_group.name = ovs_vm_name
             vm_group_spec.info = vm_group
             cluster_spec_ex.groupSpec = [host_group_spec, vm_group_spec]
             rules_spec = vim.cluster.RuleSpec()
             rules_spec.operation = 'add'
             host_vm_info = vim.cluster.VmHostRuleInfo()
             host_vm_info.affineHostGroupName = host['name']
             host_vm_info.vmGroupName = ovs_vm_name
             host_vm_info.enabled = True
             host_vm_info.mandatory = True
             host_vm_info.name = ovs_vm_name
             rules_spec.info = host_vm_info
             cluster_spec_ex.rulesSpec = [rules_spec]
         else:
             LOG.warn("DRS is not enabled on cluster %s . Couldn't"
                      " disable DRS for %s" % (cluster['name'],
                                               ovs_vm_name))
         LOG.info("Disabling HA & DRS for %s" % ovs_vm_name)
         task = cluster['obj'].ReconfigureComputeResource_Task(
             cluster_spec_ex, True)
         VMwareUtils.wait_for_task(task, si)
         LOG.info("Successfully disabled HA & DRS for %s" % ovs_vm_name)
     except vmodl.MethodFault as e:
         # Vmware related exception
         msg = e.msg
         if msg.startswith("The setting of vmConfig is invalid"):
             LOG.warn("Couldn't disable HA & DRS for %s" % ovs_vm_name)
             LOG.warn("Please turn off and turn on HA & DRS from Cluster "
                      "settings.")
         else:
             LOG.error("Caught VMware API fault: %s" % e.msg)
             return
     except Exception as e:
         # Unknown Exception
         LOG.error("Caught exception: %s" % e)
         return
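Note that ReconfigureComputeResource_Task is invoked with modify=True, so the partial ClusterConfigSpecEx above is applied incrementally: only the listed DAS/DRS VM overrides, groups, and VM-to-host rule are added, and the rest of the cluster configuration is left untouched. With modify=False the cluster configuration would instead be replaced to match the spec exactly.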