def vm_run(prefix):
    """Start VM0 on the alphabetically-first host, configured via cloud-init
    with a fixed domain and hostname, then wait until it reports 'up'."""
    engine = prefix.virt_env.engine_vm()
    api = engine.get_api()
    # Pin to a deterministic host so repeated runs behave identically.
    first_host = sorted(h.name() for h in prefix.virt_env.host_vms())[0]
    placement = params.VmPlacementPolicy(host=params.Host(name=first_host))
    initialization = params.Initialization(
        domain=params.Domain(name='lago.example.com'),
        cloud_init=params.CloudInit(host=params.Host(address='VM0')),
    )
    start_params = params.Action(
        use_cloud_init=True,
        vm=params.VM(placement_policy=placement, initialization=initialization),
    )
    api.vms.get(VM0_NAME).start(start_params)
    testlib.assert_true_within_long(
        lambda: api.vms.get(VM0_NAME).status.state == 'up',
    )
def vm_run(prefix):
    """Start VM0 via cloud-init with root credentials and a static network
    configuration on eth0, then wait (short timeout) for it to be 'up'.

    Fixes over the previous revision:
      * the static address had a trailing dot ('192.168.1.2.'), which is
        not a valid IPv4 address;
      * ``on_boot`` was the string 'True'; sibling functions in this file
        pass the boolean ``True``.
    """
    api = prefix.virt_env.engine_vm().get_api()
    host_names = [h.name() for h in prefix.virt_env.host_vms()]
    start_params = params.Action(
        use_cloud_init=True,
        vm=params.VM(
            # Deterministic placement: first host in sorted order.
            placement_policy=params.VmPlacementPolicy(
                host=params.Host(name=sorted(host_names)[0]),
            ),
            initialization=params.Initialization(
                domain=params.Domain(name='lago.example.com'),
                cloud_init=params.CloudInit(
                    host=params.Host(address='VM0'),
                    users=params.Users(
                        active=True,
                        user=[params.User(
                            user_name='root',
                            password='******'
                        )],
                    ),
                    network_configuration=params.NetworkConfiguration(
                        nics=params.Nics(nic=[
                            params.NIC(
                                name='eth0',
                                boot_protocol='STATIC',
                                on_boot=True,  # was the string 'True'
                                network=params.Network(
                                    ip=params.IP(
                                        address='192.168.1.2',  # was '192.168.1.2.'
                                        netmask='255.255.255.0',
                                        gateway='192.168.1.1',
                                    ),
                                ),
                            )
                        ]),
                    ),
                ),
            ),
        ),
    )
    api.vms.get(VM0_NAME).start(start_params)
    testlib.assert_true_within_short(
        lambda: api.vms.get(VM0_NAME).status.state == 'up',
    )
def run_vms(prefix):
    """Start VM0, BACKUP_VM and VM2 via cloud-init, pinned to the first
    host (sorted by name), giving the guest a static IP on the engine's
    /24 subnet (x.y.z.199, gateway x.y.z.1)."""
    engine = prefix.virt_env.engine_vm()
    api = engine.get_api()
    # Derive guest address and gateway from the engine's own subnet.
    vm_ip = '.'.join(engine.ip().split('.')[0:3] + ['199'])
    vm_gw = '.'.join(engine.ip().split('.')[0:3] + ['1'])
    host_names = [h.name() for h in prefix.virt_env.host_vms()]
    start_params = params.Action(
        use_cloud_init=True,
        vm=params.VM(
            placement_policy=params.VmPlacementPolicy(
                host=params.Host(name=sorted(host_names)[0]),
            ),
            initialization=params.Initialization(
                domain=params.Domain(name='lago.example.com'),
                cloud_init=params.CloudInit(
                    host=params.Host(address='VM0'),
                    users=params.Users(active=True, user=[
                        params.User(user_name='root', password='******')
                    ]),
                    network_configuration=params.NetworkConfiguration(
                        nics=params.Nics(nic=[
                            params.NIC(
                                name='eth0',
                                boot_protocol='STATIC',
                                on_boot=True,
                                network=params.Network(ip=params.IP(
                                    address=vm_ip,
                                    netmask='255.255.255.0',
                                    gateway=vm_gw,
                                ),
                                ),
                            )
                        ]),
                    ),
                ),
            ),
        ),
    )
    # NOTE(review): BACKUP_VM is started with the exact same action,
    # i.e. cloud-init hostname 'VM0' and the same static IP — confirm
    # this duplication is intended.
    api.vms.get(VM0_NAME).start(start_params)
    api.vms.get(BACKUP_VM_NAME).start(start_params)
    # VM2 reuses the action object but with a fresh CloudInit carrying
    # only its hostname (no static network configuration).
    start_params.vm.initialization.cloud_init = params.CloudInit(
        host=params.Host(address='VM2'),
    )
    api.vms.get(VM2_NAME).start(start_params)
    testlib.assert_true_within_long(
        lambda: api.vms.get(VM0_NAME).status.state == 'up' and api.vms.get(
            BACKUP_VM_NAME).status.state == 'up',
    )
def add_iscsi_storage_domain(prefix):
    """Create the iSCSI data storage domain from the LUNs exported by
    the iSCSI storage VM."""
    api = prefix.virt_env.engine_vm().get_api()

    # Discover LUN GUIDs on the storage VM ('-v1' prints WWIDs only).
    ret = prefix.virt_env.get_vm(SD_ISCSI_HOST_NAME).ssh(
        ['multipath', '-ll', '-v1', '|sort'])
    nt.assert_equals(ret.code, 0)
    lun_guids = ret.out.splitlines()[:SD_ISCSI_NR_LUNS]

    logical_units = [
        params.LogicalUnit(
            id=guid,
            address=_get_host_ip(
                prefix,
                SD_ISCSI_HOST_NAME,
            ),
            port=SD_ISCSI_PORT,
            target=SD_ISCSI_TARGET,
        )
        for guid in lun_guids
    ]
    domain = params.StorageDomain(
        name=SD_ISCSI_NAME,
        data_center=params.DataCenter(name=DC_NAME),
        type_='data',
        storage_format='v3',
        host=params.Host(name=api.hosts.list().pop().name),
        storage=params.Storage(
            type_='iscsi',
            volume_group=params.VolumeGroup(logical_unit=logical_units),
        ),
    )
    _add_storage_domain(api, domain)
def deploy_template(self, template, *args, **kwargs):
    """Create a VM named ``kwargs['vm_name']`` from *template* in
    ``kwargs['cluster']``, wait for it to finish creating (stopped
    state), then optionally power it on.

    For CFME-style templates on engine version 3.4.x the first boot
    injects root credentials via cloud-init.

    Returns the VM name.
    """
    self.logger.debug(' Deploying RHEV template %s to VM %s' % (template, kwargs["vm_name"]))
    timeout = kwargs.pop('timeout', 900)
    power_on = kwargs.pop('power_on', True)
    vm_kwargs = {
        'name': kwargs['vm_name'],
        'cluster': self.api.clusters.get(kwargs['cluster']),
        'template': self.api.templates.get(template)
    }
    # Host pinning is applied only when BOTH placement kwargs are given.
    if 'placement_policy_host' in kwargs and 'placement_policy_affinity' in kwargs:
        host = params.Host(name=kwargs['placement_policy_host'])
        policy = params.VmPlacementPolicy(host=host,
                                          affinity=kwargs['placement_policy_affinity'])
        vm_kwargs['placement_policy'] = policy
    vm = params.VM(**vm_kwargs)
    self.api.vms.add(vm)
    self.wait_vm_stopped(kwargs['vm_name'], num_sec=timeout)
    if power_on:
        version = self.api.get_product_info().get_full_version()
        cfme_template = any(
            template.startswith(pfx) for pfx in ["cfme-55", "s_tpl", "sprout_template"])
        if cfme_template and version.startswith("3.4"):
            # Cloud-init root password for first boot.
            action = params.Action(vm=params.VM(initialization=params.Initialization(
                cloud_init=params.CloudInit(users=params.Users(
                    user=[params.User(user_name="root", password="******")])))))
            ciargs = {}
            # NOTE(review): a params.Action is passed under the
            # 'initialization' keyword — verify start_vm() actually
            # expects an Action (not an Initialization) here.
            ciargs['initialization'] = action
            self.start_vm(vm_name=kwargs['vm_name'], **ciargs)
        else:
            self.start_vm(vm_name=kwargs['vm_name'])
    return kwargs['vm_name']
def add(self, host_address, host_password, cluster_name, timeout=300):
    """ Register a host into specified cluster. """
    deadline = time.time() + timeout
    if not self.name:
        self.name = 'my_host'
    cluster = self.api.clusters.get(cluster_name)
    host_params = param.Host(
        name=self.name,
        address=host_address,
        cluster=cluster,
        root_password=host_password,
    )
    try:
        logging.info('Registing a host %s into cluster %s' %
                     (self.name, cluster_name))
        if self.api.hosts.add(host_params):
            logging.info('Waiting for host to reach the <Up> status ...')
            # Poll once per second until 'up' or the deadline passes.
            reached_up = False
            while time.time() < deadline:
                if self.state() == 'up':
                    reached_up = True
                    break
                time.sleep(1)
            if not reached_up:
                raise WaitHostStateTimeoutError("UP", self.state())
            logging.info('Host was installed successfully')
    except Exception as e:
        logging.error('Failed to install host:\n%s' % str(e))
def deploy_template(self, template, *args, **kwargs):
    """Create a VM from *template*, wait for creation to complete, and
    optionally power it on.

    Recognised kwargs: vm_name, cluster, timeout, power_on,
    placement_policy_host + placement_policy_affinity, cpu, ram (MB).
    Returns the VM name.
    """
    self.logger.debug(' Deploying RHEV template %s to VM %s' %
                      (template, kwargs["vm_name"]))
    timeout = kwargs.pop('timeout', 900)
    power_on = kwargs.pop('power_on', True)
    vm_kwargs = {
        'name': kwargs['vm_name'],
        'cluster': self.api.clusters.get(kwargs['cluster']),
        'template': self.api.templates.get(template),
    }
    # Pinning requires both placement kwargs.
    has_placement = ('placement_policy_host' in kwargs and
                     'placement_policy_affinity' in kwargs)
    if has_placement:
        vm_kwargs['placement_policy'] = params.VmPlacementPolicy(
            host=params.Host(name=kwargs['placement_policy_host']),
            affinity=kwargs['placement_policy_affinity'])
    if 'cpu' in kwargs:
        topology = params.CpuTopology(cores=int(kwargs['cpu']))
        vm_kwargs['cpu'] = params.CPU(topology=topology)
    if 'ram' in kwargs:
        vm_kwargs['memory'] = int(kwargs['ram']) * 1024 * 1024  # MB
    self.api.vms.add(params.VM(**vm_kwargs))
    self.wait_vm_stopped(kwargs['vm_name'], num_sec=timeout)
    if power_on:
        self.start_vm(kwargs['vm_name'])
    return kwargs['vm_name']
def vm_migrate(api):
    """Migrate VM1 to the second host and wait for it to be up again."""
    target = params.Host(name=HOSTS[1])
    api.vms.get(VM1_NAME).migrate(params.Action(host=target))
    testlib.assert_true_within(
        func=lambda: api.vms.get(VM1_NAME).status.state == 'up',
        timeout=SHORT_TIMEOUT,
    )
def add_iscsi_storage_domain(prefix):
    """Create the iSCSI data storage domain from the LUNs exported by
    the 'storage-iscsi' VM."""
    api = prefix.virt_env.engine_vm().get_api()

    # Each LIO-ORG backed device line starts with its LUN GUID.
    ret = prefix.virt_env.get_vm('storage-iscsi').ssh(['multipath', '-ll'], )
    nt.assert_equals(ret.code, 0)
    guids = []
    for line in ret.out.split('\n'):
        if 'LIO-ORG' in line:
            guids.append(line.split()[0])
    lun_guids = guids[:SD_ISCSI_NR_LUNS]

    logical_units = [
        params.LogicalUnit(
            id=guid,
            address=SD_ISCSI_ADDRESS,
            port=SD_ISCSI_PORT,
            target=SD_ISCSI_TARGET,
        )
        for guid in lun_guids
    ]
    domain = params.StorageDomain(
        name=SD_ISCSI_NAME,
        data_center=params.DataCenter(name=DC_NAME),
        type_='data',
        storage_format='v3',
        host=params.Host(name=api.hosts.list().pop().name),
        storage=params.Storage(
            type_='iscsi',
            volume_group=params.VolumeGroup(logical_unit=logical_units),
        ),
    )
    _add_storage_domain(api, domain)
def vm_migrate(prefix):
    """Migrate VM0 to the third host (sorted by name) and wait for 'up'.

    NOTE: assumes the environment provides at least three hosts.
    """
    api = prefix.virt_env.engine_vm().get_api()
    ordered_hosts = sorted(h.name() for h in prefix.virt_env.host_vms())
    api.vms.get(VM0_NAME).migrate(
        params.Action(host=params.Host(name=ordered_hosts[2])))
    testlib.assert_true_within_short(
        lambda: api.vms.get(VM0_NAME).status.state == 'up',
    )
def vm_run(api):
    """Start VM1 pinned to the first host and wait for it to come up."""
    placement = params.VmPlacementPolicy(host=params.Host(name=HOSTS[0], ))
    start_params = params.Action(vm=params.VM(placement_policy=placement))
    api.vms.get(VM1_NAME).start(start_params)
    testlib.assert_true_within(
        func=lambda: api.vms.get(VM1_NAME).status.state == 'up',
        timeout=SHORT_TIMEOUT,
    )
def he_vm_migrate(prefix):
    """Migrate the hosted-engine VM to the second host (sorted by name)
    and wait until the engine reports it running there."""
    api = prefix.virt_env.engine_vm().get_api()
    ordered_hosts = sorted(h.name() for h in prefix.virt_env.host_vms())
    destination = ordered_hosts[1]
    api.vms.get(HE_VM_NAME).migrate(
        params.Action(host=params.Host(name=destination)))
    host = api.hosts.get(name=destination)
    testlib.assert_true_within_long(
        lambda: api.vms.get(HE_VM_NAME).host.id == host.id,
    )
def _add_host(vm):
    """Register *vm* as a hypervisor in the 'Default' cluster and return
    the engine's response."""
    host_params = params.Host(
        name=vm.name(),
        address=vm.ip(),
        cluster=params.Cluster(name='Default'),
        root_password=vm.root_password(),
        # Let the engine rewrite the host firewall during deployment.
        override_iptables=True,
    )
    return api.hosts.add(host_params)
def vm_run(prefix):
    """Start VM0 pinned to the second host (sorted by name) and wait
    (short timeout) for it to be 'up'."""
    api = prefix.virt_env.engine_vm().get_api()
    ordered_hosts = sorted(h.name() for h in prefix.virt_env.host_vms())
    action = params.Action(vm=params.VM(
        placement_policy=params.VmPlacementPolicy(
            host=params.Host(name=ordered_hosts[1]),
        ),
    ))
    api.vms.get(VM0_NAME).start(action)
    testlib.assert_true_within_short(
        lambda: api.vms.get(VM0_NAME).status.state == 'up',
    )
def add_iso_storage_domain(api):
    """Attach the NFS ISO storage domain to the data center."""
    # Any active host can perform the attach; take the last listed one.
    attach_host = api.hosts.list().pop().name
    iso_domain = params.StorageDomain(
        name=SD_ISO_NAME,
        data_center=params.DataCenter(name=DC_NAME),
        type_='iso',
        host=params.Host(name=attach_host),
        storage=params.Storage(
            type_='nfs',
            address=SD_ISO_ADDRESS,
            path=SD_ISO_PATH,
        ),
    )
    _add_storage_domain(api, iso_domain)
def add_templates_storage_domain(api):
    """Attach the NFS export/templates storage domain to the data
    center."""
    attach_host = api.hosts.list().pop().name
    templates_domain = params.StorageDomain(
        name=SD_TEMPLATES_NAME,
        data_center=params.DataCenter(name=DC_NAME),
        type_='data',
        storage_format='v3',
        host=params.Host(name=attach_host),
        storage=params.Storage(
            type_='nfs',
            address=SD_TEMPLATES_ADDRESS,
            path=SD_TEMPLATES_PATH,
        ),
    )
    _add_storage_domain(api, templates_domain)
def createHost():
    """Register the local machine as a hypervisor in the local cluster.

    On any failure the original traceback is logged and a generic
    Exception(ERROR_CREATE_LOCAL_HOST) is raised for the caller.

    Fix: the original used a bare ``except:``, which also swallows
    SystemExit/KeyboardInterrupt; narrowed to ``except Exception``.
    """
    global controller
    logging.debug("Adding the local host")
    try:
        controller.CONF["API_OBJECT"].hosts.add(
            params.Host(name=LOCAL_HOST,
                        address=controller.CONF["HOST_FQDN"],
                        reboot_after_installation=False,
                        cluster=controller.CONF["API_OBJECT"].clusters.get(
                            LOCAL_CLUSTER),
                        root_password=controller.CONF["SUPERUSER_PASS"]))
    except Exception:
        logging.error(traceback.format_exc())
        raise Exception(ERROR_CREATE_LOCAL_HOST)
def add_iso_storage_domain(prefix):
    """Attach the NFS ISO storage domain exported by SD_ISO_HOST_NAME."""
    api = prefix.virt_env.engine_vm().get_api()
    iso_domain = params.StorageDomain(
        name=SD_ISO_NAME,
        data_center=params.DataCenter(name=DC_NAME),
        type_='iso',
        host=params.Host(name=api.hosts.list().pop().name),
        storage=params.Storage(
            type_='nfs',
            address=_get_host_ip(prefix, SD_ISO_HOST_NAME),
            path=SD_ISO_PATH,
        ),
    )
    _add_storage_domain(api, iso_domain)
def process_cluster(clusid):
    """Processes cluster with specified cluster ID"""
    # For every VM in the cluster tagged 'elas_manage' that also carries
    # a 'cluster_*' tag: if it is pinned (affinity != migratable), reset
    # its placement policy so it can migrate/start on any host.
    query = "cluster = %s" % api.clusters.get(id=clusid).name
    for vm in listvms(api, query):
        if vm.cluster.id == clusid:
            if vm.tags.get("elas_manage"):
                for tag in vm.tags.list():
                    if tag.name[0:8] == "cluster_":
                        if vm.placement_policy.affinity != "migratable":
                            if options.verbosity > 1:
                                print "VM %s pinning removed" % vm.name
                            # An empty Host() clears the pin-to-host.
                            vm.placement_policy.affinity = "migratable"
                            vm.placement_policy.host = params.Host()
                            vm.update()
    return
def vm_run_once(oe_api, vm_name, vm_password, vm_nic_info):
    """ vm run once with cloud-init """
    try:
        # Guard clauses: unknown VM -> 1, missing nic info -> 2.
        known_names = [vm_online.name for vm_online in oe_api.vms.list()]
        if vm_name not in known_names:
            print("[E] VM not found: {0}".format(vm_name))
            return 1
        if vm_nic_info is None:
            print('[E] VM nic info is needed: "name_of_nic, ip_address, net_mask, gateway"')
            return 2
        if oe_api.vms.get(vm_name).status.state != 'down':
            print('[E] VM already up.')
            return
        print('[I] Starting VM with cloud-init.')
        host_param = params.Host(address="{0}".format(vm_name))
        users_param = params.Users(
            user=[params.User(user_name="root", password=vm_password)])
        nic_fields = [part for part in vm_nic_info.split(', ')]
        if len(nic_fields) != 4:
            print('[E] VM nic info need 4 args: "name_of_nic, ip_address, net_mask, gateway"')
            return 3
        nic_name, nic_ip, nic_mask, nic_gw = nic_fields
        nics_param = params.Nics(nic=[params.NIC(
            name=nic_name,
            boot_protocol="STATIC",
            on_boot=True,
            network=params.Network(ip=params.IP(
                address=nic_ip, netmask=nic_mask, gateway=nic_gw)))])
        cloud_init_param = params.CloudInit(
            host=host_param,
            users=users_param,
            regenerate_ssh_keys=True,
            network_configuration=params.NetworkConfiguration(nics=nics_param))
        start_action = params.Action(
            vm=params.VM(initialization=params.Initialization(
                cloud_init=cloud_init_param)),
            use_cloud_init=True)
        oe_api.vms.get(vm_name).start(start_action)
        # Busy-wait, printing a dot per second, until the VM is up.
        print('[I] Waiting for VM to reach Up status... ', end='')
        while oe_api.vms.get(vm_name).status.state != 'up':
            print('.', end='')
            sleep(1)
        print('VM {0} is up!'.format(vm_name))
    except Exception as err:
        print('[E] Failed to Start VM with cloud-init: {0}'.format(str(err)))
def createHost(api, name, ipAddress):
    """Create host *name* at *ipAddress* in the 'Default' cluster (if it
    does not already exist) and block until it reports status 'up'.
    Returns the host object."""
    print 'Creating host'
    host = api.hosts.get(name)
    if host is None:
        host = api.hosts.add(
            params.Host(name=name,
                        cluster=api.clusters.get(name='Default'),
                        address=ipAddress,
                        root_password='******'))
        print 'Host created'
    else:
        print 'Host already exists'
    # Poll once per second, re-fetching the host each iteration so the
    # status is fresh.
    while not (host.get_status().state == 'up'):
        print 'host status: %s' % host.get_status().state
        time.sleep(1)
        host = api.hosts.get(id=host.id)
    print 'host status: %s' % host.get_status().state
    return host
def migrate_vm(prefix, api):
    """Live-migrate VM0 to a different host and verify it actually moved."""
    def current_running_host():
        host_id = api.vms.get(VM0_NAME).host.id
        return api.hosts.get(id=host_id).name

    src_host = current_running_host()
    # Pick the first (sorted) host that is not the current one.
    candidates = sorted(
        h.name() for h in prefix.virt_env.host_vms() if h.name() != src_host)
    dst_host = candidates[0]
    migrate_action = params.Action(host=params.Host(name=dst_host))
    nt.assert_true(api.vms.get(VM0_NAME).migrate(migrate_action))
    testlib.assert_true_within_short(
        lambda: api.vms.get(VM0_NAME).status.state == 'up')
    nt.assert_equals(current_running_host(), dst_host)
def add_generic_nfs_storage_domain(prefix, sd_nfs_name, nfs_host_name,
                                   mount_path, sd_format='v3',
                                   sd_type='data'):
    """Attach an NFS storage domain exported by *nfs_host_name* at
    *mount_path* to the data center."""
    api = prefix.virt_env.engine_vm().get_api()
    nfs_address = _get_host_ip(prefix, nfs_host_name)
    domain = params.StorageDomain(
        name=sd_nfs_name,
        data_center=params.DataCenter(name=DC_NAME),
        type_=sd_type,
        storage_format=sd_format,
        host=params.Host(name=api.hosts.list().pop().name),
        storage=params.Storage(
            type_='nfs',
            address=nfs_address,
            path=mount_path,
        ),
    )
    _add_storage_domain(api, domain)
def migrate_VM(self, vmname, vmhost):
    """Migrate *vmname* to *vmhost* unless it already runs there.

    Returns True on success or no-op, False on failure (after reporting
    via setMsg/setFailed)."""
    VM = self.get_VM(vmname)
    current_host = self.get_Host_byid(VM.host.id)
    if str(current_host.name) == vmhost:
        # Already on the requested host — nothing to do.
        return True
    try:
        VM.migrate(
            action=params.Action(
                host=params.Host(
                    name=vmhost,
                )
            ),
        )
        setChanged()
        setMsg("VM migrated to " + vmhost)
    except Exception as e:
        setMsg("Failed to set startup host.")
        setMsg(str(e))
        setFailed()
        return False
    return True
def add(self, host_address, host_password, cluster_name):
    """ Register a host into specified cluster. """
    # Fall back to a default host name when none was configured.
    if not self.name:
        self.name = 'my_host'
    clusters = self.api.clusters.get(cluster_name)
    host_params = param.Host(name=self.name,
                             address=host_address,
                             cluster=clusters,
                             root_password=host_password)
    try:
        logging.info('Registing a host %s into cluster %s' %
                     (self.name, cluster_name))
        if self.api.hosts.add(host_params):
            logging.info('Waiting for host to reach the <Up> status ...')
            # Unbounded poll; the while/else fires once the loop exits
            # without break (always, when the state reaches 'up').
            while self.state() != 'up':
                time.sleep(1)
            else:
                logging.info('Host is up')
            logging.info('Host was installed successfully')
    except Exception, e:
        logging.error('Failed to install host:\n%s' % str(e))
def start_vm(vm_name, host_ip):
    """Connect to the engine, start *vm_name* on the host at *host_ip*,
    then disconnect. Errors are printed, not raised."""
    try:
        api = API(url="https://engine167.eayun.com",
                  username="******",
                  password="******",
                  ca_file="ca.crt")
        vm = api.vms.get(name=vm_name)
        try:
            # Supplying a Host in the start action overrides the
            # scheduler's placement for this run only.
            vm.start(
                action=params.Action(
                    vm=params.VM(
                        host=params.Host(address=host_ip)
                    )
                )
            )
            print "Started '%s'." % vm.get_name()
        except Exception as ex:
            print "Unable to start '%s': %s" % (vm.get_name(), ex)
        api.disconnect()
    except Exception as ex:
        print "Unexpected error: %s" % ex
8. 9. VERSION = params.Version(major='3', minor='0') 10. ##change to fit your host 11. URL = input('enter ip address, example: https://192.168.1.1:8443/api ') 12. USERNAME = input('Enter username, example: [email protected] ') 13. PASSWORD = input('Enter your password ') 14. 15. DC_NAME = input('Enter name of your datacenter') 16. CLUSTER_NAME = input('cluster name') 17. HOST_NAME = input('input host name') 18. STORAGE_NAME = input('input storage name') 19. VM_NAME = input('enter VM name') 20. 21. api = API(url=URL, username=USERNAME, password=PASSWORD) 22. try: 23. if api.hosts.add(params.Host(name=HOST_NAME, address=HOST_ADDRESS, cluster=api.clusters.get(CLUSTER_NAME), root_password=ROOT_PASSWORD)): 24. 25. print 'Host was installed successfully' 26. print 'Waiting for host to reach the Up status' 27. while api.hosts.get(HOST_NAME).status.state != 'up': 28. sleep(1) 29. print "Host is up" 30. except Exception as e: 31. print 'Failed to install Host:\n%s' % str(e) 32. 33. 34. def connectToHost(host,host_user,host_pw): 35. apiurl="https://"+host+"/api" #change for your host 36. #insecure -> skips SSL check 37. api = API(url=apiurl,username=host_user,password=host_pw,insecure=True) 38. return api
def process_cluster(cluster):
    """Processes cluster"""
    # NOTE(review): this block was reconstructed from a whitespace-mangled
    # paste (a print string and an assignment were broken across chunk
    # boundaries); the statement grouping below is a best-effort
    # reconstruction — confirm against the original script.
    # Emtpy vars for further processing
    hosts_in_cluster = []
    vms_in_cluster = []
    tags_in_cluster = []
    tags_vm = {}
    tags_with_more_than_one = []
    # Get host list from this cluster
    query = "cluster = %s and status = up" % api.clusters.get(
        id=cluster.id).name
    for host in listhosts(api, query):
        if host.cluster.id == cluster.id:
            if host.status.state == "up":
                hosts_in_cluster.append(host.id)
    if options.verbosity > 2:
        print "\nProcessing cluster %s..." % cluster.name
        print "##############################################"
    #Create the empty set of vars that we'll populate later
    for tag in api.tags.list():
        tags_vm[tag.name] = []
    #Populate the list of tags and VM's
    query = "cluster = %s and status = up and tag = elas_manage" % api.clusters.get(
        id=cluster.id).name
    for vm in listvms(api, query):
        if vm.cluster.id == cluster.id:
            if vm.status.state == "up":
                if not vm.tags.get("elas_manage"):
                    if options.verbosity > 3:
                        print "VM %s is discarded because it has no tag elas_manage" % vm.name
                else:
                    # Add the VM Id to the list of VMS to manage in this cluster
                    vms_in_cluster.append(vm.id)
                    for tag in vm.tags.list():
                        if tag.name[0:8] == "cluster_":
                            if options.verbosity > 3:
                                print "VM %s in cluster %s has tag %s" % (
                                    vm.name, cluster.name, tag.name)
                            # Put the TAG in the list of used for this cluster and put the VM to the ones with this tag
                            tags_in_cluster.append(tag.id)
                            tags_vm[tag.name].append(vm.name)
    #Construct a list of tags with more than one vm in state == up to process
    for tag in api.tags.list():
        if len(tags_vm[tag.name]) > 1:
            if tag.name[0:8] == "cluster_":
                tags_with_more_than_one.append(tag.name)
    if options.verbosity > 3:
        print "\nTAGS/VM organization: %s" % tags_vm
        print "TAGS with more than one vm: %s" % tags_with_more_than_one
    tags_to_manage = []
    for etiqueta in tags_with_more_than_one:
        if len(tags_vm[etiqueta]) > len(hosts_in_cluster):
            if options.verbosity > 3:
                print "\nMore VM's with tag than available hosts for tag %s, will do as much as I can..." % etiqueta
        else:
            if options.verbosity > 3:
                print "\nContinuing for tag %s" % etiqueta
            # NOTE(review): nesting of this check under the else branch is
            # inferred from statement order — confirm with the original.
            if etiqueta[0:8] == "cluster_":
                tags_to_manage.append(etiqueta)
    #Removing duplicates
    tags = sorted(set(tags_in_cluster))
    tags_in_cluster = tags
    if options.verbosity > 3:
        print "Hosts in cluster:"
        print hosts_in_cluster
        print "Vm's in cluster"
        print vms_in_cluster
        print "Tags in cluster"
        print tags_in_cluster
    for etiqueta in tags_to_manage:
        # Hosts already hosting a VM of this tag; used to spread VMs out.
        tags_vm_used = set([])
        if options.verbosity > 3:
            print "Managing tag %s" % etiqueta
        for vm in tags_vm[etiqueta]:
            if options.verbosity > 4:
                print "Processing vm %s for tag %s at host %s" % (
                    vm, etiqueta,
                    api.hosts.get(id=api.vms.get(name=vm).host.id).name)
            #Set target as actual running host
            target = api.vms.get(name=vm).host.id
            if api.vms.get(name=vm).host.id not in tags_vm_used:
                #Host not yet used, accept it directly
                tags_vm_used.add(target)
            else:
                # Host was in use, searching for new target
                for host in hosts_in_cluster:
                    if host in tags_vm_used:
                        if options.verbosity > 4:
                            print "Host %s used, skipping" % host
                    else:
                        if options.verbosity > 4:
                            print "Host %s not used, migrating there" % host
                        # Setting new host
                        target = host
                nombre = api.hosts.get(id=target).name
                # Only migrate if VM if there's host change
                maquina = api.vms.get(name=vm)
                if maquina.host.id != target:
                    if options.verbosity > 3:
                        print "Processing vm %s for tag %s at host %s needs migration to host %s" % (
                            vm, etiqueta,
                            api.hosts.get(id=api.vms.get(name=vm).host.id).name,
                            nombre)
                    # Allow migration
                    maquina.placement_policy.host = params.Host()
                    maquina.placement_policy.affinity = "migratable"
                    maquina.update()
                    #Migrate VM to target HOST to satisfy rules
                    migra(api, options, api.vms.get(name=vm),
                          params.Action(host=api.hosts.get(id=target)))
                    tags_vm_used.add(target)
                else:
                    if options.verbosity > 4:
                        print "Skipping migration target=host"
                # Discard further migration of any machine
                maquina.placement_policy.affinity = "pinned"
                maquina.placement_policy.host = api.hosts.get(id=target)
                try:
                    maquina.update()
                except:
                    if options.verbosity > 4:
                        print "Problem updating VM parameters for pinning"
        # NOTE(review): this fragment starts with a dangling 'except:' whose
        # matching 'try' lies outside the visible chunk — reconstruction is
        # best-effort; confirm against the original script.
        except:
            print "Error adding elas_manage tag to vm %s" % vm.name

# CLEANUP
# Remove pinning from vm's in down state to allow to start in any host
query = "status = down"
for vm in listvms(api, query):
    if vm.status.state == "down":
        if vm.tags.get("elas_manage"):
            for tag in vm.tags.list():
                if tag.name[0:8] == "cluster_":
                    if options.verbosity >= 5:
                        print "Cleaning VM %s pinning to allow to start on any host" % vm.name
                    # If powered down, allow machine to be migratable so it can start on any host
                    maquina = vm
                    maquina.placement_policy.host = params.Host()
                    maquina.placement_policy.affinity = "migratable"
                    maquina.update()
        # NOTE(review): nesting of the elas_start check (sibling of the
        # elas_manage block) is inferred from statement order.
        if vm.tags.get("elas_start"):
            if options.verbosity >= 5:
                print "VM %s should be running, starting..." % vm.name
            # Start machine, as if it had host pinning it couldn't be autostarted using HA
            vm.start()

# Driver: process one named cluster, or every cluster of the RHEVM.
if not options.cluster:
    # Processing each cluster of our RHEVM
    for cluster in api.clusters.list():
        process_cluster(cluster)
else:
    process_cluster(api.clusters.get(name=options.cluster))
def set_Host(self, host_name, cluster, ifaces):
    """Create *host_name* in *cluster* (if absent) and configure its
    network interfaces from *ifaces*.

    Each entry of *ifaces* is a dict with at least 'name'; optional keys:
    'management' (marks the management NIC whose 'ip' becomes the host
    address), 'ip', 'netmask', 'gateway', 'boot_protocol', 'network'
    (logical network to attach) and 'bond' (list of slave iface names).

    Returns True on success or when the host already exists; False on
    any failure (reported via setMsg/setFailed).
    """
    HOST = self.get_Host(host_name)
    CLUSTER = self.get_cluster(cluster)
    if HOST is None:
        setMsg("Host does not exist.")
        ifacelist = dict()       # plain NICs by name, referenced by bonds
        networklist = []         # HostNICs to apply via setupnetworks
        manageip = ''            # address of the management interface
        try:
            # Pass 1: build the NIC / bond / network parameter objects.
            for iface in ifaces:
                try:
                    setMsg('creating host interface ' + iface['name'])
                    if 'management' in iface:
                        manageip = iface['ip']
                    # Default boot protocol: static when an IP is given,
                    # otherwise none.
                    if 'boot_protocol' not in iface:
                        if 'ip' in iface:
                            iface['boot_protocol'] = 'static'
                        else:
                            iface['boot_protocol'] = 'none'
                    if 'ip' not in iface:
                        iface['ip'] = ''
                    if 'netmask' not in iface:
                        iface['netmask'] = ''
                    if 'gateway' not in iface:
                        iface['gateway'] = ''
                    if 'network' in iface:
                        if 'bond' in iface:
                            # Bonded interface: slaves must have been
                            # declared earlier in *ifaces*.
                            bond = []
                            for slave in iface['bond']:
                                bond.append(ifacelist[slave])
                            try:
                                tmpiface = params.Bonding(
                                    slaves=params.Slaves(host_nic=bond),
                                    options=params.Options(
                                        option=[
                                            params.Option(name='miimon', value='100'),
                                            params.Option(name='mode', value='4')
                                        ]
                                    )
                                )
                            except Exception as e:
                                setMsg('Failed to create the bond for ' + iface['name'])
                                setFailed()
                                setMsg(str(e))
                                return False
                            try:
                                tmpnetwork = params.HostNIC(
                                    network=params.Network(name=iface['network']),
                                    name=iface['name'],
                                    boot_protocol=iface['boot_protocol'],
                                    ip=params.IP(
                                        address=iface['ip'],
                                        netmask=iface['netmask'],
                                        gateway=iface['gateway']
                                    ),
                                    override_configuration=True,
                                    bonding=tmpiface)
                                networklist.append(tmpnetwork)
                                setMsg('Applying network ' + iface['name'])
                            except Exception as e:
                                setMsg('Failed to set' + iface['name'] + ' as network interface')
                                setFailed()
                                setMsg(str(e))
                                return False
                        else:
                            # Plain NIC attached to a logical network.
                            tmpnetwork = params.HostNIC(
                                network=params.Network(name=iface['network']),
                                name=iface['name'],
                                boot_protocol=iface['boot_protocol'],
                                ip=params.IP(
                                    address=iface['ip'],
                                    netmask=iface['netmask'],
                                    gateway=iface['gateway']
                                ))
                            networklist.append(tmpnetwork)
                            setMsg('Applying network ' + iface['name'])
                    else:
                        # No logical network: remember the NIC so a later
                        # bond definition can reference it as a slave.
                        tmpiface = params.HostNIC(
                            name=iface['name'],
                            network=params.Network(),
                            boot_protocol=iface['boot_protocol'],
                            ip=params.IP(
                                address=iface['ip'],
                                netmask=iface['netmask'],
                                gateway=iface['gateway']
                            ))
                        ifacelist[iface['name']] = tmpiface
                except Exception as e:
                    setMsg('Failed to set ' + iface['name'])
                    setFailed()
                    setMsg(str(e))
                    return False
        except Exception as e:
            setMsg('Failed to set networks')
            setMsg(str(e))
            setFailed()
            return False
        if manageip == '':
            setMsg('No management network is defined')
            setFailed()
            return False
        try:
            # Pass 2: register the host, wait for it to come up, move it
            # to maintenance, apply the network config and persist it.
            HOST = params.Host(name=host_name, address=manageip, cluster=CLUSTER,
                               ssh=params.SSH(authentication_method='publickey'))
            if self.conn.hosts.add(HOST):
                setChanged()
                HOST = self.get_Host(host_name)
                state = HOST.status.state
                while (state != 'non_operational' and state != 'up'):
                    HOST = self.get_Host(host_name)
                    state = HOST.status.state
                    time.sleep(1)
                    if state == 'non_responsive':
                        setMsg('Failed to add host to RHEVM')
                        setFailed()
                        return False
                setMsg('status host: up')
                time.sleep(5)
                HOST = self.get_Host(host_name)
                state = HOST.status.state
                setMsg('State before setting to maintenance: ' + str(state))
                # Network changes require the host in maintenance mode.
                HOST.deactivate()
                while state != 'maintenance':
                    HOST = self.get_Host(host_name)
                    state = HOST.status.state
                    time.sleep(1)
                setMsg('status host: maintenance')
                try:
                    HOST.nics.setupnetworks(params.Action(
                        force=True,
                        check_connectivity=False,
                        host_nics=params.HostNics(host_nic=networklist)
                    ))
                    setMsg('nics are set')
                except Exception as e:
                    setMsg('Failed to apply networkconfig')
                    setFailed()
                    setMsg(str(e))
                    return False
                try:
                    # Persist, otherwise the config is lost on reboot.
                    HOST.commitnetconfig()
                    setMsg('Network config is saved')
                except Exception as e:
                    setMsg('Failed to save networkconfig')
                    setFailed()
                    setMsg(str(e))
                    return False
        except Exception as e:
            if 'The Host name is already in use' in str(e):
                setMsg("Host already exists")
            else:
                setMsg("Failed to add host")
            setFailed()
            setMsg(str(e))
            return False
        # Bring the host back from maintenance and wait for 'up'.
        HOST.activate()
        while state != 'up':
            HOST = self.get_Host(host_name)
            state = HOST.status.state
            time.sleep(1)
            if state == 'non_responsive':
                setMsg('Failed to apply networkconfig.')
                setFailed()
                return False
        setMsg('status host: up')
    else:
        setMsg("Host exists.")
    return True