def run(**kwargs):
    """Calls all the functions needed to upload new template to RHEVM.

    This is called either by template_upload_all script, or by main function.

    Args:
        **kwargs: Kwargs generated from
            cfme_data['template_upload']['template_upload_rhevm'].
    """
    ovaname = get_ova_name(kwargs.get('image_url'))
    # Resolve provider connection details (API + SSH) from the yaml configs.
    mgmt_sys = cfme_data['management_systems'][kwargs.get('provider')]
    rhevurl = mgmt_sys['hostname']
    rhevm_credentials = mgmt_sys['credentials']
    username = credentials[rhevm_credentials]['username']
    password = credentials[rhevm_credentials]['password']
    ssh_rhevm_creds = mgmt_sys['hosts'][0]['credentials']
    sshname = credentials[ssh_rhevm_creds]['username']
    sshpass = credentials[ssh_rhevm_creds]['password']
    rhevip = mgmt_sys['ipaddress']
    apiurl = 'https://%s:443/api' % rhevurl
    ssh_client = make_ssh_client(rhevip, sshname, sshpass)
    api = API(url=apiurl, username=username, password=password,
              insecure=True, persistent_auth=False)
    template_name = kwargs.get('template_name', None)
    if template_name is None:
        template_name = cfme_data['basic_info']['appliance_template']
    # Let the helpers normalize/validate kwargs before doing any work.
    kwargs = update_params_api(api, **kwargs)
    check_kwargs(**kwargs)
    if api.templates.get(template_name) is not None:
        # Template already exists -- nothing to do.
        print "RHEVM: Found finished template with this name."
        print "RHEVM: The script will now end."
    else:
        print "RHEVM: Downloading .ova file..."
        download_ova(ssh_client, kwargs.get('image_url'))
        try:
            # ova -> export-domain template -> imported template -> temp VM
            # (+ disk) -> final template.
            print "RHEVM: Templatizing .ova file..."
            template_from_ova(api, username, password, rhevip,
                              kwargs.get('edomain'), ovaname, ssh_client)
            print "RHEVM: Importing new template..."
            import_template(api, kwargs.get('edomain'), kwargs.get('sdomain'),
                            kwargs.get('cluster'))
            print "RHEVM: Making a temporary VM from new template..."
            make_vm_from_template(api, kwargs.get('cluster'))
            print "RHEVM: Adding disk to created VM..."
            add_disk_to_vm(api, kwargs.get('sdomain'), kwargs.get('disk_size'),
                           kwargs.get('disk_format'),
                           kwargs.get('disk_interface'))
            print "RHEVM: Templatizing VM..."
            templatize_vm(api, template_name, kwargs.get('cluster'))
        finally:
            # Always remove intermediate artifacts and close the connections,
            # even when one of the templatizing steps above failed.
            cleanup(api, kwargs.get('edomain'), ssh_client, ovaname)
            ssh_client.close()
            api.disconnect()
            print "RHEVM: Done."
def start_tag(tag): try: api = API(url=URL, username=USERNAME, password=PASSWORD, ca_file=CERT, insecure=True) print "Connected to %s successfully!" % api.get_product_info().name for vm in api.vms.list(query="tag=%s" % tag): print("VM %s is in state %s" % (vm.get_name(), vm.status.state)) # print vm.get_name() # print(dir(vm)) try: if vm.status.state != 'up': # print("VM %s is in state %s" %( vm.get_name() , vm.status.state)) print 'Starting VM' # vm.get(VM_NAME).start() vm.start() print 'Waiting for VM to reach Up status' # while vm.get(VM_NAME).status.state != 'up': count = 30 while vm.status.state != 'up' and count > 0: time.sleep(1) count = count - 1 else: print 'VM already up' except Exception as e: print 'Failed to Start VM:\n%s' % str(e) time.sleep(6) api.disconnect() except Exception as ex: print "Unexpected error: %s" % ex
def get_all_vm_endpoints(pattern='*', provider='perf-rhevm'):
    """ Takes in a pattern and returns all VM ip addresses and names.
    @pattern: Appliance(VM) Name pattern. Example: 'Infra*'
    @provider: Provider Name as per 'providers:' in cfme_performance[local].yml
        Default is 'perf-rhevm'.
    """
    prov = cfme_performance.providers[provider]
    api = API(url='https://%s' % prov.ip_address,
              username=prov.credentials.username,
              password=prov.credentials.password,
              insecure=True)
    endpoints = {}
    for machine in api.vms.list(name=pattern):
        guest = machine.guest_info
        if guest is None:
            # No guest agent info -- still report the VM, just with no IPs.
            endpoints[machine.name] = []
        else:
            endpoints[machine.name] = [ip.get_address()
                                       for ip in guest.get_ips().get_ip()]
    api.disconnect()
    return endpoints
def main(): url = 'https://vm-rhevm01.infoplus-ot.ris:443/api' username = '******' password = getpass.getpass("Supply password for user %s: " % username) api = API(url=url, username=username, password=password, insecure=True) vm_list = api.vms.list() for vm in vm_list: print vm.name api.disconnect()
def main(): url='https://vm-rhevm01.infoplus-ot.ris:443/api' username='******' password=getpass.getpass("Supply password for user %s: " % username) api = API(url=url, username=username, password=password,insecure=True) vm_list=api.vms.list() for vm in vm_list: print vm.name api.disconnect()
def main(): URL='https://<ovirt-host>:443/api' USERNAME='******' PASSWORD='******' api = API(url=URL, username=USERNAME, password=PASSWORD,insecure=True) vm_list=api.vms.list() for vm in vm_list: print vm.name api.disconnect()
def main(): URL = 'https://192.168.1.112:443/api' USERNAME = '******' PASSWORD = '******' api = API(url=URL, username=USERNAME, password=PASSWORD, insecure=True) vm = api.vms.get(name="ubuntu14.04") print vm.name #vm_list = api.vms.list() #for vm in vm_list: # print vm.name api.disconnect()
def start_vm(vm_name, host_ip): try: api = API(url="https://engine167.eayun.com", username="******", password="******", ca_file="ca.crt") vm = api.vms.get(name=vm_name) try: vm.start(action=params.Action(vm=params.VM(host=params.Host(address=host_ip)))) print "Started '%s'." % vm.get_name() except Exception as ex: print "Unable to start '%s': %s" % (vm.get_name(), ex) api.disconnect() except Exception as ex: print "Unexpected error: %s" % ex
def run(**kwargs): ovaname = get_ova_name(kwargs.get('image_url')) mgmt_sys = cfme_data['management_systems'][kwargs.get('provider')] rhevurl = mgmt_sys['hostname'] rhevm_credentials = mgmt_sys['credentials'] username = credentials[rhevm_credentials]['username'] password = credentials[rhevm_credentials]['password'] ssh_rhevm_creds = mgmt_sys['hosts'][0]['credentials'] sshname = credentials[ssh_rhevm_creds]['username'] sshpass = credentials[ssh_rhevm_creds]['password'] rhevip = mgmt_sys['ipaddress'] apiurl = 'https://%s:443/api' % rhevurl ssh_client = make_ssh_client(rhevip, sshname, sshpass) api = API(url=apiurl, username=username, password=password, insecure=True, persistent_auth=False) template_name = kwargs.get('template_name', None) if template_name is None: template_name = cfme_data['basic_info']['cfme_template_name'] if api.templates.get(template_name) is not None: print "RHEVM: Found finished template with this name." print "RHEVM: The script will now end." else: print "RHEVM: Downloading .ova file..." download_ova(ssh_client, kwargs.get('image_url')) print "RHEVM: Templatizing .ova file..." template_from_ova(api, username, password, rhevip, kwargs.get('edomain'), ovaname, ssh_client) print "RHEVM: Importing new template..." import_template(api, kwargs.get('edomain'), kwargs.get('sdomain'), kwargs.get('cluster')) print "RHEVM: Making a temporary VM from new template..." make_vm_from_template(api, kwargs.get('cluster')) print "RHEVM: Adding disk to created VM..." add_disk_to_vm(api, kwargs.get('sdomain'), kwargs.get('disk_size'), kwargs.get('disk_format'), kwargs.get('disk_interface')) print "RHEVM: Templatizing VM..." templatize_vm(api, template_name, kwargs.get('cluster')) print "RHEVM: Cleaning up..." cleanup(api, kwargs.get('edomain')) ssh_client.close() api.disconnect() print "RHEVM: Done."
def get_up_nodes(user, domain, passwd, cafile, engine_host):
    """Return the names of hypervisor hosts whose state is 'up'.

    Args:
        user: Engine user name (without domain).
        domain: Auth domain; login is sent as ``user@domain``.
        passwd: Password for the engine user.
        cafile: Path to the engine CA certificate.
        engine_host: Hostname of the oVirt engine.

    Returns:
        List of host names that are up; empty list if the engine is
        unreachable or the query fails.
    """
    url = "https://%s/ovirt-engine/api" % (engine_host)
    uname = "%s@%s" % (user, domain)
    try:
        api = API(url=url, username=uname, password=passwd, ca_file=cafile)
        try:
            # NOTE: substring check preserved from original; among oVirt host
            # states only 'up' contains 'up'.
            li_up = [host.name for host in api.hosts.list()
                     if 'up' in host.status.state]
        finally:
            # BUG FIX: disconnect even when hosts.list() raises.
            api.disconnect()
    except Exception as exc:
        # BUG FIX: logging.warn is a deprecated alias, and the original
        # swallowed the failure reason entirely.
        logging.warning("Could not connect to %s: %s" % (url, exc))
        return []
    return li_up
def process(api_url, username, password, cert_file=None):
    """Scan the engine for VMs with out-of-range MAC addresses and print
    search criteria (by name and by id) for the offenders.

    When no certificate file is supplied, certificate checking is disabled.
    """
    connection = API(url=api_url, username=username, password=password,
                     cert_file=cert_file, insecure=(not cert_file))
    print('Connected to %s' % api_url)
    offenders = list(iter_problematic_vms(connection))
    connection.disconnect()
    if not offenders:
        print('All MAC addresses are in range')
        return
    for criteria_fn in (get_single_vm_criteria_by_name,
                        get_single_vm_criteria_by_id):
        print(build_search_criteria(offenders, criteria_fn))
def start_vms(vmObj):
    # Thread worker: issue an asynchronous start for one VM and record any
    # failure in the shared module-level ``failedVms`` list.
    logging.info('Thread to start %s', vmObj.name)
    try:
        vmObj.start()
        #time.sleep(5)
    except Exception as e:
        logging.debug('Exception caught on VM ( %s) start:\n%s' % (vmObj.name, str(e)))
        failedVms.append(vmObj.name)


if __name__ == "__main__":
    # Driver: connect to the engine, spawn one thread per VM that is not up,
    # then wait (bounded) for all threads and report the stragglers/failures.
    try:
        api = API(url=APIURL, username=APIUSER, password=APIPASS, ca_file=CAFILE)
        print 'Connected to oVIRT API %s Successfully' % APIURL
        logging.info('Successfully Connected to %s' % APIURL)
        try:
            print ' \n I am logging in %s \n' % LOGFILENAME
            vmsList = api.vms.list()
            for i in vmsList:
                print i.name
                if i.status.state != 'up':
                    logging.warning('%s is not up, trying to start it' % i.name)
                    threadMe = Thread(target=start_vms, args=[i])
                    threadMe.start()
                    # ``threads`` is a module-level list shared with the join
                    # loop below.
                    threads.append(threadMe)
        except Exception as e:
            logging.debug('Error:\n%s' % str(e))
        logging.warning('No of VMs to start : %s' % len(threads))
        print 'No of VMs to start: %s' % len(threads)
        for th in threads:
            logging.info('Waiting for %s to join' % th)
            # Bounded join: give each worker up to 30s, then move on.
            th.join(30)
            if not th.isAlive():
                logging.info('Thread : %s terminated' % (th.getName()))
            else:
                logging.debug('Thread : %s is still alive, you may check this task..' % (th))
        logging.debug(' Below Vms failed to start with an exception:%s' % (failedVms));
        api.disconnect()
    except Exception as ex:
        logging.debug('Unexpected error: %s' % ex)
def stop_vm(vm_name): try: api = API(url="https://engine167.eayun.com", username="******", password="******", ca_file="ca.crt") vm = api.vms.get(name=vm_name) try: vm.stop() print "Stoped '%s'." % vm.get_name() except Exception as ex: print "Unable to stop '%s': %s" % (vm.get_name(), ex) api.disconnect() except Exception as ex: print "Unexpected error: %s" % ex
def start_vm(vm_name, host_ip): try: api = API(url="https://engine167.eayun.com", username="******", password="******", ca_file="ca.crt") vm = api.vms.get(name=vm_name) try: vm.start( action = params.Action( vm = params.VM( host = params.Host(address = host_ip) ) ) ) print "Started '%s'." % vm.get_name() except Exception as ex: print "Unable to start '%s': %s" % (vm.get_name(), ex) api.disconnect() except Exception as ex: print "Unexpected error: %s" % ex
vm_start(oe_conn, opt.vm_name) elif opt.action == 'stop': vm_stop(oe_conn, opt.vm_name) elif opt.action == 'delete': vm_delete(oe_conn, opt.vm_name) elif opt.action == 'create': vm_create_from_tpl(oe_conn, opt.vm_name, opt.vm_template, opt.vm_cluster) elif opt.action == 'init': vm_run_once(oe_conn, opt.vm_name, opt.vm_password, opt.vm_nic_info) elif opt.action == 'start-list': for vm in opt.vm_list.replace(' ', '').split(','): print('[I] try to start vm: {0}'.format(vm)) vm_start(oe_conn, vm) elif opt.action == 'stop-list': for vm in opt.vm_list.replace(' ', '').split(','): print('[I] try to stop vm: {0}'.format(vm)) vm_stop(oe_conn, vm) elif opt.action == 'delete-list': for vm in opt.vm_list.replace(' ', '').split(','): print('[I] try to delete: {0}'.format(vm)) vm_delete(oe_conn, vm) elif opt.action == 'create-list': for vm in opt.vm_list.replace(' ', '').split(','): print('[I] try to create: {0}'.format(vm)) vm_create_from_tpl(oe_conn, vm, opt.vm_template, opt.vm_cluster) except Exception as e: print('[E] Failed to init API: {0}'.format(str(e))) finally: if oe_conn is not None: oe_conn.disconnect()
class RHEVMSystem(MgmtSystemAPIBase):
    """ Client to RHEVM API

    This class piggy backs off ovirtsdk.

    Benefits of ovirtsdk:
    - Don't need intimite knowledge w/ RHEVM api itself.

    Detriments of ovirtsdk:
    - Response to most quaries are returned as an object rather than a string.
      This makes it harder to do simple stuff like getting the status of a vm.
    - Because of this, it makes listing VMs based on **kwargs impossible
      since ovirtsdk relies on re class to find matches.

      E.G. List out VM with this name (positive case)
        Ideal: self.api.vms.list(name='test_vm')
        Underneath the hood:
          - ovirtsdk fetches list of all vms
            [ovirtsdk.infrastructure.brokers.VM object, ...]
          - ovirtsdk then tries to filter the result using re.
            - tries to look for 'name' attr in
              ovirtsdk.infrastructure.brokers.VM object
            - found name attribute, in this case, the type of the value of
              the attribute is string.
            - match() succeed in comparing the value to 'test_vm'

      E.G. List out VM with that's powered on (negative case)
        Ideal: self.api.vms.list(status='up')
        Underneath the hood:
          - '^same step as above except^'
            - found status attribute, in this case, the type of the value of
              the attribute is ovirtsdk.xml.params.Status
            - match() failed because class is compared to string 'up'

      This problem should be attributed to how RHEVM api was designed rather
      than how ovirtsdk handles RHEVM api responses.

    - Obj. are not updated after action calls.
      - E.G.
        vm = api.vms.get(name='test_vm')
        vm.status.get_state() # returns 'down'
        vm.start()
        # wait a few mins
        vm.status.get_state() # returns 'down'; wtf?
        vm = api.vms.get(name='test_vm')
        vm.status.get_state() # returns 'up'
    """

    # Lazily-evaluated counters used by the common stats interface.
    _stats_available = {
        'num_vm': lambda self: len(self.list_vm()),
        'num_host': lambda self: len(self.list_host()),
        'num_cluster': lambda self: len(self.list_cluster()),
        'num_template': lambda self: len(self.list_template()),
        'num_datastore': lambda self: len(self.list_datastore()),
    }

    def __init__(self, hostname, username, password, **kwargs):
        """Connect to the RHEVM API at *hostname* (optional kwarg: port)."""
        # generate URL from hostname
        if 'port' in kwargs:
            url = 'https://%s:%s/api' % (hostname, kwargs['port'])
        else:
            url = 'https://%s/api' % hostname
        self.api = API(url=url, username=username, password=password, insecure=True)

    def _get_vm(self, vm_name=None):
        """Fetch the VM broker by name; raises if missing or name is None."""
        if vm_name is None:
            raise Exception('Could not find a VM named %s.' % vm_name)
        else:
            vm = self.api.vms.get(name=vm_name)
            if vm is None:
                raise Exception('Could not find a VM named %s.' % vm_name)
            return vm

    def get_ip_address(self, vm_name):
        """Return the first guest-agent-reported IP address of *vm_name*."""
        vm = self._get_vm(vm_name)
        return vm.get_guest_info().get_ips().get_ip()[0].get_address()

    def does_vm_exist(self, name):
        """True if a VM named *name* exists (lookups that raise mean no)."""
        try:
            self._get_vm(name)
            return True
        except Exception:
            return False

    def start_vm(self, vm_name=None):
        """Start *vm_name*; True if already up or the start action completed."""
        vm = self._get_vm(vm_name)
        if vm.status.get_state() == 'up':
            return True
        else:
            ack = vm.start()
            if ack.get_status().get_state() == 'complete':
                return True
        return False

    def stop_vm(self, vm_name):
        """Stop *vm_name*; True if already down or the stop action completed."""
        vm = self._get_vm(vm_name)
        if vm.status.get_state() == 'down':
            return True
        else:
            ack = vm.stop()
            if ack.get_status().get_state() == 'complete':
                return True
        return False

    def delete_vm(self, vm_name):
        """Delete *vm_name*, stopping it first if it is running."""
        vm = self._get_vm(vm_name)
        if vm.status.get_state() == 'up':
            self.stop_vm(vm_name)
        # The SDK returns an empty string on a successful delete.
        ack = vm.delete()
        if ack == '':
            return True
        else:
            return False

    def create_vm(self, vm_name):
        raise NotImplementedError('This function has not yet been implemented.')
    # Heres the code but don't have a need and no time to test it to get it right
    # including for inclusion later
    #
    # def create_vm(self, vm_name, *args, **kwargs):
    #     MB = 1024 * 1024
    #     try:
    #         self.api.vms.add(
    #             params.VM(
    #                 name=vm_name,
    #                 memory=kwargs['memory_in_mb'] * MB,
    #                 cluster=self.api.clusters.get(kwargs['cluster_name']),
    #                 template=self.api.templates.get('Blank')))
    #         print 'VM created'
    #         self.api.vms.get(vm_name).nics.add(params.NIC(name='eth0',
    #             network=params.Network(name='ovirtmgmt'), interface='virtio'))
    #         print 'NIC added to VM'
    #         self.api.vms.get(vm_name).disks.add(params.Disk(
    #             storage_domains=params.StorageDomains(
    #                 storage_domain=[self.api.storagedomains.get(kwargs['storage_domain'])],
    #             size=512 * MB,
    #             status=None,
    #             interface='virtio',
    #             format='cow',
    #             sparse=True,
    #             bootable=True)))
    #         print 'Disk added to VM'
    #         print 'Waiting for VM to reach Down status'
    #         while self.api.vms.get(vm_name).status.state != 'down':
    #             time.sleep(1)
    #     except Exception as e:
    #         print 'Failed to create VM with disk and NIC\n%s' % str(e)

    def restart_vm(self, vm_name):
        """Stop then start *vm_name*; False as soon as the stop fails."""
        if not self.stop_vm(vm_name):
            return False
        else:
            return self.start_vm(vm_name)

    def list_vm(self, **kwargs):
        """Return VM names; see class docstring for kwargs limitations."""
        # list vm based on kwargs can be buggy
        # i.e. you can't return a list of powered on vm
        # but you can return a vm w/ a matched name
        vm_list = self.api.vms.list(**kwargs)
        return [vm.name for vm in vm_list]

    def list_host(self, **kwargs):
        """Return the names of all hosts."""
        host_list = self.api.hosts.list(**kwargs)
        return [host.name for host in host_list]

    def list_datastore(self, **kwargs):
        """Return names of storage domains whose status is None (active)."""
        datastore_list = self.api.storagedomains.list(**kwargs)
        return [ds.name for ds in datastore_list if ds.get_status() is None]

    def list_cluster(self, **kwargs):
        """Return the names of all clusters."""
        cluster_list = self.api.clusters.list(**kwargs)
        return [cluster.name for cluster in cluster_list]

    def list_template(self, **kwargs):
        ''' CFME ignores the 'Blank' template, so we do too '''
        template_list = self.api.templates.list(**kwargs)
        return [template.name for template in template_list if template.name != "Blank"]

    def list_flavor(self):
        raise NotImplementedError('This function is not supported on this platform.')

    def info(self):
        # and we got nothing!
        pass

    def disconnect(self):
        """Close the underlying SDK session."""
        self.api.disconnect()

    def vm_status(self, vm_name=None):
        """Return the raw state string ('up', 'down', ...) of *vm_name*."""
        state = self._get_vm(vm_name).get_status().get_state()
        return state

    def is_vm_running(self, vm_name):
        state = self.vm_status(vm_name)
        return "up" == state

    def is_vm_stopped(self, vm_name):
        state = self.vm_status(vm_name)
        return "down" == state

    def is_vm_suspended(self, vm_name):
        state = self.vm_status(vm_name)
        return "suspended" == state

    def suspend_vm(self, vm_name):
        """Suspend *vm_name*; raises if it is down, True on completed action."""
        vm = self._get_vm(vm_name)
        if vm.status.get_state() == 'down':
            raise Exception('Could not suspend %s because it\'s not running.' % vm_name)
        else:
            ack = vm.suspend()
            return ack.get_status().get_state() == 'complete'

    def clone_vm(self, source_name, vm_name):
        raise NotImplementedError('This function has not yet been implemented.')

    def deploy_template(self, template, *args, **kwargs):
        """Create a VM from *template* (kwargs: vm_name, cluster_name),
        wait for it to settle, start it, and return its name."""
        self.api.vms.add(params.VM(
            name=kwargs['vm_name'],
            cluster=self.api.clusters.get(kwargs['cluster_name']),
            template=self.api.templates.get(template)))
        # Creation is asynchronous: poll (re-fetching, per class docstring)
        # until the new VM reaches 'down', then boot it.
        while self.api.vms.get(kwargs['vm_name']).status.state != 'down':
            time.sleep(5)
        self.start_vm(kwargs['vm_name'])
        while not self.is_vm_running(kwargs['vm_name']):
            time.sleep(5)
        return kwargs['vm_name']
logging.info ( 'Successfully Connected to %s' % APIURL) try: print ' \n I am logging in %s \n' % LOGFILENAME vmsList = api.vms.list() for i in vmsList: print i.name if i.status.state != 'up': logging.warning('%s is not up, trying to start it' % i.name) threadMe = Thread(target=start_vms, args=[i]) threadMe.start() threads.append(threadMe) except Exception as e: logging.debug('Error:\n%s' % str(e)) logging.warning ('No of VMs to start : %s' % len(threads)) print 'No of VMs to start: %s' % len(threads) for th in threads: logging.info ('Waiting for %s to join' % th) th.join (30) if not th.isAlive(): logging.info ('Thread : %s terminated' % (th.getName())) else: logging.debug( 'Thread : %s is still alive, you may check this task..' % (th)) logging.debug (' Below Vms failed to start with an exception:%s' % (failedVms)); api.disconnect() except Exception as ex: logging.debug('Unexpected error: %s' % ex)
def migrateHost2defaultHost(self, system, move):
    """ This function will move vms to its default host.

    The function makes a loop over all running VMs. If a VM is not running
    on his staring Host then this VM will be moved to its starting host.
    It is the intention to use this function only form the commandline
    application moveVm2Host. A delay is forseen between every REST call in
    order not to overload the RHV Manager.

    :param system: The rhvm host (a section name in self.config)
    :param move: set to 1 will move the hosts; 0 only reports what would move
    """
    try:
        if debug: print "Getting information from %s with url %s" % (
            system, self.config.get(system, 'url'))
        # Initialize api_instance sith login creadentials
        api = API(url=self.config.get(system, 'url'),
                  username=self.config.get(system, 'username'),
                  password=self.config.get(system, 'password'),
                  ca_file=self.config.get(system, 'ca_file'))
        if debug: print "-Connected successfully-"
        if debug: print "--Retrieving the HOSTS and host state--"
        # Build id -> name map of the hosts that are up, used for reporting.
        hostList = api.hosts.list()
        for host in hostList:
            if debug: print "%s (%s) is %s" % (host.get_name(),
                                               host.get_id(),
                                               host.status.state)
            if (host.status.state == 'up'):
                self.hostArr[host.get_id()] = host.get_name()
        if debug: print "--Retrieving the VMS--"
        vmList = api.vms.list()
        for vm in vmList:
            if (vm.status.state == 'up'):
                if debug: print "%s (%s) %s ON %s" % (vm.get_name(),
                                                      vm.get_id(),
                                                      vm.status.state,
                                                      vm.host.get_id())
                if (vm.placement_policy.host is None):
                    #if debug: print "No host placement policy assigned"
                    pass
                else:
                    if debug: print "Host placement policy assigned to %s" % (
                        vm.placement_policy.host.get_id())
                    if (vm.placement_policy.host.get_id() == vm.host.get_id()):
                        # Already on its pinned/default host.
                        if debug: print "No HOST migration needed"
                    else:
                        line = "Host %s (on %s) needs to move to %s" % (
                            vm.get_name(),
                            self.hostArr[vm.host.get_id()],
                            self.hostArr[vm.placement_policy.host.get_id()]
                        )
                        sys.stdout.write(line)
                        sys.stdout.flush()
                        #move = 1
                        if move:
                            # Migrate back to the placement-policy host.
                            vm.migrate(action=params.Action(
                                host=params.Host(
                                    id=vm.placement_policy.host.get_id())))
                            # This is a sleep routine
                            # (progress dots; throttles the REST calls)
                            for i in range(0, 5):
                                sys.stdout.write('.')
                                sys.stdout.flush()
                                time.sleep(2)
                            sys.stdout.write("\n")
                            sys.stdout.flush()
                        else:
                            sys.stdout.write("\n")
                            sys.stdout.flush()
            else:
                if debug: print "%s (%s) DOWN migration policy %s" % (
                    vm.get_name(), vm.get_id(), vm.placement_policy.affinity)
        api.disconnect()
    except Exception as ex:
        print "Exception : %s " % str(ex)
class ovirt_test( object ):
    """An instance represents ovirt test. """

    def __init__ ( self, test_dict = {} ):
        '''Constructor for ovirt_test.

        Expected shape of *test_dict* (example values):
        self.ovirt_dict = {}
        self.ovirt_dict['URL'] = url
        self.ovirt_dict['VERSION'] = params.Version(major='3', minor='0')
        self.ovirt_dict['CA_FILE'] = "/etc/pki/ovirt-engine/ca.pem"
        self.ovirt_dict['USERINFO'] = {}
        self.ovirt_dict['USERINFO']['NAME'] = 'admin@internal'
        self.ovirt_dict['USERINFO']['PASSWORD'] = '******'
        self.ovirt_dict['DATACENTERS'] = [(name,type,cluster,storage,network),(name,...), ...]
            network = [(name,type),(name, ...), ...]
            storage = [(name,type,addr,path),(name, ...), ...]
            cluster = [(name,type,hosts,vms,volumes),(name, ...), ...]
                hosts = [(name,address),(name, ...), ...]
                volumes = [(name,type),(name, ...), ...]
                vms = [(name,type,ostype,display,nics,disks),(name, ...), ...]
                    nics = [(name,type,network),(name, ...), ...]
                    disks = [(name,type,size,storage),(name, ...), ...]
        '''
        # Module-level sizing knobs used elsewhere in the test suite.
        global iCLUSTERS
        global iSTORAGES
        global iHOSTS
        iCLUSTERS = 2
        iSTORAGES = 3
        iHOSTS = 2
        self.ovirt_dict = {}
        self.ovirt_dict['URL'] = test_dict['URL']
        self.ovirt_dict['VERSION'] = test_dict['VERSION']
        self.ovirt_dict['CA_FILE'] = test_dict['CA_FILE']
        # Only the first user from the supplied dict is kept initially.
        self.ovirt_dict['USERINFOS'] = []
        self.ovirt_dict['USERINFOS'].append(test_dict['USERINFOS'][0])
        self.ovirt_dict['CONNECTION'] = False
        self.ovirt_dict['DATACENTERS'] = []
        self.ovirt_dict['CLUSTERS'] = []
        self.ovirt_dict['HOSTS'] = []
        self.ovirt_dict['STORAGES'] = []
        self.ovirt_dict['VMS'] = []
        self.ovirt_dict['NICS'] = []
        self.ovirt_dict['DISKS'] = []

    def connect_engine( self ):
        '''Connect ovirt-engine, default connect local ovirt-engine

        Equivalent to:
        api = API(url="https://128.224.165.209:443/api", \
                  username="******", \
                  password="******", \
                  ca_file="/etc/pki/ovirt-engine/ca.pem")
        '''
        try:
            self.api = API(url=self.ovirt_dict['URL'],
                           username=self.ovirt_dict['USERINFOS'][0][0],
                           password=self.ovirt_dict['USERINFOS'][0][1],
                           ca_file=self.ovirt_dict['CA_FILE'])
            print 'Connect ovirt-engine successfully'
            self.ovirt_dict['CONNECTION'] = True
        except Exception as e:
            print 'Connect ovirt-engine failed:\n%s' % (str(e))
            self.ovirt_dict['CONNECTION'] = False
            return False

    def disconnect( self ):
        '''Disconnect ovirt-engine'''
        try:
            # SDK disconnect() returns None on success.
            if self.api.disconnect() == None:
                print 'Disconnect ovirt-engine successfully'
                self.ovirt_dict['CONNECTION'] = False
        except Exception as e:
            print 'Disconnect ovirt-engine failed:\n%s' % (str(e))
            self.ovirt_dict['CONNECTION'] = 1
            return False

    def check_item( self, group , item_name, Other = 'None' ):
        '''Check the item(item_name) exist in group

        *group* is a sequence of tuples whose first element is a name;
        returns the matching index, or None when not found / on error.
        '''
        try:
            index = 0
            length = len(group)
            for index in range(0,length):
                if group[index][0] == item_name:
                    # Optionally also require the second tuple element to
                    # match *Other*; either way the index is returned.
                    if Other != 'None' and group[index][1] == Other:
                        return index
                    return index
                if index + 1 >= length:
                    return None
        except Exception as e:
            print 'Check %s failed:\n%s' % (item_name,str(e))
            return None

    def add_user( self, UserInfo = ('admin@internal','admin') ):
        '''add a new user'''
        try:
            self.ovirt_dict['USERINFOS'].append(UserInfo)
        except Exception as e:
            print 'Add new user failed:\n%s' % (str(e))
            return False

    def change_user( self, UserInfo = ('admin@internal','admin') ):
        '''change username and password'''
        try:
            length = len(self.ovirt_dict['USERINFOS'])
            for i in range(0,length):
                if self.ovirt_dict['USERINFOS'][i][0] == UserInfo[0]:
                    # Replace the stored (name, password) tuple in place.
                    tempUser = self.ovirt_dict['USERINFOS'][i]
                    self.ovirt_dict['USERINFOS'].remove(tempUser)
                    self.ovirt_dict['USERINFOS'].insert(i,UserInfo)
                    Found = 1
                    break
                else:
                    Found = 0
            if Found == 0:
                print 'Change user information failed'
                return False
        except Exception as e:
            print 'Change user information failed:\n%s' % (str(e))
            return False

    def printInstanceInfo( self ):
        '''Print the instance's ovirt_dict (debug helper).'''
        print self.ovirt_dict

    def print_summary(self, summary):
        '''Print *summary* padded with '#' to a fixed-width banner.'''
        lenth = len(summary)
        print ' \n' + '#'*10 + ' ' + summary + ' ' + '#'*(40-lenth) + ' \n'
class RHEVMHelper(object): api_connections_lock = BoundedSemaphore() def __init__(self, url, username, password): self.log = logging.getLogger('%s.%s' % (__name__, self.__class__.__name__)) # The SDK allows only a single active connection object to be created, regardless of whether # or not multiple RHEVM servers are being accessed. For now we need to have a global lock, # create a connection object before each batch of API interactions and then disconnect it. self.api_details = { 'url': url, 'username': username, 'password': password } # TODO: When this limitation in the ovirt SDK is removed, get rid of these def _init_api(self): self.log.debug( "Doing blocking acquire() on global RHEVM API connection lock") self.api_connections_lock.acquire() self.log.debug("Got global RHEVM API connection lock") url = self.api_details['url'] username = self.api_details['username'] password = self.api_details['password'] self.api = API(url=url, username=username, password=password) def _disconnect_api(self): self.api.disconnect() self.log.debug("Releasing global RHEVM API connection lock") self.api_connections_lock.release() # These are the only two genuinley public methods # What we create is a VM template def import_template(self, image_filename, nfs_host, nfs_path, nfs_dir, cluster, ovf_name=None, ovf_desc=None): if not ovf_desc: self.ovf_desc = "Imported by Image Factory" else: self.ovf_desc = ovf_desc self.log.debug( "Preparing for RHEVM template import of image file (%s)" % (image_filename)) # API lock protected action try: self._init_api() self.init_vm_import(image_filename, nfs_host, nfs_path, nfs_dir, cluster) finally: self._disconnect_api() if not ovf_name: self.ovf_name = str(self.tpl_uuid) else: self.ovf_name = ovf_name self.log.debug("Staging files") self.stage_files() self.log.debug("Moving files to final export domain location") self.move_files() self.log.debug("Executing import") # API lock protected action try: self._init_api() self.execute_import() finally: 
self._disconnect_api() return str(self.tpl_uuid) def delete_template(self, template_uuid): template = self.api.templates.get(id=template_uuid) if template: template.delete() return True else: return False # Begin Nuts and Bolts # We don't want to run seteuid() in our main process as it will globally change the UID/GID for everything # OTOH, we need to be root to access our image files and temp files # We use stdin and Popen's preexec_fn via the helper functions below to deal with this def become_nfs_user(self): os.setegid(NFSGID) os.seteuid(NFSUID) def copy_as_nfs_user(self, sourcefile, destfile): self.log.debug("Copying (%s) to (%s) as nfsuser" % (sourcefile, destfile)) f = open(sourcefile, "r") (stdout, stderr, retcode) = subprocess_check_output( ['dd', 'of=%s' % (destfile), 'bs=4k'], stdin=f, preexec_fn=self.become_nfs_user) f.close() def move_as_nfs_user(self, sourcefile, destfile): self.log.debug("Moving (%s) to (%s) as nfsuser" % (sourcefile, destfile)) (stdout, stderr, retcode) = subprocess_check_output( ['mv', '%s' % (sourcefile), '%s' % (destfile)], preexec_fn=self.become_nfs_user) def mkdir_as_nfs_user(self, directory): self.log.debug("Making directory (%s) as nfsuser" % (directory)) (stdout, stderr, retcode) = subprocess_check_output( ['mkdir', '%s' % (directory)], preexec_fn=self.become_nfs_user) def rm_rf_as_nfs_user(self, directory): self.log.debug("Recursive remove of dir (%s) as nfsuser" % (directory)) (stdout, stderr, retcode) = subprocess_check_output( ['rm', '-rf', '%s' % (directory)], preexec_fn=self.become_nfs_user) def get_storage_domain(self, nfs_host, nfs_path): # Find the storage domain that matches the nfs details given sds = self.api.storagedomains.list() for sd in sds: if sd.get_type() == "export": self.log.debug("Export domain: (%s)" % (sd.get_name())) stor = sd.get_storage() if (stor.get_address() == nfs_host) and (stor.get_path() == nfs_path): self.log.debug("This is the right domain (%s)" % (sd.get_id())) return sd return None def 
get_pool_id(self, sd_uuid): # Get datacenter for a given storage domain UUID # This is the UUID that becomes the "StoragePoolID" in our OVF XML # TODO: The storagedomain object has a get_data_center() method that doesn't seem to work # Find out why dcs = self.api.datacenters.list() for dc in dcs: self.log.debug( "Looking for our storage domain (%s) in data center (%s)" % (sd_uuid, dc.get_id())) sd = dc.storagedomains.get(id=sd_uuid) if sd: self.log.debug("This is the right datacenter (%s)" % (dc.get_id())) return dc return None def get_cluster_by_dc(self, poolid): # If we have been passed "_any_" as the cluster name, we pick the first cluster that # matches our datacenter/pool ID clusters = self.api.clusters.list() for cluster in clusters: dc_id = None if cluster.get_data_center(): dc_id = cluster.get_data_center().get_id() self.log.debug( "Checking cluster (%s) with name (%s) with data center (%s)" % (cluster.get_id(), cluster.get_name(), dc_id)) if dc_id == poolid: return cluster self.log.debug("Cannot find cluster for dc (%s)" % (poolid)) return None def get_cluster_by_name(self, name): # If we have been passed a specific cluster name, we need to find that specific cluster clusters = self.api.clusters.list() for cluster in clusters: self.log.debug("Checking cluster (%s) with name (%s)" % (cluster.get_id(), cluster.get_name())) if cluster.get_name() == name: return cluster self.log.debug("Cannot find cluster named (%s)" % (name)) return None def check_qcow_size(self, filename): # Detect if an image is in qcow format # If it is, return the size of the underlying disk image # If it isn't, return none # For interested parties, this is the QCOW header struct in C # struct qcow_header { # uint32_t magic; # uint32_t version; # uint64_t backing_file_offset; # uint32_t backing_file_size; # uint32_t cluster_bits; # uint64_t size; /* in bytes */ # uint32_t crypt_method; # uint32_t l1_size; # uint64_t l1_table_offset; # uint64_t refcount_table_offset; # uint32_t 
    # (continuation of the C qcow2 header struct listing started above in
    #  check_qcow_size — the def line is above this window)
    #     refcount_table_clusters;
    #     uint32_t nb_snapshots;
    #     uint64_t snapshots_offset;
    # };
    # And in Python struct format string-ese
    qcow_struct = ">IIQIIQIIQQIIQ"  # > means big-endian
    qcow_magic = 0x514649FB  # 'Q' 'F' 'I' 0xFB
    # NOTE(review): opens a binary qcow image in text mode ("r"); works on
    # CPython 2 / POSIX but would corrupt reads on py3 — confirm intended runtime.
    f = open(filename, "r")
    pack = f.read(struct.calcsize(qcow_struct))
    f.close()
    unpack = struct.unpack(qcow_struct, pack)
    if unpack[0] == qcow_magic:
        # Field 5 of the header is the virtual size of the image
        return unpack[5]
    else:
        return None

    def init_vm_import(self, image_filename, nfs_host, nfs_path, nfs_dir, cluster):
        """Prepare for the import of a VM.

        Resolves the export storage domain, its datacenter (pool), and the
        target cluster, computes the volume size, and generates the UUIDs
        and timestamps used later by the OVF XML and .meta file.

        Raises:
            Exception: if the storage domain, datacenter, or cluster
                cannot be resolved via the API.
        """
        self.image_filename = image_filename
        self.nfs_host = nfs_host
        self.nfs_path = nfs_path
        self.nfs_dir = nfs_dir

        # Sets some values used when creating XML and meta files
        self.storage_domain_object = self.get_storage_domain(nfs_host, nfs_path)
        if self.storage_domain_object:
            self.storage_domain = self.storage_domain_object.get_id()
        else:
            raise Exception("Cannot find storage domain matching NFS details given")

        self.dc_object = self.get_pool_id(self.storage_domain)
        if self.dc_object:
            # Our StoragePoolID is the UUID of the DC containing our storage domain
            self.pool_id = self.dc_object.get_id()
        else:
            raise Exception("Cannot find datacenter for our storage domain")

        if cluster == '_any_':
            # '_any_' means: pick the first cluster that lives in our datacenter
            self.cluster_object = self.get_cluster_by_dc(self.pool_id)
        else:
            self.cluster_object = self.get_cluster_by_name(cluster)
        if self.cluster_object:
            self.cluster = self.cluster_object.get_id()
        else:
            raise Exception("Cannot find cluster (%s)" % (cluster))

        # Volume size is the qcow_size if the image is qcow
        # or the raw disk image size if not
        self.qcow_size = self.check_qcow_size(image_filename)
        if self.qcow_size:
            self.vol_size = self.qcow_size
        else:
            statb = os.stat(image_filename)
            self.vol_size = statb[stat.ST_SIZE]

        # We need these three unique identifiers when generating XML and the meta file
        self.img_uuid = uuid.uuid4()
        self.vol_uuid = uuid.uuid4()
        self.tpl_uuid = uuid.uuid4()

        # Set this once to use in both the OVF XML and the meta file
        self.raw_create_time = time.time()
        self.create_time = time.gmtime(self.raw_create_time)

    def stage_files(self):
        """Copy the OVF XML, .meta file, and disk image into temporary
        staging directories inside the export domain (as the NFS user)."""
        # Called after init to copy files to staging location
        # This is the base dir of the export domain
        self.export_domain_dir = self.nfs_dir + "/" + self.storage_domain
        if not os.path.isdir(self.export_domain_dir):
            raise Exception(
                "Cannot find expected export domain directory (%s) at local mount point (%s)" %
                (self.nfs_dir, self.storage_domain))

        # Make distinct tempdir for OVF stuff
        self.ovftmpdir = self.export_domain_dir + "/" + "imgfac." + str(self.tpl_uuid)
        self.mkdir_as_nfs_user(self.ovftmpdir)

        # Add the OVF file
        self.ovfdest = self.ovftmpdir + "/" + str(self.tpl_uuid) + ".ovf"
        ovf_file_object = NamedTemporaryFile()
        et = self.generate_ovf_xml()
        et.write(ovf_file_object)
        ovf_file_object.flush()
        # Written via a local tempfile first, then copied with NFS-user euid
        self.copy_as_nfs_user(ovf_file_object.name, self.ovfdest)
        ovf_file_object.close()

        # Make our own temporary subdir for the image file itself
        self.imgtmpdir = self.export_domain_dir + "/" + "imgfac." + str(self.img_uuid)
        self.mkdir_as_nfs_user(self.imgtmpdir)

        # Add the meta file for the image
        self.imgdest = self.imgtmpdir + "/" + str(self.vol_uuid)
        self.imgmetadest = self.imgdest + ".meta"
        meta_file_object = NamedTemporaryFile()
        meta_file_object.write(self.generate_meta_file())
        meta_file_object.flush()
        self.copy_as_nfs_user(meta_file_object.name, self.imgmetadest)
        meta_file_object.close()

        # Copy the big image file last
        self.copy_as_nfs_user(self.image_filename, self.imgdest)

    def move_files(self):
        """Move the staged image and OVF dirs to their final export-domain
        locations, where RHEVM will discover them by UUID."""
        self.final_image_dir = "%s/images/%s" % (self.export_domain_dir, str(self.img_uuid))
        self.final_ovf_dir = "%s/master/vms/%s" % (self.export_domain_dir, str(self.tpl_uuid))

        self.move_as_nfs_user(self.imgtmpdir, self.final_image_dir)
        self.move_as_nfs_user(self.ovftmpdir, self.final_ovf_dir)

    def remove_export_template(self):
        """Delete the template's files from the export domain (cleanup after
        the import has been triggered)."""
        self.rm_rf_as_nfs_user(self.final_image_dir)
        self.rm_rf_as_nfs_user(self.final_ovf_dir)

    def execute_import(self):
        """Trigger the RHEVM template import and poll (up to 5 minutes) until
        the template leaves the 'locked' state; then remove the export files.

        Raises:
            Exception: if no master storage domain exists for the datacenter,
                on timeout, or if the import ends in an unexpected state.
        """
        # We import to the master storage domain of the datacenter of which our
        # export domain is a member
        # Got it?
        action = params.Action()
        sds = self.dc_object.storagedomains.list()
        for sd in sds:
            if sd.get_master():
                action.storage_domain = sd
        if not action.storage_domain:
            raise Exception(
                "Could not find master storage domain for datacenter ID (%s)" %
                (self.dc_object.get_id()))
        action.cluster = self.cluster_object

        # At this point our freshly copied in files are discoverable via the
        # tpl_uuid in our export domain
        template = self.storage_domain_object.templates.get(id=str(self.tpl_uuid))
        if template:
            template.import_template(action=action)
            real_template = self.api.templates.get(id=str(self.tpl_uuid))
            # Wait 5 minutes for an import to finish
            self.log.debug("Waiting for template import to complete")
            for i in range(30):
                self.log.debug(
                    "Waited %d - state (%s)" %
                    (i * 10, real_template.get_status().get_state()))
                if real_template.get_status().get_state() != 'locked':
                    break
                # Objects are not refreshed by action calls; re-fetch state
                real_template = real_template.update()
                sleep(10)
            self.log.debug("Deleting export domain files")
            self.remove_export_template()
            final_state = real_template.get_status().get_state()
            if final_state == 'ok':
                self.log.debug("Template import completed successfully")
                return
            elif final_state == 'locked':
                raise Exception("Timed out waiting for template import to finish")
            else:
                raise Exception("Template import ended in unknown state (%s)" % (final_state))

    def generate_meta_file(self):
        """Build the VDSM-style .meta sidecar file content for the volume.

        Returns:
            str: key=value lines terminated by an EOF marker.
        """
        metafile = ""

        metafile += "DOMAIN=" + self.storage_domain + "\n"
        # saved template has VOLTYPE=SHARED
        metafile += "VOLTYPE=LEAF\n"
        metafile += "CTIME=" + str(int(self.raw_create_time)) + "\n"
        # saved template has FORMAT=COW
        if self.qcow_size:
            metafile += "FORMAT=COW\n"
        else:
            metafile += "FORMAT=RAW\n"
        metafile += "IMAGE=" + str(self.img_uuid) + "\n"
        metafile += "DISKTYPE=1\n"
        metafile += "PUUID=00000000-0000-0000-0000-000000000000\n"
        metafile += "LEGALITY=LEGAL\n"
        metafile += "MTIME=" + str(int(self.raw_create_time)) + "\n"
        metafile += "POOL_UUID=" + self.pool_id + "\n"
        # assuming 1KB alignment
        # NOTE(review): SIZE is expressed in 512-byte sectors; under py3 this
        # division would yield a float — confirm py2-only runtime.
        metafile += "SIZE=" + str(self.vol_size / 512) + "\n"
        metafile += "TYPE=SPARSE\n"
        metafile += "DESCRIPTION=Uploaded by Image Factory\n"
        metafile += "EOF\n"

        return metafile

    def generate_ovf_xml(self):
        """Build the RHEV-flavored OVF descriptor for the template.

        Returns:
            ElementTree.ElementTree: OVF envelope referencing the staged
            disk (img_uuid/vol_uuid) and template (tpl_uuid), with a fixed
            1-CPU / 512 MB / single-NIC hardware section.
        """
        etroot = ElementTree.Element('ovf:Envelope')
        etroot.set('xmlns:ovf', "http://schemas.dmtf.org/ovf/envelope/1/")
        etroot.set('xmlns:rasd', "http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_ResourceAllocationSettingData")
        etroot.set('xmlns:vssd', "http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_VirtualSystemSettingData")
        etroot.set('xmlns:xsi', "http://www.w3.org/2001/XMLSchema-instance")
        etroot.set('ovf:version', "0.9")

        etref = ElementTree.Element('References')

        etfile = ElementTree.Element('File')
        etfile.set('ovf:href', str(self.img_uuid) + '/' + str(self.vol_uuid))
        etfile.set('ovf:id', str(self.vol_uuid))
        etfile.set('ovf:size', str(self.vol_size))
        # TODO: Bulk this up a bit
        etfile.set('ovf:description', self.ovf_name)

        etref.append(etfile)

        etroot.append(etref)

        etsec = ElementTree.Element('Section')
        etsec.set('xsi:type', "ovf:NetworkSection_Type")
        ete = ElementTree.Element('Info')
        ete.text = "List of Networks"
        etsec.append(ete)
        # dummy section, even though we have Ethernet defined below
        etroot.append(etsec)

        etsec = ElementTree.Element('Section')
        etsec.set('xsi:type', "ovf:DiskSection_Type")

        etdisk = ElementTree.Element('Disk')
        etdisk.set('ovf:diskId', str(self.vol_uuid))
        # Size in GB, rounded up
        vol_size_str = str((self.vol_size + (1024 * 1024 * 1024) - 1) / (1024 * 1024 * 1024))
        etdisk.set('ovf:size', vol_size_str)
        etdisk.set('ovf:vm_snapshot_id', '00000000-0000-0000-0000-000000000000')
        etdisk.set('ovf:actual_size', vol_size_str)
        etdisk.set('ovf:format', 'http://www.vmware.com/specifications/vmdk.html#sparse')
        etdisk.set('ovf:parentRef', '')
        # XXX ovf:vm_snapshot_id
        etdisk.set('ovf:fileRef', str(self.img_uuid) + '/' + str(self.vol_uuid))
        # XXX ovf:format ("usually url to the specification")
        if self.qcow_size:
            etdisk.set('ovf:volume-type', "Sparse")
            etdisk.set('ovf:volume-format', "COW")
        else:
            etdisk.set('ovf:volume-type', "Preallocated")
            etdisk.set('ovf:volume-format', "RAW")
        etdisk.set('ovf:disk-interface', "VirtIO")
        etdisk.set('ovf:disk-type', "System")
        etdisk.set('ovf:boot', "true")
        etdisk.set('ovf:wipe-after-delete', "false")

        etsec.append(etdisk)

        etroot.append(etsec)

        etcon = ElementTree.Element('Content')
        etcon.set('xsi:type', "ovf:VirtualSystem_Type")
        etcon.set('ovf:id', "out")

        ete = ElementTree.Element('Name')
        ete.text = self.ovf_name
        etcon.append(ete)

        ete = ElementTree.Element('TemplateId')
        ete.text = str(self.tpl_uuid)
        etcon.append(ete)

        # spec also has 'TemplateName'

        ete = ElementTree.Element('Description')
        ete.text = self.ovf_desc
        etcon.append(ete)

        ete = ElementTree.Element('Domain')
        # AD domain, not in use right now
        # ete.text =
        etcon.append(ete)

        ete = ElementTree.Element('CreationDate')
        ete.text = time.strftime("%Y/%m/%d %H:%M:%S", self.create_time)
        etcon.append(ete)

        ete = ElementTree.Element('TimeZone')
        # ete.text =
        etcon.append(ete)

        ete = ElementTree.Element('IsAutoSuspend')
        ete.text = "false"
        etcon.append(ete)

        ete = ElementTree.Element('VmType')
        ete.text = "1"
        etcon.append(ete)

        ete = ElementTree.Element('default_display_type')
        # vnc = 0, gxl = 1
        ete.text = "0"
        etcon.append(ete)

        ete = ElementTree.Element('default_boot_sequence')
        # C=0,   DC=1,  N=2, CDN=3, CND=4, DCN=5, DNC=6, NCD=7,
        # NDC=8, CD=9, D=10, CN=11, DN=12, NC=13, ND=14
        # (C - HardDisk, D - CDROM, N - Network)
        ete.text = "1"
        etcon.append(ete)

        etsec = ElementTree.Element('Section')
        etsec.set('xsi:type', "ovf:OperatingSystemSection_Type")
        etsec.set('ovf:id', str(self.tpl_uuid))
        etsec.set('ovf:required', "false")

        ete = ElementTree.Element('Info')
        ete.text = "Guest OS"
        etsec.append(ete)

        ete = ElementTree.Element('Description')
        # This is rigid, must be "Other", "OtherLinux", "RHEL6", or such
        ete.text = "OtherLinux"
        etsec.append(ete)

        etcon.append(etsec)

        etsec = ElementTree.Element('Section')
        etsec.set('xsi:type', "ovf:VirtualHardwareSection_Type")

        ete = ElementTree.Element('Info')
        ete.text = "1 CPU, 512 Memory"
        etsec.append(ete)

        etsys = ElementTree.Element('System')
        # This is probably wrong, needs actual type.
        ete = ElementTree.Element('vssd:VirtualSystemType')
        ete.text = "RHEVM 4.6.0.163"
        etsys.append(ete)
        etsec.append(etsys)

        # CPU item
        etitem = ElementTree.Element('Item')

        ete = ElementTree.Element('rasd:Caption')
        ete.text = "1 virtual CPU"
        etitem.append(ete)

        ete = ElementTree.Element('rasd:Description')
        ete.text = "Number of virtual CPU"
        etitem.append(ete)

        ete = ElementTree.Element('rasd:InstanceId')
        ete.text = "1"
        etitem.append(ete)

        ete = ElementTree.Element('rasd:ResourceType')
        ete.text = "3"
        etitem.append(ete)

        ete = ElementTree.Element('rasd:num_of_sockets')
        ete.text = "1"
        etitem.append(ete)

        ete = ElementTree.Element('rasd:cpu_per_socket')
        ete.text = "1"
        etitem.append(ete)

        etsec.append(etitem)

        # Memory item
        etitem = ElementTree.Element('Item')

        ete = ElementTree.Element('rasd:Caption')
        ete.text = "512 MB of memory"
        etitem.append(ete)

        ete = ElementTree.Element('rasd:Description')
        ete.text = "Memory Size"
        etitem.append(ete)

        ete = ElementTree.Element('rasd:InstanceId')
        ete.text = "2"
        etitem.append(ete)

        ete = ElementTree.Element('rasd:ResourceType')
        ete.text = "4"
        etitem.append(ete)

        ete = ElementTree.Element('rasd:AllocationUnits')
        ete.text = "MegaBytes"
        etitem.append(ete)

        ete = ElementTree.Element('rasd:VirtualQuantity')
        ete.text = "512"
        etitem.append(ete)

        etsec.append(etitem)

        # Disk item
        etitem = ElementTree.Element('Item')

        ete = ElementTree.Element('rasd:Caption')
        ete.text = "Drive 1"
        etitem.append(ete)

        ete = ElementTree.Element('rasd:InstanceId')
        ete.text = str(self.vol_uuid)
        etitem.append(ete)

        ete = ElementTree.Element('rasd:ResourceType')
        ete.text = "17"
        etitem.append(ete)

        ete = ElementTree.Element('rasd:HostResource')
        ete.text = str(self.img_uuid) + '/' + str(self.vol_uuid)
        etitem.append(ete)

        ete = ElementTree.Element('rasd:Parent')
        ete.text = "00000000-0000-0000-0000-000000000000"
        etitem.append(ete)

        ete = ElementTree.Element('rasd:Template')
        ete.text = "00000000-0000-0000-0000-000000000000"
        etitem.append(ete)

        ete = ElementTree.Element('rasd:ApplicationList')
        # List of installed applications, separated by comma
        etitem.append(ete)

        # This corresponds to ID of volgroup in host where snapshot was taken.
        # Obviously we have nothing like it.
        ete = ElementTree.Element('rasd:StorageId')
        # "Storage Domain Id"
        ete.text = "00000000-0000-0000-0000-000000000000"
        etitem.append(ete)

        ete = ElementTree.Element('rasd:StoragePoolId')
        ete.text = self.pool_id
        etitem.append(ete)

        ete = ElementTree.Element('rasd:CreationDate')
        ete.text = time.strftime("%Y/%m/%d %H:%M:%S", self.create_time)
        etitem.append(ete)

        ete = ElementTree.Element('rasd:LastModified')
        ete.text = time.strftime("%Y/%m/%d %H:%M:%S", self.create_time)
        etitem.append(ete)

        etsec.append(etitem)

        # NIC item
        etitem = ElementTree.Element('Item')

        ete = ElementTree.Element('rasd:Caption')
        ete.text = "Ethernet 0 rhevm"
        etitem.append(ete)

        ete = ElementTree.Element('rasd:InstanceId')
        ete.text = "3"
        etitem.append(ete)

        ete = ElementTree.Element('rasd:ResourceType')
        ete.text = "10"
        etitem.append(ete)

        ete = ElementTree.Element('rasd:ResourceSubType')
        # e1000 = 2, pv = 3
        ete.text = "3"
        etitem.append(ete)

        ete = ElementTree.Element('rasd:Connection')
        ete.text = "rhevm"
        etitem.append(ete)

        ete = ElementTree.Element('rasd:Name')
        ete.text = "eth0"
        etitem.append(ete)

        # also allowed is "MACAddress"

        ete = ElementTree.Element('rasd:speed')
        ete.text = "1000"
        etitem.append(ete)

        etsec.append(etitem)

        # Graphics item
        etitem = ElementTree.Element('Item')

        ete = ElementTree.Element('rasd:Caption')
        ete.text = "Graphics"
        etitem.append(ete)

        ete = ElementTree.Element('rasd:InstanceId')
        # doc says "6", reality is "5"
        ete.text = "5"
        etitem.append(ete)

        ete = ElementTree.Element('rasd:ResourceType')
        ete.text = "20"
        etitem.append(ete)

        ete = ElementTree.Element('rasd:VirtualQuantity')
        ete.text = "1"
        etitem.append(ete)

        etsec.append(etitem)

        etcon.append(etsec)

        etroot.append(etcon)

        et = ElementTree.ElementTree(etroot)

        return et
def run(**kwargs):
    """Calls all the functions needed to upload new template to RHEVM.

    This is called either by template_upload_all script, or by main function.

    Args:
        **kwargs: Kwargs generated from
            cfme_data['template_upload']['template_upload_rhevm'].
    """
    ovaname = get_ova_name(kwargs.get('image_url'))

    mgmt_sys = cfme_data['management_systems'][kwargs.get('provider')]

    rhevurl = mgmt_sys['hostname']
    rhevm_credentials = mgmt_sys['credentials']
    username = credentials[rhevm_credentials]['username']
    password = credentials[rhevm_credentials]['password']
    # SSH access goes to the first host of the provider
    ssh_rhevm_creds = mgmt_sys['hosts'][0]['credentials']
    sshname = credentials[ssh_rhevm_creds]['username']
    sshpass = credentials[ssh_rhevm_creds]['password']
    rhevip = mgmt_sys['ipaddress']

    apiurl = 'https://%s:443/api' % rhevurl

    ssh_client = make_ssh_client(rhevip, sshname, sshpass)
    api = API(url=apiurl, username=username, password=password,
              insecure=True, persistent_auth=False)

    template_name = kwargs.get('template_name', None)
    if template_name is None:
        template_name = cfme_data['basic_info']['appliance_template']

    # Resolved up front so the export-domain dir can be cleaned in `finally`
    # even if a later step fails
    path, edomain_ip = get_edomain_path(api, kwargs.get('edomain'))

    kwargs = update_params_api(api, **kwargs)

    check_kwargs(**kwargs)

    if api.templates.get(template_name) is not None:
        # Idempotency guard: don't re-upload an existing template
        print("RHEVM: Found finished template with this name.")
        print("RHEVM: The script will now end.")
    else:
        print("RHEVM: Downloading .ova file...")
        download_ova(ssh_client, kwargs.get('image_url'))
        try:
            print("RHEVM: Templatizing .ova file...")
            template_from_ova(api, username, password, rhevip,
                              kwargs.get('edomain'), ovaname, ssh_client)
            print("RHEVM: Importing new template...")
            import_template(api, kwargs.get('edomain'), kwargs.get('sdomain'),
                            kwargs.get('cluster'))
            print("RHEVM: Making a temporary VM from new template...")
            make_vm_from_template(api, kwargs.get('cluster'))
            print("RHEVM: Adding disk to created VM...")
            add_disk_to_vm(api, kwargs.get('sdomain'),
                           kwargs.get('disk_size'),
                           kwargs.get('disk_format'),
                           kwargs.get('disk_interface'))
            print("RHEVM: Templatizing VM...")
            templatize_vm(api, template_name, kwargs.get('cluster'))
        finally:
            # Always clean up: the export domain must be in maintenance to
            # remove leftovers, then reactivated afterwards.
            change_edomain_state(api, 'maintenance', kwargs.get('edomain'))
            cleanup(api, kwargs.get('edomain'), ssh_client, ovaname)
            cleanup_empty_dir_on_edomain(path, edomain_ip, sshname, sshpass)
            change_edomain_state(api, 'active', kwargs.get('edomain'))
            ssh_client.close()
            api.disconnect()
            print("RHEVM: Done.")
class RHEVMSystem(MgmtSystemAPIBase):
    """Client to RHEVM API

    This class piggy backs off ovirtsdk.

    Benefits of ovirtsdk:
    - Don't need intimate knowledge w/ RHEVM api itself.

    Detriments of ovirtsdk:
    - Response to most queries are returned as an object rather than a string.
      This makes it harder to do simple stuff like getting the status of a vm.
    - Because of this, it makes listing VMs based on **kwargs impossible
      since ovirtsdk relies on re class to find matches.

      E.G. List out VM with this name (positive case)
      Ideal: self.api.vms.list(name='test_vm')
      Underneath the hood:
        - ovirtsdk fetches list of all vms
          [ovirtsdk.infrastructure.brokers.VM object, ...]
        - ovirtsdk then tries to filter the result using re.
          - tries to look for 'name' attr in
            ovirtsdk.infrastructure.brokers.VM object
          - found name attribute, in this case, the type of the value of the
            attribute is string.
          - match() succeed in comparing the value to 'test_vm'

      E.G. List out VM with that's powered on (negative case)
      Ideal: self.api.vms.list(status='up')
      Underneath the hood:
        - '^same step as above except^'
          - found status attribute, in this case, the type of the value of
            the attribute is ovirtsdk.xml.params.Status
          - match() failed because class is compared to string 'up'

      This problem should be attributed to how RHEVM api was designed rather
      than how ovirtsdk handles RHEVM api responses.

    - Obj. are not updated after action calls.
      - E.G.
        vm = api.vms.get(name='test_vm')
        vm.status.get_state() # returns 'down'
        vm.start()
        # wait a few mins
        vm.status.get_state() # returns 'down'; wtf?
        vm = api.vms.get(name='test_vm')
        vm.status.get_state() # returns 'up'
    """

    def __init__(self, hostname, username, password, **kwargs):
        # generate URL from hostname; an explicit 'port' kwarg overrides the default
        if 'port' in kwargs:
            url = 'https://%s:%s/api' % (hostname, kwargs['port'])
        else:
            url = 'https://%s/api' % hostname

        self.api = API(url=url, username=username, password=password, insecure=True)

    def _get_vm(self, vm_name=None):
        """Return the ovirtsdk VM broker for ``vm_name``.

        Raises:
            Exception: if ``vm_name`` is None or no VM with that name exists.
        """
        if vm_name is None:
            raise Exception('Could not find a VM named %s.' % vm_name)
        else:
            vm = self.api.vms.get(name=vm_name)
            if vm is None:
                raise Exception('Could not find a VM named %s.' % vm_name)
            return vm

    def start_vm(self, vm_name=None):
        """Start the VM; returns True if it is already up or the start
        action completes."""
        vm = self._get_vm(vm_name)
        if vm.status.get_state() == 'up':
            return True
        else:
            ack = vm.start()
            if ack.get_status().get_state() == 'complete':
                return True
        return False

    def stop_vm(self, vm_name):
        """Stop the VM; returns True if it is already down or the stop
        action completes."""
        vm = self._get_vm(vm_name)
        if vm.status.get_state() == 'down':
            return True
        else:
            ack = vm.stop()
            if ack.get_status().get_state() == 'complete':
                return True
        return False

    def delete_vm(self, vm_name):
        """Stop (if needed) and delete the VM; returns True on success."""
        vm = self._get_vm(vm_name)
        if vm.status.get_state() == 'up':
            self.stop_vm(vm_name)
        ack = vm.delete()
        # NOTE(review): a successful delete appears to report an empty state
        # string here — confirm against the ovirtsdk version in use.
        if ack.get_status().get_state() == '':
            return True
        else:
            return False

    def create_vm(self, vm_name):
        raise NotImplementedError('This function has not yet been implemented.')

    def restart_vm(self, vm_name):
        """Stop then start the VM; returns False if the stop failed."""
        if not self.stop_vm(vm_name):
            return False
        else:
            return self.start_vm(vm_name)

    def list_vm(self, **kwargs):
        """Return a list of VM names matching **kwargs.

        list vm based on kwargs can be buggy:
        i.e. you can't return a list of powered on vm
        but you can return a vm w/ a matched name
        """
        vm_list = self.api.vms.list(**kwargs)
        return [vm.name for vm in vm_list]

    def info(self):
        # and we got nothing!
        pass

    def disconnect(self):
        self.api.disconnect()

    def vm_status(self, vm_name=None):
        """Fetch, print, and return the VM's current state string."""
        state = self._get_vm(vm_name).get_status().get_state()
        # Fixed: was a Python 2 print statement; the rest of this file uses
        # the print() function. Output is identical under both runtimes.
        print("vm " + vm_name + " status is " + state)
        return state

    def is_vm_running(self, vm_name):
        state = self.vm_status(vm_name)
        return "up" == state

    def is_vm_stopped(self, vm_name):
        state = self.vm_status(vm_name)
        return "down" == state

    def is_vm_suspended(self, vm_name):
        state = self.vm_status(vm_name)
        return "suspended" == state

    def suspend_vm(self, vm_name):
        """Suspend the VM; returns True if the suspend action completes.

        Raises:
            Exception: if the VM is currently down.
        """
        vm = self._get_vm(vm_name)
        if vm.status.get_state() == 'down':
            raise Exception('Could not suspend %s because it\'s not running.' % vm_name)
        else:
            ack = vm.suspend()
            return ack.get_status().get_state() == 'complete'

    def clone_vm(self, source_name, vm_name):
        raise NotImplementedError('This function has not yet been implemented.')
class RHEVMHelper(object):
    """Import a disk image into RHEVM as a VM template via an NFS export
    domain, using RHEVOVFPackage to stage the OVF/metadata files."""

    # Process-wide lock: the ovirt SDK allows only one active connection object
    api_connections_lock = BoundedSemaphore()

    def __init__(self, url, username, password):
        self.log = logging.getLogger('%s.%s' % (__name__, self.__class__.__name__))
        # The SDK allows only a single active connection object to be created,
        # regardless of whether or not multiple RHEVM servers are being accessed.
        # For now we need to have a global lock, create a connection object
        # before each batch of API interactions and then disconnect it.
        self.api_details = {'url': url, 'username': username, 'password': password}

    # TODO: When this limitation in the ovirt SDK is removed, get rid of these
    def _init_api(self):
        """Acquire the global connection lock and open an API connection."""
        self.log.debug("Doing blocking acquire() on global RHEVM API connection lock")
        self.api_connections_lock.acquire()
        self.log.debug("Got global RHEVM API connection lock")
        url = self.api_details['url']
        username = self.api_details['username']
        password = self.api_details['password']
        self.api = API(url=url, username=username, password=password, insecure=True)

    def _disconnect_api(self):
        """Disconnect (if connected) and always release the global lock."""
        try:
            self.log.debug("Attempting API disconnect")
            if hasattr(self, 'api') and self.api is not None:
                self.api.disconnect()
            else:
                self.log.debug("API connection was not initialized. Will not attempt to disconnect.")
        finally:
            # Must always do this
            self.log.debug("Releasing global RHEVM API connection lock")
            self.api_connections_lock.release()

    # These are the only two genuinley public methods
    # What we create is a VM template

    def import_template(self, image_filename, nfs_host, nfs_path, nfs_dir, cluster,
                        ovf_name=None, ovf_desc=None):
        """Full import pipeline: init, stage, move files, then trigger the
        RHEVM import. Returns the new template's UUID as a string."""
        if not ovf_desc:
            self.ovf_desc = "Imported by Image Factory"
        else:
            self.ovf_desc = ovf_desc
        self.log.debug("Preparing for RHEVM template import of image file (%s)" % (image_filename))

        # API lock protected action
        try:
            self._init_api()
            self.init_vm_import(image_filename, nfs_host, nfs_path, nfs_dir, cluster)
        finally:
            self._disconnect_api()

        # NOTE(review): unlike the other RHEVMHelper variant in this file,
        # there is no fallback when ovf_name is None — confirm RHEVOVFPackage
        # tolerates a None name.
        self.ovf_name = ovf_name
        self.log.debug("Staging files")
        self.stage_files()
        self.log.debug("Moving files to final export domain location")
        self.move_files()
        # Local staging package is no longer needed once files are in place
        self.ovf_pkg.delete()
        self.log.debug("Executing import")

        # API lock protected action
        try:
            self._init_api()
            self.execute_import()
        finally:
            self._disconnect_api()

        return str(self.ovf_pkg.tpl_uuid)

    def delete_template(self, template_uuid):
        """Delete the template with the given UUID; True if it existed."""
        template = self.api.templates.get(id=template_uuid)
        if template:
            template.delete()
            return True
        else:
            return False

    # Begin Nuts and Bolts

    # We don't want to run seteuid() in our main process as it will globally
    # change the UID/GID for everything
    # OTOH, we need to be root to access our image files and temp files
    # We use stdin and Popen's preexec_fn via the helper functions below to
    # deal with this

    def become_nfs_user(self):
        """preexec_fn for subprocesses: drop effective IDs to the NFS user."""
        os.setegid(NFSGID)
        os.seteuid(NFSUID)

    def copy_as_nfs_user(self, sourcefile, destfile):
        """Copy a single file as the NFS user (root reads source via stdin,
        the unprivileged dd writes the destination)."""
        self.log.debug("Copying (%s) to (%s) as nfsuser" % (sourcefile, destfile))
        f = open(sourcefile, "r")
        (stdout, stderr, retcode) = subprocess_check_output(
            ['dd', 'of=%s' % (destfile), 'bs=4k'],
            stdin=f, preexec_fn=self.become_nfs_user)
        f.close()

    def copy_dir_as_nfs_user(self, sourcefile, destfile):
        """Recursively copy a directory as the NFS user."""
        self.log.debug("Copying directory (%s) to (%s) as nfsuser" % (sourcefile, destfile))
        (stdout, stderr, retcode) = subprocess_check_output(
            ['cp', '-r', '%s' % (sourcefile), '%s' % (destfile)],
            preexec_fn=self.become_nfs_user)

    def move_as_nfs_user(self, sourcefile, destfile):
        """Move a file/directory as the NFS user."""
        self.log.debug("Moving (%s) to (%s) as nfsuser" % (sourcefile, destfile))
        (stdout, stderr, retcode) = subprocess_check_output(
            ['mv', '%s' % (sourcefile), '%s' % (destfile)],
            preexec_fn=self.become_nfs_user)

    def mkdir_as_nfs_user(self, directory):
        """Create a directory as the NFS user."""
        self.log.debug("Making directory (%s) as nfsuser" % (directory))
        (stdout, stderr, retcode) = subprocess_check_output(
            ['mkdir', '%s' % (directory)],
            preexec_fn=self.become_nfs_user)

    def rm_rf_as_nfs_user(self, directory):
        """Recursively delete a directory as the NFS user."""
        self.log.debug("Recursive remove of dir (%s) as nfsuser" % (directory))
        (stdout, stderr, retcode) = subprocess_check_output(
            ['rm', '-rf', '%s' % (directory)],
            preexec_fn=self.become_nfs_user)

    def get_storage_domain(self, nfs_host, nfs_path):
        """Find the export storage domain matching the given NFS host/path,
        or None if no match."""
        # Find the storage domain that matches the nfs details given
        sds = self.api.storagedomains.list()
        for sd in sds:
            if sd.get_type() == "export":
                self.log.debug("Export domain: (%s)" % (sd.get_name()))
                stor = sd.get_storage()
                if (stor.get_address() == nfs_host) and (stor.get_path() == nfs_path):
                    self.log.debug("This is the right domain (%s)" % (sd.get_id()))
                    return sd
        return None

    def get_pool_id(self, sd_uuid):
        """Return the datacenter object containing the given storage domain
        UUID, or None. Its ID becomes the "StoragePoolID" in our OVF XML."""
        # Get datacenter for a given storage domain UUID
        # This is the UUID that becomes the "StoragePoolID" in our OVF XML
        # TODO: The storagedomain object has a get_data_center() method that
        #       doesn't seem to work - Find out why
        dcs = self.api.datacenters.list()
        for dc in dcs:
            self.log.debug("Looking for our storage domain (%s) in data center (%s)" %
                           (sd_uuid, dc.get_id()))
            sd = dc.storagedomains.get(id=sd_uuid)
            if sd:
                self.log.debug("This is the right datacenter (%s)" % (dc.get_id()))
                return dc
        return None

    def get_cluster_by_dc(self, poolid):
        """Return the first cluster whose datacenter matches poolid, or None."""
        # If we have been passed "_any_" as the cluster name, we pick the first
        # cluster that matches our datacenter/pool ID
        clusters = self.api.clusters.list()

        for cluster in clusters:
            dc_id = None
            if cluster.get_data_center():
                dc_id = cluster.get_data_center().get_id()
            self.log.debug("Checking cluster (%s) with name (%s) with data center (%s)" %
                           (cluster.get_id(), cluster.get_name(), dc_id))
            if dc_id == poolid:
                return cluster
        self.log.debug("Cannot find cluster for dc (%s)" % (poolid))
        return None

    def get_cluster_by_name(self, name):
        """Return the cluster with the given name, or None."""
        # If we have been passed a specific cluster name, we need to find that
        # specific cluster
        clusters = self.api.clusters.list()
        for cluster in clusters:
            self.log.debug("Checking cluster (%s) with name (%s)" %
                           (cluster.get_id(), cluster.get_name()))
            if cluster.get_name() == name:
                return cluster
        self.log.debug("Cannot find cluster named (%s)" % (name))
        return None

    def init_vm_import(self, image_filename, nfs_host, nfs_path, nfs_dir, cluster):
        """Prepare for the import of a VM: resolve storage domain, datacenter
        (pool) and cluster via the API.

        Raises:
            Exception: if any of the three cannot be resolved.
        """
        # Prepare for the import of a VM
        self.image_filename = image_filename
        self.nfs_host = nfs_host
        self.nfs_path = nfs_path
        self.nfs_dir = nfs_dir

        # Sets some values used when creating XML and meta files
        self.storage_domain_object = self.get_storage_domain(nfs_host, nfs_path)
        if self.storage_domain_object:
            self.storage_domain = self.storage_domain_object.get_id()
        else:
            raise Exception("Cannot find storage domain matching NFS details given")

        self.dc_object = self.get_pool_id(self.storage_domain)
        if self.dc_object:
            # Our StoragePoolID is the UUID of the DC containing our storage domain
            self.pool_id = self.dc_object.get_id()
        else:
            raise Exception("Cannot find datacenter for our storage domain")

        if cluster == '_any_':
            self.cluster_object = self.get_cluster_by_dc(self.pool_id)
        else:
            self.cluster_object = self.get_cluster_by_name(cluster)
        if self.cluster_object:
            self.cluster = self.cluster_object.get_id()
        else:
            raise Exception("Cannot find cluster (%s)" % (cluster))

    def stage_files(self):
        """Build the RHEVOVFPackage for our image and sync it to disk."""
        # Called after init to copy files to staging location
        # This is the base dir of the export domain
        self.export_domain_dir = self.nfs_dir + "/" + self.storage_domain
        if not os.path.isdir(self.export_domain_dir):
            raise Exception("Cannot find expected export domain directory (%s) at local mount point (%s)" % (self.nfs_dir, self.storage_domain))

        self.ovf_pkg = RHEVOVFPackage(disk=self.image_filename,
                                      ovf_name=self.ovf_name,
                                      ovf_desc=self.ovf_desc)
        self.ovf_pkg.sync()

    def move_files(self):
        """Copy the staged image and OVF dirs into their final
        export-domain locations (as the NFS user)."""
        self.final_image_dir = "%s/images/%s" % (self.export_domain_dir, str(self.ovf_pkg.img_uuid))
        self.final_ovf_dir = "%s/master/vms/%s" % (self.export_domain_dir, str(self.ovf_pkg.tpl_uuid))

        self.copy_dir_as_nfs_user(self.ovf_pkg.image_dir, self.final_image_dir)
        self.copy_dir_as_nfs_user(self.ovf_pkg.ovf_dir, self.final_ovf_dir)

    def remove_export_template(self):
        """Delete the template's files from the export domain."""
        self.rm_rf_as_nfs_user(self.final_image_dir)
        self.rm_rf_as_nfs_user(self.final_ovf_dir)

    def execute_import(self):
        """Trigger the RHEVM template import and poll (up to 5 minutes) for
        completion, then remove the export-domain files.

        Raises:
            Exception: if no master storage domain is found, on timeout, or
                if the import ends in an unexpected state.
        """
        # We import to the master storage domain of the datacenter of which
        # our export domain is a member
        # Got it?
        action = params.Action()
        sds = self.dc_object.storagedomains.list()
        for sd in sds:
            if sd.get_master():
                action.storage_domain = sd
        if not action.storage_domain:
            raise Exception("Could not find master storage domain for datacenter ID (%s)" % (self.dc_object.get_id()))
        action.cluster = self.cluster_object

        # At this point our freshly copied in files are discoverable via the
        # tpl_uuid in our export domain
        template = self.storage_domain_object.templates.get(id=str(self.ovf_pkg.tpl_uuid))
        if template:
            template.import_template(action=action)
            real_template = self.api.templates.get(id=str(self.ovf_pkg.tpl_uuid))
            # Wait 5 minutes for an import to finish
            self.log.debug("Waiting for template import to complete")
            for i in range(30):
                self.log.debug("Waited %d - state (%s)" %
                               (i * 10, real_template.get_status().get_state()))
                if real_template.get_status().get_state() != 'locked':
                    break
                # SDK objects are not refreshed by actions; re-fetch the state
                real_template = real_template.update()
                sleep(10)
            self.log.debug("Deleting export domain files")
            self.remove_export_template()
            final_state = real_template.get_status().get_state()
            if final_state == 'ok':
                self.log.debug("Template import completed successfully")
                return
            elif final_state == 'locked':
                raise Exception("Timed out waiting for template import to finish")
            else:
                raise Exception("Template import ended in unknown state (%s)" % (final_state))
class RHEVMHelper(object):
    """Import a disk image into RHEV-M as a VM template.

    Stages the image plus a hand-built OVF and .meta file into an NFS export
    domain (as the NFS user, via preexec_fn helpers), then drives the RHEV-M
    API import and polls it to completion.

    NOTE(review): this file carries several near-identical copies of this
    class; this is the variant that generates the OVF XML itself.
    """

    # Global lock: the ovirt SDK supports only one active connection object
    # per process (see __init__), so all API sessions are serialized here.
    api_connections_lock = BoundedSemaphore()

    def __init__(self, url, username, password):
        """Record connection details; no API connection is opened here."""
        self.log = logging.getLogger('%s.%s' % (__name__, self.__class__.__name__))
        # The SDK allows only a single active connection object to be created,
        # regardless of whether or not multiple RHEVM servers are being
        # accessed.  For now we need to have a global lock, create a
        # connection object before each batch of API interactions and then
        # disconnect it.
        self.api_details = {'url': url, 'username': username, 'password': password}

    # TODO: When this limitation in the ovirt SDK is removed, get rid of these
    def _init_api(self):
        """Acquire the global connection lock and open an API session."""
        self.log.debug("Doing blocking acquire() on global RHEVM API connection lock")
        self.api_connections_lock.acquire()
        self.log.debug("Got global RHEVM API connection lock")
        url = self.api_details['url']
        username = self.api_details['username']
        password = self.api_details['password']
        self.api = API(url=url, username=username, password=password)

    def _disconnect_api(self):
        """Close the API session and release the global connection lock."""
        # FIX: release the lock even when disconnect() raises; previously an
        # exception here left the semaphore held and deadlocked every later
        # _init_api() call in this process.  (Matches the hardened variant of
        # this class elsewhere in the file.)
        try:
            self.api.disconnect()
        finally:
            self.log.debug("Releasing global RHEVM API connection lock")
            self.api_connections_lock.release()

    # These are the only two genuinely public methods.
    # What we create is a VM template.
    def import_template(self, image_filename, nfs_host, nfs_path, nfs_dir, cluster,
                        ovf_name=None, ovf_desc=None):
        """Import *image_filename* into RHEV-M and return the template UUID string.

        *cluster* is a cluster name, or '_any_' to take the first cluster in
        the datacenter that owns the export domain.  *ovf_name* defaults to
        the generated template UUID, *ovf_desc* to a fixed marker string.
        """
        if not ovf_desc:
            self.ovf_desc = "Imported by Image Factory"
        else:
            self.ovf_desc = ovf_desc
        self.log.debug("Preparing for RHEVM template import of image file (%s)" % (image_filename))
        # API lock protected action
        try:
            self._init_api()
            self.init_vm_import(image_filename, nfs_host, nfs_path, nfs_dir, cluster)
        finally:
            self._disconnect_api()
        if not ovf_name:
            self.ovf_name = str(self.tpl_uuid)
        else:
            self.ovf_name = ovf_name
        self.log.debug("Staging files")
        self.stage_files()
        self.log.debug("Moving files to final export domain location")
        self.move_files()
        self.log.debug("Executing import")
        # API lock protected action
        try:
            self._init_api()
            self.execute_import()
        finally:
            self._disconnect_api()
        return str(self.tpl_uuid)

    def delete_template(self, template_uuid):
        """Delete the template with *template_uuid*; True if it existed."""
        # NOTE(review): unlike import_template(), this touches self.api
        # without going through _init_api()/_disconnect_api() -- confirm
        # callers hold an open, locked connection before relying on this.
        template = self.api.templates.get(id=template_uuid)
        if template:
            template.delete()
            return True
        else:
            return False

    # Begin Nuts and Bolts
    # We don't want to run seteuid() in our main process as it will globally
    # change the UID/GID for everything.  OTOH, we need to be root to access
    # our image files and temp files.  We use stdin and Popen's preexec_fn via
    # the helper functions below to deal with this.
    def become_nfs_user(self):
        """preexec_fn: drop the child's effective IDs to the NFS user."""
        os.setegid(NFSGID)
        os.seteuid(NFSUID)

    def copy_as_nfs_user(self, sourcefile, destfile):
        """Copy *sourcefile* to *destfile* with the NFS user as the writer."""
        self.log.debug("Copying (%s) to (%s) as nfsuser" % (sourcefile, destfile))
        # FIX: open in binary mode ("rb") -- these are raw disk images, and dd
        # reads the fd we hand over as stdin.  "with" also closes the handle
        # if subprocess_check_output raises (the old code leaked it).
        with open(sourcefile, "rb") as f:
            subprocess_check_output(['dd', 'of=%s' % (destfile), 'bs=4k'],
                                    stdin=f, preexec_fn=self.become_nfs_user)

    def move_as_nfs_user(self, sourcefile, destfile):
        """Move/rename a path as the NFS user."""
        self.log.debug("Moving (%s) to (%s) as nfsuser" % (sourcefile, destfile))
        subprocess_check_output(['mv', '%s' % (sourcefile), '%s' % (destfile)],
                                preexec_fn=self.become_nfs_user)

    def mkdir_as_nfs_user(self, directory):
        """Create *directory* as the NFS user."""
        self.log.debug("Making directory (%s) as nfsuser" % (directory))
        subprocess_check_output(['mkdir', '%s' % (directory)],
                                preexec_fn=self.become_nfs_user)

    def rm_rf_as_nfs_user(self, directory):
        """Recursively delete *directory* as the NFS user."""
        self.log.debug("Recursive remove of dir (%s) as nfsuser" % (directory))
        subprocess_check_output(['rm', '-rf', '%s' % (directory)],
                                preexec_fn=self.become_nfs_user)

    def get_storage_domain(self, nfs_host, nfs_path):
        """Return the export storage domain backed by the given NFS share, or None."""
        sds = self.api.storagedomains.list()
        for sd in sds:
            if sd.get_type() == "export":
                self.log.debug("Export domain: (%s)" % (sd.get_name()))
                stor = sd.get_storage()
                if (stor.get_address() == nfs_host) and (stor.get_path() == nfs_path):
                    self.log.debug("This is the right domain (%s)" % (sd.get_id()))
                    return sd
        return None

    def get_pool_id(self, sd_uuid):
        """Return the datacenter containing storage domain *sd_uuid*, or None.

        The datacenter UUID becomes the "StoragePoolID" in our OVF XML.
        """
        # TODO: The storagedomain object has a get_data_center() method that
        # doesn't seem to work - find out why
        dcs = self.api.datacenters.list()
        for dc in dcs:
            self.log.debug("Looking for our storage domain (%s) in data center (%s)" % (sd_uuid, dc.get_id()))
            sd = dc.storagedomains.get(id=sd_uuid)
            if sd:
                self.log.debug("This is the right datacenter (%s)" % (dc.get_id()))
                return dc
        return None

    def get_cluster_by_dc(self, poolid):
        """Return the first cluster whose datacenter ID equals *poolid*, or None.

        Used when the caller passed '_any_' as the cluster name.
        """
        clusters = self.api.clusters.list()
        for cluster in clusters:
            dc_id = None
            if cluster.get_data_center():
                dc_id = cluster.get_data_center().get_id()
            self.log.debug("Checking cluster (%s) with name (%s) with data center (%s)" % (cluster.get_id(), cluster.get_name(), dc_id))
            if dc_id == poolid:
                return cluster
        self.log.debug("Cannot find cluster for dc (%s)" % (poolid))
        return None

    def get_cluster_by_name(self, name):
        """Return the cluster named *name*, or None."""
        clusters = self.api.clusters.list()
        for cluster in clusters:
            self.log.debug("Checking cluster (%s) with name (%s)" % (cluster.get_id(), cluster.get_name()))
            if cluster.get_name() == name:
                return cluster
        self.log.debug("Cannot find cluster named (%s)" % (name))
        return None

    def check_qcow_size(self, filename):
        """Return the virtual disk size if *filename* is a qcow image, else None."""
        # For interested parties, this is the QCOW header struct in C
        # struct qcow_header {
        #     uint32_t magic;
        #     uint32_t version;
        #     uint64_t backing_file_offset;
        #     uint32_t backing_file_size;
        #     uint32_t cluster_bits;
        #     uint64_t size; /* in bytes */
        #     uint32_t crypt_method;
        #     uint32_t l1_size;
        #     uint64_t l1_table_offset;
        #     uint64_t refcount_table_offset;
        #     uint32_t refcount_table_clusters;
        #     uint32_t nb_snapshots;
        #     uint64_t snapshots_offset;
        # };
        # And in Python struct format string-ese
        qcow_struct = ">IIQIIQIIQQIIQ"  # > means big-endian
        qcow_magic = 0x514649FB  # 'Q' 'F' 'I' 0xFB
        # FIX: read the header in binary mode -- text mode can mangle or fail
        # to decode arbitrary image bytes.  "with" closes the handle on error.
        with open(filename, "rb") as f:
            pack = f.read(struct.calcsize(qcow_struct))
        unpack = struct.unpack(qcow_struct, pack)
        if unpack[0] == qcow_magic:
            return unpack[5]
        else:
            return None

    def init_vm_import(self, image_filename, nfs_host, nfs_path, nfs_dir, cluster):
        """Resolve API objects and compute all values the OVF/meta files need."""
        # Prepare for the import of a VM
        self.image_filename = image_filename
        self.nfs_host = nfs_host
        self.nfs_path = nfs_path
        self.nfs_dir = nfs_dir
        # Sets some values used when creating XML and meta files
        self.storage_domain_object = self.get_storage_domain(nfs_host, nfs_path)
        if self.storage_domain_object:
            self.storage_domain = self.storage_domain_object.get_id()
        else:
            raise Exception("Cannot find storage domain matching NFS details given")
        self.dc_object = self.get_pool_id(self.storage_domain)
        if self.dc_object:
            # Our StoragePoolID is the UUID of the DC containing our storage domain
            self.pool_id = self.dc_object.get_id()
        else:
            raise Exception("Cannot find datacenter for our storage domain")
        if cluster == '_any_':
            self.cluster_object = self.get_cluster_by_dc(self.pool_id)
        else:
            self.cluster_object = self.get_cluster_by_name(cluster)
        if self.cluster_object:
            self.cluster = self.cluster_object.get_id()
        else:
            raise Exception("Cannot find cluster (%s)" % (cluster))
        # Volume size is the qcow_size if the image is qcow, or the raw disk
        # image size if not
        self.qcow_size = self.check_qcow_size(image_filename)
        if self.qcow_size:
            self.vol_size = self.qcow_size
        else:
            statb = os.stat(image_filename)
            self.vol_size = statb[stat.ST_SIZE]
        # We need these three unique identifiers when generating XML and the meta file
        self.img_uuid = uuid.uuid4()
        self.vol_uuid = uuid.uuid4()
        self.tpl_uuid = uuid.uuid4()
        # Set this once to use in both the OVF XML and the meta file
        self.raw_create_time = time.time()
        self.create_time = time.gmtime(self.raw_create_time)

    def stage_files(self):
        """Copy the OVF, the .meta file and the image into temp dirs in the export domain."""
        # Called after init to copy files to staging location.
        # This is the base dir of the export domain.
        self.export_domain_dir = self.nfs_dir + "/" + self.storage_domain
        if not os.path.isdir(self.export_domain_dir):
            # NOTE(review): the message interpolates (nfs_dir, storage_domain)
            # although it names the export-domain dir and mount point -- the
            # args look swapped/odd; left as is to preserve behavior.
            raise Exception("Cannot find expected export domain directory (%s) at local mount point (%s)" % (self.nfs_dir, self.storage_domain))
        # Make distinct tempdir for OVF stuff
        self.ovftmpdir = self.export_domain_dir + "/" + "imgfac." + str(self.tpl_uuid)
        self.mkdir_as_nfs_user(self.ovftmpdir)
        # Add the OVF file
        self.ovfdest = self.ovftmpdir + "/" + str(self.tpl_uuid) + ".ovf"
        ovf_file_object = NamedTemporaryFile()
        et = self.generate_ovf_xml()
        et.write(ovf_file_object)
        ovf_file_object.flush()
        self.copy_as_nfs_user(ovf_file_object.name, self.ovfdest)
        ovf_file_object.close()
        # Make our own temporary subdir for the image file itself
        self.imgtmpdir = self.export_domain_dir + "/" + "imgfac." + str(self.img_uuid)
        self.mkdir_as_nfs_user(self.imgtmpdir)
        # Add the meta file for the image
        self.imgdest = self.imgtmpdir + "/" + str(self.vol_uuid)
        self.imgmetadest = self.imgdest + ".meta"
        meta_file_object = NamedTemporaryFile()
        meta_file_object.write(self.generate_meta_file())
        meta_file_object.flush()
        self.copy_as_nfs_user(meta_file_object.name, self.imgmetadest)
        meta_file_object.close()
        # Copy the big image file last
        self.copy_as_nfs_user(self.image_filename, self.imgdest)

    def move_files(self):
        """Move the staged temp dirs to their final export-domain locations."""
        self.final_image_dir = "%s/images/%s" % (self.export_domain_dir, str(self.img_uuid))
        self.final_ovf_dir = "%s/master/vms/%s" % (self.export_domain_dir, str(self.tpl_uuid))
        self.move_as_nfs_user(self.imgtmpdir, self.final_image_dir)
        self.move_as_nfs_user(self.ovftmpdir, self.final_ovf_dir)

    def remove_export_template(self):
        """Delete the image and OVF dirs from the export domain."""
        self.rm_rf_as_nfs_user(self.final_image_dir)
        self.rm_rf_as_nfs_user(self.final_ovf_dir)

    def execute_import(self):
        """Trigger the RHEV-M import of the staged template and poll it to completion."""
        # We import to the master storage domain of the datacenter of which
        # our export domain is a member.  Got it?
        action = params.Action()
        sds = self.dc_object.storagedomains.list()
        for sd in sds:
            if sd.get_master():
                action.storage_domain = sd
        if not action.storage_domain:
            raise Exception("Could not find master storage domain for datacenter ID (%s)" % (self.dc_object.get_id()))
        action.cluster = self.cluster_object
        # At this point our freshly copied in files are discoverable via the
        # tpl_uuid in our export domain
        template = self.storage_domain_object.templates.get(id=str(self.tpl_uuid))
        if template:
            template.import_template(action=action)
        # NOTE(review): if the export-domain template was not found, the poll
        # below operates on whatever the API returns (possibly None) -- TODO
        # confirm and raise a clearer error in that case.
        real_template = self.api.templates.get(id=str(self.tpl_uuid))
        # Wait 5 minutes for an import to finish (30 polls x 10s)
        self.log.debug("Waiting for template import to complete")
        for i in range(30):
            self.log.debug("Waited %d - state (%s)" % (i*10, real_template.get_status().get_state()))
            if real_template.get_status().get_state() != 'locked':
                break
            real_template = real_template.update()
            sleep(10)
        self.log.debug("Deleting export domain files")
        self.remove_export_template()
        final_state = real_template.get_status().get_state()
        if final_state == 'ok':
            self.log.debug("Template import completed successfully")
            return
        elif final_state == 'locked':
            raise Exception("Timed out waiting for template import to finish")
        else:
            raise Exception("Template import ended in unknown state (%s)" % (final_state))

    def generate_meta_file(self):
        """Build the .meta sidecar contents describing the image volume."""
        metafile = ""
        metafile += "DOMAIN=" + self.storage_domain + "\n"
        # saved template has VOLTYPE=SHARED
        metafile += "VOLTYPE=LEAF\n"
        metafile += "CTIME=" + str(int(self.raw_create_time)) + "\n"
        # saved template has FORMAT=COW
        if self.qcow_size:
            metafile += "FORMAT=COW\n"
        else:
            metafile += "FORMAT=RAW\n"
        metafile += "IMAGE=" + str(self.img_uuid) + "\n"
        metafile += "DISKTYPE=1\n"
        metafile += "PUUID=00000000-0000-0000-0000-000000000000\n"
        metafile += "LEGALITY=LEGAL\n"
        metafile += "MTIME=" + str(int(self.raw_create_time)) + "\n"
        metafile += "POOL_UUID=" + self.pool_id + "\n"
        # SIZE is in 512-byte sectors (the original comment said "assuming 1KB
        # alignment" -- TODO confirm intent; the divisor is 512).
        # FIX: floor division so the value stays an integer under Python 3 too.
        metafile += "SIZE=" + str(self.vol_size // 512) + "\n"
        metafile += "TYPE=SPARSE\n"
        metafile += "DESCRIPTION=Uploaded by Image Factory\n"
        metafile += "EOF\n"
        return metafile

    def _el(self, parent, tag, text=None, attrs=None):
        """Create an Element, set its text/attributes, append it to *parent*.

        *parent* may be None for the root.  Returns the new element.
        """
        e = ElementTree.Element(tag)
        if attrs:
            for key in attrs:
                e.set(key, attrs[key])
        if text is not None:
            e.text = text
        if parent is not None:
            parent.append(e)
        return e

    def generate_ovf_xml(self):
        """Build the RHEV-flavoured OVF envelope (ElementTree) for the template."""
        created = time.strftime("%Y/%m/%d %H:%M:%S", self.create_time)
        imgref = str(self.img_uuid) + '/' + str(self.vol_uuid)
        zeros = "00000000-0000-0000-0000-000000000000"

        etroot = self._el(None, 'ovf:Envelope', attrs={
            'xmlns:ovf': "http://schemas.dmtf.org/ovf/envelope/1/",
            'xmlns:rasd': "http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_ResourceAllocationSettingData",
            'xmlns:vssd': "http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_VirtualSystemSettingData",
            'xmlns:xsi': "http://www.w3.org/2001/XMLSchema-instance",
            'ovf:version': "0.9"})

        etref = self._el(etroot, 'References')
        # TODO: Bulk this up a bit
        self._el(etref, 'File', attrs={
            'ovf:href': imgref,
            'ovf:id': str(self.vol_uuid),
            'ovf:size': str(self.vol_size),
            'ovf:description': self.ovf_name})

        # Dummy network section, even though we have Ethernet defined below
        etsec = self._el(etroot, 'Section', attrs={'xsi:type': "ovf:NetworkSection_Type"})
        self._el(etsec, 'Info', text="List of Networks")

        etsec = self._el(etroot, 'Section', attrs={'xsi:type': "ovf:DiskSection_Type"})
        # FIX: floor division -- size attributes must be whole-GiB strings,
        # and "/" would yield a float under Python 3.
        vol_size_str = str((self.vol_size + (1024*1024*1024) - 1) // (1024*1024*1024))
        disk_attrs = {
            'ovf:diskId': str(self.vol_uuid),
            'ovf:size': vol_size_str,
            'ovf:vm_snapshot_id': zeros,  # XXX ovf:vm_snapshot_id
            'ovf:actual_size': vol_size_str,
            # XXX ovf:format ("usually url to the specification")
            'ovf:format': 'http://www.vmware.com/specifications/vmdk.html#sparse',
            'ovf:parentRef': '',
            'ovf:fileRef': imgref}
        if self.qcow_size:
            disk_attrs['ovf:volume-type'] = "Sparse"
            disk_attrs['ovf:volume-format'] = "COW"
        else:
            disk_attrs['ovf:volume-type'] = "Preallocated"
            disk_attrs['ovf:volume-format'] = "RAW"
        disk_attrs['ovf:disk-interface'] = "VirtIO"
        disk_attrs['ovf:disk-type'] = "System"
        disk_attrs['ovf:boot'] = "true"
        disk_attrs['ovf:wipe-after-delete'] = "false"
        self._el(etsec, 'Disk', attrs=disk_attrs)

        etcon = self._el(etroot, 'Content', attrs={
            'xsi:type': "ovf:VirtualSystem_Type", 'ovf:id': "out"})
        self._el(etcon, 'Name', text=self.ovf_name)
        self._el(etcon, 'TemplateId', text=str(self.tpl_uuid))
        # spec also has 'TemplateName'
        self._el(etcon, 'Description', text=self.ovf_desc)
        self._el(etcon, 'Domain')  # AD domain, not in use right now
        self._el(etcon, 'CreationDate', text=created)
        self._el(etcon, 'TimeZone')  # unused
        self._el(etcon, 'IsAutoSuspend', text="false")
        self._el(etcon, 'VmType', text="1")
        self._el(etcon, 'default_display_type', text="0")  # vnc = 0, gxl = 1
        # C=0, DC=1, N=2, CDN=3, CND=4, DCN=5, DNC=6, NCD=7,
        # NDC=8, CD=9, D=10, CN=11, DN=12, NC=13, ND=14
        # (C - HardDisk, D - CDROM, N - Network)
        self._el(etcon, 'default_boot_sequence', text="1")

        etsec = self._el(etcon, 'Section', attrs={
            'xsi:type': "ovf:OperatingSystemSection_Type",
            'ovf:id': str(self.tpl_uuid),
            'ovf:required': "false"})
        self._el(etsec, 'Info', text="Guest OS")
        # This is rigid, must be "Other", "OtherLinux", "RHEL6", or such
        self._el(etsec, 'Description', text="OtherLinux")

        etsec = self._el(etcon, 'Section', attrs={'xsi:type': "ovf:VirtualHardwareSection_Type"})
        self._el(etsec, 'Info', text="1 CPU, 512 Memory")
        etsys = self._el(etsec, 'System')
        # This is probably wrong, needs actual type.
        self._el(etsys, 'vssd:VirtualSystemType', text="RHEVM 4.6.0.163")

        # CPU item
        etitem = self._el(etsec, 'Item')
        self._el(etitem, 'rasd:Caption', text="1 virtual CPU")
        self._el(etitem, 'rasd:Description', text="Number of virtual CPU")
        self._el(etitem, 'rasd:InstanceId', text="1")
        self._el(etitem, 'rasd:ResourceType', text="3")
        self._el(etitem, 'rasd:num_of_sockets', text="1")
        self._el(etitem, 'rasd:cpu_per_socket', text="1")

        # Memory item
        etitem = self._el(etsec, 'Item')
        self._el(etitem, 'rasd:Caption', text="512 MB of memory")
        self._el(etitem, 'rasd:Description', text="Memory Size")
        self._el(etitem, 'rasd:InstanceId', text="2")
        self._el(etitem, 'rasd:ResourceType', text="4")
        self._el(etitem, 'rasd:AllocationUnits', text="MegaBytes")
        self._el(etitem, 'rasd:VirtualQuantity', text="512")

        # Disk drive item
        etitem = self._el(etsec, 'Item')
        self._el(etitem, 'rasd:Caption', text="Drive 1")
        self._el(etitem, 'rasd:InstanceId', text=str(self.vol_uuid))
        self._el(etitem, 'rasd:ResourceType', text="17")
        self._el(etitem, 'rasd:HostResource', text=imgref)
        self._el(etitem, 'rasd:Parent', text=zeros)
        self._el(etitem, 'rasd:Template', text=zeros)
        # List of installed applications, separated by comma
        self._el(etitem, 'rasd:ApplicationList')
        # This corresponds to ID of volgroup in host where snapshot was taken.
        # Obviously we have nothing like it.
        self._el(etitem, 'rasd:StorageId', text=zeros)  # "Storage Domain Id"
        self._el(etitem, 'rasd:StoragePoolId', text=self.pool_id)
        self._el(etitem, 'rasd:CreationDate', text=created)
        self._el(etitem, 'rasd:LastModified', text=created)

        # Network interface item
        etitem = self._el(etsec, 'Item')
        self._el(etitem, 'rasd:Caption', text="Ethernet 0 rhevm")
        self._el(etitem, 'rasd:InstanceId', text="3")
        self._el(etitem, 'rasd:ResourceType', text="10")
        self._el(etitem, 'rasd:ResourceSubType', text="3")  # e1000 = 2, pv = 3
        self._el(etitem, 'rasd:Connection', text="rhevm")
        self._el(etitem, 'rasd:Name', text="eth0")
        # also allowed is "MACAddress"
        self._el(etitem, 'rasd:speed', text="1000")

        # Graphics item
        etitem = self._el(etsec, 'Item')
        self._el(etitem, 'rasd:Caption', text="Graphics")
        self._el(etitem, 'rasd:InstanceId', text="5")  # doc says "6", reality is "5"
        self._el(etitem, 'rasd:ResourceType', text="20")
        self._el(etitem, 'rasd:VirtualQuantity', text="1")

        return ElementTree.ElementTree(etroot)
class RHEVMHelper(object):
    """Import disk images into RHEV-M as VM templates.

    This variant delegates OVF/meta generation to RHEVOVFPackage and only
    stages, copies and imports the resulting package.
    """

    # One process-wide semaphore: the ovirt SDK supports a single active
    # connection object, so every API session must be serialized.
    api_connections_lock = BoundedSemaphore()

    def __init__(self, url, username, password):
        """Stash connection details; no API connection is opened yet."""
        self.log = logging.getLogger('%s.%s' % (__name__, self.__class__.__name__))
        # The SDK allows only a single active connection object to be created,
        # regardless of whether or not multiple RHEVM servers are being
        # accessed.  For now we need to have a global lock, create a
        # connection object before each batch of API interactions and then
        # disconnect it.
        self.api_details = {'url': url, 'username': username, 'password': password}

    # TODO: When this limitation in the ovirt SDK is removed, get rid of these
    def _init_api(self):
        """Take the global connection lock, then open the API session."""
        self.log.debug("Doing blocking acquire() on global RHEVM API connection lock")
        self.api_connections_lock.acquire()
        self.log.debug("Got global RHEVM API connection lock")
        details = self.api_details
        self.api = API(url=details['url'], username=details['username'],
                       password=details['password'], insecure=True)

    def _disconnect_api(self):
        """Tear down the API session; the lock is always released."""
        try:
            self.log.debug("Attempting API disconnect")
            if hasattr(self, 'api') and self.api is not None:
                self.api.disconnect()
            else:
                self.log.debug("API connection was not initialized. Will not attempt to disconnect.")
        finally:
            # Must always do this
            self.log.debug("Releasing global RHEVM API connection lock")
            self.api_connections_lock.release()

    # These are the only two genuinely public methods.
    # What we create is a VM template.
    def import_template(self, image_filename, nfs_host, nfs_path, nfs_dir,
                        cluster, ovf_name=None, ovf_desc=None):
        """Stage *image_filename* into the export domain and import it; returns the template UUID string."""
        self.ovf_desc = ovf_desc if ovf_desc else "Imported by Image Factory"
        self.log.debug("Preparing for RHEVM template import of image file (%s)" % (image_filename))
        # API lock protected action
        try:
            self._init_api()
            self.init_vm_import(image_filename, nfs_host, nfs_path, nfs_dir, cluster)
        finally:
            self._disconnect_api()
        self.ovf_name = ovf_name
        self.log.debug("Staging files")
        self.stage_files()
        self.log.debug("Moving files to final export domain location")
        self.move_files()
        self.ovf_pkg.delete()
        self.log.debug("Executing import")
        # API lock protected action
        try:
            self._init_api()
            self.execute_import()
        finally:
            self._disconnect_api()
        return str(self.ovf_pkg.tpl_uuid)

    def delete_template(self, template_uuid):
        """Delete the template with *template_uuid*; True if it existed."""
        # NOTE(review): uses self.api without _init_api()/_disconnect_api() --
        # confirm callers hold an open connection before relying on this.
        template = self.api.templates.get(id=template_uuid)
        if template:
            template.delete()
            return True
        return False

    # Begin Nuts and Bolts
    # We don't want to run seteuid() in our main process as it will globally
    # change the UID/GID for everything.  OTOH, we need to be root to access
    # our image files and temp files.  We use stdin and Popen's preexec_fn via
    # the helper functions below to deal with this.
    def become_nfs_user(self):
        """preexec_fn for the child processes: switch to the NFS user/group."""
        os.setegid(NFSGID)
        os.seteuid(NFSUID)

    def copy_as_nfs_user(self, sourcefile, destfile):
        """dd the source file onto *destfile* with the NFS user as writer."""
        self.log.debug("Copying (%s) to (%s) as nfsuser" % (sourcefile, destfile))
        handle = open(sourcefile, "r")
        subprocess_check_output(['dd', 'of=%s' % (destfile), 'bs=4k'],
                                stdin=handle, preexec_fn=self.become_nfs_user)
        handle.close()

    def copy_dir_as_nfs_user(self, sourcefile, destfile):
        """Recursively copy a directory tree as the NFS user."""
        self.log.debug("Copying directory (%s) to (%s) as nfsuser" % (sourcefile, destfile))
        subprocess_check_output(['cp', '-r', '%s' % (sourcefile), '%s' % (destfile)],
                                preexec_fn=self.become_nfs_user)

    def move_as_nfs_user(self, sourcefile, destfile):
        """Move/rename a path as the NFS user."""
        self.log.debug("Moving (%s) to (%s) as nfsuser" % (sourcefile, destfile))
        subprocess_check_output(['mv', '%s' % (sourcefile), '%s' % (destfile)],
                                preexec_fn=self.become_nfs_user)

    def mkdir_as_nfs_user(self, directory):
        """Create *directory* as the NFS user."""
        self.log.debug("Making directory (%s) as nfsuser" % (directory))
        subprocess_check_output(['mkdir', '%s' % (directory)],
                                preexec_fn=self.become_nfs_user)

    def rm_rf_as_nfs_user(self, directory):
        """Recursively delete *directory* as the NFS user."""
        self.log.debug("Recursive remove of dir (%s) as nfsuser" % (directory))
        subprocess_check_output(['rm', '-rf', '%s' % (directory)],
                                preexec_fn=self.become_nfs_user)

    def get_storage_domain(self, nfs_host, nfs_path):
        """Find the export storage domain backed by the given NFS share (or None)."""
        for candidate in self.api.storagedomains.list():
            if candidate.get_type() != "export":
                continue
            self.log.debug("Export domain: (%s)" % (candidate.get_name()))
            backing = candidate.get_storage()
            if backing.get_address() == nfs_host and backing.get_path() == nfs_path:
                self.log.debug("This is the right domain (%s)" % (candidate.get_id()))
                return candidate
        return None

    def get_pool_id(self, sd_uuid):
        """Return the datacenter owning storage domain *sd_uuid* (or None).

        That datacenter's UUID becomes the "StoragePoolID" in our OVF XML.
        """
        # TODO: The storagedomain object has a get_data_center() method that
        # doesn't seem to work - find out why
        for datacenter in self.api.datacenters.list():
            self.log.debug("Looking for our storage domain (%s) in data center (%s)" % (sd_uuid, datacenter.get_id()))
            if datacenter.storagedomains.get(id=sd_uuid):
                self.log.debug("This is the right datacenter (%s)" % (datacenter.get_id()))
                return datacenter
        return None

    def get_cluster_by_dc(self, poolid):
        """Pick the first cluster in datacenter *poolid* (the '_any_' case)."""
        for candidate in self.api.clusters.list():
            dc_id = None
            if candidate.get_data_center():
                dc_id = candidate.get_data_center().get_id()
            self.log.debug("Checking cluster (%s) with name (%s) with data center (%s)" % (candidate.get_id(), candidate.get_name(), dc_id))
            if dc_id == poolid:
                return candidate
        self.log.debug("Cannot find cluster for dc (%s)" % (poolid))
        return None

    def get_cluster_by_name(self, name):
        """Look up a cluster by its exact name (or None)."""
        for candidate in self.api.clusters.list():
            self.log.debug("Checking cluster (%s) with name (%s)" % (candidate.get_id(), candidate.get_name()))
            if candidate.get_name() == name:
                return candidate
        self.log.debug("Cannot find cluster named (%s)" % (name))
        return None

    def init_vm_import(self, image_filename, nfs_host, nfs_path, nfs_dir, cluster):
        """Resolve storage domain, datacenter and cluster for the import."""
        # Prepare for the import of a VM
        self.image_filename = image_filename
        self.nfs_host = nfs_host
        self.nfs_path = nfs_path
        self.nfs_dir = nfs_dir
        # Sets some values used when creating XML and meta files
        self.storage_domain_object = self.get_storage_domain(nfs_host, nfs_path)
        if not self.storage_domain_object:
            raise Exception("Cannot find storage domain matching NFS details given")
        self.storage_domain = self.storage_domain_object.get_id()
        self.dc_object = self.get_pool_id(self.storage_domain)
        if not self.dc_object:
            raise Exception("Cannot find datacenter for our storage domain")
        # Our StoragePoolID is the UUID of the DC containing our storage domain
        self.pool_id = self.dc_object.get_id()
        if cluster == '_any_':
            self.cluster_object = self.get_cluster_by_dc(self.pool_id)
        else:
            self.cluster_object = self.get_cluster_by_name(cluster)
        if not self.cluster_object:
            raise Exception("Cannot find cluster (%s)" % (cluster))
        self.cluster = self.cluster_object.get_id()

    def stage_files(self):
        """Build the RHEVOVFPackage for the image inside the export domain."""
        # Called after init to copy files to staging location.
        # This is the base dir of the export domain.
        self.export_domain_dir = self.nfs_dir + "/" + self.storage_domain
        if not os.path.isdir(self.export_domain_dir):
            raise Exception("Cannot find expected export domain directory (%s) at local mount point (%s)" % (self.nfs_dir, self.storage_domain))
        self.ovf_pkg = RHEVOVFPackage(disk=self.image_filename,
                                      ovf_name=self.ovf_name,
                                      ovf_desc=self.ovf_desc)
        self.ovf_pkg.sync()

    def move_files(self):
        """Copy the package dirs into the export domain's final layout."""
        self.final_image_dir = "%s/images/%s" % (self.export_domain_dir, str(self.ovf_pkg.img_uuid))
        self.final_ovf_dir = "%s/master/vms/%s" % (self.export_domain_dir, str(self.ovf_pkg.tpl_uuid))
        self.copy_dir_as_nfs_user(self.ovf_pkg.image_dir, self.final_image_dir)
        self.copy_dir_as_nfs_user(self.ovf_pkg.ovf_dir, self.final_ovf_dir)

    def remove_export_template(self):
        """Delete the image and OVF dirs from the export domain."""
        self.rm_rf_as_nfs_user(self.final_image_dir)
        self.rm_rf_as_nfs_user(self.final_ovf_dir)

    def execute_import(self):
        """Kick off the RHEV-M import of the staged template and wait for it."""
        # We import to the master storage domain of the datacenter of which
        # our export domain is a member.  Got it?
        action = params.Action()
        for sd in self.dc_object.storagedomains.list():
            if sd.get_master():
                action.storage_domain = sd
        if not action.storage_domain:
            raise Exception("Could not find master storage domain for datacenter ID (%s)" % (self.dc_object.get_id()))
        action.cluster = self.cluster_object
        # At this point our freshly copied in files are discoverable via the
        # tpl_uuid in our export domain
        template = self.storage_domain_object.templates.get(id=str(self.ovf_pkg.tpl_uuid))
        if template:
            template.import_template(action=action)
        real_template = self.api.templates.get(id=str(self.ovf_pkg.tpl_uuid))
        # Wait 5 minutes for an import to finish (30 polls x 10s)
        self.log.debug("Waiting for template import to complete")
        for i in range(30):
            self.log.debug("Waited %d - state (%s)" % (i * 10, real_template.get_status().get_state()))
            if real_template.get_status().get_state() != 'locked':
                break
            real_template = real_template.update()
            sleep(10)
        self.log.debug("Deleting export domain files")
        self.remove_export_template()
        final_state = real_template.get_status().get_state()
        if final_state == 'ok':
            self.log.debug("Template import completed successfully")
            return
        elif final_state == 'locked':
            raise Exception("Timed out waiting for template import to finish")
        else:
            raise Exception("Template import ended in unknown state (%s)" % (final_state))
class RHEVMSystem(MgmtSystemAPIBase):
    """Thin wrapper around ovirtsdk for driving a RHEVM instance.

    ovirtsdk quirks worth knowing before extending this class:

    * Most queries come back as SDK broker objects, not plain values, so
      even "what state is this VM in" takes a chain of method calls.
    * Keyword filtering in ``list(**kwargs)`` only works for attributes
      whose values are strings (e.g. ``name``). Attributes backed by SDK
      param objects (e.g. ``status``) never match, because the SDK feeds
      the object itself to ``re`` instead of its value. That is a RHEVM
      API design issue more than an ovirtsdk one.
    * Broker objects are snapshots: after an action call (``start`` etc.)
      you must re-fetch the object to observe the new state.
    """

    # Counters consumed by the generic stats machinery; each callable takes
    # the system object and returns an integer count.
    _stats_available = {
        'num_vm': lambda self: len(self.list_vm()),
        'num_host': lambda self: len(self.list_host()),
        'num_cluster': lambda self: len(self.list_cluster()),
        'num_template': lambda self: len(self.list_template()),
        'num_datastore': lambda self: len(self.list_datastore()),
    }

    def __init__(self, hostname, username, password, **kwargs):
        """Open an API session against ``hostname``.

        An optional ``port`` kwarg is honored when building the URL.
        """
        url = ('https://%s:%s/api' % (hostname, kwargs['port'])
               if 'port' in kwargs
               else 'https://%s/api' % hostname)
        self.api = API(url=url, username=username, password=password,
                       insecure=True)

    def _get_vm(self, vm_name=None):
        """Resolve *vm_name* to a VM broker object; raise if absent/unnamed."""
        if vm_name is None:
            raise Exception('Could not find a VM named %s.' % vm_name)
        vm = self.api.vms.get(name=vm_name)
        if vm is None:
            raise Exception('Could not find a VM named %s.' % vm_name)
        return vm

    def get_ip_address(self, vm_name):
        """Return the first guest-reported IP address of *vm_name*."""
        guest_ips = self._get_vm(vm_name).get_guest_info().get_ips().get_ip()
        return guest_ips[0].get_address()

    def does_vm_exist(self, name):
        """True when a VM called *name* can be resolved, False otherwise."""
        try:
            self._get_vm(name)
        except Exception:
            return False
        return True

    def start_vm(self, vm_name=None):
        """Power on *vm_name*; True if already up or the action completed."""
        vm = self._get_vm(vm_name)
        if vm.status.get_state() == 'up':
            return True
        ack = vm.start()
        return ack.get_status().get_state() == 'complete'

    def stop_vm(self, vm_name):
        """Power off *vm_name*; True if already down or the action completed."""
        vm = self._get_vm(vm_name)
        if vm.status.get_state() == 'down':
            return True
        ack = vm.stop()
        return ack.get_status().get_state() == 'complete'

    def delete_vm(self, vm_name):
        """Delete *vm_name*, stopping it first when it is up."""
        vm = self._get_vm(vm_name)
        if vm.status.get_state() == 'up':
            self.stop_vm(vm_name)
        # NOTE(review): the delete() ack is compared to the empty string
        # here, unlike the 'complete' checks used elsewhere in this class;
        # preserved verbatim -- confirm against ovirtsdk before changing.
        return vm.delete() == ''

    def create_vm(self, vm_name):
        raise NotImplementedError(
            'This function has not yet been implemented.')
    # An untested draft of create_vm (add a VM from the Blank template with
    # memory_in_mb/cluster_name/storage_domain kwargs, attach a virtio NIC
    # and disk, wait for 'down') used to live here as commented-out code;
    # it was removed pending a real, tested implementation.

    def restart_vm(self, vm_name):
        """Stop then start *vm_name*; False as soon as the stop fails."""
        return self.start_vm(vm_name) if self.stop_vm(vm_name) else False

    def list_vm(self, **kwargs):
        """Names of VMs matching *kwargs*.

        Only string-valued filters (such as ``name``) work -- see the class
        docstring for why e.g. ``status`` cannot be filtered on.
        """
        return [vm.name for vm in self.api.vms.list(**kwargs)]

    def list_host(self, **kwargs):
        """Names of hosts matching *kwargs*."""
        return [host.name for host in self.api.hosts.list(**kwargs)]

    def list_datastore(self, **kwargs):
        """Names of storage domains whose SDK status is None."""
        return [sd.name for sd in self.api.storagedomains.list(**kwargs)
                if sd.get_status() is None]

    def list_cluster(self, **kwargs):
        """Names of clusters matching *kwargs*."""
        return [cl.name for cl in self.api.clusters.list(**kwargs)]

    def list_template(self, **kwargs):
        """Template names, minus the built-in 'Blank' (CFME ignores it too)."""
        return [tpl.name for tpl in self.api.templates.list(**kwargs)
                if tpl.name != "Blank"]

    def list_flavor(self):
        raise NotImplementedError(
            'This function is not supported on this platform.')

    def info(self):
        """No provider info is exposed for RHEVM."""
        pass

    def disconnect(self):
        """Tear down the underlying API session."""
        self.api.disconnect()

    def vm_status(self, vm_name=None):
        """Current state string ('up', 'down', ...) of *vm_name*."""
        return self._get_vm(vm_name).get_status().get_state()

    def is_vm_running(self, vm_name):
        """True when *vm_name* reports state 'up'."""
        return self.vm_status(vm_name) == "up"

    def is_vm_stopped(self, vm_name):
        """True when *vm_name* reports state 'down'."""
        return self.vm_status(vm_name) == "down"

    def is_vm_suspended(self, vm_name):
        """True when *vm_name* reports state 'suspended'."""
        return self.vm_status(vm_name) == "suspended"

    def suspend_vm(self, vm_name):
        """Suspend a running VM; True when the action completes."""
        vm = self._get_vm(vm_name)
        if vm.status.get_state() == 'down':
            raise Exception('Could not suspend %s because it\'s not running.'
                            % vm_name)
        ack = vm.suspend()
        return ack.get_status().get_state() == 'complete'

    def clone_vm(self, source_name, vm_name):
        raise NotImplementedError(
            'This function has not yet been implemented.')

    def deploy_template(self, template, *args, **kwargs):
        """Instantiate *template* as kwargs['vm_name'] in
        kwargs['cluster_name'], start it, and return the new VM's name.
        """
        new_name = kwargs['vm_name']
        self.api.vms.add(
            params.VM(name=new_name,
                      cluster=self.api.clusters.get(kwargs['cluster_name']),
                      template=self.api.templates.get(template)))
        # Creation is asynchronous: wait for the VM to settle at 'down',
        # then power it on and wait until it reports 'up'.
        while self.api.vms.get(new_name).status.state != 'down':
            time.sleep(5)
        self.start_vm(new_name)
        while not self.is_vm_running(new_name):
            time.sleep(5)
        return new_name
vmsList = api.vms.list() for i in vmsList: print i.name if i.status.state != 'down': logging.warning('%s is not down, trying to stop it' % i.name) threadMe = Thread(target=start_vms, args=[i]) threadMe.start() threads.append(threadMe) except Exception as e: logging.debug('Error:\n%s' % str(e)) logging.warning('No of VMs to stop : %s' % len(threads)) print 'No of VMs to stop: %s' % len(threads) for th in threads: logging.info('Waiting for %s to join' % th) th.join(30) if not th.isAlive(): logging.info('Thread : %s terminated' % (th.getName())) else: logging.debug( 'Thread : %s is still alive, you may check this task..' % (th)) logging.debug(' Below Vms failed to stop with an exception:%s' % (failedVms)) api.disconnect() except Exception as ex: logging.debug('Unexpected error: %s' % ex)
class RHEVMSystem(MgmtSystemAPIBase):
    """Client to the RHEVM API, built on ovirtsdk.

    ovirtsdk caveats:

    * Most queries return SDK broker objects rather than plain values, so
      simple facts (e.g. a VM's state) require chains of method calls.
    * ``list(**kwargs)`` filtering only matches attributes whose values are
      strings (``name`` works; ``status`` does not, because the SDK feeds
      the param object itself to ``re``). This is a RHEVM API design issue
      more than an ovirtsdk one.
    * Broker objects are snapshots; re-fetch after an action call to see
      the updated state.
    """

    def __init__(self, hostname='localhost', username='******', password='******'):
        """Initialize RHEVMSystem.

        ``hostname`` may be bare; it is normalized to ``https://<host>/api``.
        """
        # sanitize hostname
        if not hostname.startswith('https://'):
            hostname = 'https://%s' % hostname
        if not hostname.endswith('/api'):
            hostname = '%s/api' % hostname
        self.api = API(url=hostname, username=username, password=password,
                       insecure=True)

    def _get_vm(self, vm_name=None):
        """Resolve *vm_name* to a VM broker object.

        Raises:
            Exception: if *vm_name* is None or no such VM exists.
        """
        if vm_name is None:
            raise Exception('Could not find a VM named %s.' % vm_name)
        else:
            vm = self.api.vms.get(name=vm_name)
            if vm is None:
                raise Exception('Could not find a VM named %s.' % vm_name)
            return vm

    def start_vm(self, vm_name=None):
        """Power on *vm_name*.

        Raises:
            Exception: if the VM is already running.

        Returns:
            bool: True when the start action reports completion.
        """
        vm = self._get_vm(vm_name)
        if vm.status.get_state() == 'up':
            raise Exception('Could not start %s because it\'s already running.'
                            % vm_name)
        else:
            ack = vm.start()
            if ack.get_status().get_state() == 'complete':
                return True
        return False

    def stop_vm(self, vm_name):
        """Power off *vm_name*.

        Raises:
            Exception: if the VM is not running.

        Returns:
            bool: True when the stop action reports completion.
        """
        vm = self._get_vm(vm_name)
        if vm.status.get_state() == 'down':
            raise Exception('Could not stop %s because it\'s not running.'
                            % vm_name)
        else:
            ack = vm.stop()
            if ack.get_status().get_state() == 'complete':
                return True
        return False

    def delete_vm(self, vm_name):
        """Delete a powered-off VM.

        Raises:
            Exception: if the VM is still running.

        Returns:
            bool: True when the delete action reports completion.
        """
        vm = self._get_vm(vm_name)
        if vm.status.get_state() == 'up':
            raise Exception('Could not delete %s because it\'s still running.'
                            % vm_name)
        else:
            ack = vm.delete()
            # Bug fix: the ack state was compared against '' (never true, so
            # this method always returned False); use 'complete' to match the
            # success checks in start_vm/stop_vm above.
            if ack.get_status().get_state() == 'complete':
                return True
        return False

    def create_vm(self, vm_name):
        """RHEVMSystem implementation of create_vm (not yet implemented)."""
        # Unfortunately, there are not enough smurf slaves in the village to
        # build this functionality yet.
        pass

    def restart_vm(self, vm_name):
        """Stop then start *vm_name*; False as soon as the stop fails."""
        if not self.stop_vm(vm_name):
            return False
        else:
            return self.start_vm(vm_name)

    def list_vm(self, **kwargs):
        """Names of VMs matching *kwargs*.

        Only string-valued filters (such as ``name``) work -- see the class
        docstring for why e.g. ``status`` cannot be filtered on.
        """
        vm_list = self.api.vms.list(**kwargs)
        return [vm.name for vm in vm_list]

    def info(self):
        """No provider info is exposed for RHEVM."""
        # and we got nothing!
        pass

    def disconnect(self):
        """Tear down the underlying API session."""
        self.api.disconnect()

    def vm_status(self, vm_name=None):
        """Return (and echo to stdout) the current state of *vm_name*."""
        state = self._get_vm(vm_name).get_status().get_state()
        # Parenthesized so this line is valid in Python 3 as well; with a
        # single argument the output is identical under Python 2.
        print("vm " + vm_name + " status is " + state)
        return state

    def is_vm_running(self, vm_name):
        """True when *vm_name* reports state 'up'."""
        state = self.vm_status(vm_name)
        return "up" == state

    def is_vm_stopped(self, vm_name):
        """True when *vm_name* reports state 'down'."""
        state = self.vm_status(vm_name)
        return "down" == state

    def is_vm_suspended(self, vm_name):
        """True when *vm_name* reports state 'suspended'."""
        state = self.vm_status(vm_name)
        return "suspended" == state

    def suspend_vm(self, vm_name):
        """Suspend a running VM; True when the action completes.

        Raises:
            Exception: if the VM is down.
        """
        vm = self._get_vm(vm_name)
        if vm.status.get_state() == 'down':
            raise Exception('Could not suspend %s because it\'s not running.'
                            % vm_name)
        else:
            ack = vm.suspend()
            return ack.get_status().get_state() == 'complete'
class TabNeutron(Tab):
    """urwid tab that deploys a Neutron appliance VM on a local oVirt engine.

    Flow: get_pass() asks for the engine admin password; on_pass() logs in
    and builds the configuration form; begin_deploy() validates IPs and then
    runs add_vm/add_external_provider/configure_uiplugin/configure_httpd.

    NOTE(review): several credential literals in this class appear scrubbed
    to '******' (including two urwid.Edit calls whose argument lists are
    syntactically broken); restore from upstream before running.
    """

    def __init__(self, main_loop):
        self.main_loop = main_loop          # urwid main loop, used to redraw after log()
        self.name = u"Neutron Deploy"       # tab title
        self.vm_name = "NeutronAppliance"   # name of the VM this tab creates
        self.widget = SimplePopupLauncher(self.get_pass())

    def get_pass(self):
        """Build the password prompt row shown before anything else."""
        self.w_password = urwid.Edit(u"Please enter admin@internal password to procceed: ", mask="*")
        return urwid.Columns([
            ('weight', 4, self.w_password),
            ('weight', 1, urwid.Button(u"GO", on_press=self.on_pass))
        ])

    def on_pass(self, button):
        """Log in to the local engine API and show the deploy configuration form.

        Aborts with a popup when a VM named self.vm_name already exists.
        """
        self.api = API(url='https://127.0.0.1/api',
                       username='******',
                       password=self.w_password.edit_text.encode("ascii", "ignore"),
                       insecure=True)
        if not self.api.vms.get(name=self.vm_name) == None:
            self.widget.set_popup_text("Vm named '%s' exists, please remove or rename it and try again" % self.vm_name)
            self.widget.open_pop_up()
            return
        divider = urwid.Divider("-")
        # Management and internal profiles carry IP fields; external does not.
        self.w_mgmt_profile = VnicProfileSelector(self.api, True, self.vnic_profile_changed_mgmt)
        self.w_int_profile = VnicProfileSelector(self.api, True, self.vnic_profile_changed_int)
        self.w_int_profile.set_ip_info(False)
        self.w_ext_profile = VnicProfileSelector(self.api, False, None)
        # NOTE(review): the two Edit calls below are syntactically broken by
        # credential scrubbing; preserved verbatim.
        self.w_vm_pass = urwid.Edit(u"vm root password: "******"*")
        self.w_keystone_pass = urwid.Edit(u"keystone admin password: "******"*")
        self.widget.original_widget = urwid.Pile([
            urwid.Text(u"Deploying neutron appliance under datacenter 'Default'"),
            divider,
            urwid.Text("Choose profile for management network:"),
            self.w_mgmt_profile,
            divider,
            urwid.Text("Choose profile for internal network:"),
            self.w_int_profile,
            divider,
            urwid.Text("Choose profile for external network:"),
            self.w_ext_profile,
            divider,
            self.w_vm_pass,
            self.w_keystone_pass,
            divider,
            urwid.Button(u"OK", on_press=self.begin_deploy)
        ])

    def vnic_profile_changed_mgmt(self, button, selected):
        """Radio-button callback: hide the internal profile's IP fields when
        management and internal share the same vNIC profile."""
        if selected:
            if self.w_mgmt_profile.get_vnic_profile_id_by_name(button.get_label()) == self.w_int_profile.get_vnic_profile_id():
                self.w_int_profile.set_ip_info(False)
            else:
                self.w_int_profile.set_ip_info(True)

    def vnic_profile_changed_int(self, button, selected):
        """Mirror of vnic_profile_changed_mgmt for the internal selector."""
        if selected:
            if self.w_mgmt_profile.get_vnic_profile_id() == self.w_int_profile.get_vnic_profile_id_by_name(button.get_label()):
                self.w_int_profile.set_ip_info(False)
            else:
                self.w_int_profile.set_ip_info(True)

    def begin_deploy(self, button):
        """Run the whole deployment, streaming progress into a log widget."""
        conf_widget = self.widget.original_widget
        self.output = urwid.Text("")
        widget = urwid.BoxAdapter(urwid.Frame(
            header=urwid.Text("Setup output:"),
            body=urwid.Filler(self.output, valign="bottom"),
            footer=urwid.Button("percentage"),
            focus_part="header"), 20)
        widget.set_focus("footer")
        self.widget.original_widget = widget
        # IP conflict detection: a non-zero arping exit status means no host
        # answered within 10 s, i.e. the address is considered free.
        self.log(u"Detecting ip conflicts...")
        conflict = True
        conflict_ip = self.w_mgmt_profile.w_ip.edit_text
        if os.system("arping %s -w 10" % conflict_ip):
            conflict = False
        elif not self.mgmt_int_same():
            conflict_ip = self.w_int_profile.w_ip.edit_text
            if os.system("arping %s -w 10" % conflict_ip):
                conflict = False
        if conflict:
            self.widget.set_popup_text("IP address %s is already used, please configure a unused IP address."
                                       % conflict_ip)
            self.widget.open_pop_up()
            # Restore the configuration form so the user can fix the IP.
            self.widget.original_widget = conf_widget
            return
        self.log(u"Begin neutron appliance deploy")
        self.add_vm()
        self.add_external_provider()
        self.configure_uiplugin()
        self.configure_httpd()
        self.log(u"Neutron appliance deploy finished, please REFRESH your webadmin page")
        self.api.disconnect()

    def log(self, text):
        """Append a line to the on-screen log and force a redraw."""
        self.output.set_text(self.output.text + text + "\n")
        self.main_loop.draw_screen()

    def mgmt_int_same(self):
        """True when management and internal use the same vNIC profile."""
        return self.w_mgmt_profile.get_vnic_profile_id() == self.w_int_profile.get_vnic_profile_id()

    def add_vm(self):
        """Create the appliance VM from the template, attach NICs, and boot it
        with a cloud-init payload that rewrites its configuration in place."""
        mgmt_int_same = self.mgmt_int_same()
        template_name = "Neutron_Appliance_Template"
        # NIC descriptors for cloud-init's network configuration:
        # eth0 = management (static), eth2 = internal (static, only when it is
        # a separate profile), eth1 = external (no IP).
        nics = []
        nics.append(params.NIC(
            name="eth0",
            boot_protocol="STATIC",
            on_boot=True,
            network=params.Network(
                ip=params.IP(
                    address=self.w_mgmt_profile.w_ip.edit_text,
                    netmask=self.w_mgmt_profile.w_netmask.edit_text,
                    gateway=self.w_mgmt_profile.w_gateway.edit_text,
                )
            )
        ))
        if not mgmt_int_same:
            nics.append(params.NIC(
                name="eth2",
                boot_protocol="STATIC",
                on_boot=True,
                network=params.Network(
                    ip=params.IP(
                        address=self.w_int_profile.w_ip.edit_text,
                        netmask=self.w_int_profile.w_netmask.edit_text,
                        gateway=self.w_int_profile.w_gateway.edit_text,
                    )
                )
            ))
        nics.append(params.NIC(
            name="eth1",
            boot_protocol="NONE",
            on_boot=True,
        ))
        vm=params.VM(
            name=self.vm_name,
            cluster=self.api.clusters.get(name="Default"),
            template=self.api.templates.get(name=template_name),
        )
        self.log(u"Adding neutron vm")
        self.api.vms.add(vm)
        self.log(u"Neutron vm added successflly")
        self.api.vms.get(self.vm_name).nics.add(params.NIC(
            name='eth0',
            vnic_profile=params.VnicProfile(id=self.w_mgmt_profile.get_vnic_profile_id()),
            interface='virtio'
        ))
        self.log("NIC 'eth0' added to neutron vm as management network")
        self.api.vms.get(self.vm_name).nics.add(params.NIC(
            name='eth1',
            vnic_profile=params.VnicProfile(id=self.w_ext_profile.get_vnic_profile_id()),
            interface='virtio'
        ))
        self.log("NIC 'eth1' added to neutron vm as external network")
        if not mgmt_int_same:
            self.api.vms.get(self.vm_name).nics.add(params.NIC(
                name='eth2',
                vnic_profile=params.VnicProfile(id=self.w_int_profile.get_vnic_profile_id()),
                interface='virtio'
            ))
            self.log("NIC 'eth2' added to neutron vm as internal network")
        # Script executed inside the guest on first boot; the %s slots are
        # mgmt IP, tunnel-local IP, and the keystone admin password (twice).
        # NOTE(review): line breaks in this literal were reconstructed from a
        # collapsed source view -- verify against upstream.
        cloud_init_content = """runcmd:
 - sed -i 's/ServerAlias 127.0.0.1/ServerAlias %s/' /etc/httpd/conf.d/15-horizon_vhost.conf
 - sed -i 's/local_ip =127.0.0.1/local_ip = %s/' /etc/neutron/plugins/openvswitch/ovs_neutron_plugin.ini
 - sed -i 's/#CSRF_COOKIE_SECURE/CSRF_COOKIE_SECURE/' /etc/openstack-dashboard/local_settings
 - sed -i 's/#SESSION_COOKIE_SECURE/SESSION_COOKIE_SECURE/' /etc/openstack-dashboard/local_settings
 - service httpd restart
 - mysql neutron --execute="delete from ml2_gre_endpoints where ip_address='127.0.0.1'"
 - service neutron-openvswitch-agent restart
 - chkconfig cloud-init off
 - source /root/keystonerc_admin
 - keystone user-password-update --pass %s admin
 - sed -i '/^export OS_PASSWORD/c\export OS_PASSWORD=%s' /root/keystonerc_admin""" % (
            self.w_mgmt_profile.w_ip.edit_text,
            self.w_mgmt_profile.w_ip.edit_text if mgmt_int_same else self.w_int_profile.w_ip.edit_text,
            self.w_keystone_pass.edit_text,
            self.w_keystone_pass.edit_text,
        )
        initialization=params.Initialization(
            cloud_init=params.CloudInit(
                host=params.Host(address="localhost"),
                regenerate_ssh_keys=True,
                users=params.Users(
                    user=[params.User(user_name="root", password=self.w_vm_pass.edit_text)]
                ),
                network_configuration=params.NetworkConfiguration(
                    nics=params.Nics(nic=nics)
                ),
                files=params.Files(
                    file=[params.File(name="/tmp/setup", type_="PLAINTEXT", content=cloud_init_content,)]
                )
            )
        )
        # VM creation is asynchronous: poll until it settles at 'down',
        # then start it once with the cloud-init payload attached.
        self.log("Wait for vm to be created...")
        created = False
        while not created:
            time.sleep(10)
            if "down" == self.api.vms.get(name=self.vm_name).status.state:
                created = True
        self.log("Starting vm...")
        vm = self.api.vms.get(name=self.vm_name)
        vm.start(
            action=params.Action(
                use_cloud_init=True,
                vm=params.VM(
                    initialization=initialization
                )
            )
        )

    def add_external_provider(self):
        """Register the appliance as an OpenStack network provider on the
        engine, replacing any provider of the same name."""
        external_provider_name = "neutron-appliance"
        if not self.api.openstacknetworkproviders.get(name=external_provider_name) == None:
            self.log("Removing existing external provider: %s ..." % external_provider_name)
            self.api.openstacknetworkproviders.get(name=external_provider_name).delete()
        self.log(u"Adding external provider...")
        agent_configuration = params.AgentConfiguration(
            network_mappings='vmnet:br-tun',
            broker_type='rabbit_mq',
            address=self.w_mgmt_profile.w_ip.edit_text,
            port=5672,
            username='******',
            password='******',
        )
        self.api.openstacknetworkproviders.add(params.OpenStackNetworkProvider(
            name=external_provider_name,
            description='auto created by eayunos',
            url='http://%s:9696' % self.w_mgmt_profile.w_ip.edit_text,
            requires_authentication=True,
            username='******',
            password=self.w_password.edit_text.encode("ascii", "ignore"),
            authentication_url='http://%s:5000/v2.0/' % self.w_mgmt_profile.w_ip.edit_text,
            tenant_name='admin',
            plugin_type='OPEN_VSWITCH',
            agent_configuration=agent_configuration,
        ))
        self.log(u"External provider added successfully")

    def configure_uiplugin(self):
        """Symlink the Neutron UI plugin into the engine's ui-plugins dir."""
        os.system("ln -nsf /usr/share/neutron-uiplugin/neutron-resources /usr/share/ovirt-engine/ui-plugins/neutron-resources")
        os.system("ln -nsf /usr/share/neutron-uiplugin/neutron.json /usr/share/ovirt-engine/ui-plugins/neutron.json")
        self.log("Neutron uiplugin configured")

    def configure_httpd(self):
        """Write an httpd reverse-proxy config pointing /dashboard and /static
        at the appliance's management IP, then restart httpd."""
        content = """ProxyPass "/dashboard" "http://{IP}/dashboard"
ProxyPassReverse "/dashboard" "http://{IP}/dashboard"
ProxyPass "/static" "http://{IP}/static"
ProxyPassReverse "/static" "http://{IP}/static"
""".replace("{IP}", self.w_mgmt_profile.w_ip.edit_text)
        with open("/etc/httpd/conf.d/z-neutron.conf", "w") as f:
            f.write(content)
        self.log("Restarting httpd service")
        os.system("service httpd restart")
        self.log("Httpd reverse proxy configured")