def _prepare_disks(vm_settings, target_dir):
    """
    Prepare VM disks for OVF appliance creation.

    File based disks will be copied to VM creation directory.
    LVM and block-device based disks will be converted into file based
    images and copied to creation directory.

    @param vm_settings: source VM settings; 'vm_name' selects the libvirt
        domain, 'template_name' prefixes the produced image file names
    @param target_dir: directory where disks will be copied
    @return: list of dicts describing each prepared disk (filename, path,
        file/disk ids, size and capacity as strings)
    """
    # All <disk> elements from the domain's libvirt XML definition.
    disk_list_dom = get_libvirt_conf_xml(vm_settings["vm_name"])\
        .getElementsByTagName("domain")[0].getElementsByTagName("disk")
    disk_num, disk_list = 0, []
    for disk_dom in disk_list_dom:
        # Only 'disk' devices are exported (cdrom/floppy etc. are skipped).
        if disk_dom.getAttribute("device") == "disk":
            disk_num += 1
            source_dom = disk_dom.getElementsByTagName("source")[0]
            filename = "%s%d.img" % (vm_settings["template_name"], disk_num)
            new_path = path.join(target_dir, filename)
            if disk_dom.getAttribute("type") == "file":
                # File-backed disk: plain copy, preserving metadata.
                disk_path = source_dom.getAttribute("file")
                shutil.copy2(disk_path, new_path)
            elif disk_dom.getAttribute("type") == "block":
                # Block device (e.g. LVM): convert into a qcow2 image file.
                source_dev = source_dom.getAttribute("dev")
                execute("qemu-img convert -f raw -O qcow2 %s %s"
                        % (source_dev, new_path))
            disk_dict = {
                "file_size": str(get_file_size_bytes(new_path)),
                "filename": filename,
                "new_path": new_path,
                "file_id": "diskfile%d" % (disk_num),
                "disk_id": "vmdisk%d.img" % (disk_num),
                "disk_capacity": str(get_kvm_disk_capacity_bytes(new_path))
            }
            disk_list.append(disk_dict)
    return disk_list
def migrate(conn, uid, target_host, live=False, print_=True):
    """Migrate given container to a target_host

    @param conn: libvirt connection used to resolve the container id
    @param uid: UUID of the container to migrate
    @param target_host: destination host; passwordless ssh access is required
    @param live: perform an online ('--online') migration; may arrive as the
        string 'True' when called through an agent
    @param print_: unused in this implementation; kept for interface
        compatibility
    @raise CommandException: if ssh connectivity is missing or the target
        host already defines this CTID
    """
    if not test_passwordless_ssh(target_host):
        raise CommandException(
            "Public key ssh connection with the target host could not be established"
        )
    log = get_logger()
    # a workaround for the megadynamic nature of python variable type when called via an agent
    live = live == 'True' if type(live) is str else live
    # is ctid present on the target host?
    ctid = get_ctid_by_uuid(conn, uid)
    try:
        execute("ssh %s vzlist %s" % (target_host, ctid))
        # vzlist succeeded, so the CTID already exists on the target.
        raise CommandException(
            "Target host '%s' has an already defined CTID '%s'" % (target_host, ctid))
    except CommandException as ce:
        # Code 256 is treated as 'CTID not defined on the target' -- the
        # desired outcome; anything else is a genuine failure.
        if ce.code != 256:
            raise
    msg = "Initiating migration to %s..." % target_host
    log.info(msg)
    live_trigger = '--online' if live else ''
    for line in execute2("vzmigrate -v %s %s %s" % (live_trigger, target_host, ctid)):
        log.info(line)
def vm_diskspace(vm):
    """Return disk space used by a VM, in MB, keyed by mount point."""
    log = get_logger()
    if conn.getType() == 'OpenVZ':
        return {'/': openvz.get_diskspace(vm.name())}
    # KVM: sum the sizes of all file-type block devices of the domain.
    try:
        prefix = get_config().getstring('general', 'script_prefix',
                                        '/opt/opennode/bin/')
        list_cmd = os.path.join(
            prefix, 'libvirt_detect_domain_devices.sh %s' % vm.name())
        total_mb = 0.0
        for device in execute(list_cmd).split('\n'):
            # '-' marks a non-disk backed device; skip it.
            if device.strip() == '-':
                continue
            size_cmd = os.path.join(
                prefix,
                'libvirt_get_device_size.sh %s %s' % (vm.name(), device))
            # Helper reports bytes; accumulate in MB.
            total_mb += int(execute(size_cmd)) / 1024.0 / 1024.0
    except CommandException as err:
        log.debug('Failed diskspace detection: \'%s\'' % err)
        total_mb = 0.0
    except ValueError as err:
        log.debug('Failed diskspace conversion: \'%s\'' % err)
        total_mb = 0.0
    return {'/': total_mb}
def delete_pool(pool_name): """Delete a storage pool""" try: execute("virsh 'pool-destroy %s'" %pool_name) execute("virsh 'pool-undefine %s'" %pool_name) del_folder(os.path.join(c('general', 'storage-endpoint'), pool_name)) if pool_name == c('general', 'default-storage-pool'): set_default_pool('') except Exception, e: print "Failed to delete pool %s: %s" % (pool_name, e)
def generate_config(ovf_settings):
    """ Generates ubc and non-ubc configuration """
    sample_conf = os.path.join('/etc/vz/conf', "ve-vswap-256m.conf-sample")
    ubc_part = generate_ubc_config(ovf_settings)
    non_ubc_part = generate_nonubc_config(sample_conf, ovf_settings)
    # final configuration is ubc + non-ubc
    combined_conf = "%s\n%s\n" % (ubc_part, non_ubc_part)
    # overwrite the per-VM sample configuration
    target_conf_fnm = os.path.join(
        '/etc/vz/conf/', "ve-%s.conf-sample" % ovf_settings["vm_id"])
    with open(target_conf_fnm, 'w') as conf_file:
        conf_file.write(combined_conf)
    execute("chmod 644 %s" % target_conf_fnm)
def delete_pool(pool_name):
    """Delete a storage pool"""
    try:
        # The built-in local storage must never be removed.
        if get_pool_path(pool_name) == '/storage/local':
            raise Exception('/storage/local can not be deleted')
        execute("virsh 'pool-destroy %s'" % pool_name)
        execute("virsh 'pool-undefine %s'" % pool_name)
        del_folder(get_pool_path(pool_name))
        default = get_config().getstring('general', 'default-storage-pool')
        if pool_name == default:
            set_default_pool('')
    except Exception as e:
        raise Exception("Failed to delete pool %s: %s" % (pool_name, e))
def shutdown_vm(uuid): """Shutdown VM with a given UUID""" ctid = get_ctid_by_uuid(uuid) try: print execute("vzctl stop %s" % ctid) except CommandException as e: if e.code == 13056: # sometimes umount fails for i in range(5): try: print execute("vzctl umount %s" % ctid) except CommandException: import time time.sleep(3)
def generate_config(ovf_settings):
    """ Generates ubc and non-ubc configuration """
    base_sample = os.path.join('/etc/vz/conf', "ve-vswap-256m.conf-sample")
    # Final configuration is the UBC section followed by the non-UBC section.
    full_conf = "%s\n%s\n" % (generate_ubc_config(ovf_settings),
                              generate_nonubc_config(base_sample, ovf_settings))
    # Overwrite the container's sample configuration file.
    conf_target = os.path.join(
        '/etc/vz/conf/', "ve-%s.conf-sample" % ovf_settings["vm_id"])
    with open(conf_target, 'w') as out:
        out.write(full_conf)
    execute("chmod 644 %s" % conf_target)
def shutdown_vm(conn, uuid):
    """Shutdown VM with a given UUID

    @param conn: libvirt connection used to resolve the container id
    @param uuid: UUID of the container to stop
    """
    ctid = get_ctid_by_uuid(conn, uuid)
    log = get_logger()
    try:
        msg = execute("vzctl stop %s" % ctid)
        log.info(msg)
    except CommandException as e:
        if e.code == 13056:  # sometimes umount fails
            # Retry the umount a few times with a pause between attempts.
            for i in range(5):
                try:
                    msg = execute("vzctl umount %s" % ctid)
                    log.info(msg)
                    # Fix: stop retrying once the umount succeeds; previously
                    # the loop kept re-issuing umount (and sleeping) even
                    # after success.
                    break
                except CommandException:
                    time.sleep(3)
def prepare_file_system(settings, storage_pool):
    """
    Prepare file system for VM template creation in OVF appliance format:
        - create template directory if it does not exist
        - copy disk based images
        - convert block device based images to file based images

    @param settings: deployment settings; uses 'disks', 'hostname', 'uuid'
        and optionally 'disk' (requested image size, presumably in GB --
        TODO confirm against callers)
    @param storage_pool: name of the storage pool under the configured
        storage endpoint
    """
    config = get_config()
    log = get_logger()
    images_dir = path.join(config.getstring("general", "storage-endpoint"),
                           storage_pool, "images")
    target_dir = path.join(config.getstring("general", "storage-endpoint"),
                           storage_pool, "kvm", "unpacked")
    for disk_index, disk in enumerate(settings.get("disks", [])):
        disk_template_path = path.join(target_dir, disk["template_name"])
        if disk["deploy_type"] == "file":
            # Unique deployed image name: <hostname>-<uuid>-disk<N>.<format>
            volume_name = "disk%s" % disk_index
            disk["source_file"] = '%s-%s-%s.%s' % (
                settings["hostname"], settings["uuid"], volume_name,
                disk.get('template_format', 'qcow2'))
            disk_deploy_path = path.join(images_dir, disk["source_file"])
            shutil.copy2(disk_template_path, disk_deploy_path)
            # resize disk to match the requested
            # XXX we assume that the size was already adjusted to the template requirements
            diskspace = settings.get('disk')
            if diskspace:
                diskspace = int(
                    float(diskspace)
                )  # it's str by default. 'string' > int is always true (LEV-116)
                # get the disk size as reported by qemu-img
                current_size = int(
                    execute(
                        "qemu-img info %s |grep 'virtual size' |awk '{print $4}' |cut -b2- "
                        % disk_deploy_path)) / 1024 / 1024 / 1024  # to get to GB
                if diskspace > current_size:
                    log.info('Increasing image file %s from %s to %sG'
                             % (disk_deploy_path, current_size, diskspace))
                    execute("qemu-img resize %s %sG" % (disk_deploy_path, diskspace))
                else:
                    # only grow requests are applied; smaller requests are
                    # logged and ignored
                    log.info(
                        'Ignoring disk (%s) increase request (to %s) as existing image is already larger (%s)'
                        % (disk_deploy_path, diskspace, current_size))
        elif disk["deploy_type"] in ["physical", "lvm"]:
            # Block device target: write the qcow2 template onto the raw device.
            disk_deploy_path = disk["source_dev"]
            execute("qemu-img convert -f qcow2 -O raw %s %s"
                    % (disk_template_path, disk_deploy_path))
def get_netstats():
    """Return [rx_bytes, tx_bytes] counters of the main network interface."""
    iface = get_config().getstring('general', 'main_iface')
    # /proc/net/dev: after the 'iface:' prefix, field 1 is the rx byte
    # counter and field 9 is the tx byte counter.
    cmd = ("grep %s: /proc/net/dev | awk -F: '{print $2}' | "
           "awk '{print $1, $9}'" % iface)
    return [int(field) for field in execute(cmd).split(' ')]
def get_netstats():
    """Return [rx_bytes, tx_bytes] for the container's venet0 interface."""
    cmd = ("vzctl exec %s \"cat /proc/net/dev|grep venet0 | "
           "awk -F: '{print \$2}' | awk '{print \$1, \$9}'\"" % vm.ID())
    return [int(field) for field in execute(cmd).split(' ')]
def get_uptime(ctid):
    """Get uptime in seconds. 0 if container is not running.

    @param ctid: OpenVZ container id
    @return: first field of the container's /proc/uptime as float, or 0 when
        the command fails or its output cannot be parsed
    """
    try:
        return float(
            execute("vzctl exec %s \"awk '{print \$1}' /proc/uptime\"" % ctid))
    except Exception:
        # Fix: was a bare 'except:', which also swallowed SystemExit and
        # KeyboardInterrupt; 'except Exception' keeps the best-effort
        # behaviour without masking interpreter-exit signals.
        return 0
def get_swap_size_gb():
    """Return unused swap space in GB, rounded to 3 decimal places."""
    free_kb = 0
    # 'swapon -s' prints a header row; columns 3 and 4 of each device row
    # are the total size and used amount (KB).
    for device_row in execute("swapon -s").split("\n")[1:]:
        size, used = [int(f) for f in re.split("\s+", device_row)[2:4]]
        free_kb += size - used
    return round(free_kb / 1024.0 ** 2, 3)
def get_template_name(ctid):
    """Return the OS template a specific container was created from."""
    try:
        int(ctid)  # vzlist expects a numeric container id
    except ValueError:
        raise TemplateException(
            "Incorrect format for a container id: %s" % ctid)
    return execute("vzlist %s -H -o ostemplate" % ctid)
def get_onboot(ctid):
    """Return onboot parameter of a specified CT (1 = yes, 0 = no/unset)."""
    # XXX: If ONBOOT is unset in conf (default for vzctl) then we use it as no
    flag = execute("vzlist %s -H -o onboot" % ctid).strip()
    return {"yes": 1, "no": 0, "-": 0}[flag]
def _get_stopped_vm_ids(conn):
    """Return ids of defined-but-stopped VMs for the given connection."""
    # XXX a workaround for libvirt's python API listDefinedDomains function
    # not reporting the last OpenVZ VM correctly on rare occasion
    if conn.getType() != "OpenVZ":
        return conn.listDefinedDomains()
    return execute("vzlist -H -S -o ctid").split()
def get_swap_size_gb():
    """Return the amount of free swap in GB (size minus used, all devices)."""
    # Drop the 'swapon -s' header; remaining rows are one per swap device.
    device_rows = execute("swapon -s").split("\n")[1:]
    free_kb = 0
    for row in device_rows:
        # Columns 3 and 4 are the device's size and used amount (KB).
        size, used = [int(v) for v in re.split("\s+", row)[2:4]]
        free_kb += (size - used)
    return round(free_kb / 1024.0 ** 2, 3)
def _get_stopped_vm_ids(conn):
    """Return ids of defined-but-stopped VMs for the given connection."""
    # XXX a workaround for libvirt's python API listDefinedDomains function
    # not reporting the last OpenVZ VM correctly on rare occasion
    if conn.getType() == 'OpenVZ':
        return execute('vzlist -H -S -o ctid').split()
    return conn.listDefinedDomains()
def get_hostname(ctid):
    """Return a hostname of the container"""
    try:
        int(ctid)  # vzlist expects a numeric container id
    except ValueError:
        raise TemplateException(
            "Incorrect format for a container id: %s" % ctid)
    return execute("vzlist %s -H -o hostname" % ctid)
def get_disc_space_gb():
    """Return available space of the /vz filesystem in GB."""
    df_output = execute("df /vz")
    parts = df_output.split("\n", 1)
    # Expect a header line plus at least one data line.
    if len(parts) != 2:
        raise RuntimeError("Unable to calculate disk space")
    # Fourth column of the data row is the available space (1K blocks).
    available_kb = float(parts[1].split()[3])
    return round(available_kb / 1024 ** 2, 3)
def _get_running_vm_ids(conn):
    """Return ids of running VMs for the given libvirt connection."""
    # XXX a workaround for libvirt's listDomainsID function throwing error
    # _and_ screwing up snack screen if 0 openvz VMs available and no other
    # backends present
    probe = "vzlist -H > /dev/null 2>&1; if [ $? -eq 1 ]; then echo missing; fi"
    if conn.getType() == 'OpenVZ' and execute(probe) == 'missing':
        return []
    return conn.listDomainsID()
def get_openvz_stopped_ctids():
    """
    Return a list of currently stopped OpenVZ CTs

    @return: List of OpenVZ containers on current machine
    @rtype: List
    """
    return [int(ctid)
            for ctid in execute('vzlist -S -H -o ctid').splitlines()]
def _get_openvz_ct_id_list():
    """
    Return a list of current OpenVZ CTs (both running and stopped)

    @return: List of OpenVZ containers on current machine
    @rtype: List
    """
    raw_ids = execute("vzlist --all -H -o ctid").splitlines()
    return [int(ctid.strip()) for ctid in raw_ids]
def get_openvz_all_ctids():
    """
    Return a list of current OpenVZ CTs (both running and stopped)

    @return: List of OpenVZ containers on current machine
    @rtype: List
    """
    return [int(ctid)
            for ctid in execute('vzlist --all -H -o ctid').splitlines()]
def cpu_usage():
    """Return fractional CPU usage of the host since the previous sample."""
    # user/nice/system/idle jiffies from the first line of /proc/stat.
    now = [int(f) for f in execute("head -n 1 /proc/stat").split(' ')[2:6]]
    was = roll_data('/tmp/func-cpu-host', now, [0] * 6)
    deltas = [cur - prev for cur, prev in zip(now, was)]
    try:
        # Last delta is idle time; usage is everything that is not idle.
        usage = 1 - (float(deltas[-1]) / sum(deltas))
    except ZeroDivisionError:
        usage = 0
    return usage
def _get_remote_ctid_list(remote_host):
    """Get list of ctid's from remote HN

    @param remote_host: host to connect to (with username if provided)
    """
    output = execute('ssh %s "vzlist -a -H -o ctid"' % remote_host)
    # Keep only the fixed-width ctid rows so that ssh identity warnings
    # do not break the conversion to int.
    rows = [line for line in output.split('\n')
            if len(line) == 10 and line.startswith(' ')]
    return [int(row) for row in rows]
def migrate(uid, target_host, live=False): """Migrate given container to a target_host""" if not test_passwordless_ssh(target_host): raise CommandException("Public key ssh connection with the target host could not be established") # is ctid present on the target host? ctid = get_ctid_by_uuid(uid) try: execute("ssh %s vzlist %s" % (target_host, ctid)) raise CommandException("Target host '%s' already has a defined CTID '%s'" % (target_host, ctid)) except CommandException as ce: if ce.code == 256: pass else: raise ce print "Initiating migration to %s..." % target_host live_trigger = '--online' if live else '' for line in execute2("vzmigrate -v %s %s %s" % (live_trigger, target_host, ctid)): print line
def get_ioprio(ctid):
    """ Get VM I/O priority. If priority is entered manually or elsewhere,
    return approximate value based on value table"""
    prio_map = {'0': 0, '1': 0, '2': 0, '3': 0,
                '4': 4, '5': 7, '6': 7, '7': 7}
    reported = execute("vzlist %s -H -o ioprio" % ctid).strip()
    # '-' means the priority is unset; report the default (4).
    return 4 if reported == '-' else prio_map[reported]
def get_kvm_disk_capacity_bytes(path): print "Getting capacity of the kvm disk '%s'" % path res = execute("virt-df --csv %s" % (path)) rows = res.split("\n")[2:] capacity = 0 for row in rows: row_elements = row.split(",") used, available = int(row_elements[3]), int(row_elements[4]) capacity += used + available return capacity * 1024
def list_pools(): """List existing storage pools""" pool_params = [] try: pools = execute("virsh 'pool-list' | tail -n+3 |head -n-1").splitlines() for p in pools: p = re.sub("\s+" , " ", p.strip()) pool_params.append(p.split(' ')) except Exception, e: print "Unable to list storage pools: %s" %e
def cpu_usage():
    """Return fractional CPU usage for the container since the last sample."""
    stat_cmd = "vzctl exec %s \"head -n 1 /proc/stat\"" % vm.ID()
    # user/nice/system/idle jiffies from the first /proc/stat line.
    now = [int(f) for f in execute(stat_cmd).split(' ')[2:6]]
    was = roll_data('/tmp/openvz-vm-cpu-%s' % vm.ID(), now, [0] * 6)
    deltas = [cur - prev for cur, prev in zip(now, was)]
    try:
        # Last delta is idle time; usage is the non-idle share.
        usage = 1 - (float(deltas[-1]) / sum(deltas))
    except ZeroDivisionError:
        usage = 0
    return usage
def prepare_file_system(settings, storage_pool):
    """
    Prepare file system for VM template creation in OVF appliance format:
        - create template directory if it does not exist
        - copy disk based images
        - convert block device based images to file based images

    @param settings: deployment settings; 'disks' lists the disks to prepare
        and 'vm_type' prefixes file-based image names
    @param storage_pool: name of the storage pool under the configured
        storage endpoint
    """
    images_dir = path.join(config.c("general", "storage-endpoint"),
                           storage_pool, "images")
    target_dir = path.join(config.c("general", "storage-endpoint"),
                           storage_pool, "kvm", "unpacked")
    for disk in settings["disks"]:
        disk_template_path = path.join(target_dir, disk["template_name"])
        if disk["deploy_type"] == "file":
            # File based disk: copy the unpacked template image into the
            # pool's images directory under a '<vm_type>-' prefixed name.
            disk_deploy_path = path.join(
                images_dir, settings["vm_type"] + "-" + disk["source_file"])
            shutil.copy2(disk_template_path, disk_deploy_path)
        elif disk["deploy_type"] in ["physical", "lvm"]:
            # Block device target: write the qcow2 template onto the raw device.
            disk_deploy_path = disk["source_dev"]
            execute("qemu-img convert -f qcow2 -O raw %s %s"
                    % (disk_template_path, disk_deploy_path))
def _get_remote_ctid_list(remote_host):
    """Get list of ctid's from remote HN

    @param remote_host: host to connect to (with username if provided)
    """
    raw = execute('ssh %s "vzlist -a -H -o ctid"' % remote_host)
    # Only fixed-width ctid rows are kept, so ssh identity warnings cannot
    # reach the int() conversion.
    return [int(line) for line in raw.split('\n')
            if len(line) == 10 and line.startswith(' ')]
def get_openvz_all_ctids():
    """
    Return a list of current OpenVZ CTs (both running and stopped)

    @return: List of OpenVZ containers on current machine
    @rtype: List
    """
    lines = execute('vzlist --all -H -o ctid').splitlines()
    return [int(line) for line in lines]
def query_openvz(include_running=False, fields='ctid,hostname'):
    """Run a query against OpenVZ

    @param include_running: when False list only stopped containers (-S);
        when True list all containers (-a)
    @param fields: comma-separated vzlist field names to report
    @return: list of rows, one per container, each a list of field values
    """
    include_flag = '-S' if not include_running else '-a'
    vzcontainers = execute("vzlist -H %s -o %s" % (include_flag, fields)).split('\n')
    result = []
    for cont in vzcontainers:
        # Fix: skip blank lines instead of stopping at the first one;
        # 'break' silently truncated the result if a blank line preceded
        # valid rows.
        if len(cont.strip()) == 0:
            continue
        result.append([f for f in cont.strip().split(' ') if len(f) > 0])
    return result
def get_openvz_running_ctids():
    """
    Return a list of currently running OpenVZ CTs

    @return: List of OpenVZ containers on current machine
    @rtype: List
    """
    lines = execute('vzlist -H -o ctid').splitlines()
    # Skip informational lines (e.g. vzlist's 'Container(s) not found').
    return [int(line) for line in lines if 'Container' not in line]
def migrate(conn, uid, target_host, live=False, print_=True):
    """Migrate given container to a target_host

    @param conn: libvirt connection used to resolve the container id
    @param uid: UUID of the container to migrate
    @param target_host: destination host; passwordless ssh access is required
    @param live: perform an online ('--online') migration; may arrive as the
        string 'True' when called through an agent
    @param print_: unused in this implementation; kept for interface
        compatibility
    @raise CommandException: if ssh connectivity is missing or the target
        host already defines this CTID
    """
    if not test_passwordless_ssh(target_host):
        raise CommandException("Public key ssh connection with the target host could not be established")
    log = get_logger()
    # a workaround for the megadynamic nature of python variable type when called via an agent
    live = live == 'True' if type(live) is str else live
    # is ctid present on the target host?
    ctid = get_ctid_by_uuid(conn, uid)
    try:
        execute("ssh %s vzlist %s" % (target_host, ctid))
        # vzlist succeeded, so the CTID already exists on the target.
        raise CommandException("Target host '%s' has an already defined CTID '%s'" % (target_host, ctid))
    except CommandException as ce:
        # Code 256 is treated as 'CTID not defined on the target' -- the
        # desired outcome; anything else is a genuine failure.
        if ce.code != 256:
            raise
    msg = "Initiating migration to %s..." % target_host
    log.info(msg)
    live_trigger = '--online' if live else ''
    for line in execute2("vzmigrate -v %s %s %s" % (live_trigger, target_host, ctid)):
        log.info(line)
def get_openvz_running_ctids():
    """
    Return a list of currently running OpenVZ CTs

    @return: List of OpenVZ containers on current machine
    @rtype: List
    """
    # Informational lines (e.g. 'Container(s) not found') are filtered out.
    return [int(ctid)
            for ctid in execute('vzlist -H -o ctid').splitlines()
            if 'Container' not in ctid]
def get_kvm_disk_capacity_bytes(path):
    """Return total capacity (used + available) of a KVM disk, in bytes."""
    get_logger().info("Getting capacity of the kvm disk '%s'" % path)
    # virt-df CSV output; the first two lines are skipped as non-data rows.
    output = execute("virt-df --csv %s" % (path))
    capacity_kb = 0
    for line in output.split("\n")[2:]:
        cols = line.split(",")
        # Columns 3 and 4 hold the used and available amounts (1K blocks,
        # judging by the final *1024 conversion).
        capacity_kb += int(cols[3]) + int(cols[4])
    return capacity_kb * 1024
def list_pools(): """List existing storage pools""" pool_params = [] try: pools = execute("virsh 'pool-list' | tail -n+3 |head -n-1 | egrep -v '^default-iso |^default '").splitlines() for p in pools: p = re.sub("\s+", " ", p.strip()) pool_params.append(p.split(' ')) except Exception, e: msg = "Unable to list storage pools: %s" % e get_logger().error(msg) print msg
def get_ram_size_gb():
    """Return reclaimable server memory (MemFree + Buffers + Cached) in GB."""
    meminfo_cmds = ["cat /proc/meminfo | grep MemFree",
                    "cat /proc/meminfo | grep Buffers",
                    "cat /proc/meminfo | grep Cached"]
    total_kb = 0
    for cmd in meminfo_cmds:
        output = execute(cmd)
        try:
            # Second whitespace-separated token is the first matching line's
            # value in KB. NOTE(review): 'grep Cached' also matches
            # 'SwapCached'; only the first match's value is read here.
            total_kb += int(output.split()[1])
        except (ValueError, IndexError):
            raise RuntimeError("Unable to calculate OpenNode server memory size")
    return round(total_kb / 1024.0 ** 2, 3)
def prepare_file_system(settings, storage_pool):
    """
    Prepare file system for VM template creation in OVF appliance format:
        - create template directory if it does not exist
        - copy disk based images
        - convert block device based images to file based images

    @param settings: deployment settings; uses 'disks', 'hostname', 'uuid'
        and optionally 'disk' (requested image size, presumably in GB --
        TODO confirm against callers)
    @param storage_pool: name of the storage pool under the configured
        storage endpoint
    """
    config = get_config()
    log = get_logger()
    images_dir = path.join(config.getstring("general", "storage-endpoint"),
                           storage_pool, "images")
    target_dir = path.join(config.getstring("general", "storage-endpoint"),
                           storage_pool, "kvm", "unpacked")
    for disk_index, disk in enumerate(settings.get("disks", [])):
        disk_template_path = path.join(target_dir, disk["template_name"])
        if disk["deploy_type"] == "file":
            # Unique deployed image name: <hostname>-<uuid>-disk<N>.<format>
            volume_name = "disk%s" % disk_index
            disk["source_file"] = '%s-%s-%s.%s' % (settings["hostname"],
                                                   settings["uuid"],
                                                   volume_name,
                                                   disk.get('template_format', 'qcow2'))
            disk_deploy_path = path.join(images_dir, disk["source_file"])
            shutil.copy2(disk_template_path, disk_deploy_path)
            # resize disk to match the requested
            # XXX we assume that the size was already adjusted to the template requirements
            diskspace = settings.get('disk')
            if diskspace:
                diskspace = int(float(diskspace))  # it's str by default. 'string' > int is always true (LEV-116)
                # get the disk size as reported by qemu-img
                current_size = int(execute("qemu-img info %s |grep 'virtual size' |awk '{print $4}' |cut -b2- " % disk_deploy_path)) / 1024 / 1024 / 1024  # to get to GB
                if diskspace > current_size:
                    log.info('Increasing image file %s from %s to %sG' % (disk_deploy_path, current_size, diskspace))
                    execute("qemu-img resize %s %sG" % (disk_deploy_path, diskspace))
                else:
                    # only grow requests are applied; smaller requests are
                    # logged and ignored
                    log.info('Ignoring disk (%s) increase request (to %s) as existing image is already larger (%s)' % (disk_deploy_path, diskspace, current_size))
        elif disk["deploy_type"] in ["physical", "lvm"]:
            # Block device target: write the qcow2 template onto the raw device.
            disk_deploy_path = disk["source_dev"]
            execute("qemu-img convert -f qcow2 -O raw %s %s" % (disk_template_path, disk_deploy_path))
def is_default_pool_modified():
    """Check if there were any modifications done by a user to the default pool"""
    try:
        config = get_config()
        pool_xml = execute("virsh 'pool-dumpxml default'")
        defined_path = parseString(pool_xml).getElementsByTagName(
            'path')[0].lastChild.nodeValue
        # XXX: This will remain as-is right now.
        expected_path = os.path.join(
            config.getstring('general', 'storage-endpoint'),
            config.getstring('general', 'default-storage-pool'),
            'images')
        return str(defined_path) != expected_path
    except CommandException:
        # pool is undefined or we are not sure -> so, assume it's all good
        return False
def cpu_usage():
    """Return fractional CPU usage for the container since the last sample."""
    # user/nice/system/idle jiffies from the first /proc/stat line.
    sample = [int(v) for v in execute(
        "vzctl exec %s \"head -n 1 /proc/stat\"" % vm.ID()).split(' ')[2:6]]
    previous = roll_data('/tmp/openvz-vm-cpu-%s' % vm.ID(), sample, [0] * 6)
    deltas = [a - b for a, b in zip(sample, previous)]
    try:
        # Last delta is idle time; usage is the non-idle share.
        return 1 - (float(deltas[-1]) / sum(deltas))
    except ZeroDivisionError:
        return 0
def configure_bridge(bridge, hello=None, fd=None, stp=None):
    """Set bridge parameters. Only parameters given a non-None value are applied."""
    settings = ((hello, 'brctl sethello %s %d'),
                (fd, 'brctl setfd %s %d'),
                (stp, 'brctl stp %s %s'))
    for value, template in settings:
        if value is not None:
            execute(template % (bridge, value))
def list_pools(): """List existing storage pools""" pool_params = [] try: pools = execute( "virsh 'pool-list' | tail -n+3 |head -n-1 | egrep -v '^default-iso |^default '" ).splitlines() for p in pools: p = re.sub("\s+", " ", p.strip()) pool_params.append(p.split(' ')) except Exception, e: msg = "Unable to list storage pools: %s" % e get_logger().error(msg) print msg
def is_default_pool_modified():
    """Check if there were any modifications done by a user to the default pool"""
    try:
        config = get_config()
        dumped = execute("virsh 'pool-dumpxml default'")
        # Path currently defined in libvirt for the 'default' pool.
        defined_path = parseString(dumped).getElementsByTagName(
            'path')[0].lastChild.nodeValue
        # XXX: This will remain as-is right now.
        configured_path = os.path.join(
            config.getstring('general', 'storage-endpoint'),
            config.getstring('general', 'default-storage-pool'),
            'images')
        return str(defined_path) != configured_path
    except CommandException:
        # pool is undefined or we are not sure -> so, assume it's all good
        return False
def dump_info(vms, csv): """Dump information about VMs of a certain hypervisor into a CSV file""" for vm in vms: ips = vm['interfaces'] if len(ips) > 0: ip = ips[0].get('ipv4_address', ips[0]['mac']) else: ip = 'missing' hn = execute('hostname') # uptime in h uptime_period = 60 * 60.0 name = vm['name'] mem = vm['memory'] / 1024.0 disk = vm['diskspace']['/'] / 1024.0 vcpus = vm['vcpu'] uptime = vm['uptime'] if vm['uptime'] else 0 uptime /= uptime_period print name, mem, disk, vcpus, uptime, ip, hn csv.writerow([name, mem, disk, vcpus, uptime, ip, hn])