def change_ctid(conn, uuid, new_ctid):
    """Change the CTID of an OpenVZ container identified by its UUID.

    Raises NotImplementedError for any hypervisor other than OpenVZ.
    """
    if conn.getType() == "OpenVZ":
        old_ctid = openvz.get_ctid_by_uuid(conn, uuid)
        get_logger().info("Change ctid from %s to %s", old_ctid, new_ctid)
        openvz.change_ctid(old_ctid, new_ctid)
    else:
        raise NotImplementedError("VM type '%s' is not (yet) supported" % conn.getType())
def download(remote, local, continue_=False, silent=False):
    """Download a remote file to a local file, using optional
    username/password for basic HTTP authentication.
    Uses cURL as an external dependency.

    :param remote: source URL (may embed user:password@ credentials)
    :param local: destination file path
    :param continue_: resume a partial download (curl -C -)
    :param silent: suppress curl progress output (curl -s)
    """
    msg = "Getting remote file %s" % remote
    get_logger().info(msg)
    print(msg)
    url = urlparse.urlsplit(remote)
    curl_cmd = ['curl']
    if url.username:
        if 'http_proxy' in os.environ:
            # Strip inline credentials from the URL and pass them via --user.
            if url.password:
                remote = remote.replace('%s:%s@' % (url.username, url.password), '')
                userauth = '%s:%s' % (url.username, url.password)
            else:
                remote = remote.replace('%s@' % url.username, '')
                userauth = url.username
            curl_cmd += ['--anyauth', '--user', userauth]
        else:
            curl_cmd.append('-L')
    if silent:
        curl_cmd += ['-s']
    if continue_:
        curl_cmd += ['-C', '-']
    # BUG FIX: build the file arguments *after* credentials have been
    # stripped from the URL; previously curl_file_args captured the
    # original URL, so the credential-stripping above was dead code.
    curl_file_args = ['-o', local, remote]
    subprocess.call(curl_cmd + curl_file_args)
def download(remote, local, continue_=False, silent=False):
    """Download a remote file to a local file, using optional
    username/password for basic HTTP authentication.
    Uses cURL as an external dependency.

    :param remote: source URL (may embed user:password@ credentials)
    :param local: destination file path
    :param continue_: resume a partial download (curl -C -)
    :param silent: suppress curl progress output (curl -s)
    """
    msg = "Getting remote file %s" % remote
    get_logger().info(msg)
    print(msg)
    url = urlparse.urlsplit(remote)
    curl_cmd = ['curl']
    if url.username:
        if 'http_proxy' in os.environ:
            # Strip inline credentials from the URL and pass them via --user.
            if url.password:
                remote = remote.replace('%s:%s@' % (url.username, url.password), '')
                userauth = '%s:%s' % (url.username, url.password)
            else:
                remote = remote.replace('%s@' % url.username, '')
                userauth = url.username
            curl_cmd += ['--anyauth', '--user', userauth]
        else:
            curl_cmd.append('-L')
    if silent:
        curl_cmd += ['-s']
    if continue_:
        curl_cmd += ['-C', '-']
    # BUG FIX: build the file arguments *after* credentials have been
    # stripped from the URL; previously curl_file_args captured the
    # original URL, so the credential-stripping above was dead code.
    curl_file_args = ['-o', local, remote]
    subprocess.call(curl_cmd + curl_file_args)
def change_ctid(conn, uuid, new_ctid):
    """Change the CTID of an OpenVZ container.

    :param conn: hypervisor connection object (must report type 'OpenVZ')
    :param uuid: UUID of the container whose CTID should change
    :param new_ctid: the CTID to assign
    :raises NotImplementedError: for any hypervisor other than OpenVZ
    """
    if conn.getType() == 'OpenVZ':
        ctid = openvz.get_ctid_by_uuid(conn, uuid)
        get_logger().info('Change ctid from %s to %s', ctid, new_ctid)
        openvz.change_ctid(ctid, new_ctid)
    else:
        raise NotImplementedError("VM type '%s' is not (yet) supported" % conn.getType())
def execute2(cmd):
    """Run *cmd* and yield its combined stdout/stderr line by line.

    :param cmd: command string; tokenized with shlex, NOT run through a shell
    """
    get_logger().debug('execute2 cmd: %s', cmd)
    args = shlex.split(cmd)
    p = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    # BUG FIX: read until EOF instead of polling. The previous version
    # broke out after a single readline() once poll() reported an exit
    # code, silently dropping any output still buffered in the pipe.
    for line in iter(p.stdout.readline, ''):
        yield line
    p.stdout.close()
    p.wait()  # reap the child to avoid leaving a zombie process
def get_kvm_disk_capacity_bytes(path):
    """Return the total capacity (in bytes) of the KVM disk image *path*.

    Parses `virt-df --csv` output: skips the two header lines, then sums
    used + available 1K-blocks over every listed filesystem.
    """
    msg = "Getting capacity of the kvm disk '%s'" % path
    get_logger().info(msg)
    res = execute("virt-df --csv %s" % (path))
    rows = res.split("\n")[2:]
    capacity = 0
    for row in rows:
        # BUG FIX: skip blank lines (e.g. a trailing newline) instead of
        # raising IndexError when splitting them.
        if not row.strip():
            continue
        row_elements = row.split(",")
        used, available = int(row_elements[3]), int(row_elements[4])
        capacity += used + available
    # virt-df reports 1K blocks; convert to bytes.
    return capacity * 1024
def list_pools():
    """List existing storage pools.

    :returns: list of [name, state, autostart] rows parsed from
        `virsh pool-list` (empty list on failure); the 'default' and
        'default-iso' pools are filtered out.
    """
    pool_params = []
    try:
        pools = execute("virsh 'pool-list' | tail -n+3 |head -n-1 | egrep -v '^default-iso |^default '").splitlines()
        for p in pools:
            p = re.sub(r"\s+", " ", p.strip())
            pool_params.append(p.split(' '))
    except Exception as e:
        msg = "Unable to list storage pools: %s" % e
        get_logger().error(msg)
        print(msg)
    # BUG FIX: the result was computed but never returned (implicit None),
    # crashing callers such as add_pool() that iterate over the result.
    return pool_params
def execute2(cmd):
    """Run *cmd* and yield its combined stdout/stderr line by line.

    :param cmd: command string; tokenized with shlex, NOT run through a shell
    """
    get_logger().debug('execute2 cmd: %s', cmd)
    args = shlex.split(cmd)
    p = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    # BUG FIX: read until EOF instead of polling. The previous version
    # broke out after a single readline() once poll() reported an exit
    # code, silently dropping any output still buffered in the pipe.
    for line in iter(p.stdout.readline, ''):
        yield line
    p.stdout.close()
    p.wait()  # reap the child to avoid leaving a zombie process
def list_pools():
    """List existing storage pools.

    :returns: list of [name, state, autostart] rows parsed from
        `virsh pool-list` (empty list on failure); the 'default' and
        'default-iso' pools are filtered out.
    """
    pool_params = []
    try:
        pools = execute("virsh 'pool-list' | tail -n+3 |head -n-1 | egrep -v '^default-iso |^default '").splitlines()
        for p in pools:
            p = re.sub(r"\s+", " ", p.strip())
            pool_params.append(p.split(' '))
    except Exception as e:
        msg = "Unable to list storage pools: %s" % e
        get_logger().error(msg)
        print(msg)
    # BUG FIX: the result was computed but never returned (implicit None),
    # crashing callers such as add_pool() that iterate over the result.
    return pool_params
def setup_scripts(vm_settings, storage_pool):
    """Set up OpenVZ action scripts (mount/umount hooks etc.) for the CT.

    Extracts <template>.scripts.tar.gz from the pool's openvz/unpacked
    directory and installs each member as /etc/vz/conf/<vm_id>.<script>.
    Missing script archives are logged and ignored.
    """
    dest_dir = path.join(get_config().getstring('general', 'storage-endpoint'),
                         storage_pool, "openvz")
    unpacked_dir = path.join(dest_dir, "unpacked")
    ct_scripts_fnm = path.join(unpacked_dir,
                               "%s.scripts.tar.gz" % vm_settings["template_name"])
    dest_folder = '/etc/vz/conf/%s' % vm_settings['vm_id']
    try:
        with closing(tarfile.open(ct_scripts_fnm, "r:gz")) as tar:
            for f in tar:
                target_script = '%s.%s' % (dest_folder, f.name)
                # BUG FIX: close the target file instead of leaking the handle
                with open(target_script, 'w') as out:
                    shutil.copyfileobj(tar.extractfile(f), out)
                os.chmod(target_script, 0o755)  # action scripts must be executable
    except (IOError, OSError, tarfile.TarError):
        # Templates without action scripts are valid. The previous bare
        # `except` also masked unrelated errors (e.g. KeyboardInterrupt).
        get_logger().warn("No action scripts in the template.")
def execute(cmd):
    """Run *cmd* in a shell and return its output.

    Raises CommandException for a non-zero exit status. vzctl commands get
    special treatment: their exit status is decoded via OpenVZ_EXIT_STATUS.
    TODO: add other vz family commmands
    """
    get_logger().debug('execute cmd: %s', cmd)
    status, output = commands.getstatusoutput("LC_ALL=C %s" % cmd)
    if status != 0:
        if cmd.startswith('vzctl'):
            exit_code = status >> 8
            decoded = OpenVZ_EXIT_STATUS[cmd.split(' ')[0]][exit_code]
            raise CommandException("Failed to execute command '%s'. Status: '%s'. Message: '%s'. Output: '%s'"
                                   % (cmd, exit_code, decoded, output), exit_code)
        raise CommandException("Failed to execute command '%s'. Status: '%s'. Output: '%s'"
                               % (cmd, status, output), status)
    get_logger().debug('execute returned: %s', output)
    return output
def migrate(conn, uid, target_host, live=False, print_=True):
    """Migrate given container to a target_host."""
    if not test_passwordless_ssh(target_host):
        raise CommandException("Public key ssh connection with the target host could not be established")
    log = get_logger()
    # a workaround for the megadynamic nature of python variable type when called via an agent
    if type(live) is str:
        live = (live == 'True')
    ctid = get_ctid_by_uuid(conn, uid)
    # is ctid present on the target host?
    try:
        execute("ssh %s vzlist %s" % (target_host, ctid))
        raise CommandException("Target host '%s' has an already defined CTID '%s'" % (target_host, ctid))
    except CommandException as ce:
        if ce.code != 256:
            raise
    log.info("Initiating migration to %s..." % target_host)
    live_trigger = '--online' if live else ''
    migration_cmd = "vzmigrate -v %s %s %s" % (live_trigger, target_host, ctid)
    for line in execute2(migration_cmd):
        log.info(line)
def prepare_storage_pool(storage_pool=None, check_libvirt=True):
    """Assure that the storage pool has the correct folder structure.

    :param storage_pool: pool name; defaults to the *current* default pool.
        (BUG FIX: the default used to be bound once at import time via
        `storage_pool=get_default_pool()`, ignoring later config changes.)
    :param check_libvirt: also register the pool in libvirt if missing
    """
    if storage_pool is None:
        storage_pool = get_default_pool()
    # create structure
    storage_pool_path = "%s/%s" % (get_config().getstring('general', 'storage-endpoint'),
                                   storage_pool)
    mkdir_p("%s/iso/" % storage_pool_path)
    mkdir_p("%s/images/" % storage_pool_path)
    mkdir_p("%s/openvz/unpacked" % storage_pool_path)
    mkdir_p("%s/kvm/unpacked" % storage_pool_path)
    # assure that the storage is created also in libvirt
    if check_libvirt:
        try:
            get_pool_path(storage_pool)
        except CommandException:
            # BUG FIX: the '%s' placeholder previously had no argument
            get_logger().warning('Default pool %s was missing from libvirt. Adding...',
                                 storage_pool)
            add_pool(storage_pool)
def vm_diskspace(vm):
    """Return disk usage for *vm* as {'/': <size in MB>}.

    OpenVZ containers are delegated to openvz.get_diskspace(); for KVM the
    sizes of all detected block devices are summed via helper scripts.
    NOTE(review): relies on a module-level `conn` (hypervisor connection)
    that is not a parameter -- confirm it is defined in this module.
    """
    log = get_logger()
    if conn.getType() == 'OpenVZ':
        return {'/': openvz.get_diskspace(vm.name())}
    # return a total sum of block devices used by KVM VM
    # get list of block devices of a file type
    try:
        script_path = get_config().getstring('general', 'script_prefix',
                                             '/opt/opennode/bin/')
        cmd = os.path.join(
            script_path, 'libvirt_detect_domain_devices.sh %s' % vm.name())
        devices = execute(cmd).split('\n')
        total_bytes = 0.0  # despite the name, accumulated in MB (see below)
        for dev_path in devices:
            if dev_path.strip() == '-':
                continue  # simple protection against non-disk base devices
            cmd = os.path.join(
                script_path,
                'libvirt_get_device_size.sh %s %s' % (vm.name(), dev_path))
            total_bytes += int(execute(
                cmd)) / 1024.0 / 1024.0  # we want result to be in MB
    except CommandException as ce:
        log.debug('Failed diskspace detection: \'%s\'' % ce)
        total_bytes = 0.0
    except ValueError as ve:
        log.debug('Failed diskspace conversion: \'%s\'' % ve)
        total_bytes = 0.0
    return {'/': total_bytes}
def prepare_storage_pool(storage_pool=None, check_libvirt=True):
    """Assure that the storage pool has the correct folder structure.

    :param storage_pool: pool name; defaults to the *current* default pool.
        (BUG FIX: the default used to be bound once at import time via
        `storage_pool=get_default_pool()`, ignoring later config changes.)
    :param check_libvirt: also register the pool in libvirt if missing
    """
    if storage_pool is None:
        storage_pool = get_default_pool()
    # create structure
    storage_pool_path = "%s/%s" % (get_config().getstring('general', 'storage-endpoint'),
                                   storage_pool)
    mkdir_p("%s/iso/" % storage_pool_path)
    mkdir_p("%s/images/" % storage_pool_path)
    mkdir_p("%s/openvz/unpacked" % storage_pool_path)
    mkdir_p("%s/kvm/unpacked" % storage_pool_path)
    # assure that the storage is created also in libvirt
    if check_libvirt:
        try:
            get_pool_path(storage_pool)
        except CommandException:
            # BUG FIX: the '%s' placeholder previously had no argument
            get_logger().warning('Default pool %s was missing from libvirt. Adding...',
                                 storage_pool)
            add_pool(storage_pool)
def add_pool(pool_name, careful=True):
    """Add a new libvirt storage pool named *pool_name*."""
    if careful and filter(lambda p: p[0] == pool_name, list_pools()):
        get_logger().warn("Pool '%s' already exists." % pool_name)
        return
    try:
        safe_name = re.sub(" ", "_", pool_name)  # safety measure
        endpoint = get_config().getstring('general', 'storage-endpoint')
        pool_path = os.path.join(endpoint, safe_name)
        mkdir_p(pool_path)
        prepare_storage_pool(safe_name, check_libvirt=False)
        execute("virsh 'pool-define-as %s dir --target %s'" % (safe_name, pool_path))
        execute("virsh 'pool-start %s'" % safe_name)
        execute("virsh 'pool-autostart %s'" % safe_name)
    except Exception as e:
        get_logger().error("Failed to create a new pool: %s" % e)
def save_as_ovf(vm_settings, storage_pool):
    """
    Creates ovf template archive for the specified container.
    Steps:
        - archive container directory
        - generate ovf configuration file
        - pack ovf and container archive into tar.gz file

    :param vm_settings: container description; "vm_name" locates the
        private area under /vz/private, "template_name" names the output
    :param storage_pool: storage pool whose openvz/ directory receives
        the resulting .ova archive
    """
    dest_dir = path.join(get_config().getstring('general', 'storage-endpoint'),
                         storage_pool, "openvz")
    unpacked_dir = path.join(dest_dir, "unpacked")
    ct_archive_fnm = path.join(unpacked_dir,
                               "%s.tar.gz" % vm_settings["template_name"])
    # container private area lives under /vz/private/<vm_name>
    ct_source_dir = path.join("/vz/private", vm_settings["vm_name"])
    # Pack vm container catalog
    log = get_logger()
    msg = "Archiving VM container catalog %s. This may take a while..." % ct_source_dir
    log.info(msg)
    with closing(tarfile.open(ct_archive_fnm, "w:gz")) as tar:
        for f in os.listdir(ct_source_dir):
            tar.add(path.join(ct_source_dir, f), arcname=f)
    # Archive action scripts if they are present
    msg = "Adding action scripts..."
    log.info(msg)
    ct_scripts_fnm = path.join(
        unpacked_dir, "%s.scripts.tar.gz" % vm_settings["template_name"])
    with closing(tarfile.open(ct_scripts_fnm, "w:gz")) as tar:
        for script_type in [
                'premount', 'mount', 'start', 'stop', 'umount', 'postumount'
        ]:
            script_fnm = "/etc/vz/conf/%s.%s" % (vm_settings["vm_name"], script_type)
            if os.path.exists(script_fnm):
                tar.add(script_fnm, arcname=script_type)
    # generate and save ovf configuration file
    msg = "Generating ovf file..."
    log.info(msg)
    ovf = _generate_ovf_file(vm_settings, ct_archive_fnm)
    ovf_fnm = path.join(unpacked_dir, "%s.ovf" % vm_settings["template_name"])
    with open(ovf_fnm, 'w') as f:
        ovf.writeFile(f, pretty=True, encoding='UTF-8')
    # pack container archive and ovf file
    msg = "Archiving..."
    log.info(msg)
    ovf_archive_fnm = path.join(dest_dir, "%s.ova" % vm_settings["template_name"])
    with closing(tarfile.open(ovf_archive_fnm, "w")) as tar:
        tar.add(ct_archive_fnm, arcname=path.basename(ct_archive_fnm))
        tar.add(ovf_fnm, arcname=path.basename(ovf_fnm))
        tar.add(ct_scripts_fnm, arcname=path.basename(ct_scripts_fnm))
    calculate_hash(ovf_archive_fnm)
    log.info("Done! Saved template at %s" % ovf_archive_fnm)
def deploy(ovf_settings, storage_pool):
    """Deploy an OpenVZ container described by *ovf_settings*.

    Links the template into the pool, generates config, creates the CT,
    configures networking/hostname/password/action scripts, and applies
    start/onboot/ioprio settings via vzctl.
    """
    log = get_logger()
    # make sure we have required template present and symlinked
    link_template(storage_pool, ovf_settings["template_name"])
    log.info("Generating configuration...")
    generate_config(ovf_settings)
    log.info("Creating OpenVZ container...")
    create_container(ovf_settings)
    log.info("Deploying...")
    nameservers = ovf_settings.get("nameservers", None)
    if not nameservers:
        nameservers = [ovf_settings.get("nameserver", '8.8.8.8')]
    # XXX a hack to set a working dns if NS is malformed for some reason (OMS-444)
    if (type(nameservers) == str and len(nameservers) < 7):
        nameservers = ['8.8.8.8']
    execute("vzctl set %s %s --save" % (ovf_settings["vm_id"],
                                        ' '.join('--nameserver %s' % i for i in nameservers)))
    execute("vzctl set %s --ipadd %s --save" % (ovf_settings["vm_id"], ovf_settings["ip_address"]))
    execute("vzctl set %s --hostname %s --save" % (ovf_settings["vm_id"], ovf_settings["hostname"]))
    if len(ovf_settings['passwd']) > 0:
        execute("vzctl set %s --userpasswd 'root:%s' --save" % (ovf_settings["vm_id"], ovf_settings["passwd"]))
    log.info("Setting up action scripts...")
    setup_scripts(ovf_settings, storage_pool)
    if ovf_settings.get('appliance_type') == 'oms':
        oms.configure_oms_vm(ovf_settings["vm_id"], ovf_settings["hostname"])
    if ovf_settings.get("startvm", 0) == 1:
        execute("vzctl start %s" % (ovf_settings["vm_id"]))
    if ovf_settings.get("onboot", 0) == 1:
        execute("vzctl set %s --onboot yes --save" % (ovf_settings["vm_id"]))
    else:
        execute("vzctl set %s --onboot no --save" % (ovf_settings["vm_id"]))
    # BUG FIX: ovf_settings["ioprio"] raised KeyError when the key was
    # absent even though .get("ioprio", 4) made the condition true; use
    # the fetched value (and %s, since it may arrive as a string).
    ioprio = ovf_settings.get("ioprio", 4)
    if ioprio:
        execute("vzctl set %s --ioprio %s --save" % (ovf_settings["vm_id"], ioprio))
    log.info("Template %s deployed successfully!" % ovf_settings["vm_id"])
def setup_scripts(vm_settings, storage_pool):
    """Set up OpenVZ action scripts (mount/umount hooks etc.) for the CT.

    Extracts <template>.scripts.tar.gz from the pool's openvz/unpacked
    directory and installs each member as /etc/vz/conf/<vm_id>.<script>.
    Missing script archives are logged and ignored.
    """
    dest_dir = path.join(get_config().getstring('general', 'storage-endpoint'),
                         storage_pool, "openvz")
    unpacked_dir = path.join(dest_dir, "unpacked")
    ct_scripts_fnm = path.join(unpacked_dir,
                               "%s.scripts.tar.gz" % vm_settings["template_name"])
    dest_folder = '/etc/vz/conf/%s' % vm_settings['vm_id']
    try:
        with closing(tarfile.open(ct_scripts_fnm, "r:gz")) as tar:
            for f in tar:
                target_script = '%s.%s' % (dest_folder, f.name)
                # BUG FIX: close the target file instead of leaking the handle
                with open(target_script, 'w') as out:
                    shutil.copyfileobj(tar.extractfile(f), out)
                os.chmod(target_script, 0o755)  # action scripts must be executable
    except (IOError, OSError, tarfile.TarError):
        # Templates without action scripts are valid. The previous bare
        # `except` also masked unrelated errors (e.g. KeyboardInterrupt).
        get_logger().warn("No action scripts in the template.")
def add_pool(pool_name, careful=True):
    """Add a new pool_name.

    :param pool_name: desired pool name; spaces are replaced with '_'
    :param careful: if True, do nothing when a pool with this name exists
    """
    if careful and filter(lambda p: p[0] == pool_name, list_pools()):
        msg = "Pool '%s' already exists." % pool_name
        get_logger().warn(msg)
        return
    try:
        pool_name = re.sub(" ", "_", pool_name)  # safety measure
        pool_path = os.path.join(
            get_config().getstring('general', 'storage-endpoint'), pool_name)
        mkdir_p(pool_path)
        # create the on-disk folder structure before registering with libvirt
        prepare_storage_pool(pool_name, check_libvirt=False)
        execute("virsh 'pool-define-as %s dir --target %s'" % (pool_name, pool_path))
        execute("virsh 'pool-start %s'" % pool_name)
        execute("virsh 'pool-autostart %s'" % pool_name)
    except Exception, e:
        msg = "Failed to create a new pool: %s" % e
        get_logger().error(msg)
def deploy(settings, storage_pool):
    """Deploy a KVM template: copy disks, then register the libvirt domain.

    :param settings: deployment settings (disks, hostname, uuid, ...)
    :param storage_pool: name of the storage pool to deploy into
    """
    log = get_logger()
    log.info("Copying KVM template disks (this may take a while)...")
    prepare_file_system(settings, storage_pool)
    log.info("Generating KVM VM configuration...")
    libvirt_conf_dom = generate_libvirt_conf(settings)
    log.info("Finalizing KVM template deployment...")  # typo fix: was "Finalyzing"
    conn = libvirt.open("qemu:///system")
    try:
        conn.defineXML(libvirt_conf_dom.toxml())
    finally:
        # BUG FIX: the libvirt connection was never closed (handle leak)
        conn.close()
    log.info("Deployment done!")
def execute(cmd):
    """
    Run cmd in a shell, return output of the execution. Raise exception for
    non-0 return code. vzctl gets special treatment.
    TODO: add other vz family commmands
    """
    get_logger().debug('execute cmd: %s', cmd)
    # BUG FIX: cmd used to be reassigned to "LC_ALL=C <cmd>" *before* the
    # vzctl check below, so cmd.startswith('vzctl') could never match and
    # vzctl exit statuses were never decoded. Keep the prefix local to the
    # shell invocation instead (consistent with the other execute variant).
    status, output = commands.getstatusoutput("LC_ALL=C %s" % cmd)
    if status != 0:
        if cmd.startswith('vzctl'):
            raise CommandException(
                "Failed to execute command '%s'. Status: '%s'. Message: '%s'. Output: '%s'"
                % (cmd, status >> 8,
                   OpenVZ_EXIT_STATUS[cmd.split(' ')[0]][status >> 8], output),
                status >> 8)
        raise CommandException(
            "Failed to execute command '%s'. Status: '%s'. Output: '%s'"
            % (cmd, status, output), status)
    get_logger().debug('execute returned: %s', output)
    return output
def save_as_ovf(vm_settings, storage_pool, unpack=True):
    """
    Creates ovf template archive for the specified VM.
    Steps:
        - relocate kvm disk files
        - generate ovf configuration file
        - pack ovf and disk files into tar.gz file
        - (if unpack) leave generated files as unpacked

    :param vm_settings: VM description; "disks" is filled in here by
        _prepare_disks() and "template_name" names the output files
    :param storage_pool: storage pool whose kvm/ directory receives the
        resulting .ova archive
    :param unpack: if True, keep the .ovf and disk files under
        kvm/unpacked; if False, delete them after archiving
    """
    config = get_config()
    log = get_logger()
    target_dir = path.join(config.getstring('general', 'storage-endpoint'),
                           storage_pool, "kvm")
    if unpack:
        target_dir = path.join(target_dir, 'unpacked')
    # prepare file system
    msg = "Preparing disks... (This may take a while)"
    log.info(msg)
    vm_settings["disks"] = _prepare_disks(vm_settings, target_dir)
    # generate and save ovf configuration file
    msg = "Generating ovf file..."
    log.info(msg)
    ovf = _generate_ovf_file(vm_settings)
    ovf_fnm = path.join(target_dir, "%s.ovf" % vm_settings["template_name"])
    with open(ovf_fnm, 'w') as f:
        ovf.writeFile(f, pretty=True, encoding='UTF-8')
    # pack container archive and ovf file
    msg = "Archiving..."
    log.info(msg)
    arch_location = path.join(config.getstring('general', 'storage-endpoint'),
                              storage_pool, "kvm")
    ovf_archive_fnm = path.join(arch_location,
                                "%s.ova" % vm_settings["template_name"])
    with closing(tarfile.open(ovf_archive_fnm, "w")) as tar:
        tar.add(ovf_fnm, arcname=path.basename(ovf_fnm))
        for disk in vm_settings["disks"]:
            tar.add(disk["new_path"], arcname=path.basename(disk["new_path"]))
    # remove generated files
    if not unpack:
        os.remove(ovf_fnm)
        for disk in vm_settings["disks"]:
            os.remove(disk["new_path"])
    calculate_hash(ovf_archive_fnm)
    msg = "Done! Template saved at %s" % ovf_archive_fnm
    log.info(msg)
def save_as_ovf(vm_settings, storage_pool):
    """
    Creates ovf template archive for the specified container.
    Steps:
        - archive container directory
        - generate ovf configuration file
        - pack ovf and container archive into tar.gz file

    :param vm_settings: container description; "vm_name" locates the
        private area under /vz/private, "template_name" names the output
    :param storage_pool: storage pool whose openvz/ directory receives
        the resulting .ova archive
    """
    dest_dir = path.join(get_config().getstring('general', 'storage-endpoint'),
                         storage_pool, "openvz")
    unpacked_dir = path.join(dest_dir, "unpacked")
    ct_archive_fnm = path.join(unpacked_dir,
                               "%s.tar.gz" % vm_settings["template_name"])
    # container private area lives under /vz/private/<vm_name>
    ct_source_dir = path.join("/vz/private", vm_settings["vm_name"])
    # Pack vm container catalog
    log = get_logger()
    msg = "Archiving VM container catalog %s. This may take a while..." % ct_source_dir
    log.info(msg)
    with closing(tarfile.open(ct_archive_fnm, "w:gz")) as tar:
        for f in os.listdir(ct_source_dir):
            tar.add(path.join(ct_source_dir, f), arcname=f)
    # Archive action scripts if they are present
    msg = "Adding action scripts..."
    log.info(msg)
    ct_scripts_fnm = path.join(unpacked_dir,
                               "%s.scripts.tar.gz" % vm_settings["template_name"])
    with closing(tarfile.open(ct_scripts_fnm, "w:gz")) as tar:
        for script_type in ['premount', 'mount', 'start', 'stop', 'umount', 'postumount']:
            script_fnm = "/etc/vz/conf/%s.%s" % (vm_settings["vm_name"], script_type)
            if os.path.exists(script_fnm):
                tar.add(script_fnm, arcname=script_type)
    # generate and save ovf configuration file
    msg = "Generating ovf file..."
    log.info(msg)
    ovf = _generate_ovf_file(vm_settings, ct_archive_fnm)
    ovf_fnm = path.join(unpacked_dir, "%s.ovf" % vm_settings["template_name"])
    with open(ovf_fnm, 'w') as f:
        ovf.writeFile(f, pretty=True, encoding='UTF-8')
    # pack container archive and ovf file
    msg = "Archiving..."
    log.info(msg)
    ovf_archive_fnm = path.join(dest_dir, "%s.ova" % vm_settings["template_name"])
    with closing(tarfile.open(ovf_archive_fnm, "w")) as tar:
        tar.add(ct_archive_fnm, arcname=path.basename(ct_archive_fnm))
        tar.add(ovf_fnm, arcname=path.basename(ovf_fnm))
        tar.add(ct_scripts_fnm, arcname=path.basename(ct_scripts_fnm))
    calculate_hash(ovf_archive_fnm)
    log.info("Done! Saved template at %s" % ovf_archive_fnm)
def shutdown_vm(conn, uuid):
    """Shutdown VM with a given UUID.

    Resolves the CTID and runs `vzctl stop`. If stopping fails with exit
    code 13056 (umount failure), retries `vzctl umount` up to 5 times with
    a 3 second pause between attempts.
    """
    ctid = get_ctid_by_uuid(conn, uuid)
    log = get_logger()
    try:
        msg = execute("vzctl stop %s" % ctid)
        log.info(msg)
    except CommandException as e:
        if e.code == 13056:  # sometimes umount fails
            for i in range(5):
                try:
                    msg = execute("vzctl umount %s" % ctid)
                    log.info(msg)
                    break  # BUG FIX: stop retrying once the umount succeeds
                except CommandException:
                    time.sleep(3)
def prepare_file_system(settings, storage_pool):
    """
    Prepare file system for VM template creation in OVF appliance format:
        - create template directory if it does not exist
        - copy disk based images
        - convert block device based images to file based images

    :param settings: deployment settings; "disks" entries are updated in
        place (file-backed disks get a "source_file" key)
    :param storage_pool: storage pool whose images/ directory receives
        the deployed disk files
    """
    config = get_config()
    log = get_logger()
    images_dir = path.join(config.getstring("general", "storage-endpoint"),
                           storage_pool, "images")
    target_dir = path.join(config.getstring("general", "storage-endpoint"),
                           storage_pool, "kvm", "unpacked")
    for disk_index, disk in enumerate(settings.get("disks", [])):
        disk_template_path = path.join(target_dir, disk["template_name"])
        if disk["deploy_type"] == "file":
            # deployed image name: <hostname>-<uuid>-diskN.<format>
            volume_name = "disk%s" % disk_index
            disk["source_file"] = '%s-%s-%s.%s' % (
                settings["hostname"], settings["uuid"], volume_name,
                disk.get('template_format', 'qcow2'))
            disk_deploy_path = path.join(images_dir, disk["source_file"])
            shutil.copy2(disk_template_path, disk_deploy_path)
            # resize disk to match the requested
            # XXX we assume that the size was already adjusted to the template requirements
            diskspace = settings.get('disk')
            if diskspace:
                diskspace = int(
                    float(diskspace)
                )  # it's str by default. 'string' > int is always true (LEV-116)
                # get the disk size
                current_size = int(
                    execute(
                        "qemu-img info %s |grep 'virtual size' |awk '{print $4}' |cut -b2- "
                        % disk_deploy_path)) / 1024 / 1024 / 1024  # to get to GB
                if diskspace > current_size:
                    log.info('Increasing image file %s from %s to %sG' %
                             (disk_deploy_path, current_size, diskspace))
                    execute("qemu-img resize %s %sG" % (disk_deploy_path, diskspace))
                else:
                    log.info(
                        'Ignoring disk (%s) increase request (to %s) as existing image is already larger (%s)'
                        % (disk_deploy_path, diskspace, current_size))
        elif disk["deploy_type"] in ["physical", "lvm"]:
            # block-device target: convert the qcow2 template to raw on the device
            disk_deploy_path = disk["source_dev"]
            execute("qemu-img convert -f qcow2 -O raw %s %s" %
                    (disk_template_path, disk_deploy_path))
def migrate(conn, uid, target_host, live=False, print_=True):
    """Migrate given container to a target_host.

    :param conn: hypervisor connection
    :param uid: UUID of the container to migrate
    :param target_host: destination host (passwordless ssh required)
    :param live: perform a live ('--online') migration; may arrive as the
        string 'True' when called through the agent
    :param print_: unused in this body -- NOTE(review): confirm callers
    :raises CommandException: if ssh cannot connect or the CTID already
        exists on the target host
    """
    if not test_passwordless_ssh(target_host):
        raise CommandException("Public key ssh connection with the target host could not be established")
    log = get_logger()
    # a workaround for the megadynamic nature of python variable type when called via an agent
    live = live == 'True' if type(live) is str else live
    # is ctid present on the target host?
    ctid = get_ctid_by_uuid(conn, uid)
    try:
        execute("ssh %s vzlist %s" % (target_host, ctid))
        # vzlist succeeded => the CTID is already taken on the target
        raise CommandException("Target host '%s' has an already defined CTID '%s'" % (target_host, ctid))
    except CommandException as ce:
        # 256 (shell exit status 1) is treated as "CTID not present" -- the expected outcome
        if ce.code != 256:
            raise
    msg = "Initiating migration to %s..." % target_host
    log.info(msg)
    live_trigger = '--online' if live else ''
    for line in execute2("vzmigrate -v %s %s %s" % (live_trigger, target_host, ctid)):
        log.info(line)
def prepare_file_system(settings, storage_pool):
    """
    Prepare file system for VM template creation in OVF appliance format:
        - create template directory if it does not exist
        - copy disk based images
        - convert block device based images to file based images

    :param settings: deployment settings; "disks" entries are updated in
        place (file-backed disks get a "source_file" key)
    :param storage_pool: storage pool whose images/ directory receives
        the deployed disk files
    """
    config = get_config()
    log = get_logger()
    images_dir = path.join(config.getstring("general", "storage-endpoint"),
                           storage_pool, "images")
    target_dir = path.join(config.getstring("general", "storage-endpoint"),
                           storage_pool, "kvm", "unpacked")
    for disk_index, disk in enumerate(settings.get("disks", [])):
        disk_template_path = path.join(target_dir, disk["template_name"])
        if disk["deploy_type"] == "file":
            # deployed image name: <hostname>-<uuid>-diskN.<format>
            volume_name = "disk%s" % disk_index
            disk["source_file"] = '%s-%s-%s.%s' % (settings["hostname"],
                                                   settings["uuid"],
                                                   volume_name,
                                                   disk.get('template_format', 'qcow2'))
            disk_deploy_path = path.join(images_dir, disk["source_file"])
            shutil.copy2(disk_template_path, disk_deploy_path)
            # resize disk to match the requested
            # XXX we assume that the size was already adjusted to the template requirements
            diskspace = settings.get('disk')
            if diskspace:
                diskspace = int(float(diskspace))  # it's str by default. 'string' > int is always true (LEV-116)
                # get the disk size
                current_size = int(execute("qemu-img info %s |grep 'virtual size' |awk '{print $4}' |cut -b2- " % disk_deploy_path)) / 1024 / 1024 / 1024  # to get to GB
                if diskspace > current_size:
                    log.info('Increasing image file %s from %s to %sG' % (disk_deploy_path, current_size, diskspace))
                    execute("qemu-img resize %s %sG" % (disk_deploy_path, diskspace))
                else:
                    log.info('Ignoring disk (%s) increase request (to %s) as existing image is already larger (%s)' % (disk_deploy_path, diskspace, current_size))
        elif disk["deploy_type"] in ["physical", "lvm"]:
            # block-device target: convert the qcow2 template to raw on the device
            disk_deploy_path = disk["source_dev"]
            execute("qemu-img convert -f qcow2 -O raw %s %s" % (disk_template_path, disk_deploy_path))
def vm_diskspace(vm):
    """Return disk usage of *vm* as {'/': <size in MB>}.

    OpenVZ containers are delegated to openvz.get_diskspace(); for KVM the
    capacities of all file-backed block devices are summed via virsh.
    NOTE(review): relies on a module-level `conn` (hypervisor connection)
    rather than a parameter -- confirm it exists in this module.
    """
    log = get_logger()
    if conn.getType() == 'OpenVZ':
        return {'/': openvz.get_diskspace(vm.name())}
    # return a total sum of block devices used by KVM VM
    # get list of block devices of a file type
    try:
        cmd = "virsh domblklist --details %s | grep ^file | awk '{print $4}'" % vm.name()
        devices = execute(cmd).split('\n')
        total_bytes = 0.0  # despite the name, accumulated in MB (see below)
        for dev_path in devices:
            if dev_path.strip() == '-':
                continue  # simple protection against non-disk base devices
            cmd = "virsh domblkinfo %s %s |grep ^Capacity| awk '{print $2}'" % (vm.name(), dev_path)
            total_bytes += int(execute(cmd)) / 1024.0 / 1024.0  # we want result to be in MB
    except CommandException as ce:
        log.debug('Failed diskspace detection: \'%s\'' % ce)
        total_bytes = 0.0
    except ValueError as ve:
        log.debug('Failed diskspace conversion: \'%s\'' % ve)
        total_bytes = 0.0
    return {'/': total_bytes}
def vm_diskspace(vm):
    """Return disk usage of *vm* as {"/": <size in MB>}.

    OpenVZ containers are delegated to openvz.get_diskspace(); for KVM the
    sizes of all detected block devices are summed via helper scripts.
    NOTE(review): relies on a module-level `conn` (hypervisor connection)
    rather than a parameter -- confirm it exists in this module.
    """
    log = get_logger()
    if conn.getType() == "OpenVZ":
        return {"/": openvz.get_diskspace(vm.name())}
    # return a total sum of block devices used by KVM VM
    # get list of block devices of a file type
    try:
        script_path = get_config().getstring("general", "script_prefix",
                                             "/opt/opennode/bin/")
        cmd = os.path.join(script_path,
                           "libvirt_detect_domain_devices.sh %s" % vm.name())
        devices = execute(cmd).split("\n")
        total_bytes = 0.0  # despite the name, accumulated in MB (see below)
        for dev_path in devices:
            if dev_path.strip() == "-":
                continue  # simple protection against non-disk base devices
            cmd = os.path.join(script_path,
                               "libvirt_get_device_size.sh %s %s" % (vm.name(), dev_path))
            total_bytes += int(execute(cmd)) / 1024.0 / 1024.0  # we want result to be in MB
    except CommandException as ce:
        log.debug("Failed diskspace detection: '%s'" % ce)
        total_bytes = 0.0
    except ValueError as ve:
        log.debug("Failed diskspace conversion: '%s'" % ve)
        total_bytes = 0.0
    return {"/": total_bytes}
def __init__(self, msg, code=None):
    """Create a CommandException and log it.

    :param msg: human-readable failure description (passed to Exception)
    :param code: numeric exit status of the failed command, if known;
        callers inspect self.code (e.g. the 256 check in migrate())
    """
    super(CommandException, self).__init__(msg)
    # keep the exit status so callers can distinguish failure modes
    self.code = code
    get_logger().error('Command exception: %s', msg)
from opennode.cli.config import get_config from opennode.cli.actions import storage, vm as vm_ops from opennode.cli.actions.utils import delete, calculate_hash, execute_in_screen, execute, download from opennode.cli.actions.utils import urlopen, TemplateException from opennode.cli.log import get_logger __all__ = ['get_template_repos', 'get_template_list', 'sync_storage_pool', 'sync_template', 'delete_template', 'unpack_template', 'get_local_templates', 'sync_oms_template', 'is_fresh', 'is_syncing'] __context__ = {} log = get_logger() def _simple_download_hook(count, blockSize, totalSize): """Simple download counter""" log.info("% 3.1f%% of %d bytes\r" % (min(100, float(blockSize * count) / totalSize * 100), totalSize)) def get_template_repos(): """Return a list of formatted strings describing configured repositories""" config = get_config() repo_groups = config.getstring('general', 'repo-groups').split(',') result = [] for r in repo_groups: group = "%s-repo" % r.strip()
from opennode.cli.config import get_config from opennode.cli.actions import storage, vm as vm_ops from opennode.cli.actions.utils import delete, calculate_hash, execute_in_screen, execute, download from opennode.cli.actions.utils import urlopen, TemplateException from opennode.cli.log import get_logger __all__ = ['get_template_repos', 'get_template_list', 'sync_storage_pool', 'sync_template', 'delete_template', 'unpack_template', 'get_local_templates', 'sync_oms_template', 'is_fresh', 'is_syncing'] __context__ = {} log = get_logger() def _simple_download_hook(count, blockSize, totalSize): """Simple download counter""" log.info("% 3.1f%% of %d bytes\r" % (min(100, float(blockSize * count) / totalSize * 100), totalSize)) def get_template_repos(): """Return a list of formatted strings describing configured repositories""" config = get_config() repo_groups = config.getstring('general', 'repo-groups').split(',') # XXX: Get autodetexted backends from config. If host has no kvm # capability then don't display KVM repo for template download. backends = config.getstring('general', 'backends').split(',')