def _get_unpacked_base(vm_type):
    """Return the path of the 'unpacked' template directory for *vm_type*.

    @param vm_type: 'openvz' or 'kvm'
    """
    parts = [
        get_config().getstring('general', 'storage-endpoint'),
        get_config().getstring('general', 'default-storage-pool'),
        vm_type,
        'unpacked',
    ]
    return os.path.join(*parts)
def _configure():
    """Configure the shared logger once: file, stdout and stderr handlers.

    The level comes from the 'general/loglevel' option (default INFO).
    Guarded by both a logger attribute and the module-global ``_configured``
    flag so repeated calls do not stack duplicate handlers.
    """
    logger = get_logger()
    global _configured
    if not getattr(logger, '_configured', False) and not _configured:
        conf_level = get_config().getstring('general', 'loglevel', 'INFO')
        # BUG FIX: resolve the symbolic level through the public logging
        # namespace instead of the private logging._levelNames mapping
        # (which does not exist on Python 3).
        level = getattr(logging, conf_level.upper(), None)
        if not isinstance(level, int):
            level = logging.INFO
        logger.setLevel(level)
        fh = logging.handlers.WatchedFileHandler(
            get_config().getstring('general', 'log-location',
                                   '/var/log/opennode-tui.log'))
        format_str = ('%(asctime)s %(levelname)7s %(module)10s:%(lineno)s:'
                      '%(funcName)s - %(message)s')
        fhformatter = logging.Formatter(format_str)
        fh.setFormatter(fhformatter)
        logger.addHandler(fh)
        # Plain messages at INFO level go to stdout...
        sh = logging.StreamHandler(sys.stdout)
        sh.setLevel(logging.INFO)
        sh.setFormatter(logging.Formatter('%(message)s'))
        logger.addHandler(sh)
        # ...while ERROR and above also go to stderr with location info.
        sherr = logging.StreamHandler()
        sherr.setLevel(logging.ERROR)
        sherr.setFormatter(logging.Formatter(
            '%(module)10s:%(lineno)s:%(funcName)s - %(message)s'))
        logger.addHandler(sherr)
        _configured = True
def _get_unpacked_base(vm_type):
    """Return the 'unpacked' template directory for the given vm type.

    @param vm_type: 'openvz' or 'kvm'
    """
    cfg = get_config()
    return os.path.join(cfg.getstring('general', 'storage-endpoint'),
                        cfg.getstring('general', 'default-storage-pool'),
                        vm_type, 'unpacked')
def autodetected_backends():
    """Detect available virtualization backends, persist and return them."""
    detected = []
    for device, backend in (('/dev/vzctl', 'openvz:///system'),
                            ('/dev/kvm', 'qemu:///system')):
        if os.path.exists(device):
            detected.append(backend)
    get_config().setvalue('general', 'backends', ','.join(detected))
    return detected
def autodetected_backends():
    """Detect available virtualization backends, persist and return them."""
    probes = {'/dev/vzctl': 'openvz:///system', '/dev/kvm': 'qemu:///system'}
    auto = [uri for dev, uri in (('/dev/vzctl', probes['/dev/vzctl']),
                                 ('/dev/kvm', probes['/dev/kvm']))
            if os.path.exists(dev)]
    get_config().setvalue('general', 'backends', ','.join(auto))
    return auto
def autodetected_backends():
    """Probe for OpenVZ/KVM device nodes and record the usable backends."""
    found = []
    if os.path.exists('/dev/vzctl'):
        found.append('openvz:///system')
    if os.path.exists('/dev/kvm'):
        found.append('qemu:///system')
    backends_csv = ','.join(found)
    get_config().setvalue('general', 'backends', backends_csv)
    return found
def set_templates_sync_list(tasks, sync_tasks_fnm=None):
    """Persist a new template synchronisation task list.

    Should be handled with care, as some retrieval might be in progress.

    @param tasks: picklable object describing the sync tasks
    @param sync_tasks_fnm: target file; defaults to the configured
        'general/sync_task_list' location
    """
    if not sync_tasks_fnm:
        sync_tasks_fnm = get_config().getstring('general', 'sync_task_list')
    # BUG FIX: pickle streams are binary data; open in binary mode so the
    # dump is safe for any pickle protocol (and portable to Python 3).
    with open(sync_tasks_fnm, 'wb') as tf:
        pickle.dump(tasks, tf)
def generate_ubc_config(settings):
    """Generate the UBC (user beancounter) section of a VZ container config."""
    config = get_config("openvz")
    st = settings
    inodes = int(config.getstring("ubc-defaults", "DEFAULT_INODES"))
    disk = float(st["disk"])
    disk_hard = _compute_diskspace_hard_limit(disk)
    ubc_params = {
        "physpages_limit": st["memory"],
        "swappages_limit": st["swap"],
        "diskspace_soft": st["disk"],
        "diskspace_hard": disk_hard,
        "diskinodes_soft": disk * inodes,
        "diskinodes_hard": round(disk_hard * inodes),
        "quotatime": config.getstring("ubc-defaults", "DEFAULT_QUOTATIME"),
        "cpus": st["vcpu"],
        "cpulimit": int(st["vcpulimit"]) * int(st["vcpu"]),
        'cpuunits': config.getstring("ubc-defaults", "DEFAULT_CPUUNITS"),
    }
    # Drop redundant fractional zeros where possible (e.g. 5.0 -> 5).
    normalized = {}
    for key, val in ubc_params.items():
        as_float = float(val)
        normalized[key] = int(as_float) if as_float.is_integer() else val
    normalized['time'] = datetime.datetime.today().ctime()
    return openvz_template % normalized
def import_template(template, vm_type, storage_pool=None):
    """Import an external template archive into an ON storage pool.

    @param template: path to a .tar or .ova template archive
    @param vm_type: 'openvz' or 'kvm'
    @param storage_pool: target pool; defaults to the configured default pool
    @raise RuntimeError: if the file is missing or has an unexpected extension
    """
    config = get_config()
    if not storage_pool:
        storage_pool = config.getstring('general', 'default-storage-pool')
    if not os.path.exists(template):
        raise RuntimeError("Template not found: %s" % template)
    # BUG FIX: the old test `not a or b` rejected every .ova file due to
    # operator precedence; both extensions must be accepted.
    if not template.endswith(('tar', 'ova')):
        raise RuntimeError("Expecting a file ending with .tar or .ova for a template")
    tmpl_name = os.path.basename(template)
    target_file = os.path.join(storage.get_pool_path(storage_pool),
                               vm_type, tmpl_name)
    log.info("Copying template to the storage pool... %s -> %s"
             % (template, target_file))
    unfinished_local = "%s.unfinished" % tmpl_name
    shutil.copyfile(template, unfinished_local)
    calculate_hash(unfinished_local)
    log.info("Unpacking template %s..." % tmpl_name)
    extension = 'ova' if template.endswith('ova') else 'tar'
    # BUG FIX: rstrip() strips a *character set*, not a suffix, so names
    # ending in any of the letters '.tarova' were mangled; slice instead.
    unpack_template(storage_pool, vm_type,
                    tmpl_name[:-len('.%s' % extension)])
    os.rename(unfinished_local, target_file)
    # BUG FIX: the old format string had one placeholder but two arguments,
    # which raised TypeError before the hash file could be moved.
    os.rename(unfinished_local + '.pfff', '%s.pfff' % target_file)
def generate_ubc_config(settings):
    """Render the UBC resource-limit section for an OpenVZ container."""
    config = get_config("openvz")
    st = settings
    default_inodes = int(config.getstring("ubc-defaults", "DEFAULT_INODES"))
    disk_soft = float(st["disk"])
    disk_hard = _compute_diskspace_hard_limit(disk_soft)
    params = {
        "physpages_limit": st["memory"],
        "swappages_limit": st["swap"],
        "diskspace_soft": st["disk"],
        "diskspace_hard": disk_hard,
        "diskinodes_soft": disk_soft * default_inodes,
        "diskinodes_hard": round(disk_hard * default_inodes),
        "quotatime": config.getstring("ubc-defaults", "DEFAULT_QUOTATIME"),
        "cpus": st["vcpu"],
        "cpulimit": int(st["vcpulimit"]) * int(st["vcpu"]),
        'cpuunits': config.getstring("ubc-defaults", "DEFAULT_CPUUNITS"),
    }
    # Get rid of zeros where necessary (e.g. 5.0 -> 5).
    params = dict((k, int(float(v)) if float(v).is_integer() else v)
                  for k, v in params.items())
    params['time'] = datetime.datetime.today().ctime()
    return openvz_template % params
def _deploy_vm(vm_parameters, logger=None):
    """Deploy a VM described by *vm_parameters* into the default storage pool.

    @param vm_parameters: dict with at least 'vm_type' and 'template_name';
        optional 'disk' (GB), 'uuid' and other template overrides
    @param logger: optional callable used to report user-facing errors
    @raise Exception: when no pool/template is configured, parameters fail
        sanity checks, or setting adjustment reports errors
    """
    from opennode.cli import actions
    storage_pool = actions.storage.get_default_pool()
    if storage_pool is None:
        raise Exception("Storage pool not defined")
    assert type(vm_parameters) is dict, \
        'Parameters must be a dict: %s' % vm_parameters
    vm_type = vm_parameters['vm_type']
    template = vm_parameters['template_name']
    # Sanity check: diskspace is expected in GB, not MB.
    if 'disk' in vm_parameters:
        assert float(vm_parameters['disk']) > 1 and \
            float(vm_parameters['disk']) < 600, \
            'Provided disk size is strange - MB vs GB issue?'
    if not template:
        if logger:
            logger("Cannot deploy because template is '%s'" % (template))
        raise Exception("Cannot deploy because template is '%s'" % (template))
    if vm_type == 'openvz':
        uuid = vm_parameters['uuid']
        # BUG FIX: open the connection *before* the try block so a failed
        # libvirt.open() cannot trigger NameError on conn.close() in finally.
        conn = libvirt.open('openvz:///system')
        try:
            deployed_uuid_list = [ivm['uuid'] for ivm in _list_vms(conn)]
            if uuid in deployed_uuid_list:
                msg = ('Deployment failed: a VM with UUID %s is already '
                       'deployed (%s)' % (uuid, deployed_uuid_list))
                logging.error(msg)
                return
            logging.info('Deploying %s: %s', uuid, deployed_uuid_list)
        finally:
            conn.close()
    ovf_file = OvfFile(os.path.join(get_pool_path(storage_pool), vm_type,
                                    "unpacked", template + ".ovf"))
    vm = actions.vm.get_module(vm_type)
    settings = vm.get_ovf_template_settings(ovf_file)
    settings.update(vm_parameters)
    # Give each file-backed disk a unique, uuid-tagged volume file name.
    for disk in settings.get("disks", []):
        if disk["deploy_type"] == "file":
            volume_name = disk.get("source_file") or "disk"
            disk["source_file"] = '%s--%s.%s' % (
                volume_name, settings["uuid"],
                disk.get('template_format', 'qcow2'))
    if not get_config().getboolean('general', 'disable_vm_sys_adjustment',
                                   False):
        errors = vm.adjust_setting_to_systems_resources(settings)
        if errors:
            if logger:
                logger("Got %s" % (errors,))
            raise Exception("got errors %s" % (errors,))
    vm.deploy(settings, storage_pool)
def details(name):
    """Collect a description dict for network interface *name*.

    Returned keys: 'type' ('simple'/'virtual'/'bridge'), 'name', and when
    available 'members' (bridge ports), 'mac', 'ip' (CIDR) and 'primary'.
    """
    res = {'type': 'simple', 'name': name}
    if os.path.exists('/sys/class/net/' + name + '/tun_flags'):
        res['type'] = 'virtual'
    sys_bridge_path = '/sys/class/net/' + name + '/brif/'
    if os.path.exists(sys_bridge_path):
        res['type'] = 'bridge'
        res['members'] = os.listdir(sys_bridge_path)
    addrs = netifaces.ifaddresses(name)
    # FIX: use the 'in' operator instead of long-deprecated dict.has_key()
    # (removed in Python 3).
    if netifaces.AF_LINK in addrs:
        res['mac'] = addrs[netifaces.AF_LINK][0]['addr']
    if netifaces.AF_INET in addrs:
        ip = addrs[netifaces.AF_INET][0]['addr']
        mask = addrs[netifaces.AF_INET][0]['netmask']
        # Fold the dotted-quad netmask into an int, then count set bits
        # to obtain the CIDR prefix length.
        bits = 0
        for octet in mask.split('.'):
            bits = bits << 8 | int(octet)
        prefix = number_of_set_bits(bits)
        res['ip'] = '%s/%s' % (ip, prefix)
    config = get_config()
    default_name = (config.getstring('general', 'main_iface')
                    if config.has_option('general', 'main_iface')
                    else 'vmbr0')
    if name == default_name:
        res['primary'] = True
    return res
def details(name):
    """Describe network interface *name*.

    @return: dict with 'type', 'name' and, when detectable, 'members',
        'mac', 'ip' (CIDR notation) and 'primary'.
    """
    res = {'type': 'simple', 'name': name}
    if os.path.exists('/sys/class/net/' + name + '/tun_flags'):
        res['type'] = 'virtual'
    sys_bridge_path = '/sys/class/net/' + name + '/brif/'
    if os.path.exists(sys_bridge_path):
        res['type'] = 'bridge'
        res['members'] = os.listdir(sys_bridge_path)
    addrs = netifaces.ifaddresses(name)
    # FIX: dict.has_key() is deprecated/removed; membership test instead.
    if netifaces.AF_LINK in addrs:
        res['mac'] = addrs[netifaces.AF_LINK][0]['addr']
    if netifaces.AF_INET in addrs:
        ip = addrs[netifaces.AF_INET][0]['addr']
        mask = addrs[netifaces.AF_INET][0]['netmask']
        # Convert dotted-quad netmask -> popcount -> prefix length.
        mask_bits = 0
        for octet in mask.split('.'):
            mask_bits = mask_bits << 8 | int(octet)
        prefix = number_of_set_bits(mask_bits)
        res['ip'] = '%s/%s' % (ip, prefix)
    config = get_config()
    default_name = (config.getstring('general', 'main_iface')
                    if config.has_option('general', 'main_iface')
                    else 'vmbr0')
    if name == default_name:
        res['primary'] = True
    return res
def set_default_pool(name):
    """Set *name* as the default storage pool and (re)create libvirt pools."""
    config = get_config()
    if name == 'default':
        raise CommandException('Cannot set pool name to a reserved "default".')
    # Drop the stock libvirt pools unless the user customised them.
    if not is_default_pool_modified():
        for pool_name in ['default', 'default-iso']:
            try:
                execute("virsh 'pool-destroy %s'" % pool_name)
                execute("virsh 'pool-undefine %s'" % pool_name)
            except CommandException:
                # it's ok for these commands to fail if the pool is undefined
                pass
    endpoint = config.getstring('general', 'storage-endpoint')
    # Recreate the default image/iso pools under the new location.
    paths = {'default': 'images', 'default-iso': 'iso'}
    if len(name.strip()) > 0:
        for default_pool_name, item_path in paths.items():
            execute("virsh 'pool-define-as --name %s --type dir "
                    "--target %s/%s/%s'" % (default_pool_name, endpoint,
                                            name, item_path))
            execute("virsh 'pool-autostart %s'" % default_pool_name)
            execute("virsh 'pool-build %s'" % default_pool_name)
            execute("virsh 'pool-start %s'" % default_pool_name)
    # finally set a pointer in the configuration file
    config.setvalue('general', 'default-storage-pool', name)
def get_netstats():
    """Return [rx, tx] counters for the main interface from /proc/net/dev."""
    iface = get_config().getstring('general', 'main_iface')
    cmd = ("grep %s: /proc/net/dev | awk -F: '{print $2}' | "
           "awk '{print $1, $9}'" % iface)
    return [int(field) for field in execute(cmd).split(' ')]
def set_templates_sync_list(tasks, sync_tasks_fnm=None):
    """Write out a new template synchronisation task list.

    Should be handled with care, as some retrieval might be in progress.

    @param tasks: picklable object describing the sync tasks
    @param sync_tasks_fnm: target file; defaults to the configured
        'general/sync_task_list' location
    """
    if not sync_tasks_fnm:
        sync_tasks_fnm = get_config().getstring('general', 'sync_task_list')
    # BUG FIX: pickle output is binary; 'wb' is required for any pickle
    # protocol above 0 and for Python 3 compatibility.
    with open(sync_tasks_fnm, 'wb') as tf:
        pickle.dump(tasks, tf)
def set_default_pool(name):
    """Make *name* the default storage pool, rebuilding the stock pools."""
    config = get_config()
    if name == 'default':
        raise CommandException('Cannot set pool name to a reserved "default".')
    # clean up default pool
    if not is_default_pool_modified():
        for stock_pool in ('default', 'default-iso'):
            try:
                execute("virsh 'pool-destroy %s'" % stock_pool)
                execute("virsh 'pool-undefine %s'" % stock_pool)
            except CommandException:
                # it's ok for these commands to fail if the pool is undefined
                pass
    endpoint = config.getstring('general', 'storage-endpoint')
    # create default image pool
    paths = {'default': 'images', 'default-iso': 'iso'}
    if len(name.strip()) > 0:
        for pool, subdir in paths.items():
            execute("virsh 'pool-define-as --name %s --type dir "
                    "--target %s/%s/%s'" % (pool, endpoint, name, subdir))
            for verb in ('pool-autostart', 'pool-build', 'pool-start'):
                execute("virsh '%s %s'" % (verb, pool))
    # finally set a pointer in the configuration file
    config.setvalue('general', 'default-storage-pool', name)
def read_default_ovf_settings():
    """Read the default OVF configuration; return it as a settings dict."""
    config = get_config("kvm")
    settings = {
        "serial": {"type": "pty", "target_port": 0},
        "console": {"type": "pty", "target_port": 0},
        "graphics": {"type": "vnc", "port": -1, "autoport": "yes",
                     "keymap": config.getstring("vnc", "keymap")},
        "interfaces": [],
        "features": [],
        "disks": [],
    }
    settings.update(dict(config.getlist('ovf-defaults')))
    # Fall back to the stock kvm binary when the configured emulator
    # is missing from the host.
    if not os.path.exists(settings.get("emulator", "")):
        settings["emulator"] = "/usr/bin/kvm"
    return settings
def vm_diskspace(vm):
    """Return {'/': <megabytes>} of disk usage for the libvirt domain *vm*.

    OpenVZ containers are delegated to openvz.get_diskspace(); for KVM the
    sizes of all file-backed block devices are summed via helper scripts.

    NOTE(review): ``conn`` is not defined in this function — presumably a
    module-level libvirt connection; confirm it is always open here.
    """
    log = get_logger()
    if conn.getType() == 'OpenVZ':
        return {'/': openvz.get_diskspace(vm.name())}
    # return a total sum of block devices used by KVM VM
    # get list of block devices of a file type
    try:
        script_path = get_config().getstring('general', 'script_prefix',
                                             '/opt/opennode/bin/')
        cmd = os.path.join(
            script_path, 'libvirt_detect_domain_devices.sh %s' % vm.name())
        devices = execute(cmd).split('\n')
        total_bytes = 0.0
        for dev_path in devices:
            if dev_path.strip() == '-':
                continue  # simple protection against non-disk base devices
            cmd = os.path.join(
                script_path,
                'libvirt_get_device_size.sh %s %s' % (vm.name(), dev_path))
            total_bytes += int(execute(
                cmd)) / 1024.0 / 1024.0  # we want result to be in MB
    except CommandException as ce:
        # Best effort: report zero usage when device detection fails.
        log.debug('Failed diskspace detection: \'%s\'' % ce)
        total_bytes = 0.0
    except ValueError as ve:
        # Script output was not an integer.
        log.debug('Failed diskspace conversion: \'%s\'' % ve)
        total_bytes = 0.0
    return {'/': total_bytes}
def sync_oms_template(storage_pool=None):
    """Synchronize the OMS template from its configured repository."""
    config = get_config()
    pool = storage_pool or config.getstring('general', 'default-storage-pool')
    repo = config.getstring('opennode-oms-template', 'repo')
    tmpl = config.getstring('opennode-oms-template', 'template_name')
    sync_template(repo, tmpl, pool)
def get_template_list(remote_repo):
    """Retrieve the list of template names offered by *remote_repo*.

    Reads <repo-url>/templatelist.txt and returns its stripped lines.
    """
    url = get_config().getstring(remote_repo, 'url')
    url = url.rstrip('/') + '/'
    tmpl_list = urlopen(urlparse.urljoin(url, 'templatelist.txt'))
    # BUG FIX: close the response even when reading raises, so the
    # connection is never leaked.
    try:
        return [template.strip() for template in tmpl_list]
    finally:
        tmpl_list.close()
def get_template_list(remote_repo):
    """Fetch the template names published by *remote_repo*.

    Downloads <repo-url>/templatelist.txt; one template name per line.
    """
    base_url = get_config().getstring(remote_repo, 'url').rstrip('/') + '/'
    listing = urlopen(urlparse.urljoin(base_url, 'templatelist.txt'))
    # BUG FIX: guarantee the response is closed on both success and error
    # paths (the old code leaked it if iteration raised).
    try:
        return [line.strip() for line in listing]
    finally:
        listing.close()
def sync_oms_template(storage_pool=None):
    """Pull the OMS template from the configured repository into a pool."""
    config = get_config()
    if not storage_pool:
        storage_pool = config.getstring("general", "default-storage-pool")
    section = "opennode-oms-template"
    sync_template(config.getstring(section, "repo"),
                  config.getstring(section, "template_name"),
                  storage_pool)
def sync_oms_template(storage_pool=None):
    """Synchronize the OMS template into *storage_pool* (default pool if None)."""
    config = get_config()
    if not storage_pool:
        storage_pool = config.getstring('general', 'default-storage-pool')
    repo, tmpl = (config.getstring('opennode-oms-template', 'repo'),
                  config.getstring('opennode-oms-template', 'template_name'))
    sync_template(repo, tmpl, storage_pool)
def save_as_ovf(vm_settings, storage_pool):
    """ Creates ovf template archive for the specified container.
    Steps:
        - archive container directory
        - generate ovf configuration file
        - pack ovf and container archive into tar.gz file
    """
    dest_dir = path.join(get_config().getstring('general', 'storage-endpoint'),
                         storage_pool, "openvz")
    unpacked_dir = path.join(dest_dir, "unpacked")
    ct_archive_fnm = path.join(unpacked_dir,
                               "%s.tar.gz" % vm_settings["template_name"])
    # Container private area is read from /vz/private/<vm_name>.
    ct_source_dir = path.join("/vz/private", vm_settings["vm_name"])
    # Pack vm container catalog
    log = get_logger()
    msg = "Archiving VM container catalog %s. This may take a while..." % ct_source_dir
    log.info(msg)
    with closing(tarfile.open(ct_archive_fnm, "w:gz")) as tar:
        for f in os.listdir(ct_source_dir):
            tar.add(path.join(ct_source_dir, f), arcname=f)
    # Archive action scripts if they are present
    msg = "Adding action scripts..."
    log.info(msg)
    ct_scripts_fnm = path.join(unpacked_dir,
                               "%s.scripts.tar.gz" % vm_settings["template_name"])
    with closing(tarfile.open(ct_scripts_fnm, "w:gz")) as tar:
        # Each script archive member is named after its hook type.
        for script_type in ['premount', 'mount', 'start', 'stop', 'umount',
                            'postumount']:
            script_fnm = "/etc/vz/conf/%s.%s" % (vm_settings["vm_name"],
                                                 script_type)
            if os.path.exists(script_fnm):
                tar.add(script_fnm, arcname=script_type)
    # generate and save ovf configuration file
    msg = "Generating ovf file..."
    log.info(msg)
    ovf = _generate_ovf_file(vm_settings, ct_archive_fnm)
    ovf_fnm = path.join(unpacked_dir, "%s.ovf" % vm_settings["template_name"])
    with open(ovf_fnm, 'w') as f:
        ovf.writeFile(f, pretty=True, encoding='UTF-8')
    # pack container archive and ovf file
    msg = "Archiving..."
    log.info(msg)
    ovf_archive_fnm = path.join(dest_dir, "%s.ova" % vm_settings["template_name"])
    with closing(tarfile.open(ovf_archive_fnm, "w")) as tar:
        tar.add(ct_archive_fnm, arcname=path.basename(ct_archive_fnm))
        tar.add(ovf_fnm, arcname=path.basename(ovf_fnm))
        tar.add(ct_scripts_fnm, arcname=path.basename(ct_scripts_fnm))
    calculate_hash(ovf_archive_fnm)
    log.info("Done! Saved template at %s" % ovf_archive_fnm)
def get_local_templates(vm_type, storage_pool=None):
    """Return template names of *vm_type* available in the storage pool."""
    config = get_config()
    if not storage_pool:
        storage_pool = config.getstring('general', 'default-storage-pool')
    tmpl_dir = "%s/%s" % (storage.get_pool_path(storage_pool), vm_type)
    return [os.path.splitext(tmpl)[0]
            for tmpl in os.listdir(tmpl_dir)
            if tmpl.endswith(('tar', 'ova'))]
def get_template_info(template_name, vm_type, storage_pool=None):
    """Return the OVF template settings dict for an unpacked template."""
    config = get_config()
    pool = storage_pool or config.getstring("general", "default-storage-pool")
    ovf_path = os.path.join(storage.get_pool_path(pool), vm_type,
                            "unpacked", template_name + ".ovf")
    vm = vm_ops.get_module(vm_type)
    template_settings = vm.get_ovf_template_settings(OvfFile(ovf_path))
    # XXX handle modification to system params
    # errors = vm.adjust_setting_to_systems_resources(template_settings)
    return template_settings
def get_local_templates(vm_type, storage_pool=None):
    """List locally available templates of *vm_type* in the given pool."""
    config = get_config()
    pool = storage_pool or config.getstring('general', 'default-storage-pool')
    directory = "%s/%s" % (storage.get_pool_path(pool), vm_type)
    names = []
    for entry in os.listdir(directory):
        if entry.endswith('tar') or entry.endswith('ova'):
            names.append(os.path.splitext(entry)[0])
    return names
def _deploy_vm(vm_parameters, logger=None):
    """Deploy a VM from a template into the default storage pool.

    @param vm_parameters: dict with at least 'vm_type' and 'template_name';
        optional 'disk' (GB), 'uuid' and other template overrides
    @param logger: optional callable for user-facing error reporting
    @raise Exception: on missing pool/template, failed sanity checks, or
        errors from setting adjustment
    """
    from opennode.cli import actions
    storage_pool = actions.storage.get_default_pool()
    if storage_pool is None:
        raise Exception("Storage pool not defined")
    assert type(vm_parameters) is dict, \
        'Parameters must be a dict: %s' % vm_parameters
    vm_type = vm_parameters['vm_type']
    template = vm_parameters['template_name']
    # Sanity check: diskspace is expected in GB, not MB.
    if 'disk' in vm_parameters:
        assert float(vm_parameters['disk']) > 1 and \
            float(vm_parameters['disk']) < 600, \
            'Provided disk size is strange - MB vs GB issue?'
    if not template:
        if logger:
            logger("Cannot deploy because template is '%s'" % (template))
        raise Exception("Cannot deploy because template is '%s'" % (template))
    if vm_type == 'openvz':
        uuid = vm_parameters['uuid']
        # BUG FIX: acquire the connection before entering try/finally so a
        # failed libvirt.open() cannot raise NameError at conn.close().
        conn = libvirt.open('openvz:///system')
        try:
            deployed_uuid_list = [ivm['uuid'] for ivm in _list_vms(conn)]
            if uuid in deployed_uuid_list:
                msg = ('Deployment failed: a VM with UUID %s is already '
                       'deployed (%s)' % (uuid, deployed_uuid_list))
                logging.error(msg)
                return
            logging.info('Deploying %s: %s', uuid, deployed_uuid_list)
        finally:
            conn.close()
    ovf_file = OvfFile(os.path.join(get_pool_path(storage_pool), vm_type,
                                    "unpacked", template + ".ovf"))
    vm = actions.vm.get_module(vm_type)
    settings = vm.get_ovf_template_settings(ovf_file)
    settings.update(vm_parameters)
    # Give each file-backed disk a unique, uuid-tagged volume file name.
    for disk in settings.get("disks", []):
        if disk["deploy_type"] == "file":
            volume_name = disk.get("source_file") or "disk"
            disk["source_file"] = '%s--%s.%s' % (
                volume_name, settings["uuid"],
                disk.get('template_format', 'qcow2'))
    if not get_config().getboolean('general', 'disable_vm_sys_adjustment',
                                   False):
        errors = vm.adjust_setting_to_systems_resources(settings)
        if errors:
            if logger:
                logger("Got %s" % (errors,))
            raise Exception("got errors %s" % (errors,))
    vm.deploy(settings, storage_pool)
def get_template_repos():
    """Return (label, group) tuples describing configured template repos."""
    config = get_config()
    result = []
    for entry in config.getstring('general', 'repo-groups').split(','):
        group = "%s-repo" % entry.strip()
        name = config.getstring(group, 'name')
        vm_type = config.getstring(group, 'type')
        result.append(("%s (%s)" % (name, vm_type), group))
    return result
def get_template_info(template_name, vm_type, storage_pool=None):
    """Load OVF settings for *template_name* from the pool's unpacked dir."""
    config = get_config()
    if not storage_pool:
        storage_pool = config.getstring('general', 'default-storage-pool')
    ovf_file = OvfFile(os.path.join(storage.get_pool_path(storage_pool),
                                    vm_type, "unpacked",
                                    template_name + ".ovf"))
    template_settings = vm_ops.get_module(vm_type) \
                              .get_ovf_template_settings(ovf_file)
    # XXX handle modification to system params
    # errors = vm.adjust_setting_to_systems_resources(template_settings)
    return template_settings
def is_default_pool_modified():
    """Report whether the user changed the libvirt 'default' pool target."""
    try:
        config = get_config()
        xml = execute("virsh 'pool-dumpxml default'")
        defined_path = parseString(xml).getElementsByTagName(
            'path')[0].lastChild.nodeValue
        # XXX: This will remain as-is right now.
        expected = os.path.join(
            config.getstring('general', 'storage-endpoint'),
            config.getstring('general', 'default-storage-pool'),
            'images')
        return str(defined_path) != expected
    except CommandException:
        # Pool is undefined or we are not sure -> assume it's all good.
        return False
def delete_pool(pool_name):
    """Delete a storage pool (libvirt definition and backing folder).

    @raise Exception: when the pool is protected or any step fails
    """
    try:
        config = get_config()
        if get_pool_path(pool_name) == '/storage/local':
            raise Exception('/storage/local can not be deleted')
        execute("virsh 'pool-destroy %s'" % pool_name)
        execute("virsh 'pool-undefine %s'" % pool_name)
        del_folder(get_pool_path(pool_name))
        # Reset the default-pool pointer when we just removed that pool.
        if pool_name == config.getstring('general', 'default-storage-pool'):
            set_default_pool('')
    # FIX: 'as' form replaces the Python-2-only comma exception syntax.
    except Exception as e:
        raise Exception("Failed to delete pool %s: %s" % (pool_name, e))
def delete_pool(pool_name):
    """Destroy, undefine and remove the folder of a storage pool.

    @raise Exception: wrapping any underlying failure (or the protected
        '/storage/local' pool)
    """
    try:
        config = get_config()
        if get_pool_path(pool_name) == '/storage/local':
            raise Exception('/storage/local can not be deleted')
        execute("virsh 'pool-destroy %s'" % pool_name)
        execute("virsh 'pool-undefine %s'" % pool_name)
        del_folder(get_pool_path(pool_name))
        # Clear the default-pool setting if it pointed at this pool.
        if pool_name == config.getstring('general', 'default-storage-pool'):
            set_default_pool('')
    # FIX: Python-3-compatible exception binding (was `Exception, e`).
    except Exception as e:
        raise Exception("Failed to delete pool %s: %s" % (pool_name, e))
def save_as_ovf(vm_settings, storage_pool):
    """ Creates ovf template archive for the specified container.
    Steps:
        - archive container directory
        - generate ovf configuration file
        - pack ovf and container archive into tar.gz file
    """
    dest_dir = path.join(get_config().getstring('general', 'storage-endpoint'),
                         storage_pool, "openvz")
    unpacked_dir = path.join(dest_dir, "unpacked")
    ct_archive_fnm = path.join(unpacked_dir,
                               "%s.tar.gz" % vm_settings["template_name"])
    # Container private area is read from /vz/private/<vm_name>.
    ct_source_dir = path.join("/vz/private", vm_settings["vm_name"])
    # Pack vm container catalog
    log = get_logger()
    msg = "Archiving VM container catalog %s. This may take a while..." % ct_source_dir
    log.info(msg)
    with closing(tarfile.open(ct_archive_fnm, "w:gz")) as tar:
        for f in os.listdir(ct_source_dir):
            tar.add(path.join(ct_source_dir, f), arcname=f)
    # Archive action scripts if they are present
    msg = "Adding action scripts..."
    log.info(msg)
    ct_scripts_fnm = path.join(unpacked_dir,
                               "%s.scripts.tar.gz" % vm_settings["template_name"])
    with closing(tarfile.open(ct_scripts_fnm, "w:gz")) as tar:
        # Each script archive member is named after its hook type.
        for script_type in ['premount', 'mount', 'start', 'stop', 'umount',
                            'postumount']:
            script_fnm = "/etc/vz/conf/%s.%s" % (vm_settings["vm_name"],
                                                 script_type)
            if os.path.exists(script_fnm):
                tar.add(script_fnm, arcname=script_type)
    # generate and save ovf configuration file
    msg = "Generating ovf file..."
    log.info(msg)
    ovf = _generate_ovf_file(vm_settings, ct_archive_fnm)
    ovf_fnm = path.join(unpacked_dir, "%s.ovf" % vm_settings["template_name"])
    with open(ovf_fnm, 'w') as f:
        ovf.writeFile(f, pretty=True, encoding='UTF-8')
    # pack container archive and ovf file
    msg = "Archiving..."
    log.info(msg)
    ovf_archive_fnm = path.join(dest_dir, "%s.ova" % vm_settings["template_name"])
    with closing(tarfile.open(ovf_archive_fnm, "w")) as tar:
        tar.add(ct_archive_fnm, arcname=path.basename(ct_archive_fnm))
        tar.add(ovf_fnm, arcname=path.basename(ovf_fnm))
        tar.add(ct_scripts_fnm, arcname=path.basename(ct_scripts_fnm))
    calculate_hash(ovf_archive_fnm)
    log.info("Done! Saved template at %s" % ovf_archive_fnm)
def save_as_ovf(vm_settings, storage_pool, unpack=True):
    """ Creates ovf template archive for the specified VM.
    Steps:
        - relocate kvm disk files
        - generate ovf configuration file
        - pack ovf and disk files into tar.gz file
        - (if unpack) leave generated files as unpacked
    """
    config = get_config()
    log = get_logger()
    # Templates live under <storage-endpoint>/<pool>/kvm[/unpacked].
    target_dir = path.join(config.getstring('general', 'storage-endpoint'),
                           storage_pool, "kvm")
    if unpack:
        target_dir = path.join(target_dir, 'unpacked')
    # prepare file system
    msg = "Preparing disks... (This may take a while)"
    log.info(msg)
    vm_settings["disks"] = _prepare_disks(vm_settings, target_dir)
    # generate and save ovf configuration file
    msg = "Generating ovf file..."
    log.info(msg)
    ovf = _generate_ovf_file(vm_settings)
    ovf_fnm = path.join(target_dir, "%s.ovf" % vm_settings["template_name"])
    with open(ovf_fnm, 'w') as f:
        ovf.writeFile(f, pretty=True, encoding='UTF-8')
    # pack container archive and ovf file
    msg = "Archiving..."
    log.info(msg)
    arch_location = path.join(config.getstring('general', 'storage-endpoint'),
                              storage_pool, "kvm")
    ovf_archive_fnm = path.join(arch_location,
                                "%s.ova" % vm_settings["template_name"])
    with closing(tarfile.open(ovf_archive_fnm, "w")) as tar:
        tar.add(ovf_fnm, arcname=path.basename(ovf_fnm))
        for disk in vm_settings["disks"]:
            tar.add(disk["new_path"], arcname=path.basename(disk["new_path"]))
    # remove generated files
    if not unpack:
        os.remove(ovf_fnm)
        for disk in vm_settings["disks"]:
            os.remove(disk["new_path"])
    calculate_hash(ovf_archive_fnm)
    msg = "Done! Template saved at %s" % ovf_archive_fnm
    log.info(msg)
def is_default_pool_modified():
    """Check if there were any modifications done by a user to the
    default pool."""
    try:
        config = get_config()
        pool_xml = execute("virsh 'pool-dumpxml default'")
        path_node = parseString(pool_xml).getElementsByTagName('path')[0]
        defined_path = path_node.lastChild.nodeValue
        # XXX: This will remain as-is right now.
        current_path = os.path.join(
            config.getstring('general', 'storage-endpoint'),
            config.getstring('general', 'default-storage-pool'),
            'images')
        return str(defined_path) != current_path
    except CommandException:
        # pool is undefined or we are not sure -> so, assume it's all good
        return False
def prepare_file_system(settings, storage_pool):
    """ Prepare file system for VM template creation in OVF appliance format:
        - create template directory if it does not exist
        - copy disk based images
        - convert block device based images to file based images
    """
    config = get_config()
    log = get_logger()
    images_dir = path.join(config.getstring("general", "storage-endpoint"),
                           storage_pool, "images")
    target_dir = path.join(config.getstring("general", "storage-endpoint"),
                           storage_pool, "kvm", "unpacked")
    for disk_index, disk in enumerate(settings.get("disks", [])):
        disk_template_path = path.join(target_dir, disk["template_name"])
        if disk["deploy_type"] == "file":
            # Deployed volume gets a unique <hostname>-<uuid>-diskN.<fmt> name.
            volume_name = "disk%s" % disk_index
            disk["source_file"] = '%s-%s-%s.%s' % (
                settings["hostname"], settings["uuid"], volume_name,
                disk.get('template_format', 'qcow2'))
            disk_deploy_path = path.join(images_dir, disk["source_file"])
            shutil.copy2(disk_template_path, disk_deploy_path)
            # resize disk to match the requested
            # XXX we assume that the size was already adjusted to the
            # template requirements
            diskspace = settings.get('disk')
            if diskspace:
                # it's str by default; 'string' > int is always true (LEV-116)
                diskspace = int(float(diskspace))
                # get the disk size
                current_size = int(
                    execute(
                        "qemu-img info %s |grep 'virtual size' |awk '{print $4}' |cut -b2- "
                        % disk_deploy_path)) / 1024 / 1024 / 1024  # to get to GB
                if diskspace > current_size:
                    log.info('Increasing image file %s from %s to %sG' %
                             (disk_deploy_path, current_size, diskspace))
                    execute("qemu-img resize %s %sG" %
                            (disk_deploy_path, diskspace))
                else:
                    log.info(
                        'Ignoring disk (%s) increase request (to %s) as existing image is already larger (%s)'
                        % (disk_deploy_path, diskspace, current_size))
        elif disk["deploy_type"] in ["physical", "lvm"]:
            # Block-device backed disks are converted in place to raw.
            disk_deploy_path = disk["source_dev"]
            execute("qemu-img convert -f qcow2 -O raw %s %s" %
                    (disk_template_path, disk_deploy_path))
def setup_scripts(vm_settings, storage_pool):
    """Install the CT action scripts shipped with a template, if any.

    Extracts <template>.scripts.tar.gz from the pool's unpacked directory
    into /etc/vz/conf/<vm_id>.<script_type> files and marks them executable.
    Templates without a script archive are tolerated (warning only).
    """
    dest_dir = path.join(get_config().getstring('general', 'storage-endpoint'),
                         storage_pool, "openvz")
    unpacked_dir = path.join(dest_dir, "unpacked")
    ct_scripts_fnm = path.join(
        unpacked_dir, "%s.scripts.tar.gz" % vm_settings["template_name"])
    dest_folder = '/etc/vz/conf/%s' % vm_settings['vm_id']
    try:
        with closing(tarfile.open(ct_scripts_fnm, "r:gz")) as tar:
            for member in tar:
                target_script = '%s.%s' % (dest_folder, member.name)
                # BUG FIX: close the target file deterministically instead
                # of leaking the handle returned by a bare open().
                with open(target_script, 'w') as script_file:
                    shutil.copyfileobj(tar.extractfile(member), script_file)
                # 0o755: Python-3-compatible octal literal (was 0755).
                os.chmod(target_script, 0o755)
    except Exception:
        # Narrowed from a bare except so Ctrl-C is no longer swallowed;
        # missing/invalid archives remain a best-effort no-op.
        get_logger().warn("No action scripts in the template.")
def get_oms_server():
    """Return (host, port) of the OMS server from the salt minion config.

    Falls back to /etc/salt/minion, then to ('localhost', 4506).
    """
    fallback = ('localhost', 4506)
    minion_conf_file = get_config().getstring('general', 'salt-minion-conf')
    if not os.path.exists(minion_conf_file):
        minion_conf_file = '/etc/salt/minion'
        if not os.path.exists(minion_conf_file):
            return fallback
    with open(minion_conf_file, 'r') as minion_conf:
        minion_config = yaml.safe_load(minion_conf.read())
    if minion_config is None:
        return fallback
    return (minion_config.get('master', 'localhost'),
            minion_config.get('master_port', 4506))
def prepare_storage_pool(storage_pool=None, check_libvirt=True):
    """Ensure *storage_pool* has the expected on-disk folder structure.

    @param storage_pool: pool name; defaults to the *current* default pool.
        (BUG FIX: the old signature used ``storage_pool=get_default_pool()``,
        which is evaluated once at import time and freezes whatever pool
        happened to be default when the module was loaded.)
    @param check_libvirt: also make sure the pool is defined in libvirt
    """
    if storage_pool is None:
        storage_pool = get_default_pool()
    # create structure
    storage_pool_path = "%s/%s" % (
        get_config().getstring('general', 'storage-endpoint'), storage_pool)
    mkdir_p("%s/iso/" % storage_pool_path)
    mkdir_p("%s/images/" % storage_pool_path)
    mkdir_p("%s/openvz/unpacked" % storage_pool_path)
    mkdir_p("%s/kvm/unpacked" % storage_pool_path)
    # assure that the storage is created also in libvirt
    if check_libvirt:
        try:
            get_pool_path(storage_pool)
        # FIX: Python-3-compatible except syntax (was `CommandException, e`).
        except CommandException:
            # BUG FIX: actually fill the '%s' placeholder that was left
            # unformatted in the original warning message.
            get_logger().warning(
                'Default pool %s was missing from libvirt. Adding...',
                storage_pool)
            add_pool(storage_pool)
def get_oms_server():
    """Read the OMS server address and port from the salt minion config.

    Tries the configured minion file first, then /etc/salt/minion, and
    defaults to ('localhost', 4506) when neither exists or is empty.
    """
    default_host, default_port = "localhost", 4506
    minion_conf_file = get_config().getstring("general", "salt-minion-conf")
    if not os.path.exists(minion_conf_file):
        minion_conf_file = "/etc/salt/minion"
        if not os.path.exists(minion_conf_file):
            return (default_host, default_port)
    with open(minion_conf_file, "r") as minion_conf:
        minion_config = yaml.safe_load(minion_conf.read())
    if minion_config is None:
        return (default_host, default_port)
    return (minion_config.get("master", default_host),
            minion_config.get("master_port", default_port))
def get_oms_server():
    """Locate the OMS server via the salt minion configuration file.

    @return: (host, port) tuple; ('localhost', 4506) when no config exists
    """
    conf_path = get_config().getstring('general', 'salt-minion-conf')
    if not os.path.exists(conf_path):
        conf_path = '/etc/salt/minion'
    if not os.path.exists(conf_path):
        return ('localhost', 4506)
    with open(conf_path, 'r') as fh:
        parsed = yaml.safe_load(fh.read())
    if parsed is None:
        return ('localhost', 4506)
    host = parsed.get('master', 'localhost')
    port = parsed.get('master_port', 4506)
    return (host, port)
def save_as_ovf(vm_settings, storage_pool, unpack=True):
    """
    Creates ovf template archive for the specified VM. Steps:
        - relocate kvm disk files
        - generate ovf configuration file
        - pack ovf and disk files into tar.gz file
        - (if unpack) leave generated files as unpacked

    @param vm_settings: dict describing the VM; must contain at least
        'template_name'; 'disks' is filled in by _prepare_disks
    @param storage_pool: name of the storage pool to save the template into
    @param unpack: when True, keep the generated .ovf and disk files next
        to the .ova archive (in the 'unpacked' subfolder); when False,
        remove them after archiving
    """
    config = get_config()
    log = get_logger()
    target_dir = path.join(config.getstring('general', 'storage-endpoint'),
                           storage_pool, "kvm")
    if unpack:
        target_dir = path.join(target_dir, 'unpacked')
    # prepare file system: relocate the disk images into the target folder
    msg = "Preparing disks... (This may take a while)"
    log.info(msg)
    vm_settings["disks"] = _prepare_disks(vm_settings, target_dir)
    # generate and save ovf configuration file
    msg = "Generating ovf file..."
    log.info(msg)
    ovf = _generate_ovf_file(vm_settings)
    ovf_fnm = path.join(target_dir, "%s.ovf" % vm_settings["template_name"])
    with open(ovf_fnm, 'w') as f:
        ovf.writeFile(f, pretty=True, encoding='UTF-8')
    # pack container archive and ovf file; the .ova always lands in the
    # pool's kvm folder, regardless of whether unpack is set
    msg = "Archiving..."
    log.info(msg)
    arch_location = path.join(config.getstring('general', 'storage-endpoint'),
                              storage_pool, "kvm")
    ovf_archive_fnm = path.join(arch_location,
                                "%s.ova" % vm_settings["template_name"])
    with closing(tarfile.open(ovf_archive_fnm, "w")) as tar:
        tar.add(ovf_fnm, arcname=path.basename(ovf_fnm))
        for disk in vm_settings["disks"]:
            tar.add(disk["new_path"], arcname=path.basename(disk["new_path"]))
    # remove generated files when the caller does not want them unpacked
    if not unpack:
        os.remove(ovf_fnm)
        for disk in vm_settings["disks"]:
            os.remove(disk["new_path"])
    # write the companion hash file for the new archive
    calculate_hash(ovf_archive_fnm)
    msg = "Done! Template saved at %s" % ovf_archive_fnm
    log.info(msg)
def get_template_repos():
    """Return a list of formatted strings describing configured repositories"""
    config = get_config()
    groups = [r.strip() for r in
              config.getstring('general', 'repo-groups').split(',')]
    # XXX: Get autodetexted backends from config. If host has no kvm
    # capability then don't display KVM repo for template download.
    kvm_available = 'qemu:///system' in \
        config.getstring('general', 'backends').split(',')
    entries = []
    for repo in groups:
        section = "%s-repo" % repo
        repo_name = config.getstring(section, 'name')
        repo_type = config.getstring(section, 'type')
        if 'kvm' in repo_type and not kvm_available:
            continue
        entries.append(("%s (%s)" % (repo_name, repo_type), section))
    return entries
def prepare_storage_pool(storage_pool=None, check_libvirt=True):
    """Assure that the storage pool has the correct folder structure.

    @param storage_pool: pool name; defaults to the configured default pool.
        (was: ``get_default_pool()`` evaluated once at import time, so later
        config changes were ignored; now resolved at call time)
    @param check_libvirt: when True, also make sure libvirt knows the pool
    """
    if storage_pool is None:
        storage_pool = get_default_pool()
    # create structure
    storage_pool_path = "%s/%s" % (get_config().getstring(
        'general', 'storage-endpoint'), storage_pool)
    mkdir_p("%s/iso/" % storage_pool_path)
    mkdir_p("%s/images/" % storage_pool_path)
    mkdir_p("%s/openvz/unpacked" % storage_pool_path)
    mkdir_p("%s/kvm/unpacked" % storage_pool_path)
    # assure that the storage is created also in libvirt
    if check_libvirt:
        try:
            get_pool_path(storage_pool)
        except CommandException:
            # was: '%s' placeholder with no argument, so the literal '%s'
            # ended up in the log; also py2-only `except X, e` syntax
            get_logger().warning(
                'Default pool %s was missing from libvirt. Adding...',
                storage_pool)
            add_pool(storage_pool)
def get_template_repos():
    """Return a list of formatted strings describing configured repositories"""
    config = get_config()
    # XXX: Get autodetexted backends from config. If host has no kvm
    # capability then don't display KVM repo for template download.
    backends = config.getstring("general", "backends").split(",")
    has_kvm = "qemu:///system" in backends
    result = []
    for raw_group in config.getstring("general", "repo-groups").split(","):
        group = "%s-repo" % raw_group.strip()
        name = config.getstring(group, "name")
        vm_type = config.getstring(group, "type")
        if has_kvm or "kvm" not in vm_type:
            result.append(("%s (%s)" % (name, vm_type), group))
    return result
def link_template(storage_pool, tmpl_name, overwrite=True):
    """Symlink an OpenVZ template into the location expected by vzctl."""
    # OpenVZ templates are distributed as tarballs, so 'name' and
    # 'name.tar.gz' are used in a mixed way; normalize to the latter.
    config = get_config()
    if not tmpl_name.endswith('.tar.gz'):
        tmpl_name += '.tar.gz'
    src = os.path.join(get_pool_path(storage_pool), 'openvz', 'unpacked',
                       tmpl_name)
    dst = os.path.join(config.getstring('general', 'openvz-templates'),
                       tmpl_name)
    if overwrite:
        try:
            os.unlink(dst)
        except OSError as exc:
            # a missing destination is fine; anything else is a real error
            if exc.errno != errno.ENOENT:
                raise
    if not os.path.exists(dst):
        os.symlink(src, dst)
def sync_templates_list(sync_tasks_fnm=None):
    """Sync a list of templates defined in a file.

    After synchronizing a template, removes it from the list and persists
    the shrunken list, so progress survives interruption. The task file is
    deleted once the list is empty.

    NB: multiple copies of this function should not be run against the
    same task list file (sync_storage_pool enforces a single sync process
    per task file).

    @param sync_tasks_fnm: path of the task list file; defaults to the
        'sync_task_list' entry of the general configuration section
    """
    if not sync_tasks_fnm:
        sync_tasks_fnm = get_config().getstring('general', 'sync_task_list')
    if os.path.exists(sync_tasks_fnm):
        tasks = get_templates_sync_list(sync_tasks_fnm)
        while tasks:
            # this doesn't make sense the first time, but for resilience we
            # reread the list each time a template was downloaded
            tasks = get_templates_sync_list(sync_tasks_fnm)
            template, storage_pool, remote_repo = tasks[0]
            # XXX a separate download hook for dumping progress to a file?
            sync_template(remote_repo, template, storage_pool)
            # drop the completed task and persist the remainder
            del tasks[0]
            set_templates_sync_list(tasks, sync_tasks_fnm)
        os.unlink(sync_tasks_fnm)
def sync_templates_list(sync_tasks_fnm=None):
    """Process the on-disk template synchronization task list.

    Each synchronized template is removed from the list, which is written
    back after every step; the file itself is deleted once the list is
    empty.
    """
    if not sync_tasks_fnm:
        sync_tasks_fnm = get_config().getstring('general', 'sync_task_list')
    if not os.path.exists(sync_tasks_fnm):
        return
    pending = get_templates_sync_list(sync_tasks_fnm)
    while pending:
        # reread the list each round so externally added tasks are seen
        pending = get_templates_sync_list(sync_tasks_fnm)
        template, storage_pool, remote_repo = pending.pop(0)
        # XXX a separate download hook for dumping progress to a file?
        sync_template(remote_repo, template, storage_pool)
        set_templates_sync_list(pending, sync_tasks_fnm)
    os.unlink(sync_tasks_fnm)
def add_pool(pool_name, careful=True):
    """Create a new storage pool both on disk and in libvirt.

    @param pool_name: name of the new pool; spaces are replaced with
        underscores before any paths are built
    @param careful: when True, silently skip creation (with a warning) if a
        pool with the same name already exists
    """
    # was: filter(lambda ...) — any() expresses the membership test directly
    if careful and any(p[0] == pool_name for p in list_pools()):
        get_logger().warning("Pool '%s' already exists." % pool_name)
        return
    try:
        pool_name = re.sub(" ", "_", pool_name)  # safety measure
        pool_path = os.path.join(
            get_config().getstring('general', 'storage-endpoint'), pool_name)
        mkdir_p(pool_path)
        prepare_storage_pool(pool_name, check_libvirt=False)
        execute("virsh 'pool-define-as %s dir --target %s'"
                % (pool_name, pool_path))
        execute("virsh 'pool-start %s'" % pool_name)
        execute("virsh 'pool-autostart %s'" % pool_name)
    except Exception as e:  # was: py2-only `except Exception, e` syntax
        get_logger().error("Failed to create a new pool: %s" % e)
def read_default_ovf_settings():
    """Read default ovf configuration from file and return it as a dict."""
    config = get_config("kvm")
    defaults = {
        "serial": {"type": "pty", "target_port": 0},
        "console": {"type": "pty", "target_port": 0},
        "graphics": {"type": "vnc",
                     "port": -1,
                     "autoport": "yes",
                     "keymap": config.getstring("vnc", "keymap")},
        "interfaces": [],
        "features": [],
        "disks": [],
    }
    # layer the configured ovf defaults on top of the built-in ones
    for key, value in dict(config.getlist('ovf-defaults')).items():
        defaults[key] = value
    # fall back to the stock kvm binary if the configured emulator is absent
    if not os.path.exists(defaults.get("emulator", "")):
        defaults["emulator"] = "/usr/bin/kvm"
    return defaults
def link_template(storage_pool, tmpl_name, overwrite=True):
    """Make an OpenVZ template visible to vzctl via a symlink."""
    config = get_config()
    # Templates travel as tarballs, so both 'name' and 'name.tar.gz' show
    # up in practice; normalize to the tarball form.
    full_name = (tmpl_name if tmpl_name.endswith('.tar.gz')
                 else tmpl_name + '.tar.gz')
    source_file = os.path.join(get_pool_path(storage_pool), 'openvz',
                               'unpacked', full_name)
    dest_file = os.path.join(config.getstring('general', 'openvz-templates'),
                             full_name)
    if overwrite:
        try:
            os.unlink(dest_file)
        except OSError as exc:
            if exc.errno != errno.ENOENT:
                # only "file not found" is expected here
                raise
    if not os.path.exists(dest_file):
        os.symlink(source_file, dest_file)
def setup_scripts(vm_settings, storage_pool):
    """Install per-container action scripts shipped with the template.

    Extracts <template_name>.scripts.tar.gz from the storage pool and copies
    every member to /etc/vz/conf/<vm_id>.<script_name>, marked executable.
    A missing or unreadable scripts archive is tolerated (templates without
    action scripts are valid) and only logged.
    """
    dest_dir = path.join(get_config().getstring('general', 'storage-endpoint'),
                         storage_pool, "openvz")
    unpacked_dir = path.join(dest_dir, "unpacked")
    ct_scripts_fnm = path.join(
        unpacked_dir, "%s.scripts.tar.gz" % vm_settings["template_name"])
    dest_folder = '/etc/vz/conf/%s' % vm_settings['vm_id']
    try:
        with closing(tarfile.open(ct_scripts_fnm, "r:gz")) as tar:
            for member in tar:
                target_script = '%s.%s' % (dest_folder, member.name)
                # was: the target file handle was never closed
                with open(target_script, 'w') as target:
                    shutil.copyfileobj(tar.extractfile(member), target)
                os.chmod(target_script, 0o755)  # was 0755 (py2-only literal)
    except Exception:
        # was a bare `except:`, which also swallowed SystemExit and
        # KeyboardInterrupt; keep the best-effort semantics otherwise
        get_logger().warn("No action scripts in the template.")
def set_oms_server(server, port=4506):
    """Write OMS server address and port to the configuration file"""
    conf_path = get_config().getstring('general', 'salt-minion-conf')
    if not os.path.exists(conf_path):
        conf_path = '/etc/salt/minion'
    if not os.path.exists(conf_path):
        return
    with open(conf_path, 'r') as source:
        settings = yaml.safe_load(source.read())
    if settings is None:
        settings = {}
    settings['master'] = server
    settings['master_port'] = port
    settings['dns_retry'] = 0  # TUI-47
    with open(conf_path, 'w') as sink:
        yaml.dump(settings, sink, default_flow_style=False)
def add_pool(pool_name, careful=True):
    """Create a new storage pool both on disk and in libvirt.

    @param pool_name: name of the new pool; spaces are replaced with
        underscores before any paths are built
    @param careful: when True, silently skip creation (with a warning) if a
        pool with the same name already exists
    """
    # was: filter(lambda ...) — any() expresses the membership test directly
    if careful and any(p[0] == pool_name for p in list_pools()):
        get_logger().warning("Pool '%s' already exists." % pool_name)
        return
    try:
        pool_name = re.sub(" ", "_", pool_name)  # safety measure
        pool_path = os.path.join(
            get_config().getstring('general', 'storage-endpoint'), pool_name)
        mkdir_p(pool_path)
        prepare_storage_pool(pool_name, check_libvirt=False)
        execute("virsh 'pool-define-as %s dir --target %s'"
                % (pool_name, pool_path))
        execute("virsh 'pool-start %s'" % pool_name)
        execute("virsh 'pool-autostart %s'" % pool_name)
    except Exception as e:  # was: py2-only `except Exception, e` syntax
        get_logger().error("Failed to create a new pool: %s" % e)
def set_oms_server(server, port=4506):
    """Write OMS server address and port to the configuration file"""
    minion_conf_file = get_config().getstring('general', 'salt-minion-conf')
    if not os.path.exists(minion_conf_file):
        # fall back to the stock salt minion config location
        minion_conf_file = '/etc/salt/minion'
        if not os.path.exists(minion_conf_file):
            return
    with open(minion_conf_file, 'r') as minion_conf:
        minion_config = yaml.safe_load(minion_conf.read()) \
            if os.path.getsize(minion_conf_file) else None
        if minion_config is None:
            minion_config = {}
    minion_config.update({'master': server,
                          'master_port': port,
                          'dns_retry': 0})  # dns_retry: TUI-47
    with open(minion_conf_file, 'w') as conf:
        yaml.dump(minion_config, conf, default_flow_style=False)
def sync_storage_pool(storage_pool, remote_repo, templates,
                      sync_tasks_fnm=None, force=False, screen=True):
    """Synchronize selected storage pool with the remote repo.

    Only selected templates will be persisted, all of the other templates
    shall be purged. Ignores purely local templates - templates with no
    matching name in remote repo.

    @param storage_pool: name of the storage pool to synchronize
    @param remote_repo: config section name of the remote repository
    @param templates: template names to keep; None is treated as empty
    @param sync_tasks_fnm: task list file; defaults to the configured
        'sync_task_list' entry
    @param force: when True, overwrite an already existing task list
        instead of raising
    @param screen: when True, run the download detached in a GNU screen
        session; otherwise synchronize inline
    @raise TemplateException: if a task list file already exists and
        force is False
    """
    config = get_config()
    if not sync_tasks_fnm:
        sync_tasks_fnm = config.getstring('general', 'sync_task_list')
    vm_type = config.getstring(remote_repo, 'type')
    existing_templates = get_local_templates(vm_type, storage_pool)
    # synchronize selected templates
    if templates is None:
        templates = []
    purely_local_tmpl = get_purely_local_templates(storage_pool, vm_type,
                                                   remote_repo)
    # might be not order preserving (set arithmetic below)
    for_update = set(templates) - set(purely_local_tmpl)
    for_deletion = set(existing_templates) - for_update - set(templates)
    tasks = [(t, storage_pool, remote_repo) for t in for_update]
    # XXX at the moment only a single sync process is allowed.
    if os.path.exists(sync_tasks_fnm):
        if not force:
            raise TemplateException("Synchronization task pool already defined.")
    set_templates_sync_list(tasks, sync_tasks_fnm)
    # delete existing, but not selected templates
    for tmpl in for_deletion:
        delete_template(storage_pool, vm_type, tmpl)
    if screen:
        cli_command = "from opennode.cli.actions import templates;"
        cli_command += "templates.sync_templates_list('%s')" % sync_tasks_fnm
        execute_in_screen('OPENNODE-SYNC', 'python -c "%s"' % cli_command)
    else:
        sync_templates_list(sync_tasks_fnm)
def delete_template(storage_pool, vm_type, template):
    """Deletes template, unpacked folder and a hash.

    @param storage_pool: name of the storage pool holding the template
    @param vm_type: 'openvz' or 'kvm'
    @param template: template name (without extension)
    """
    # get a list of files in the template
    config = get_config()
    # was: used a bare `log` name; every sibling function goes through
    # get_logger(), so be consistent (bare `log` risks a NameError unless
    # defined at module level -- TODO confirm against the full file)
    get_logger().info("Deleting %s (%s) from %s..."
                      % (template, vm_type, storage_pool))
    storage_endpoint = config.getstring('general', 'storage-endpoint')
    templatefile = "%s/%s/%s/%s.tar" % (storage_endpoint, storage_pool,
                                        vm_type, template)
    if not os.path.exists(templatefile):
        templatefile = os.path.splitext(templatefile)[0] + '.ova'
    # remove every unpacked file/folder listed in the archive
    with closing(tarfile.open(templatefile)) as tmpl:  # was: never closed
        for packed_file in tmpl.getnames():
            fnm = "%s/%s/%s/unpacked/%s" % (storage_endpoint, storage_pool,
                                            vm_type, packed_file)
            if not os.path.isdir(fnm):
                delete(fnm)
            else:
                shutil.rmtree(fnm)
    # remove master copy
    delete(templatefile)
    delete("%s.pfff" % templatefile)
    # also remove symlink for openvz vm_type
    if vm_type == 'openvz':
        delete("%s/%s" % (config.getstring('general', 'openvz-templates'),
                          "%s.tar.gz" % template))