def sync_storage_pool(storage_pool, remote_repo, templates, sync_tasks_fnm=c('general', 'sync_task_list'), force=False):
    """Synchronize selected storage pool with the remote repo.

    Only selected templates will be persisted, all of the other templates
    shall be purged. Ignores purely local templates - templates with no
    matching name in remote repo.

    :param storage_pool: name of the local storage pool to synchronize
    :param remote_repo: config section name of the remote repository
    :param templates: iterable of template names to keep in sync (None == empty)
    :param sync_tasks_fnm: task-list file name; NOTE: the default is evaluated
        once, at import time, via c()
    :param force: overwrite an already-defined synchronization task list
    :raises TemplateException: if a task list already exists and force is False
    """
    vm_type = c(remote_repo, 'type')
    existing_templates = get_local_templates(vm_type, storage_pool)
    # synchronize selected templates
    if templates is None:
        templates = []
    purely_local_tmpl = get_purely_local_templates(storage_pool, vm_type, remote_repo)
    # might be not order preserving
    for_update = set(templates) - set(purely_local_tmpl)
    # templates present locally but neither selected nor being updated
    for_deletion = set(existing_templates) - for_update - set(templates)
    tasks = [(t, storage_pool, remote_repo) for t in for_update]
    # XXX at the moment only a single sync process is allowed.
    if os.path.exists(sync_tasks_fnm):
        if not force:
            raise TemplateException("Synchronization task pool already defined.")
    set_templates_sync_list(tasks, sync_tasks_fnm)
    # delete existing, but not selected templates
    for tmpl in for_deletion:
        delete_template(storage_pool, vm_type, tmpl)
    # Spawn the actual download work in a detached screen session so it
    # survives the CLI process.
    # XXX a wrong place for such a construction, not sure what is a correct place
    cli_command = "from opennode.cli.actions import templates;"
    cli_command += "templates.sync_templates_list('%s')" % sync_tasks_fnm
    execute_in_screen('OPENNODE-SYNC', 'python -c "%s"' % cli_command)
def generate_ubc_config(settings):
    """Generate the UBC (user beancounters) part of an OpenVZ container
    configuration file.

    :param settings: dict with keys "memory", "swap", "disk", "vcpu",
        "vcpulimit" (values may be numeric strings)
    :return: the module-level ``openvz_template`` filled in with UBC params
    """
    st = settings
    # FIX: the original recomputed _compute_diskspace_hard_limit() and
    # re-read the DEFAULT_INODES config value twice; hoist them.
    disk = float(st["disk"])
    diskspace_hard = _compute_diskspace_hard_limit(disk)
    default_inodes = int(config.c("ubc-defaults", "DEFAULT_INODES", "openvz"))
    ubc_params = {
        "physpages_limit": st["memory"],
        "swappages_limit": st["swap"],
        "diskspace_soft": st["disk"],
        "diskspace_hard": diskspace_hard,
        "diskinodes_soft": disk * default_inodes,
        "diskinodes_hard": round(diskspace_hard * default_inodes),
        "quotatime": config.c("ubc-defaults", "DEFAULT_QUOTATIME", "openvz"),
        "cpus": st["vcpu"],
        "cpulimit": int(st["vcpulimit"]) * int(st["vcpu"]),
        'cpuunits': config.c("ubc-defaults", "DEFAULT_CPUUNITS", "openvz"),
    }
    # Get rid of zeros where necessary (eg 5.0 - > 5 )
    ubc_params = dict([(key, int(float(val)) if float(val).is_integer() else val)
                       for key, val in ubc_params.items()])
    # timestamp embedded into the generated config
    ubc_params['time'] = datetime.datetime.today().ctime()
    return openvz_template % ubc_params
def get_template_info(template_name, vm_type, storage_pool = c('general', 'default-storage-pool')):
    """Read the OVF settings of a locally unpacked template.

    :param template_name: template base name (without the ".ovf" suffix)
    :param vm_type: virtualization type folder ("openvz" / "kvm")
    :param storage_pool: pool to look in; NOTE: default is evaluated once,
        at import time, via c()
    :return: settings dict produced by the vm-type specific module
    """
    # OVF file lives under <endpoint>/<pool>/<vm_type>/unpacked/<name>.ovf
    ovf_file = OvfFile(os.path.join(c("general", "storage-endpoint"),
                                    storage_pool, vm_type, "unpacked",
                                    template_name + ".ovf"))
    # dispatch to the openvz/kvm specific implementation
    vm = vm_ops.get_module(vm_type)
    template_settings = vm.get_ovf_template_settings(ovf_file)
    # XXX handle modification to system params
    #errors = vm.adjust_setting_to_systems_resources(template_settings)
    return template_settings
def get_template_repos():
    """Return a formatted list of strings describing configured repositories"""
    # each comma-separated group maps to a "<group>-repo" config section
    groups = ["%s-repo" % g.strip() for g in c('general', 'repo-groups').split(',')]
    entries = []
    for group in groups:
        label = "%s (%s)" % (c(group, 'name'), c(group, 'type'))
        entries.append((label, group))
    return entries
def delete_pool(pool_name): """Delete a storage pool""" try: execute("virsh 'pool-destroy %s'" %pool_name) execute("virsh 'pool-undefine %s'" %pool_name) del_folder(os.path.join(c('general', 'storage-endpoint'), pool_name)) if pool_name == c('general', 'default-storage-pool'): set_default_pool('') except Exception, e: print "Failed to delete pool %s: %s" % (pool_name, e)
def import_template(template, vm_type, storage_pool = c('general', 'default-storage-pool')): """Import external template into ON storage pool""" if not os.path.exists(template): raise RuntimeError("Template not found: " % template) if not template.endswith('tar'): raise RuntimeError("Expecting a file ending with .tar for a template") storage_endpoint = c('general', 'storage-endpoint') tmpl_name = os.path.basename(template) target_file = os.path.join(storage_endpoint, storage_pool, vm_type, tmpl_name) print "Copying template to the storage pool..." print template, target_file shutil.copyfile(template, target_file) calculate_hash(target_file) print "Unpacking..." unpack_template(storage_pool, vm_type, tmpl_name)
def get_template_list(remote_repo):
    """Retrieve a list of template names from the specified repository.

    :param remote_repo: config section name of the remote repository
    :return: list of stripped template names from <url>/templatelist.txt
    """
    url = c(remote_repo, 'url')
    tmpl_list = urlopen("%s/templatelist.txt" % url)
    # FIX: close the URL handle even if reading fails (original leaked it
    # on error).
    try:
        return [template.strip() for template in tmpl_list]
    finally:
        tmpl_list.close()
def details(name):
    """Describe the network interface `name`.

    :return: dict with keys: type ("simple"/"virtual"/"bridge"), name,
        and optionally members, mac, ip (CIDR), primary
    """
    res = {'type': 'simple', 'name': name}
    # a tun_flags entry in sysfs marks a tun/tap (virtual) device
    if os.path.exists('/sys/class/net/' + name + '/tun_flags'):
        res['type'] = 'virtual'
    sys_bridge_path = '/sys/class/net/' + name + '/brif/'
    if os.path.exists(sys_bridge_path):
        res['type'] = 'bridge'
        res['members'] = os.listdir(sys_bridge_path)
    addrs = netifaces.ifaddresses(name)
    # FIX: dict.has_key() is deprecated (removed in Python 3); use `in`.
    if netifaces.AF_LINK in addrs:
        res['mac'] = addrs[netifaces.AF_LINK][0]['addr']
    if netifaces.AF_INET in addrs:
        ip = addrs[netifaces.AF_INET][0]['addr']
        mask = addrs[netifaces.AF_INET][0]['netmask']
        # fold the dotted-quad netmask into one integer, then count set
        # bits to get the CIDR prefix length
        l = 0
        for b in mask.split('.'):
            l = l << 8 | int(b)
        prefix = number_of_set_bits(l)
        res['ip'] = '%s/%s' % (ip, prefix)
    default_name = config.c('general', 'main_iface') if config.has_option('general', 'main_iface') else 'vmbr0'
    if name == default_name:
        res['primary'] = True
    return res
def prepare_storage_pool(storage_pool=get_default_pool()):
    """Assures that storage pool has the correct folder structure"""
    # create structure
    base = "%s/%s" % (c('general', 'storage-endpoint'), storage_pool)
    for subdir in ('iso/', 'images/', 'openvz/unpacked', 'kvm/unpacked'):
        mkdir_p("%s/%s" % (base, subdir))
def sync_template(remote_repo, template, storage_pool):
    """Synchronizes local template (cache) with the remote one (master).

    Downloads <template>.tar plus its hash file(s) from the repo URL into
    the pool's vm-type folder and unpacks the archive, skipping the
    download when the local copy is already fresh.
    """
    url = c(remote_repo, 'url')
    vm_type = c(remote_repo, 'type')
    storage_endpoint = c('general', 'storage-endpoint')
    localfile = os.path.join(storage_endpoint, storage_pool, vm_type, template)
    # NOTE(review): os.path.join on a URL only works because both use '/'
    # as a separator on this platform
    remotefile = os.path.join(url, template)
    # only download if we don't already have a fresh copy
    if not is_fresh(localfile, remotefile):
        # for resilience
        storage.prepare_storage_pool(storage_pool)
        download("%s.tar" % remotefile, "%s.tar" % localfile)
        # fetch the accompanying hash file(s); 'pfff' is the hash suffix
        # used throughout this code base
        for h in ['pfff']:
            r_template_hash = "%s.tar.%s" % (remotefile, h)
            l_template_hash = "%s.tar.%s" % (localfile, h)
            download(r_template_hash, l_template_hash)
        unpack_template(storage_pool, vm_type, localfile)
def link_template(storage_pool, tmpl_name, overwrite=True):
    """Setup symlinks from the OpenVZ template to the location expected by vzctl"""
    # added resilience. Openvz templates are distributed as tarballs, so sometimes
    # name and name.tar.gz are used in a mixed way
    if not tmpl_name.endswith('.tar.gz'):
        tmpl_name += '.tar.gz'
    src = os.path.join(config.c('general', 'storage-endpoint'),
                       storage_pool, 'openvz', 'unpacked', tmpl_name)
    dst = os.path.join(config.c('general', 'openvz-templates'), tmpl_name)
    if overwrite:
        try:
            os.unlink(dst)
        except OSError as exc:
            # a missing link is fine; anything else is a real error
            if exc.errno != errno.ENOENT:
                raise
    if not os.path.exists(dst):
        os.symlink(src, dst)
def set_oms_server(server, port=51235):
    """Write OMS server address and port to the configuration file"""
    conf_fnm = config.c('general', 'minion-conf')
    parser = ConfigParser.RawConfigParser()
    parser.read(conf_fnm)
    # certmaster is the OMS server the minion reports to
    parser.set('main', 'certmaster', server)
    parser.set('main', 'certmaster_port', port)
    with open(conf_fnm, 'w') as conf:
        parser.write(conf)
def get_active_template_settings(vm_name, storage_pool):
    """ Reads ovf settings of the specified VM.

    Looks up the OVF file of the template the VM was deployed from;
    falls back to default OVF settings when that file is missing.
    """
    # <endpoint>/<pool>/openvz/unpacked/<template>.ovf
    ovf_fnm = path.join(config.c("general", "storage-endpoint"), storage_pool,
                        "openvz", "unpacked",
                        get_template_name(vm_name) + ".ovf")
    if path.exists(ovf_fnm):
        ovf_file = OvfFile(ovf_fnm)
        return get_ovf_template_settings(ovf_file)
    else:
        # template OVF missing - use defaults instead of failing
        return read_default_ovf_settings()
def prepare_file_system(settings, storage_pool):
    """ Prepare file system for VM template creation in OVF appliance format:
        - create template directory if it does not exist
        - copy disk based images
        - convert block device based images to file based images
    """
    images_dir = path.join(config.c("general", "storage-endpoint"),
                           storage_pool, "images")
    target_dir = path.join(config.c("general", "storage-endpoint"),
                           storage_pool, "kvm", "unpacked")
    for disk in settings["disks"]:
        disk_template_path = path.join(target_dir, disk["template_name"])
        if disk["deploy_type"] == "file":
            # file-backed disk: plain copy, name prefixed with the vm type
            disk_deploy_path = path.join(images_dir, settings["vm_type"] + "-" + disk["source_file"])
            shutil.copy2(disk_template_path, disk_deploy_path)
        elif disk["deploy_type"] in ["physical", "lvm"]:
            # block-device-backed disk: convert the qcow2 template onto the
            # raw device
            disk_deploy_path = disk["source_dev"]
            execute("qemu-img convert -f qcow2 -O raw %s %s" % (disk_template_path, disk_deploy_path))
def get_oms_server():
    """Read OMS server port and address from the configuration file"""
    conf_fnm = config.c('general', 'minion-conf')
    parser = ConfigParser.RawConfigParser()
    parser.read(conf_fnm)
    try:
        return (parser.get('main', 'certmaster'),
                parser.get('main', 'certmaster_port'))
    except ConfigParser.NoOptionError:
        # OMS server not configured yet
        return ('', '')
def save_as_ovf(vm_settings, storage_pool, unpack=True):
    """
    Creates ovf template archive for the specified VM.
    Steps:
        - relocate kvm disk files
        - generate ovf configuration file
        - pack ovf and disk files into tar.gz file
        - (if unpack) leave generated files as unpacked
    """
    target_dir = path.join(config.c('general', 'storage-endpoint'), storage_pool, "kvm")
    if unpack:
        # generated files are kept in the 'unpacked' subfolder
        target_dir = path.join(target_dir, 'unpacked')
    # prepare file system
    print "Preparing disks... (This may take a while)"
    # _prepare_disks relocates the disk images and records their new paths
    vm_settings["disks"] = _prepare_disks(vm_settings, target_dir)
    # generate and save ovf configuration file
    print "Generating ovf file..."
    ovf = _generate_ovf_file(vm_settings)
    ovf_fnm = path.join(target_dir, "%s.ovf" % vm_settings["template_name"])
    with open(ovf_fnm, 'w') as f:
        ovf.writeFile(f, pretty=True, encoding='UTF-8')
    # pack container archive and ovf file
    print "Archiving..."
    # the final .tar always lands in the pool's kvm folder, regardless of
    # where the intermediate files were generated
    arch_location = path.join(config.c('general', 'storage-endpoint'), storage_pool, "kvm")
    ovf_archive_fnm = path.join(arch_location, "%s.tar" % vm_settings["template_name"])
    with closing(tarfile.open(ovf_archive_fnm, "w")) as tar:
        tar.add(ovf_fnm, arcname=path.basename(ovf_fnm))
        for disk in vm_settings["disks"]:
            tar.add(disk["new_path"], arcname=path.basename(disk["new_path"]))
    # remove generated files
    if not unpack:
        os.remove(ovf_fnm)
        for disk in vm_settings["disks"]:
            os.remove(disk["new_path"])
    # hash file accompanies the archive for later freshness checks
    calculate_hash(ovf_archive_fnm)
    print "Done! Template saved at %s" % ovf_archive_fnm
def list_templates(): """ Prints all local and remote templates """ # local templates for vm_type in ["openvz", "kvm"]: print "%s local templates:" % vm_type.upper() for storage_pool in storage.list_pools(): print "\t", "Storage:", os.path.join(config.c("general", "storage-endpoint"), storage_pool, vm_type) for tmpl in get_local_templates(storage_pool, vm_type): print "\t\t", tmpl print # remote templates repo_groups = re.split(",\s*", config.c("general", "repo-groups")) repo_groups = [repo_group + "-repo" for repo_group in repo_groups] for repo_group in repo_groups: url, vm_type = config.c(repo_group, "url"), config.c(repo_group, "type") print "%s remote templates:" % vm_type.upper() print "\t", "Repository:", url for tmpl in get_template_list(repo_group): print "\t\t", tmpl print
def delete_template(storage_pool, vm_type, template): """Deletes template, unpacked folder and a hash""" # get a list of files in the template print "Deleting %s (%s) from %s..." % (template, vm_type, storage_pool) storage_endpoint = c('general', 'storage-endpoint') templatefile = "%s/%s/%s/%s.tar" % (storage_endpoint, storage_pool, vm_type, template) tmpl = tarfile.open(templatefile) for packed_file in tmpl.getnames(): fnm = "%s/%s/%s/unpacked/%s" % (storage_endpoint, storage_pool, vm_type, packed_file) if not os.path.isdir(fnm): delete(fnm) else: shutil.rmtree(fnm) # remove master copy delete(templatefile) delete("%s.pfff" % templatefile) # also remove symlink for openvz vm_type if vm_type == 'openvz': delete("%s/%s" % (c('general', 'openvz-templates'), "%s.tar.gz" % template))
def add_pool(pool_name, careful=True): """Add a new pool_name""" if careful and filter(lambda p: p[0] == pool_name, list_pools()): print "Pool '%s' already exists." %pool_name return try: pool_name = re.sub(" " , "_", pool_name) # safety measure pool_path = os.path.join(c('general', 'storage-endpoint'), pool_name) mkdir_p(pool_path) prepare_storage_pool(pool_name) execute("virsh 'pool-define-as %s dir --target %s'" %(pool_name, pool_path)) execute("virsh 'pool-start %s'" %pool_name) execute("virsh 'pool-autostart %s'" %pool_name) except Exception, e: print "Failed to create a new pool: %s" %e
def unpack_template(storage_pool, vm_type, tmpl_name):
    """Unpacks template into the 'unpacked' folder of the storage pool.
    Adds symlinks as needed by the VM template vm_type.
    """
    # we assume location of the 'unpacked' to be the same as the location of the file
    basedir = os.path.join(c('general', 'storage-endpoint'), storage_pool, vm_type)
    # FIX: close the tarfile handle (the original leaked it)
    tmpl = tarfile.open(os.path.join(basedir, "%s.tar" % tmpl_name))
    try:
        tmpl.extractall(os.path.join(basedir, 'unpacked'))
        member_names = tmpl.getnames()
    finally:
        tmpl.close()
    # special case for openvz vm_type
    if vm_type == 'openvz':
        from opennode.cli.actions import vm
        tarballs = [fnm for fnm in member_names if fnm.endswith('tar.gz')]
        # make sure we have only a single tarball with the image
        assert len(tarballs) == 1
        vm.openvz.link_template(storage_pool, tarballs[0])
def setup_scripts(vm_settings, storage_pool): """Setup action scripts for the CT""" dest_dir = path.join(config.c('general', 'storage-endpoint'), storage_pool, "openvz") unpacked_dir = path.join(dest_dir, "unpacked") ct_scripts_fnm = path.join(unpacked_dir, "%s.scripts.tar.gz" % vm_settings["template_name"]) dest_folder = '/etc/vz/conf/%s' % vm_settings['vm_id'] try: with closing(tarfile.open(ct_scripts_fnm, "r:gz")) as tar: for f in tar: target_script = '%s.%s' % (dest_folder, f.name) shutil.copyfileobj(tar.extractfile(f), open(target_script, 'w')) os.chmod(target_script, 0755) except: print "No action scripts in the template." pass
def sync_templates_list(sync_tasks_fnm=c('general', 'sync_task_list')):
    """Sync a list of templates defined in a file. After synchronizing a template,
    removes it from the list.
    NB: multiple copies of this function should be run against the same task list file!

    NOTE(review): the NB above presumably means "should NOT be run" - the
    task file is consumed destructively and sync_storage_pool only allows a
    single sync process - confirm intent before relying on it.
    NOTE: the sync_tasks_fnm default is evaluated once, at import time.
    """
    if os.path.exists(sync_tasks_fnm):
        tasks = get_templates_sync_list(sync_tasks_fnm)
        while tasks:
            # this doesn't make sense the first time, but for resilience we reread a list
            # each time a template was downloaded
            tasks = get_templates_sync_list(sync_tasks_fnm)
            template, storage_pool, remote_repo = tasks[0]
            # XXX a separate download hook for dumping progress to a file?
            sync_template(remote_repo, template, storage_pool)
            # persist the shrunken task list so progress survives restarts
            del tasks[0]
            set_templates_sync_list(tasks, sync_tasks_fnm)
        # all tasks done - remove the task file so a new sync can start
        os.unlink(sync_tasks_fnm)
def save_as_ovf(vm_settings, storage_pool):
    """
    Creates ovf template archive for the specified container.
    Steps:
        - archive container directory
        - generate ovf configuration file
        - pack ovf and container archive into tar.gz file
    """
    dest_dir = path.join(config.c('general', 'storage-endpoint'), storage_pool, "openvz")
    unpacked_dir = path.join(dest_dir, "unpacked")
    ct_archive_fnm = path.join(unpacked_dir, "%s.tar.gz" % vm_settings["template_name"])
    # OpenVZ keeps the container's private area under /vz/private/<name>
    ct_source_dir = path.join("/vz/private", vm_settings["vm_name"])
    # Pack vm container catalog
    print "Archiving VM container catalog %s. This may take a while..." % ct_source_dir
    with closing(tarfile.open(ct_archive_fnm, "w:gz")) as tar:
        for f in os.listdir(ct_source_dir):
            tar.add(path.join(ct_source_dir, f), arcname=f)
    # Archive action scripts if they are present
    print "Adding action scripts..."
    ct_scripts_fnm = path.join(unpacked_dir, "%s.scripts.tar.gz" % vm_settings["template_name"])
    with closing(tarfile.open(ct_scripts_fnm, "w:gz")) as tar:
        # only the known vzctl hook types are collected
        for script_type in ['premount', 'mount', 'start', 'stop', 'umount', 'postumount']:
            script_fnm = "/etc/vz/conf/%s.%s" % (vm_settings["vm_name"], script_type)
            if os.path.exists(script_fnm):
                tar.add(script_fnm, arcname=script_type)
    # generate and save ovf configuration file
    print "Generating ovf file..."
    ovf = _generate_ovf_file(vm_settings, ct_archive_fnm)
    ovf_fnm = path.join(unpacked_dir, "%s.ovf" % vm_settings["template_name"])
    with open(ovf_fnm, 'w') as f:
        ovf.writeFile(f, pretty=True, encoding='UTF-8')
    # pack container archive and ovf file
    print "Archiving..."
    ovf_archive_fnm = path.join(dest_dir, "%s.tar" % vm_settings["template_name"])
    with closing(tarfile.open(ovf_archive_fnm, "w")) as tar:
        tar.add(ct_archive_fnm, arcname=path.basename(ct_archive_fnm))
        tar.add(ovf_fnm, arcname=path.basename(ovf_fnm))
        tar.add(ct_scripts_fnm, arcname=path.basename(ct_scripts_fnm))
    # hash file accompanies the archive for later freshness checks
    calculate_hash(ovf_archive_fnm)
    print "Done! Saved template at %s" % ovf_archive_fnm
def rename_template(storage_pool, vm_type, template, new_template):
    """Rename the chosen template: master tarball, hash file and the
    unpacked .ovf / .scripts.tar.gz / .tar.gz artifacts.

    :param vm_type: repo identifier ('default-openvz-repo' or 'default-kvm-repo')
    """
    storage_endpoint = c('general', 'storage-endpoint')
    # map the repo identifier to the on-disk vm-type folder
    if (vm_type == 'default-openvz-repo'):
        vm = 'openvz'
    if (vm_type == 'default-kvm-repo'):
        vm = 'kvm'
    templatefile = "%s/%s/%s/%s.tar" % (storage_endpoint, storage_pool, vm, template)
    new_templatefile = "%s/%s/%s/%s.tar" % (storage_endpoint, storage_pool, vm, new_template)
    if os.path.isfile(new_templatefile):
        # refuse to clobber an existing template
        return
    else:
        os.rename(templatefile, new_templatefile)
    if os.path.isfile(templatefile + '.pfff'):
        os.rename(templatefile + '.pfff', new_templatefile + '.pfff')
    ovfpath = "%s/%s/%s/unpacked/" % (storage_endpoint, storage_pool, vm)
    os.rename(os.path.join(ovfpath, template + ".ovf"),
              os.path.join(ovfpath, new_template + ".ovf"))
    # FIX: the original guarded this rename with the existence of
    # template + ".tar.gz" instead of template + ".scripts.tar.gz", so it
    # either crashed or skipped the scripts archive incorrectly.
    if os.path.isfile(os.path.join(ovfpath, template + ".scripts.tar.gz")):
        os.rename(os.path.join(ovfpath, template + ".scripts.tar.gz"),
                  os.path.join(ovfpath, new_template + ".scripts.tar.gz"))
    if os.path.isfile(os.path.join(ovfpath, template + ".tar.gz")):
        os.rename(os.path.join(ovfpath, template + ".tar.gz"),
                  os.path.join(ovfpath, new_template + ".tar.gz"))
def get_local_templates(vm_type, storage_pool=c('general', 'default-storage-pool')):
    """Returns a list of templates of a certain vm_type from the storage pool"""
    tmpl_dir = "%s/%s/%s" % (c('general', 'storage-endpoint'), storage_pool, vm_type)
    # strip the ".tar" suffix from each archive name
    return [fnm[:-4] for fnm in os.listdir(tmpl_dir) if fnm.endswith('tar')]
def get_default_pool():
    """Return name of the storage pool to use by default. Or None if not configured"""
    name = c('general', 'default-storage-pool')
    # unset, empty and the literal string 'None' all mean "not configured"
    if name in (None, '', 'None'):
        return None
    return name
def generate_libvirt_conf(settings):
    """
    Prepare Libvirt XML configuration file from OVF template/appliance.

    @return: Libvirt XML configuration
    @rtype: DOM Document
    """
    libvirt_conf_dom = xml.dom.minidom.Document()
    # <domain type=...> root element
    domain_dom = libvirt_conf_dom.createElement("domain")
    domain_dom.setAttribute("type", settings["domain_type"])
    libvirt_conf_dom.appendChild(domain_dom)
    # <name>: libvirt domain name taken from the hostname
    name_dom = libvirt_conf_dom.createElement("name")
    name_value = libvirt_conf_dom.createTextNode(settings["hostname"])
    name_dom.appendChild(name_value)
    domain_dom.appendChild(name_dom)
    memory_dom = libvirt_conf_dom.createElement("memory")
    memory_value = libvirt_conf_dom.createTextNode(str(int(float(settings["memory"]) * 1024 ** 2)))  # Gb -> Kb
    memory_dom.appendChild(memory_value)
    domain_dom.appendChild(memory_dom)
    vcpu_dom = libvirt_conf_dom.createElement("vcpu")
    vcpu_value = libvirt_conf_dom.createTextNode(str(settings["vcpu"]))
    vcpu_dom.appendChild(vcpu_value)
    domain_dom.appendChild(vcpu_dom)
    # <os>: guest architecture, machine type, virtualization type and boot device
    os_dom = libvirt_conf_dom.createElement("os")
    os_type_dom = libvirt_conf_dom.createElement("type")
    os_type_dom.setAttribute("arch", settings["arch"])
    os_type_dom.setAttribute("machine", settings["machine"])
    os_type_value = libvirt_conf_dom.createTextNode(settings["virt_type"])
    os_type_dom.appendChild(os_type_value)
    os_dom.appendChild(os_type_dom)
    os_boot_dom = libvirt_conf_dom.createElement("boot")
    os_boot_dom.setAttribute("dev", settings["boot_dev"])
    os_dom.appendChild(os_boot_dom)
    domain_dom.appendChild(os_dom)
    # <features>: each feature name becomes an empty child element
    features_dom = libvirt_conf_dom.createElement("features")
    for feature in settings["features"]:
        feature_dom = libvirt_conf_dom.createElement(feature)
        features_dom.appendChild(feature_dom)
    domain_dom.appendChild(features_dom)
    clock_dom = libvirt_conf_dom.createElement("clock")
    clock_dom.setAttribute("offset", settings["clock_offset"])
    domain_dom.appendChild(clock_dom)
    # lifecycle event handlers: on_poweroff / on_reboot / on_crash
    on_poweroff_dom = libvirt_conf_dom.createElement("on_poweroff")
    on_poweroff_value = libvirt_conf_dom.createTextNode(settings["on_poweroff"])
    on_poweroff_dom.appendChild(on_poweroff_value)
    domain_dom.appendChild(on_poweroff_dom)
    on_reboot_dom = libvirt_conf_dom.createElement("on_reboot")
    on_reboot_value = libvirt_conf_dom.createTextNode(settings["on_reboot"])
    on_reboot_dom.appendChild(on_reboot_value)
    domain_dom.appendChild(on_reboot_dom)
    on_crash_dom = libvirt_conf_dom.createElement("on_crash")
    on_crash_value = libvirt_conf_dom.createTextNode(settings["on_crash"])
    on_crash_dom.appendChild(on_crash_value)
    domain_dom.appendChild(on_crash_dom)
    # <devices>: emulator, disks, network interfaces, consoles, input, graphics
    devices_dom = libvirt_conf_dom.createElement("devices")
    domain_dom.appendChild(devices_dom)
    emulator_dom = libvirt_conf_dom.createElement("emulator")
    emulator_value = libvirt_conf_dom.createTextNode(settings["emulator"])
    emulator_dom.appendChild(emulator_value)
    devices_dom.appendChild(emulator_dom)
    # NOTE(review): drive_letter_count is incremented per disk but never
    # read anywhere in this function
    drive_letter_count = 0
    for disk in settings["disks"]:
        if disk["deploy_type"] == "file":
            #File based disk
            disk_dom = libvirt_conf_dom.createElement("disk")
            disk_dom.setAttribute("type", disk["type"])
            disk_dom.setAttribute("device", disk["device"])
            devices_dom.appendChild(disk_dom)
            disk_source_dom = libvirt_conf_dom.createElement("source")
            # file-backed images live in the default pool's images folder,
            # prefixed with the vm type
            image_path = path.join(config.c("general", "storage-endpoint"),
                                   config.c("general", "default-storage-pool"), "images")
            disk_source_dom.setAttribute("file", path.join(image_path, "%s-%s" % (settings["vm_type"], disk["source_file"])))
            disk_dom.appendChild(disk_source_dom)
            disk_target_dom = libvirt_conf_dom.createElement("target")
            disk_target_dom.setAttribute("dev", disk["target_dev"])
            disk_target_dom.setAttribute("bus", disk["target_bus"])
            disk_dom.appendChild(disk_target_dom)
        elif disk["deploy_type"] == "physical":
            #Physical block-device based disk
            disk_dom = libvirt_conf_dom.createElement("disk")
            disk_dom.setAttribute("type", disk["type"])
            disk_dom.setAttribute("device", disk["device"])
            devices_dom.appendChild(disk_dom)
            driver_dom = libvirt_conf_dom.createElement("driver")
            driver_dom.setAttribute("name", "qemu")
            driver_dom.setAttribute("cache", "none")
            # NOTE(review): the <driver> element is appended to <devices>,
            # not to the <disk> element - confirm this is intended
            devices_dom.appendChild(driver_dom)
            disk_source_dom = libvirt_conf_dom.createElement("source")
            disk_source_dom.setAttribute("dev", disk["source_dev"])
            disk_dom.appendChild(disk_source_dom)
            disk_target_dom = libvirt_conf_dom.createElement("target")
            disk_target_dom.setAttribute("dev", disk["target_dev"])
            disk_target_dom.setAttribute("bus", disk["target_bus"])
            disk_dom.appendChild(disk_target_dom)
        elif (disk["deploy_type"] == "lvm"):
            #LVM block-device based disk
            disk_dom = libvirt_conf_dom.createElement("disk")
            disk_dom.setAttribute("type", disk["type"])
            disk_dom.setAttribute("device", disk["device"])
            devices_dom.appendChild(disk_dom)
            disk_source_dom = libvirt_conf_dom.createElement("source")
            disk_source_dom.setAttribute("dev", disk["source_dev"])
            disk_dom.appendChild(disk_source_dom)
            disk_target_dom = libvirt_conf_dom.createElement("target")
            disk_target_dom.setAttribute("dev", disk["target_dev"])
            disk_target_dom.setAttribute("bus", disk["target_bus"])
            disk_dom.appendChild(disk_target_dom)
        drive_letter_count = drive_letter_count + 1
    # bridged network interfaces
    for interface in settings["interfaces"]:
        interface_dom = libvirt_conf_dom.createElement("interface")
        interface_dom.setAttribute("type", interface["type"])
        devices_dom.appendChild(interface_dom)
        interface_source_dom = libvirt_conf_dom.createElement("source")
        interface_source_dom.setAttribute("bridge", interface["source_bridge"])
        interface_dom.appendChild(interface_source_dom)
    # serial and console devices
    serial_dom = libvirt_conf_dom.createElement("serial")
    serial_dom.setAttribute("type", settings["serial"]["type"])
    devices_dom.appendChild(serial_dom)
    serial_target_dom = libvirt_conf_dom.createElement("target")
    serial_target_dom.setAttribute("port", str(settings["serial"]["target_port"]))
    serial_dom.appendChild(serial_target_dom)
    console_dom = libvirt_conf_dom.createElement("console")
    console_dom.setAttribute("type", settings["console"]["type"])
    devices_dom.appendChild(console_dom)
    console_target_dom = libvirt_conf_dom.createElement("target")
    console_target_dom.setAttribute("port", str(settings["console"]["target_port"]))
    console_dom.appendChild(console_target_dom)
    # mouse input device
    input_type_dom = libvirt_conf_dom.createElement("input")
    input_type_dom.setAttribute("type", "mouse")
    input_type_dom.setAttribute("bus", settings["mouse_bus"])
    devices_dom.appendChild(input_type_dom)
    # graphics (e.g. VNC) endpoint
    graphics_dom = libvirt_conf_dom.createElement("graphics")
    graphics_dom.setAttribute("type", settings["graphics"]["type"])
    graphics_dom.setAttribute("port", str(settings["graphics"]["port"]))
    graphics_dom.setAttribute("autoport", settings["graphics"]["autoport"])
    graphics_dom.setAttribute("keymap", settings["graphics"]["keymap"])
    devices_dom.appendChild(graphics_dom)
    return libvirt_conf_dom
def sync_oms_template(storage_pool=c('general', 'default-storage-pool')):
    """Synchronize OMS template"""
    # repo and template name come from the opennode-oms-template section
    sync_template(c('opennode-oms-template', 'repo'),
                  c('opennode-oms-template', 'template_name'),
                  storage_pool)
def get_templates_sync_list(sync_tasks_fnm=c('general', 'sync_task_list')):
    """Return current template synchronisation list"""
    task_file = open(sync_tasks_fnm, 'r')
    try:
        return pickle.load(task_file)
    finally:
        task_file.close()
def set_templates_sync_list(tasks, sync_tasks_fnm=c('general', 'sync_task_list')):
    """Set new template synchronisation list. Function should be handled with care,
    as some retrieval might be in progress"""
    task_file = open(sync_tasks_fnm, 'w')
    try:
        pickle.dump(tasks, task_file)
    finally:
        task_file.close()