def refresh_vm_list(self): vms = {} if not self.node_proxy.file_exists(self.pid_dir): mkdir2(self.node, self.pid_dir) pid_files = self.node_proxy.listdir(self.pid_dir) for pid_file in pid_files: f = None pid = 0 pid_string = None pid_filename=os.path.join(self.pid_dir, pid_file) pid_string = self._get_file_content(pid_filename, 10) if pid_string and pid_string.strip().isdigit(): pid_string = pid_string.strip() pid = int(pid_string) else: print "ignoring up ", pid_filename continue if not pid: print "pid is none, skipping" continue info = self.refresh(pid,pid_filename) if info is not None: vms[info.id]=info self.vm_list = vms return vms
def fetch_catalog(self, catalog, url): feed_conf = self.get_conf_name(catalog) feed_conf_dir = os.path.dirname(feed_conf) mkdir2(self.local_node, feed_conf_dir) fetch_isp(url, feed_conf, "text/plain") print "fetched ", url, feed_conf return feed_conf
def import_appliance(auth,local, appliance_entry, image_store, image_group_id,\
        image_name, platform, force, progress=None):
    """Download an appliance package and register it as a local image.

    Fetches the package referenced by appliance_entry["href"] into the
    image store location for *image_name*, creates the image database
    entry, unpacks the package, builds disks from the OVA chunks and
    writes the VM template / image config files.

    auth            -- credentials passed through to image_store
    local           -- local node wrapper (node_proxy gives filesystem access)
    appliance_entry -- dict describing the appliance; must contain "href",
                       may contain "filename"; "is_hvm" is filled in here
    image_store     -- store used to locate and create the image
    image_group_id  -- group the new image is created under
    image_name      -- name for the new image
    platform        -- platform string forwarded to create_image
    force           -- forwarded to ImageUtils.create_files (presumably an
                       overwrite flag -- confirm against ImageUtils)
    progress        -- optional progress callback forwarded to the
                       download/unpack/disk-creation helpers

    Returns True on success; failures surface as exceptions from the
    helpers called here.
    """
    # TODO :
    # image_store.validate_image_name(image_name)
    appliance_url = appliance_entry["href"]
    image_dir = image_store._get_location(image_name)
    if not local.node_proxy.file_exists(image_dir):
        mkdir2(local, image_dir)
    # fetch the image
    filename = appliance_entry.get("filename")
    ###DIRTY FIX... need to check which transaction is going on
    # NOTE(review): commits the current DB transaction before the long
    # download -- confirm which open transaction this is meant to close.
    import transaction
    transaction.commit()
    downloaded_filename = ImageUtils.download_appliance(local,appliance_url, image_dir, filename, progress)
    #Make the image entry into the database after the appliance is downloaded.
    #so that database and image store filesystem will be in sync
    #for image_group in image_store.get_image_groups(auth).values():
    #    if image_store.image_exists_by_name(image_name):
    #        raise Exception("Image "+image_name+" already exists.")
    image = image_store.create_image(auth,image_group_id, image_name, platform)
    # gunzip/unzip if required
    ImageUtils.open_package(local, downloaded_filename, image_dir, progress)
    # get ova / xva information from the package
    (ova_location, full_appliance_dir, appliance_dir) = search_ova(image_dir)
    (cfg, hacks, vbds, vdis) = get_ova_info(ova_location)
    # create disk from the chunks
    disk_info = create_disks(local, full_appliance_dir, vbds, vdis, progress)
    # get rid of the hacks
    appliance_entry["is_hvm"] = hacks["is_hvm"]
    cfg["extra"] = hacks["kernel_boot_cmdline"]
    vm_template = ImageUtils.get_vm_conf_template(local, appliance_entry, cfg, disk_info)
    image_conf = ImageUtils.get_image_config(local, appliance_entry, disk_info, image_dir)
    ImageUtils.create_files(local, appliance_entry, image_store,image_group_id, image, vm_template, image_conf, force)
    return True
def __init__(self, local_node, config): self.local_node = local_node self.config = config self._feeds = None self.cache_dir = None self.catalogs = {} self.appliance_list = {} self.appliance_store_dir = config.get(constants.prop_appliance_store) if self.appliance_store_dir is None or self.appliance_store_dir is "": self.appliance_store_dir = self.DEFAULT_APPLIANCE_STORE catalogs = DBHelper().get_all(ApplianceCatalog) for catalog in catalogs: self.catalogs[catalog.name] = catalog.url print self.catalogs mkdir2(self.local_node, self.get_cache_dir())
def populate_appliances(self, feed_name): feed = self.feeds.get(feed_name) if feed is None: return None cache_dir = self.get_feed_cache_dir(feed_name) utils.mkdir2(self.local_node, cache_dir) cache_file = self.get_feed_file_name(feed_name) feed_dest = os.path.join(cache_dir, cache_file) url = self.get_feed_url(feed_name) try: fetch_isp(url, feed_dest, "/xml") except Exception as ex: print "Error downloading feed ", url, ex print "Will try to use cached copy if available." details = [] if self.local_node.node_proxy.file_exists(feed_dest): details = self._make_details(feed_name, feed_dest) else: print "Skipping ", feed_dest, " not found." return details
def start(self, config, timeout=5):
    """Build a kvm/qemu command line from *config* and launch the VM.

    config  -- VM configuration (dict-like: get()/keys(); getDisks()
               returns disk entries exposing .device and .filename)
    timeout -- seconds allowed for the launch command (default 5)

    Returns the VM name (config.get("name")).  Raises Exception when no
    config is supplied or when the launch command exits non-zero.
    """
    if config is None:
        raise Exception("No context provided to start the vm")
    info = self.info()
    if info.get(key_version):
        # NOTE(review): v is currently unused; the version check below is
        # commented out.
        v = info.get(key_version)
        # if v.find('kvm-') < 0:
        #     raise Exception("You seem to have an older version of KVM/QEMU\n The version does not contain 'kvm-' token :%s\n Please make sure kvm-70 or higher is installed and is in PATH." % v)
    # take the config.. and generate a cmdline for kvm/qemu
    cmd = self.kvm_binary
    known_options = self.kvm_options
    if config.get("type") and config.get("type") == "qemu":
        print "Using simple qemu"
        cmd = self.qemu_binary
        known_options = self.qemu_options
    # build the cmd line
    cmdline = cmd
    vnc_processed = False
    skip_kernel_rd = False
    # add disks first
    opt = "disk"
    value = config.get(opt)
    disk_entries = config.getDisks()
    boot_flag=False
    for d in disk_entries:
        flag = 0
        if d.device.find(":cdrom") > -1 or \
                d.filename == ("/dev/cdrom"):
            # cdrom entry: strip the :cdrom suffix to recover the drive name
            opt = "cdrom"
            hd=d.device.replace(":cdrom","")
            value1 = config.get(hd+"_use_drive_opt")
            if value1 and value1==1:
                flag = 1
                opt=hd
        else:
            opt = d.device
            use_drive = opt+"_use_drive_opt"
            value1 = config.get(use_drive)
            if value1 and value1==1:
                flag = 1
        value = d.filename
        """ here, if opt is either of vdx, then call one more function which adds the -drive option and other values """
        if opt.startswith("vd") or flag == 1:
            # virtio (vdX) disks -- and any drive flagged *_use_drive_opt --
            # are emitted through qemuCall as a -drive option.
            drive_boot=to_str(config.get(opt+"_drive_boot"))
            if drive_boot=="on":
                # only one drive may carry boot=on; drop any earlier one
                cmdline = cmdline.replace(",boot=on","",1)
            cmdline = self.qemuCall(cmdline, opt, value, config)
            if boot_flag==False:
                auto_boot=to_str(config.get("virtio_no_auto_boot"))
                if auto_boot!="1":
                    cmdline+=",boot=on"
                    boot_flag=True
            # mode, and type are not used.
            # filename can be file or device so it would work.
            # mode : Dont know how to specify readonly disk
        else:
            cmdline = self.process_option(cmdline, opt, value, known_options)
    # Translate the remaining config keys into command-line options.
    for opt in config.keys():
        value = config.get(opt)
        opt = opt.replace("_", "-") # python prohibits - in variables
        if opt == "extra":
            opt = "append"
        if opt == "memory" :
            opt = "m"
        elif opt == "vcpus":
            opt = "smp"
        elif opt == "stdvga":
            opt = "std-vga"
            if to_str(value) != '1':
                continue
        elif opt == "ramdisk":
            opt = "initrd"
        elif opt == "acpi":
            # acpi is on by default; only emit the option when disabled
            if to_str(value) == '0':
                cmdline = self.process_option(cmdline, "no-acpi", "", known_options)
            continue
        elif opt == "vif" and not config.get("net"):
            #Transform vif in to -net options
            vifs = value
            if vifs:
                vlan=-1
                for vif in vifs:
                    # each vif is "k=v,k=v,..."; one vlan per vif
                    vlan = vlan + 1
                    parts = vif.split(',')
                    x = dict([p.strip().split('=') for p in parts])
                    macaddr = x.get("mac")
                    if not macaddr:
                        macaddr=randomMAC()
                    opt_val = "nic,vlan=%d,macaddr=%s" % ( vlan, macaddr)
                    # model
                    model = x.get("model")
                    if model:
                        opt_val = opt_val + ",model=" + model
                    cmdline = self.process_option(cmdline, "net", opt_val, known_options)
                    # if bridge is specified, lets try to specify the script
                    # Assumes bridge is created and script would
                    # add the tap interface to the bridge
                    # TODO : if the bridge can be somehow specified as
                    # param to the script in /etc/qemu-ifup and
                    # /etc/qemu-ifdown
                    bridge=x.get("bridge")
                    mode = config.get("network_mode")
                    if not mode:
                        if bridge:
                            mode = "tap"
                        else:
                            mode = "user"
                    opt_val = "%s,vlan=%d" % (mode, vlan)
                    if mode == "tap":
                        # interface name
                        ifname = x.get("ifname")
                        if ifname:
                            opt_val = opt_val + ",ifname=" + ifname
                        # script
                        script = x.get("script")
                        if script:
                            opt_val = opt_val + ",script=" + script
                            down_script = x.get('down_script')
                            if down_script:
                                opt_val = opt_val + ",downscript=" +down_script
                        else:
                            # see if the bridge specific script is there.
                            if bridge:
                                s1 = "/etc/kvm/kvm-ifup-%s" % (bridge,)
                                s2 = "/etc/kvm/qemu-ifup-%s" % (bridge,)
                                s3 = "/etc/qemu-ifup-%s" % (bridge,)
                                s4 = "/etc/qemu/qemu-ifup-%s" % (bridge,)
                                for s in [ s1, s2, s3, s4 ]:
                                    if self.node_proxy.file_exists(s):
                                        # assume it is executable.
                                        opt_val = opt_val + ",script=" + s
                                        break
                    elif mode == "user":
                        # hostname
                        hname = x.get("hostname")
                        if hname:
                            opt_val = opt_val + ",hostname=" + hname
                    cmdline = self.process_option(cmdline, "net", opt_val, known_options)
                    # TODO : Support custom script
            continue
        elif opt in ["vnc","vncdisplay"] and not vnc_processed:
            # vnc and vncdisplay are handled together, exactly once
            vnc_processed = True
            value = config.get("vnc")
            if value == 1 or value == "1":
                vncdisplay = config.get("vncdisplay")
                if not vncdisplay:
                    vncdisplay = self.node.get_unused_display()
                if vncdisplay:
                    value = ":" + to_str(vncdisplay)
                cmdline = self.process_option(cmdline, opt, value,\
                        known_options)
            continue
        elif opt in ["kernel", "initrd", "append"] :
            if not skip_kernel_rd :
                # hack
                k_value = config.get("kernel")
                if k_value:
                    if k_value.find("hvmloader") > -1:
                        #skip xen hvmloader
                        skip_kernel_rd = True
                        continue
            else:
                # ignore the initrd and append/extra too.
                continue
        if opt in self.kvm_options_no_v:
            # valueless flag options: drop when explicitly disabled,
            # otherwise emit the bare flag
            if value == 0 or value == "0" :
                continue
            value = ""
        else:
            if not value:
                continue
        cmdline = self.process_option(cmdline, opt, value, known_options)
    # The following is done to have the convention and
    # temporarily have the name of VM available in the command line.
    if not self.node_proxy.file_exists(self.monitor_dir):
        mkdir2(self.node, self.monitor_dir)
    monitor_path = os.path.join(self.monitor_dir, config.get("name"))
    cmdline = cmdline + " -monitor " + """"unix:%s,server,nowait\"""" % (monitor_path,)
    pid_fname = os.path.join(self.pid_dir, config.get("name"))
    cmdline = cmdline + " -pidfile " + """"%s\"""" % (pid_fname,)
    #cmdline = cmdline + ' -localtime'
    #cmdline = cmdline + ' clock=host'
    # daemonize.. the command can return
    cmdline = cmdline + " -daemonize"
    #incoming_val = config.get("incoming")
    #if incoming_val and (incoming_val.find("tcp://") == 0 or \
    #    incoming_val.find("ssh://") == 0 ):
    #    cmdline += " &"
    cmdline = self.replaceCmdline(cmdline)
    print "CMDLINE ***** ", cmdline
    (output, ret) = self.node_proxy.exec_cmd(cmdline, self.kvm_binary_path, timeout)
    if ret != 0:
        print "start failed :", cmdline,output
        raise Exception((output, ret))
    print "start : success ", output
    self.get_vms()
    if config.get('vncpasswd'):
        self.set_vnc_password(config.get('name'),config.get('vncpasswd'))
    return config.get("name")