def stat(self, dbg, sr):
    # SR path (sr) is file://<mnt_path>
    # Get mnt_path by dropping url scheme
    uri = urlparse.urlparse(sr)
    mnt_path = "/%s/%s" % (uri.netloc, uri.path)
    if not os.path.isdir(mnt_path) or not os.path.ismount(mnt_path):
        raise xapi.storage.api.volume.Sr_not_attached(mnt_path)

    # Get the filesystem size
    statvfs = os.statvfs(mnt_path)
    psize = statvfs.f_blocks * statvfs.f_frsize
    fsize = statvfs.f_bfree * statvfs.f_frsize
    log.debug("%s: statvfs says psize = %Ld" % (dbg, psize))

    overprovision = \
        VHDVolume.get_sr_provisioned_size(sr, gfs2.Callbacks()) / psize

    return {
        "sr": sr,
        "name": "SR Name",
        "description": "GFS2 SR",
        "total_space": psize,
        "free_space": fsize,
        "overprovision": overprovision,
        "datasources": [],
        "clustered": True,
        "health": ["Healthy", ""]
    }

def volumeCreate(self, opq, name, size):
    log.debug("volumeCreate opq=%s name=%s size=%d" % (opq, name, size))
    vol_dir = os.path.join(opq, name)
    vol_path = os.path.join(vol_dir, name)
    os.makedirs(vol_dir, mode=0o755)
    open(vol_path, 'a').close()
    return vol_path

def dlm_fence_clear_by_id(node_id, scsi_id):
    n = int(node_id)
    bd = "/dev/" + scsi_id + "/sbd"
    log.debug("dlm_fence_clear_by_id: clearing node_id=%d, scsi_id=%s"
              % (n, scsi_id))
    # Clear both the fence-request and the fence-ack slot for this node
    block_write(bd, BLK_SIZE * 2 * n, MSG_OK)
    block_write(bd, BLK_SIZE * ((2 * n) + 1), MSG_OK)

def dlm_fence_no_args():
    log.debug("dlm_fence_no_args")
    for line in sys.stdin:
        log.debug("dlm_fence_no_args: %s" % line)
        if line.startswith("node="):
            node_id = int(line[5:]) % 4096
            dlm_fence_node(node_id)

def mount(dbg, dev_path):
    # Ensure corosync+dlm are configured and running
    inventory = xcp.environ.readInventory()
    session = XenAPI.xapi_local()
    session.xenapi.login_with_password("root", "")
    this_host = session.xenapi.host.get_by_uuid(
        inventory.get("INSTALLATION_UUID"))
    log.debug("%s: setting up corosync and dlm on this host" % dbg)
    session.xenapi.host.call_plugin(
        this_host, "gfs2setup", "gfs2Setup", {})

    mnt_path = os.path.abspath(mountpoint_root + dev_path)
    try:
        os.makedirs(mnt_path)
    except OSError as exc:
        if exc.errno != errno.EEXIST or not os.path.isdir(mnt_path):
            raise
    if not os.path.ismount(mnt_path):
        cmd = ["/usr/sbin/modprobe", "gfs2"]
        call(dbg, cmd)
        cmd = ["/usr/bin/mount", "-t", "gfs2", "-o",
               "noatime,nodiratime", dev_path, mnt_path]
        call(dbg, cmd)
    return mnt_path

def dlm_fence_daemon(node_id):
    n = int(node_id)
    log.debug("Starting dlm_fence_daemon on node_id=%d" % n)
    wd = os.open("/dev/watchdog", os.O_WRONLY)

    def dlm_fence_daemon_signal_handler(sig, frame):
        log.debug("dlm_fence_daemon_signal_handler")
        # Magic close: writing 'V' disarms the watchdog before close
        os.write(wd, "V")
        os.close(wd)
        log.debug("dlm_fence_daemon: exiting cleanly")
        exit(0)

    signal.signal(signal.SIGUSR1, dlm_fence_daemon_signal_handler)
    demonize()
    while True:
        f = util.lock_file("SSSS", DLMREF_LOCK, "r+")
        d = shelve.open(DLMREF)
        klist = d.keys()
        for key in klist:
            bd = "/dev/" + key + "/sbd"
            ret = block_read(bd, BLK_SIZE * 2 * n)
            if ret == MSG_OK:
                pass
            elif ret == MSG_FENCE:
                log.debug("dlm_fence_daemon: MSG_FENCE")
                log.debug("dlm_fence_daemon: Setting WD timeout to 1 second")
                s = struct.pack("i", 1)
                # 3221509894 is WDIOC_SETTIMEOUT (see the sketch below)
                fcntl.ioctl(wd, 3221509894, s)
                log.debug("dlm_fence_daemon: writing MSG_FENCE_ACK")
                ret = block_write(bd, BLK_SIZE * ((2 * n) + 1),
                                  MSG_FENCE_ACK)
                log.debug("dlm_fence_daemon: MSG_FENCE_ACK sent")
                # host will be fenced in 1 second
        d.close()
        util.unlock_file("SSSS", f)
        os.write(wd, "w")
        time.sleep(1)

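# Illustrative sketch (not part of the original module): where the magic
# ioctl number 3221509894 used above comes from. It is the Linux watchdog
# WDIOC_SETTIMEOUT request, i.e. _IOWR('W', 6, int) encoded by hand using
# the asm-generic ioctl bit layout (direction, size, type, number).
def _iowr(type_char, nr, size):
    ioc_write = 1
    ioc_read = 2
    return ((ioc_write | ioc_read) << 30) | (size << 16) | \
        (ord(type_char) << 8) | nr

assert _iowr('W', 6, 4) == 3221509894  # WDIOC_SETTIMEOUT for a 4-byte int
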
def volumeRename(self, opq, old_name, new_name):
    log.debug("volumeRename opq=%s old=%s new=%s"
              % (opq, old_name, new_name))
    os.rename(os.path.join(opq, old_name),
              os.path.join(opq, new_name))
    os.rename(os.path.join(opq, new_name, old_name),
              os.path.join(opq, new_name, new_name))
    return os.path.join(opq, new_name, new_name)

def getUniqueIdentifier(self, opq):
    log.debug("getUniqueIdentifier opq=%s" % opq)
    meta_path = os.path.join(opq, "meta.json")
    with open(meta_path, "r") as fd:
        meta = json.load(fd)
    value = meta["unique_id"]
    return value

def epc_close(dbg, uri, cb):
    log.debug("{}: Datapath.epc_close: uri == {}".format(dbg, uri))
    sr, key = _parse_uri(uri)
    opq = cb.volumeStartOperations(sr, 'w')
    meta_path = cb.volumeMetadataGetPath(opq)
    db = VHDMetabase(meta_path)
    try:
        with Lock(opq, 'gl', cb):
            with db.write_context():
                vdi = db.get_vdi_by_id(key)
                vol_path = cb.volumeGetPath(opq, str(vdi.vhd.id))
                if vdi.nonpersistent:
                    # truncate
                    VHDUtil.reset(dbg, vol_path)
                    db.update_vdi_nonpersistent(vdi.uuid, None)
    except:
        log.error(
            ("{}: Datapath.epc_close: failed to complete "
             "close, {}").format(dbg, sys.exc_info()[1])
        )
        raise
    finally:
        db.close()
    return None

def attach(self, dbg, uri, domain):
    # FIXME: add lvm activation code
    u = urlparse.urlparse(uri)
    (vgname, lvname, scsid) = self._getVgLvScsid(dbg, u.path)
    log.debug("%s Vg=%s Lv=%s Scsid=%s" % (dbg, vgname, lvname, scsid))
    vg = self._vgOpen(dbg, vgname, "r", scsid)
    lv = vg.lvFromName(lvname)
    lv.activate()
    vg.close()

    # Ask vhd-util for this VHD's parent, if any
    cmd = ["/usr/bin/vhd-util", "query", "-n", u.path, "-P"]
    output = call(dbg, cmd)
    log.debug("%s output=%s" % (dbg, output))
    output = output[:-1]  # drop trailing newline
    if output[-6:] == "parent":  # output ends "... has no parent"
        log.debug("No Parent")
    else:
        output = output.replace("--", "-")
        log.debug("%s" % output[-36:])  # last 36 chars are the parent UUID
        activation_file = ("/var/run/nonpersistent/" + vgname + "/" +
                           output[-36:])
        if not os.path.exists(activation_file):
            vg = self._vgOpen(dbg, vgname, "r", scsid)
            lv = vg.lvFromName(output[-36:])
            log.debug("Activating %s" % lv.getName())
            lv.activate()
            vg.close()
            open(activation_file, 'a').close()

    tap = tapdisk.create(dbg)
    tapdisk.save_tapdisk_metadata(dbg, u.path, tap)
    return {
        'domain_uuid': '0',
        'implementation': ['Tapdisk3', tap.block_device()],
    }

def call(dbg, cmd_args, error=True, simple=True, exp_rc=0):
    """[call dbg cmd_args] executes [cmd_args]
       if [error] and exit code != exp_rc, log and throws a BackendError
       if [simple], returns only stdout
    """
    log.debug("{}: Running cmd {}".format(dbg, cmd_args))
    p = subprocess.Popen(
        cmd_args,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
        close_fds=True
    )
    stdout, stderr = p.communicate()
    if error and p.returncode != exp_rc:
        log.error(
            "{}: {} exited with code {}: {}".format(
                dbg, ' '.join(cmd_args), p.returncode, stderr
            )
        )
        # TODO: FIXME: Remove dependency on Xapi.
        # raise xapi.InternalError("%s exited with non-zero code %d: %s"
        #                          % (" ".join(cmd_args), p.returncode,
        #                             stderr))
    if simple:
        return stdout
    return stdout, stderr, p.returncode

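# Illustrative usage sketch (not in the original module) of how callers
# drive call() above; the commands are placeholders chosen for the example.
out = call("dbg-id", ["/usr/bin/uname", "-r"])        # stdout only
out, err, rc = call("dbg-id", ["/usr/bin/false"],     # full triple, and
                    error=False, simple=False)        # no error logging
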
def _getVgLvScsid(self, dbg, path):
    log.debug("%s path=%s" % (dbg, path))
    parts = path.split("/")
    vgname = parts[2]
    lvname = parts[3]
    with open("/var/run/nonpersistent/" + vgname + "/scsid",
              "r") as text_file:
        scsid = text_file.read()
    return (vgname, lvname, scsid)

def dlm_fence_daemon_start(node_id):
    import subprocess
    args = ['/usr/libexec/xapi-storage-script/volume/'
            'org.xen.xapi.storage.gfs2/fence_tool.py',
            "dlm_fence_daemon", str(node_id)]
    dlm_fence_daemon = subprocess.Popen(args)
    log.debug("dlm_fence_daemon_start: node_id=%d" % node_id)
    with open("/var/run/sr-ref/dlm_fence_daemon.pickle", 'w+') as f:
        pickle.dump(dlm_fence_daemon, f)

def getPVName(dbg, sr):
    try:
        uri = getFromSRMetadata(dbg, sr, 'uri')
        dev_path = blkinfo.get_device_path(dbg, uri)
        cmd = ["readlink", "-f", dev_path]
        output = call(dbg, cmd)
        return output.rstrip()
    except Exception as e:
        log.debug("Exception raised in getting PV name: %s" % str(e))

def volumeTryLock(self, opq, name):
    try:
        log.debug("volumeTryLock opq=%s name=%s" % (opq, name))
        vol_path = os.path.join(opq, name)
        lock = open(vol_path, 'w+')
        fcntl.flock(lock, fcntl.LOCK_EX | fcntl.LOCK_NB)
        return lock
    except IOError as e:
        if e.errno in [errno.EACCES, errno.EAGAIN]:
            return None
        raise

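# Illustrative, self-contained sketch (not in the original module) of the
# non-blocking flock pattern volumeTryLock relies on: a second LOCK_NB
# attempt on a file whose lock is already held fails at once with EAGAIN.
# The /tmp path is only a placeholder for the demo.
import errno
import fcntl

holder = open("/tmp/flock-demo", "w+")
fcntl.flock(holder, fcntl.LOCK_EX | fcntl.LOCK_NB)    # acquired
contender = open("/tmp/flock-demo", "w+")
try:
    fcntl.flock(contender, fcntl.LOCK_EX | fcntl.LOCK_NB)
except IOError as e:
    assert e.errno in (errno.EACCES, errno.EAGAIN)    # lock is held
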
def getFromSRMetadata(dbg, sr, key):
    value = None
    u = urlparse.urlparse(sr)
    if u.scheme == 'file':
        # Get the device path
        metapath = "%s/meta.json" % (u.path)
        log.debug("%s: metapath = %s" % (dbg, metapath))
        if os.path.exists(metapath):
            with open(metapath, "r") as fd:
                meta = json.load(fd)
                value = meta[key]
    log.debug("%s: SR metadata says '%s' -> '%s'" % (dbg, key, value))
    return value

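# Illustrative sketch (not in the original module) of the kind of
# meta.json that getFromSRMetadata reads. The exact key set is an
# assumption; only 'uri' and 'unique_id' are referenced elsewhere in
# this code, and the values below are placeholders.
import json

example_meta = {
    "uri": "iscsi://10.0.0.1/iqn.2001-05.com.example:target/1",
    "unique_id": "3600a098038303053",
}
with open("/tmp/meta.json", "w") as fd:
    json.dump(example_meta, fd)
# getFromSRMetadata(dbg, "file:///tmp", "unique_id") would then return
# the id stored above.
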
def detach(self, dbg, sr):
    import shelve

    # Get the iSCSI uri from the SR metadata
    uri = getFromSRMetadata(dbg, sr, 'uri')

    # Get the unique_id from the SR metadata
    unique_id = getFromSRMetadata(dbg, sr, 'unique_id')

    # stop GC
    try:
        pass
        # VHDCoalesce.stop_gc(dbg, "gfs2", sr)
    except:
        log.debug("GC already stopped")

    # Unmount the FS
    mnt_path = urlparse.urlparse(sr).path
    umount(dbg, mnt_path)

    dlmref = os.path.join(DLM_REFDIR, "dlmref")
    f = util.lock_file(dbg, dlmref + ".lock", "r+")
    d = shelve.open(dlmref)
    del d[str(unique_id)]
    klist = d.keys()
    current = len(klist)
    d.close()
    if current == 0:
        cmd = ["/usr/bin/systemctl", "stop", "dlm"]
        call(dbg, cmd)
        # stop fencing daemon
        node_id = get_node_id(dbg)
        log.debug("Calling dlm_fence_daemon_stop: node_id=%d" % node_id)
        fence_tool.dlm_fence_daemon_stop(node_id)
    util.unlock_file(dbg, f)

    # deactivate gfs2 LV
    cmd = ["/usr/sbin/lvchange", "-an", unique_id + "/gfs2"]
    call(dbg, cmd)

    # Fixme: kill fencing daemon
    # deactivate sbd LV
    cmd = ["/usr/sbin/lvchange", "-an", unique_id + "/sbd"]
    call(dbg, cmd)

    # Unplug device if need be
    unplug_device(dbg, uri)

def call_plugin_on_host(dbg, host_name, plugin_name, plugin_function, args):
    log.debug("%s: calling plugin '%s' function '%s' with args %s on %s"
              % (dbg, plugin_name, plugin_function, args, host_name))
    session = XenAPI.xapi_local()
    try:
        session.xenapi.login_with_password('root', '')
    except:
        # ToDo: We ought to raise something else
        raise
    try:
        for host_ref in get_online_host_refs(dbg, session):
            log.debug("%s: host_ref %s - host_name %s"
                      % (dbg, session.xenapi.host.get_name_label(host_ref),
                         host_name))
            if session.xenapi.host.get_name_label(host_ref) == host_name:
                log.debug("%s: calling plugin '%s' function '%s' with "
                          "args %s on host %s - %s"
                          % (dbg, plugin_name, plugin_function, args,
                             host_ref, host_name))
                resulttext = session.xenapi.host.call_plugin(
                    host_ref, plugin_name, plugin_function, args)
                log.debug("%s: resulttext = %s" % (dbg, resulttext))
                if resulttext != "True":
                    # ToDo: We ought to raise something else
                    raise xapi.storage.api.volume.Unimplemented(
                        "Failed to get hostref %s to run %s.%s(%s)"
                        % (host_ref, plugin_name, plugin_function, args))
    except:
        # ToDo: We ought to raise something else
        raise
    finally:
        session.xenapi.session.logout()

def load_tapdisk_metadata(dbg, path):
    """Recover the tapdisk metadata for this VDI from host-local storage."""
    dirname = _metadata_dir(path)
    log.debug("%s: load_tapdisk_metadata: trying '%s'" % (dbg, dirname))
    filename = dirname + "/" + TD_PROC_METADATA_FILE
    if not os.path.exists(filename):
        # XXX throw a better exception
        raise xapi.storage.api.volume.Volume_does_not_exist(dirname)
    with open(filename, "r") as fd:
        meta = pickle.load(fd)
        tap = Tapdisk(meta['minor'], meta['pid'], meta['f'])
        tap.secondary = meta['secondary']
    return tap

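# Illustrative sketch (not in the original module) of what the matching
# writer could look like; a save_tapdisk_metadata helper is referenced
# elsewhere in this code, but its exact shape is unknown, so the name,
# attribute names, and key set below are assumptions inferred from the
# loader above.
def save_tapdisk_metadata_sketch(dbg, path, tap):
    dirname = _metadata_dir(path)
    if not os.path.exists(dirname):
        os.makedirs(dirname)
    with open(dirname + "/" + TD_PROC_METADATA_FILE, "w") as fd:
        pickle.dump({'minor': tap.minor,          # assumed attributes,
                     'pid': tap.pid,              # mirroring the Tapdisk
                     'f': tap.f,                  # constructor arguments
                     'secondary': tap.secondary}, fd)
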
def vg_stats(dbg, vg_name):
    try:
        cmd = ["/usr/sbin/vgs", "--noheadings", "--nosuffix",
               "--units", "b", vg_name]
        output = call(dbg, cmd)
        stats = {}
        text = output.split()
        size = long(text[5])
        freespace = long(text[6])
        utilisation = size - freespace
        stats['physical_size'] = size
        stats['physical_utilisation'] = utilisation
        stats['freespace'] = freespace
        return stats
    except Exception as e:
        log.debug("Error in getting vg stats %s, vgs output: %s"
                  % (str(e), output))

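# Illustrative note (not in the original module) on why vg_stats indexes
# tokens 5 and 6: with default columns, `vgs --noheadings --nosuffix
# --units b VG` prints one line whose whitespace-split fields are
#   VG  #PV  #LV  #SN  Attr  VSize  VFree
# so text[5] is the VG size and text[6] the free space, both in bytes.
sample = "  vg_demo   1   2   0   wz--n- 10737418240 5368709120"
fields = sample.split()
assert (fields[5], fields[6]) == ("10737418240", "5368709120")
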
def waitForDevice(dbg, keys):
    # Wait for new device(s) to appear
    cmd = ["/usr/sbin/udevadm", "settle"]
    call(dbg, cmd)

    # FIXME: For some reason, udevadm settle isn't sufficient
    # to ensure the device is present. Why not?
    for i in range(1, 10):
        time.sleep(1)
        if keys['scsiid'] is not None:
            try:
                os.stat(DEV_PATH_ROOT + keys['scsiid'])
                return
            except:
                log.debug("%s: Waiting for device to appear" % dbg)

def ls(self, dbg, sr):
    pv_name = getPVName(dbg, sr)
    vg_name = getVGName(dbg, sr)
    lv_name = "/dev/" + vg_name + "/gfs2"

    # refresh iscsi connection to reflect LUN's new size
    call(dbg, ["/usr/sbin/iscsiadm", "-m", "node", "-R"])

    # Whether or not the LUN was resized, go ahead and resize the PV:
    # if the LUN did grow, the PV size gets updated to match
    call(dbg, ["/usr/sbin/pvresize", pv_name, "--config",
               "global{metadata_read_only=0}"])

    # If the PV was expanded, the extra space shows up as free space in
    # the associated volume group; only then do we expand the gfs2 LV
    stats = vg_stats(dbg, vg_name)
    if stats['freespace'] > VG_FREE_SPACE_THRESHOLD:
        log.debug("Free space (%s) detected in VG, expanding gfs2 LV."
                  % str(stats['freespace']))
        opq = urlparse.urlparse(sr).path
        f = None
        try:
            gl = os.path.join(opq, "gl")
            f = util.lock_file(dbg, gl, "w+")

            # extend lv
            call(dbg, ["lvextend", "-l+100%FREE", lv_name, "--config",
                       "global{metadata_read_only=0}"])

            # inform the other nodes about the LUN resize
            inventory = xcp.environ.readInventory()
            session = XenAPI.xapi_local()
            session.xenapi.login_with_password("root", "")
            this_host = session.xenapi.host.get_by_uuid(
                inventory.get("INSTALLATION_UUID"))
            for host in session.xenapi.host.get_all():
                if host != this_host:
                    log.debug("%s: setup host %s"
                              % (dbg,
                                 session.xenapi.host.get_name_label(host)))
                    session.xenapi.host.call_plugin(
                        host, "gfs2setup", "refreshDM",
                        {'lv_name': lv_name,
                         'pv_dev': pv_name.split('/')[2]})

            # grow gfs2
            call(dbg, ["gfs2_grow",
                       mountpoint_root + "dev/" + vg_name + "/gfs2"])
        finally:
            if f:
                util.unlock_file(dbg, f)

def call(dbg, cmd_args, error=True, simple=True, expRc=0):
    log.debug("%s: Running cmd %s" % (dbg, cmd_args))
    p = subprocess.Popen(
        cmd_args,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
        close_fds=True)
    stdout, stderr = p.communicate()
    if error and p.returncode != expRc:
        log.error("%s: %s exited with code %d: %s"
                  % (dbg, " ".join(cmd_args), p.returncode, stderr))
        raise xapi.InternalError("%s exited with non-zero code %d: %s"
                                 % (" ".join(cmd_args), p.returncode,
                                    stderr))
    if simple:
        return stdout
    return stdout, stderr, p.returncode

def call(dbg, cmd_args, error=True, simple=True, expRc=0):
    log.debug('{}: Running cmd {}'.format(dbg, cmd_args))
    proc = subprocess.Popen(
        cmd_args,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
        close_fds=True)
    stdout, stderr = proc.communicate()
    if error and proc.returncode != expRc:
        log.error('{}: {} exited with code {}: {}'.format(
            dbg, " ".join(cmd_args), proc.returncode, stderr))
        raise xapi.InternalError(
            '{} exited with non-zero code {}: {}'.format(
                " ".join(cmd_args), proc.returncode, stderr))
    if simple:
        return stdout
    return stdout, stderr, proc.returncode

def stat(self, dbg, sr):
    # Get the filesystem size
    statvfs = os.statvfs(urlparse.urlparse(sr).path)
    psize = statvfs.f_blocks * statvfs.f_frsize
    fsize = statvfs.f_bfree * statvfs.f_frsize
    log.debug("%s: statvfs says psize = %Ld" % (dbg, psize))
    return {
        "sr": sr,
        "name": "SR Name",
        "description": "GFS2 SR",
        "total_space": psize,
        "free_space": fsize,
        "datasources": [],
        "clustered": True,
        "health": ["Healthy", ""]
    }

def volumeDestroy(self, opq, name):
    log.debug("volumeDestroy opq=%s name=%s" % (opq, name))
    vol_dir = os.path.join(opq, name)
    vol_path = os.path.join(vol_dir, name)
    try:
        os.unlink(vol_path)
    except OSError as exc:
        if exc.errno != errno.ENOENT:
            raise
    try:
        os.rmdir(vol_dir)
    except OSError as exc:
        if exc.errno != errno.ENOENT:
            raise

def stat(self, dbg, sr):
    uri = getFromSRMetadata(dbg, sr, 'uri')
    # Get the filesystem size
    statvfs = os.statvfs(getSRpath(dbg, uri))
    psize = statvfs.f_blocks * statvfs.f_frsize
    fsize = statvfs.f_bfree * statvfs.f_frsize
    log.debug("%s: statvfs says psize = %Ld" % (dbg, psize))
    return {
        "sr": sr,
        "name": "SR Name",
        "description": "GFS2 SR",
        "total_space": psize,
        "free_space": fsize,
        "datasources": [],
        "clustered": True,
        "health": ["Healthy", ""]
    }

def load_tapdisk_metadata(dbg, path):
    """Recover the tapdisk metadata for this VDI from host-local storage."""
    dirname = _metadata_dir(path)
    log.debug("%s: load_tapdisk_metadata: trying '%s'" % (dbg, dirname))
    filename = dirname + "/" + TD_PROC_METADATA_FILE
    # No need to check for file existence;
    # if file not there, IOError is raised
    # if not os.path.exists(filename):
    #     raise Exception('volume doesn\'t exist')
    #     # raise xapi.storage.api.volume.Volume_does_not_exist(dirname)
    with open(filename, "r") as fd:
        meta = cPickle.load(fd)
        tap = Tapdisk(meta['minor'], meta['pid'], meta['f'])
        tap.secondary = meta['secondary']
        tap.type = meta['type']
        tap.file_path = meta['file_path']
    return tap

def login(dbg, uri, keys):
    iqn_map = discoverIQN(dbg, keys)
    output = iqn_map[0]
    # FIXME: only take the first one returned.
    # This might not always be the one we want.
    log.debug("%s: output = %s" % (dbg, output))
    portal = output[0]
    # FIXME: error handling

    # Provide authentication details if necessary
    if keys['username'] is not None:
        set_chap_settings(dbg, portal, keys['target'],
                          keys['username'], keys['password'])

    # Lock refcount file before login
    if not os.path.exists(ISCSI_REFDIR):
        os.mkdir(ISCSI_REFDIR)
    filename = os.path.join(ISCSI_REFDIR, keys['iqn'])
    f = lock_file(dbg, filename, "a+")

    current_sessions = listSessions(dbg)
    log.debug("%s: current iSCSI sessions are %s" % (dbg, current_sessions))
    sessionid = findMatchingSession(dbg, portal, keys['iqn'],
                                    current_sessions)
    if sessionid:
        # If there's an existing session, rescan it
        # in case new LUNs have appeared in it
        log.debug("%s: rescanning session %d for %s on %s"
                  % (dbg, sessionid, keys['iqn'], keys['target']))
        rescanSession(dbg, sessionid)
    else:
        # Otherwise, perform a fresh login
        cmd = ["/usr/sbin/iscsiadm", "-m", "node", "-T", keys['iqn'],
               "--portal", portal, "-l"]
        output = call(dbg, cmd)
        log.debug("%s: output = %s" % (dbg, output))
        # FIXME: check for success

    # Increment refcount
    found = False
    for line in f.readlines():
        if line.find(uri) != -1:
            found = True
    if not found:
        f.write("%s\n" % uri)

    unlock_file(dbg, f)
    waitForDevice(dbg)

    # Return path to logged in target
    target_path = "/dev/iscsi/%s/%s" % (keys['iqn'], portal)
    return target_path

def volumeCreate(self, opq, name, size):
    log.debug("volumeCreate opq=%s name=%s size=%d" % (opq, name, size))
    vol_dir = os.path.join(opq, name)
    vol_path = os.path.join(vol_dir, name)
    try:
        os.makedirs(vol_dir, mode=0o755)
    except OSError as exc:
        if exc.errno != errno.EEXIST:
            raise
    try:
        open(vol_path, 'a').close()
    except OSError as exc:
        if exc.errno != errno.EEXIST:
            raise
    return vol_path

def create(self, dbg, sr, name, description, size, sharable):
    log.debug(
        "%s: xcpng.volume.Volume.create: SR: %s Name: %s Description: %s "
        "Size: %s, Sharable: %s"
        % (dbg, sr, name, description, size, sharable))

    vdi_uuid = str(uuid.uuid4())
    image_uuid = str(uuid.uuid4())
    vdi_uri = "%s/%s" % (sr, vdi_uuid)

    volume_meta = {
        KEY_TAG: vdi_uuid,
        VDI_UUID_TAG: vdi_uuid,
        IMAGE_UUID_TAG: image_uuid,
        TYPE_TAG: get_vdi_type_by_uri(dbg, vdi_uri),
        NAME_TAG: name,
        DESCRIPTION_TAG: description,
        READ_WRITE_TAG: True,
        VIRTUAL_SIZE_TAG: size,
        PHYSICAL_UTILISATION_TAG: 0,
        URI_TAG: [vdi_uri],
        SHARABLE_TAG: sharable,
        CUSTOM_KEYS_TAG: {}
    }

    try:
        self.MetadataHandler.update_vdi_meta(dbg, vdi_uri, volume_meta)
        volume_meta = self._create(dbg, sr, name, description, size,
                                   sharable, volume_meta)
    except Exception as e:
        log.error(
            "%s: xcpng.volume.Volume.create: Failed to create volume: "
            "key %s: SR: %s" % (dbg, vdi_uuid, sr))
        try:
            self.destroy(dbg, sr, vdi_uuid)
            self.MetadataHandler.remove_vdi_meta(dbg, vdi_uri)
        except:
            pass
        raise Exception(e)

    return volume_meta

def __reparent_children(opq, callbacks, journal_entries):
    """
    Reparent the children of a node after it has been coalesced
    """
    for child in journal_entries:
        child_path = callbacks.volumeGetPath(opq, str(child.id))
        with callbacks.db_context(opq) as db:
            child_volume = db.get_volume_by_id(child.id)

            # Find all leaves having child as an ancestor
            leaves = []
            log.debug('Find active leaves of {}'.format(child.id))
            find_active_leaves(child_volume, db, leaves)

        image_utils = ImageFormat.get_format(
            child_volume.image_type).image_utils

        # reparent child to grandparent
        log.debug("Reparenting {} to {}".format(child.id,
                                                child.new_parent_id))
        with callbacks.db_context(opq) as db:
            db.update_volume_parent(child.id, child.new_parent_id)
            new_parent_path = callbacks.volumeGetPath(
                opq, str(child.new_parent_id))
            image_utils.set_parent(GC, child_path, new_parent_path)
            db.remove_journal_entry(child.id)

            # Add leaves to database
            if leaves:
                # Refresh all leaves having child as an ancestor
                leaves_to_refresh = db.add_refresh_entries(
                    child.id, child.parent_id, child.new_parent_id, leaves)
                log.debug("Children {}: leaves: {} will be refreshed".format(
                    child.id, [str(x) for x in leaves_to_refresh]))

def close(self, dbg, key, f):
    # FIXME: this would not work for raw support
    # assert isinstance(f, image.Cow)
    log.debug("%s: closing image %s in qemu with sock %s"
              % (dbg, f, self.qmp_sock))

    try:
        self._qmp_connect(dbg)
        path = "{}/{}".format(var_run_prefix(), key)
        try:
            # Use a local name here rather than shadowing the image
            # parameter f
            with open(path, 'r') as vbd_file:
                line = vbd_file.readline().strip()
            os.unlink(path)
            args = {
                'type': 'qdisk',
                'domid': int(re.search(r'domain/(\d+)/', line).group(1)),
                'devid': int(re.search(r'vbd/(\d+)/', line).group(1))
            }
            self._qmp_command(dbg, "xen-unwatch-device", **args)
        except:
            log.debug('No VBD found')

        # Stop the NBD server
        self._qmp_command(dbg, "nbd-server-stop")

        # Remove the block device
        args = {"node-name": LEAF_NODE_NAME}
        self._qmp_command(dbg, "blockdev-del", **args)
        self._qmp_disconnect(dbg)
    except Exception as e:
        log.debug('{}: failed to close qemu: {}'.format(dbg, e))
        self._kill_qemu()

def attach(self, dbg, configuration):
    log.debug("%s: SR.attach: configuration: %s" % (dbg, configuration))
    uri = "rbd+%s+%s://%s/%s" % (
        configuration['image-format'],
        configuration['datapath'],
        configuration['cluster'],
        configuration['sr_uuid'])
    ceph_cluster = ceph_utils.connect(dbg, uri)
    # sr_uuid = utils.get_sr_uuid_by_uri(dbg, uri)
    log.debug("%s: SR.attach: sr_uuid: %s uri: %s"
              % (dbg, configuration['sr_uuid'], uri))

    if not ceph_cluster.pool_exists(utils.get_pool_name_by_uri(dbg, uri)):
        raise Sr_not_attached(configuration['sr_uuid'])

    # Create pool metadata image if it doesn't exist
    log.debug("%s: SR.attach: name: %s/%s"
              % (dbg, utils.get_pool_name_by_uri(dbg, uri),
                 utils.SR_METADATA_IMAGE_NAME))
    if not rbd_utils.if_image_exist(
            dbg, ceph_cluster,
            '%s/%s' % (utils.get_pool_name_by_uri(dbg, uri),
                       utils.SR_METADATA_IMAGE_NAME)):
        rbd_utils.create(
            dbg, ceph_cluster,
            '%s/%s' % (utils.get_pool_name_by_uri(dbg, uri),
                       utils.SR_METADATA_IMAGE_NAME),
            0)

    ceph_utils.disconnect(dbg, ceph_cluster)
    return uri

def updatePoolMetadata(dbg, cluster, _pool_, metadata):
    log.debug(
        "%s: rbd_utils.updatePoolMeta: Cluster ID: %s Name: %s Metadata: %s"
        % (dbg, cluster.get_fsid(), _pool_, metadata))
    _image_ = utils.SR_METADATA_IMAGE_NAME
    ioctx = cluster.open_ioctx(_pool_)
    try:
        image = rbd.Image(ioctx, _image_)
    except Exception:
        create(dbg, cluster, "%s/%s" % (_pool_, _image_), 0)
        image = rbd.Image(ioctx, _image_)
    try:
        for tag, value in metadata.iteritems():
            if value is None:
                log.debug(
                    "%s: rbd_utils.updatePoolMeta: tag: %s remove value"
                    % (dbg, tag))
                image.metadata_remove(str(tag))
            else:
                log.debug(
                    "%s: rbd_utils.updatePoolMeta: tag: %s set value: %s"
                    % (dbg, tag, value))
                image.metadata_set(str(tag), str(value))
    finally:
        image.close()
        ioctx.close()

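# Illustrative usage sketch (not in the original module): a value of None
# deletes that tag from the pool metadata image, anything else is stored
# as a string. `cluster` stands for an already-opened rados handle, and
# the pool name and tags below are placeholders.
updatePoolMetadata("dbg-id", cluster, "rbd_pool",
                   {"sr_name": "my-sr",      # set/overwrite this tag
                    "stale_tag": None})      # remove this tag
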
def __update(self, dbg, uuid, table_name, meta):
    log.debug(
        "%s: xcpng.meta.MetadataHandler.__update: uuid: %s table_name: %s "
        "meta: %s" % (dbg, uuid, table_name, meta))

    if table_name == 'sr':
        uuid_tag = SR_UUID_TAG
    elif table_name == 'vdis':
        uuid_tag = VDI_UUID_TAG
    else:
        raise Exception('Incorrect table name')

    table = self.db.table(table_name)
    try:
        if table.contains(Query()[uuid_tag] == uuid):
            for tag, value in meta.iteritems():
                if value is None:
                    log.debug(
                        "%s: xcpng.meta.MetadataHandler.__update: "
                        "tag: %s remove value" % (dbg, tag))
                    table.update(delete(tag), Query()[uuid_tag] == uuid)
                else:
                    log.debug(
                        "%s: xcpng.meta.MetadataHandler.__update: "
                        "tag: %s set value: %s" % (dbg, tag, value))
                    table.update({tag: value}, Query()[uuid_tag] == uuid)
        else:
            table.insert(meta)
        self.__updated = True
    except Exception as e:
        log.error(
            "%s: xcpng.meta.MetadataHandler.__update: "
            "Failed to update metadata" % dbg)
        raise Exception(e)

def open(self, dbg):
    log.debug("%s: xcpng.qemudisk.Qemudisk.open: vdi_uuid %s pid %d "
              "qmp_sock %s" % (dbg, self.vdi_uuid, self.pid, self.qmp_sock))
    log.debug("%s: xcpng.qemudisk.Qemudisk.open: args: %s"
              % (dbg, self.open_args))

    _qmp_ = qmp.QEMUMonitorProtocol(self.qmp_sock)
    try:
        _qmp_.connect()
        _qmp_.command("blockdev-add", **self.open_args)
        # Start an NBD server exposing this blockdev
        _qmp_.command("nbd-server-start",
                      addr={'type': 'unix',
                            'data': {'path': self.nbd_sock}})
        _qmp_.command("nbd-server-add", device=LEAF_NODE_NAME,
                      writable=True)
        log.debug("%s: xcpng.qemudisk.Qemudisk.open: Image opened: %s"
                  % (dbg, self.open_args))
    except Exception as e:
        log.error("%s: xcpng.qemudisk.Qemudisk.open: Failed to open image "
                  "in qemu_dp instance: uuid: %s pid %s"
                  % (dbg, self.vdi_uuid, self.pid))
        try:
            _qmp_.close()
        except:
            pass
        raise Exception(e)

def create(dbg, qemudisk, uri, img_qemu_uri):
    log.debug("%s: xcpng.qemudisk.create: uri: %s" % (dbg, uri))
    vdi_uuid = utils.get_vdi_uuid_by_uri(dbg, uri)
    sr_uuid = utils.get_sr_uuid_by_uri(dbg, uri)
    vdi_type = utils.get_vdi_type_by_uri(dbg, uri)
    if vdi_type not in IMAGE_TYPES:
        raise Exception('Incorrect VDI type')

    utils.mkdir_p(QEMU_DP_SOCKET_DIR, 0o0700)

    nbd_sock = QEMU_DP_SOCKET_DIR + "/qemu-nbd.{}".format(vdi_uuid)
    qmp_sock = QEMU_DP_SOCKET_DIR + "/qmp_sock.{}".format(vdi_uuid)
    qmp_log = QEMU_DP_SOCKET_DIR + "/qmp_log.{}".format(vdi_uuid)
    log.debug("%s: xcpng.qemudisk.create: Spawning qemu process for VDI %s "
              "with qmp socket at %s" % (dbg, vdi_uuid, qmp_sock))

    cmd = [QEMU_DP, qmp_sock]
    try:
        log_fd = open(qmp_log, 'w+')
        p = subprocess.Popen(cmd, stdout=log_fd, stderr=log_fd)
    except Exception as e:
        log.error("%s: xcpng.qemudisk.create: Failed to create qemu_dp "
                  "instance: uri %s" % (dbg, uri))
        try:
            log_fd.close()
        except:
            pass
        raise Exception(e)

    log.debug("%s: xcpng.qemudisk.create: New qemu process has pid %d"
              % (dbg, p.pid))

    return qemudisk(dbg, sr_uuid, vdi_uuid, vdi_type, img_qemu_uri, p.pid,
                    qmp_sock, nbd_sock, qmp_log)

def updateImageMetadata(dbg, cluster, name, metadata):
    log.debug(
        "%s: rbd_utils.updateMetadata: Cluster ID: %s Name: %s Metadata: %s"
        % (dbg, cluster.get_fsid(), name, metadata))
    _pool_ = _getPoolName(name)
    _image_ = _getImageName(name)
    ioctx = cluster.open_ioctx(_pool_)
    image = rbd.Image(ioctx, _image_)
    try:
        for tag, value in metadata.iteritems():
            if value is None:
                log.debug(
                    "%s: rbd_utils.updateMetadata: tag: %s remove value"
                    % (dbg, tag))
                image.metadata_remove(str(tag))
            else:
                log.debug(
                    "%s: rbd_utils.updateMetadata: tag: %s set value: %s"
                    % (dbg, tag, value))
                image.metadata_set(str(tag), str(value))
    finally:
        image.close()
        ioctx.close()

def call_plugin_in_pool(dbg, plugin_name, plugin_function, args):
    log.debug("%s: calling plugin '%s' function '%s' with args %s in pool"
              % (dbg, plugin_name, plugin_function, args))
    session = XenAPI.xapi_local()
    try:
        session.xenapi.login_with_password('root', '')
    except:
        # ToDo: We ought to raise something else
        raise
    try:
        for host_ref in get_online_host_refs(dbg, session):
            log.debug("%s: calling plugin '%s' function '%s' with args %s "
                      "on host %s"
                      % (dbg, plugin_name, plugin_function, args, host_ref))
            resulttext = session.xenapi.host.call_plugin(
                host_ref, plugin_name, plugin_function, args)
            log.debug("%s: resulttext = %s" % (dbg, resulttext))
            if resulttext != "True":
                # ToDo: We ought to raise something else
                raise xapi.storage.api.volume.Unimplemented(
                    "Failed to get hostref %s to run %s.%s(%s)"
                    % (host_ref, plugin_name, plugin_function, args))
    except:
        # ToDo: We ought to raise something else
        raise
    finally:
        session.xenapi.session.logout()

def attach(self, dbg, configuration):
    uri = configuration['uri']
    log.debug('{}: SR.attach: config={}, uri={}'.format(
        dbg, configuration, uri))

    nfs_uri = urlparse.urlsplit(uri)
    if nfs_uri.scheme != 'nfs':
        raise ValueError('Incorrect URI scheme')

    nfs_server = '{0}:{1}'.format(nfs_uri.netloc, nfs_uri.path)

    sr_uuid = configuration['sr_uuid']
    mnt_path = self._mount_path(sr_uuid)
    sr_dir = os.path.join(mnt_path, sr_uuid)
    sr = urlparse.urlunsplit(('file', '', sr_dir, None, None))

    if os.path.exists(mnt_path) and os.path.ismount(mnt_path):
        log.debug("%s: SR.attach: uri=%s ALREADY ATTACHED" % (dbg, uri))
        return sr

    log.debug("%s: SR.attach: uri=%s NOT ATTACHED YET" % (dbg, uri))
    # Mount the file system
    mnt_path = self._mount(dbg, nfs_server, sr_uuid)

    if not os.path.exists(sr_dir) or not os.path.isdir(sr_dir):
        raise ValueError('SR directory doesn\'t exist')

    # Start GC for this host
    COWCoalesce.start_gc(dbg, 'nfs-ng', sr)

    return sr

def __remove(self, dbg, uuid, table_name):
    log.debug(
        "%s: xcpng.meta.MetadataHandler.__remove: uuid: %s table_name: %s"
        % (dbg, uuid, table_name))

    if table_name == 'sr':
        uuid_tag = SR_UUID_TAG
    elif table_name == 'vdis':
        uuid_tag = VDI_UUID_TAG
    else:
        raise Exception('Incorrect table name')

    table = self.db.table(table_name)
    try:
        table.remove(where(uuid_tag) == uuid)
        self.__updated = True
    except Exception as e:
        log.error(
            "%s: xcpng.meta.MetadataHandler.__remove: "
            "Failed to remove metadata" % dbg)
        raise Exception(e)

def login(dbg, target, iqn, usechap=False, username=None, password=None):
    cmd = ["/usr/sbin/iscsiadm", "-m", "discovery", "-t", "st", "-p",
           target]
    output = call(dbg, cmd).split('\n')[0]
    # FIXME: only take the first one returned.
    # This might not always be the one we want.
    log.debug("%s: output = %s" % (dbg, output))
    portal = output.split(' ')[0]
    # FIXME: error handling

    # Provide authentication details if necessary
    if usechap:
        cmd = [
            "/usr/sbin/iscsiadm", "-m", "node", "-T", iqn, "--portal",
            portal, "--op", "update", "-n", "node.session.auth.authmethod",
            "-v", "CHAP"
        ]
        output = call(dbg, cmd)
        log.debug("%s: output = %s" % (dbg, output))
        cmd = [
            "/usr/sbin/iscsiadm", "-m", "node", "-T", iqn, "--portal",
            portal, "--op", "update", "-n", "node.session.auth.username",
            "-v", username
        ]
        output = call(dbg, cmd)
        log.debug("%s: output = %s" % (dbg, output))
        cmd = [
            "/usr/sbin/iscsiadm", "-m", "node", "-T", iqn, "--portal",
            portal, "--op", "update", "-n", "node.session.auth.password",
            "-v", password
        ]
        output = call(dbg, cmd)
        log.debug("%s: output = %s" % (dbg, output))

    # Log in
    cmd = [
        "/usr/sbin/iscsiadm", "-m", "node", "-T", iqn, "--portal",
        portal, "-l"
    ]
    output = call(dbg, cmd)
    log.debug("%s: output = %s" % (dbg, output))

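# Illustrative usage sketch (not in the original module); the portal
# address, IQN, and CHAP credentials below are placeholders.
login("dbg-id", "10.0.0.1:3260",
      "iqn.2001-05.com.example:storage.target01",
      usechap=True, username="chapuser", password="chapsecret")
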
def _create(self, dbg, sr, name, description, size, sharable, image_meta):
    log.debug(
        "%s: xcpng.volume.RAWVolume._create: SR: %s Name: %s "
        "Description: %s Size: %s" % (dbg, sr, name, description, size))
    uri = image_meta[URI_TAG][0]
    try:
        self.VolOpsHendler.create(dbg, uri,
                                  self._get_full_vol_size(dbg, size))
    except Exception as e:
        log.error(
            "%s: xcpng.volume.RAWVolume._create: Failed to create volume: "
            "key %s: SR: %s" % (dbg, image_meta[VDI_UUID_TAG], sr))
        log.error(traceback.format_exc())
        try:
            self.VolOpsHendler.destroy(dbg, uri)
        except:
            pass
        raise Exception(e)
    return image_meta

def commit(self, dbg, top, base):
    log.debug(
        "%s: xcpng.qemudisk.Qemudisk.commit: vdi_uuid %s pid %d qmp_sock %s"
        % (dbg, self.vdi_uuid, self.pid, self.qmp_sock))

    _qmp_ = qmp.QEMUMonitorProtocol(self.qmp_sock)
    try:
        _qmp_.connect()
        # Commit
        args = {
            "job-id": "commit-{}".format(self.vdi_uuid),
            "device": LEAF_NODE_NAME,
            "top": top,
            "base": base
        }
        _qmp_.command('block-commit', **args)
        # Poll for up to ~5 seconds for the commit job to drain
        for i in range(50):
            res = _qmp_.command("query-block-jobs")
            if len(res) == 0:
                if self.img_uri == top:
                    args = {"device": "commit-{}".format(self.vdi_uuid)}
                    _qmp_.command('block-job-complete', **args)
                else:
                    break
            time.sleep(0.1)
        _qmp_.close()
    except Exception as e:
        log.error(
            "%s: xcpng.qemudisk.Qemudisk.commit: Failed to commit changes "
            "for image in qemu_dp instance: uuid: %s pid %s"
            % (dbg, self.vdi_uuid, self.pid))
        log.error(traceback.format_exc())
        try:
            _qmp_.close()
        except:
            pass
        raise Exception(e)

def resume(self, dbg):
    log.debug(
        "%s: xcpng.qemudisk.Qemudisk.resume: vdi_uuid %s pid %d qmp_sock %s"
        % (dbg, self.vdi_uuid, self.pid, self.qmp_sock))
    _qmp_ = qmp.QEMUMonitorProtocol(self.qmp_sock)
    try:
        _qmp_.connect()
        # Resume IO on blockdev
        args = {"device": LEAF_NODE_NAME}
        _qmp_.command("x-blockdev-resume", **args)
    except Exception as e:
        log.error(
            "%s: xcpng.qemudisk.Qemudisk.resume: Failed to resume IO for "
            "image in qemu_dp instance: uuid: %s pid %s"
            % (dbg, self.vdi_uuid, self.pid))
        log.error(traceback.format_exc())
        try:
            _qmp_.close()
        except:
            pass
        raise Exception(e)

def create(self, dbg, uri, configuration):
    log.debug(
        "%s: xcpng.librbd.sr.SROperations.create: uri: %s configuration %s"
        % (dbg, uri, configuration))

    if CEPH_CLUSTER_TAG not in configuration:
        raise Exception(
            'Failed to connect to CEPH cluster. '
            'Parameter \'cluster\' is not specified')

    cluster = ceph_cluster(dbg, configuration[CEPH_CLUSTER_TAG])
    try:
        cluster.connect()
        cluster.create_pool(get_sr_name_by_uri(dbg, uri))
    except Exception as e:
        log.debug(
            "%s: xcpng.librbd.sr.SROperations.create: Failed to create SR: "
            "uri: %s" % (dbg, uri))
        raise Exception(e)
    finally:
        cluster.shutdown()

def activate(cls, dbg, uri, domain, cb):
    this_host_label = cb.get_current_host()
    sr, key = cls.parse_uri(uri)
    with VolumeContext(cb, sr, 'w') as opq:
        with Lock(opq, 'gl', cb):
            with cb.db_context(opq) as db:
                vdi = db.get_vdi_by_id(key)
                # Raise storage error VDIInUse - 24
                if vdi.active_on:
                    raise util.create_storage_error(
                        "SR_BACKEND_FAILURE_24",
                        ["VDIInUse", "The VDI is currently in use"])
                vol_path = cb.volumeGetPath(opq, str(vdi.volume.id))
                img = cls._get_image_from_vdi(vdi, vol_path)
                if not vdi.sharable:
                    db.update_vdi_active_on(vdi.uuid, this_host_label)

            try:
                cls.activate_internal(dbg, opq, vdi, img, cb)
            except:
                log.debug('{}: activate_internal failed'.format(dbg))
                raise

def sr_import(self, dbg, uri, configuration):
    log.debug(
        "%s: xcpng.librbd.sr.SROperations.sr_import: uri: %s "
        "configuration %s" % (dbg, uri, configuration))
    cluster = ceph_cluster(dbg, get_cluster_name_by_uri(dbg, uri))
    pool_name = get_sr_name_by_uri(dbg, uri)
    try:
        cluster.connect()
        if not cluster.pool_exists(pool_name):
            raise Exception("CEPH pool %s doesn\'t exist" % pool_name)
    except Exception as e:
        log.debug(
            "%s: xcpng.librbd.sr.SROperations.sr_import: Failed to import "
            "SR: uri: %s" % (dbg, uri))
        log.error(traceback.format_exc())
        raise Exception(e)
    finally:
        cluster.shutdown()
    mkdir_p("%s/%s" % (SR_PATH_PREFIX, get_sr_uuid_by_uri(dbg, uri)))

def activate(self, dbg, uri, domain):
    log.debug("%s: xcpng.datapath.Datapath.activate: uri: %s domain: %s"
              % (dbg, uri, domain))
    # TODO: Check that VDI is not active on other host
    try:
        self._activate(dbg, uri, domain)
        image_meta = {ACTIVE_ON_TAG: get_current_host_uuid()}
        self.MetadataHandler.update_vdi_meta(dbg, uri, image_meta)
    except Exception as e:
        log.error(
            "%s: xcpng.datapath.Datapath.activate: Failed to activate "
            "datapath for volume: uri: %s" % (dbg, uri))
        log.error(traceback.format_exc())
        try:
            self._deactivate(dbg, uri, domain)
        except:
            pass
        raise Exception(e)

def _check_clone(vdi, db, callbacks, image_utils, is_snapshot):
    if vdi.sharable:
        # TODO: Report storage error
        raise NotImplementedError("Sharable VDIs cannot be cloned")
    if db.get_vdi_chain_height(vdi.uuid) >= \
            image_utils.get_max_chain_height():
        raise util.create_storage_error(
            "SR_BACKEND_FAILURE_109",
            ["The snapshot chain is too long", ""])
    if vdi.active_on:
        if not is_snapshot:
            raise util.create_storage_error(
                'SR_BACKEND_FAILURE_24',
                ['The VDI is currently in use', ''])
        current_host = callbacks.get_current_host()
        if vdi.active_on != current_host:
            log.debug("{} cannot snapshot a vdi already"
                      " active on {}".format(current_host, vdi.active_on))
            raise xapi.storage.api.v5.volume.Activated_on_another_host(
                vdi.active_on)

def close(self, dbg):
    log.debug(
        "%s: xcpng.qemudisk.Qemudisk.close: vdi_uuid %s pid %d qmp_sock %s"
        % (dbg, self.vdi_uuid, self.pid, self.qmp_sock))

    _qmp_ = qmp.QEMUMonitorProtocol(self.qmp_sock)
    try:
        _qmp_.connect()
        if platform.linux_distribution()[1] == '7.5.0':
            try:
                path = "{}/{}".format(utils.VAR_RUN_PREFIX, self.vdi_uuid)
                with open(path, 'r') as f:
                    line = f.readline().strip()
                utils.call(dbg, ["/usr/bin/xenstore-write", line, "5"])
                os.unlink(path)
            except Exception:
                log.debug(
                    "%s: xcpng.qemudisk.Qemudisk.close: There was no "
                    "xenstore setup" % dbg)
        elif platform.linux_distribution()[1] in ('7.6.0', '8.0.0',
                                                  '8.1.0', '8.2.0',
                                                  '8.2.1'):
            path = "{}/{}".format(utils.VAR_RUN_PREFIX, self.vdi_uuid)
            try:
                with open(path, 'r') as f:
                    line = f.readline().strip()
                os.unlink(path)
                args = {
                    'type': 'qdisk',
                    'domid': int(re.search(r'domain/(\d+)/',
                                           line).group(1)),
                    'devid': int(re.search(r'vbd/(\d+)/', line).group(1))
                }
                _qmp_.command("xen-unwatch-device", **args)
            except Exception:
                log.debug(
                    "%s: xcpng.qemudisk.Qemudisk.close: There was no "
                    "xenstore setup" % dbg)
        # Stop the NBD server
        _qmp_.command("nbd-server-stop")
        # Remove the block device
        args = {"node-name": LEAF_NODE_NAME}
        _qmp_.command("blockdev-del", **args)
    except Exception as e:
        log.error(
            "%s: xcpng.qemudisk.Qemudisk.close: Failed to close image in "
            "qemu_dp instance: uuid: %s pid %s"
            % (dbg, self.vdi_uuid, self.pid))
        log.error(traceback.format_exc())
        try:
            _qmp_.close()
        except:
            pass
        raise Exception(e)

def sr_import(self, dbg, uri, configuration):
    log.debug(
        "%s: xcpng.libsbd.sr.SROperations.sr_import: uri: %s "
        "configuration %s" % (dbg, uri, configuration))

    if 'bindnetaddr' not in configuration:
        raise Exception(
            'Failed to connect to Sheepdog cluster. '
            'Parameter \'bindnetaddr\' is not specified')
    elif 'mcastaddr' not in configuration:
        raise Exception(
            'Failed to connect to Sheepdog cluster. '
            'Parameter \'mcastaddr\' is not specified')
    elif 'mcastport' not in configuration:
        raise Exception(
            'Failed to connect to Sheepdog cluster. '
            'Parameter \'mcastport\' is not specified')

    sr_uuid = get_sr_uuid_by_uri(dbg, uri)
    create_chroot(dbg, sr_uuid)
    set_chroot(dbg, sr_uuid)
    write_corosync_conf(
        dbg, sr_uuid,
        gen_corosync_conf(dbg,
                          configuration['bindnetaddr'],
                          configuration['mcastaddr'],
                          configuration['mcastport']))
    start_sheepdog_gateway(dbg, get_sheep_port(dbg, sr_uuid), sr_uuid)
    mkdir_p("%s/%s" % (SR_PATH_PREFIX, get_sr_uuid_by_uri(dbg, uri)))
    call(dbg, [
        'ln', '-s',
        "%s/%s/var/lib/sheepdog/sock"
        % (CHROOT_BASE, get_sr_uuid_by_uri(dbg, uri)),
        "%s/%s/sock" % (SR_PATH_PREFIX, get_sr_uuid_by_uri(dbg, uri))
    ])

def lock(self, dbg, uri, timeout=10):
    log.debug("%s: xcpng.libsbd.meta.MetaDBOpeations.lock: uri: %s"
              % (dbg, uri))
    start_time = time()
    while True:
        try:
            dog_vdi_setattr(dbg,
                            get_sheep_port(dbg,
                                           get_sr_uuid_by_uri(dbg, uri)),
                            '__meta__', 'locked', 'locked',
                            exclusive=True)
            break
        except Exception as e:
            if time() - start_time >= timeout:
                log.debug(
                    "%s: xcpng.libsbd.meta.MetaDBOpeations.lock: Failed to "
                    "lock MetaDB for uri: %s" % (dbg, uri))
                raise Exception(e)

def _detach(self, dbg, uri, domain):
    log.debug(
        "%s: xcpng.datapath.QdiskDatapath._detach: uri: %s domain: %s"
        % (dbg, uri, domain))
    try:
        qemu_dp = self._load_qemu_dp(dbg, uri, domain)
        qemu_dp.quit(dbg)
        volume_meta = {
            QEMU_PID_TAG: None,
            QEMU_QMP_SOCK_TAG: None,
            QEMU_NBD_SOCK_TAG: None,
            QEMU_QMP_LOG_TAG: None,
            QEMU_IMAGE_URI_TAG: None
        }
        self.MetadataHandler.update_vdi_meta(dbg, uri, volume_meta)
    except Exception as e:
        log.error(
            "%s: xcpng.datapath.QdiskDatapath._detach: Failed to detach "
            "datapath for volume: uri: %s" % (dbg, uri))
        raise Exception(e)

def rbd_clone(dbg, cluster, parent_pool, parent, snapshot, clone_pool,
              clone):
    log.debug(
        "%s: xcpng.librbd.rbd_utils.rbd_clone: Cluster ID: %s "
        "Parent Pool: %s Parent: %s Snapshot: %s Clone Pool: %s Clone: %s"
        % (dbg, cluster.get_fsid(), parent_pool, parent, snapshot,
           clone_pool, clone))
    p_ioctx = cluster.open_ioctx(parent_pool)
    p_image = Image(p_ioctx, parent)
    c_ioctx = cluster.open_ioctx(clone_pool)
    rbd_inst = RBD()
    try:
        # RBD requires a snapshot to be protected before it can be cloned
        if not p_image.is_protected_snap(snapshot):
            p_image.protect_snap(snapshot)
        rbd_inst.clone(p_ioctx, parent, snapshot, c_ioctx, clone)
    except Exception as e:
        log.error(
            "%s: xcpng.librbd.rbd_utils.rbd_clone: Failed to make a clone: "
            "Cluster ID: %s Parent Pool: %s Parent: %s Snapshot: %s "
            "Clone Pool: %s Clone: %s"
            % (dbg, cluster.get_fsid(), parent_pool, parent, snapshot,
               clone_pool, clone))
        raise Exception(e)
    finally:
        p_ioctx.close()
        c_ioctx.close()

def create(self, dbg, uri, db, size=8388608):
    log.debug("%s: xcpng.librbd.meta.MetaDBOpeations.create: uri: %s"
              % (dbg, uri))
    cluster = ceph_cluster(dbg, get_cluster_name_by_uri(dbg, uri))
    try:
        cluster.connect()
        # default size = 8388608 = 8MiB
        rbd_create(dbg, cluster, get_sr_name_by_uri(dbg, uri),
                   '__meta__', size)
        rbd_create(dbg, cluster, get_sr_name_by_uri(dbg, uri),
                   '__lock__', 0)
        # Write the DB as a length-prefixed blob: a 4-byte big-endian
        # length followed by the payload itself
        length = len(db)
        rbd_write(dbg, cluster, get_sr_name_by_uri(dbg, uri), '__meta__',
                  pack("!I%ss" % length, length, db), 0, length + 4)
    except Exception as e:
        log.error(
            "%s: xcpng.librbd.meta.MetaDBOpeations.create: "
            "Failed to create MetaDB: uri: %s" % (dbg, uri))
        log.error(traceback.format_exc())
        raise Exception(e)
    finally:
        cluster.shutdown()

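# Illustrative, self-contained sketch (not in the original module) of the
# length-prefixed framing used above: 4-byte big-endian length, then the
# payload. A reader can recover the blob without knowing its size upfront;
# the payload below is a placeholder.
from struct import pack, unpack

payload = '{"vdis": []}'
blob = pack("!I%ss" % len(payload), len(payload), payload)
(length,) = unpack("!I", blob[:4])
assert blob[4:4 + length] == payload
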
def get_sheep_port(dbg, name):
    log.debug("%s: sbd_utils.get_sheep_port: name: %s" % (dbg, name))
    SRS_MAX = 32
    port_found = False

    # Load the name -> port-slot table, or start with an empty one
    if not path.isfile(SHEEP_PORTS_DB):
        ports = [None] * SRS_MAX
        port = 0
    else:
        with open(SHEEP_PORTS_DB, 'rb') as fd:
            ports = pickle.load(fd)
        port = None
        # Reuse the slot already allocated to this name, if any
        for _index_, _name_ in enumerate(ports):
            if _name_ is not None:
                if _name_ == name:
                    port = _index_
                    port_found = True
                    break
        # Otherwise allocate the first free slot
        if port is None:
            for _index_, _name_ in enumerate(ports):
                if _name_ is None:
                    port = _index_
                    ports[port] = name
                    break
        if port is None:
            raise Exception('Failed to get/allocate port for sheep daemon')

    if not port_found:
        with open(SHEEP_PORTS_DB, 'wb') as fd:
            pickle.dump(ports, fd)

    return START_SHEEP_PORT + port

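# Illustrative usage sketch (not in the original module): callers look up
# the gateway port for an SR by name; slot indices map onto ports starting
# at START_SHEEP_PORT (the DB path and base port are defined elsewhere in
# this codebase, and the SR name below is a placeholder).
sheep_port = get_sheep_port("dbg-id", "sr-aaaa")
start_sheepdog_gateway("dbg-id", sheep_port, "sr-aaaa")  # as in sr_import
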
def _update(dbg, uri, image_meta, use_image_prefix=True):
    log.debug(
        "%s: meta.RBDMetadataHandler._update_meta: uri: %s image_meta: %s"
        % (dbg, uri, image_meta))
    ceph_cluster = ceph_utils.connect(dbg, uri)

    if use_image_prefix:
        image_name = "%s%s/%s%s" % (
            utils.RBDPOOL_PREFIX,
            utils.get_sr_uuid_by_uri(dbg, uri),
            utils.VDI_PREFIXES[utils.get_vdi_type_by_uri(dbg, uri)],
            utils.get_vdi_uuid_by_uri(dbg, uri))
    else:
        image_name = "%s%s/%s" % (
            utils.RBDPOOL_PREFIX,
            utils.get_sr_uuid_by_uri(dbg, uri),
            utils.get_vdi_uuid_by_uri(dbg, uri))

    try:
        rbd_utils.updateImageMetadata(dbg, ceph_cluster, image_name,
                                      image_meta)
    except Exception:
        raise Volume_does_not_exist(uri)
    finally:
        ceph_utils.disconnect(dbg, ceph_cluster)

def __init__(self, dbg, sr_uuid, vdi_uuid, vdi_type, img_qemu_uri, pid,
             qmp_sock, nbd_sock, qmp_log):
    log.debug("%s: xcpng.qemudisk.Qemudisk.__init__: sr_uuid: %s "
              "vdi_uuid: %s vdi_type: %s image_uri: %s pid: %d "
              "qmp_sock: %s nbd_sock: %s qmp_log: %s"
              % (dbg, sr_uuid, vdi_uuid, vdi_type, img_qemu_uri, pid,
                 qmp_sock, nbd_sock, qmp_log))

    self.vdi_uuid = vdi_uuid
    self.sr_uuid = sr_uuid
    self.vdi_type = vdi_type
    self.pid = pid
    self.qmp_sock = qmp_sock
    self.nbd_sock = nbd_sock
    self.qmp_log = qmp_log
    self.img_uri = img_qemu_uri

    self.params = 'nbd:unix:%s' % self.nbd_sock
    qemu_params = '%s:%s:%s' % (self.vdi_uuid, LEAF_NODE_NAME,
                                self.qmp_sock)
    self.params = "hack|%s|%s" % (self.params, qemu_params)

    self.open_args = {'driver': self.vdi_type,
                      'cache': {'direct': True, 'no-flush': True},
                      # 'discard': 'unmap',
                      'file': self._parse_image_uri(dbg),
                      'node-name': LEAF_NODE_NAME}