def call(dbg, cmd_args, error=True, simple=True, exp_rc=0):
    """Run [cmd_args] under debug tag [dbg] and capture its output.

    If [error] is set and the exit code differs from [exp_rc], log the
    failure and raise a plain Exception (no xapi dependency, per the
    original TODO which had left the error silently swallowed).
    If [simple], return only stdout; otherwise (stdout, stderr, returncode).
    """
    log.debug("{}: Running cmd {}".format(dbg, cmd_args))
    p = subprocess.Popen(
        cmd_args,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
        close_fds=True
    )
    stdout, stderr = p.communicate()
    if error and p.returncode != exp_rc:
        log.error(
            "{}: {} exited with code {}: {}".format(
                dbg, ' '.join(cmd_args), p.returncode, stderr
            )
        )
        # Raise a generic Exception instead of xapi.InternalError so this
        # helper carries no Xapi dependency; previously nothing was raised
        # at all, which let failed commands pass unnoticed.
        raise Exception(
            "{} exited with non-zero code {}: {}".format(
                ' '.join(cmd_args), p.returncode, stderr
            )
        )
    if simple:
        return stdout
    return stdout, stderr, p.returncode
def relink(self, dbg, top, base):
    """Relink the backing chain of this image inside qemu-dp.

    Issues a 'relink-chain' QMP command for [top]/[base], then polls
    query-block-jobs (up to ~5s) until no block job remains.

    Fix: the polling call previously passed `dbg` as the QMP command
    name (`_qmp_.command(dbg, "query-block-jobs")`), unlike every other
    QMP invocation in this class.
    """
    log.debug(
        "%s: xcpng.qemudisk.Qemudisk.relink: vdi_uuid %s pid %d qmp_sock %s"
        % (dbg, self.vdi_uuid, self.pid, self.qmp_sock))
    _qmp_ = qmp.QEMUMonitorProtocol(self.qmp_sock)
    try:
        _qmp_.connect()
        # Commit
        args = {"job-id": "relink-{}".format(self.vdi_uuid),
                "device": LEAF_NODE_NAME,
                "top": top,
                "base": base,
                "backing-file": base}
        _qmp_.command('relink-chain', **args)
        # Poll until the relink job disappears (50 * 0.1s budget)
        for i in range(50):
            res = _qmp_.command("query-block-jobs")
            if len(res) == 0:
                break
            time.sleep(0.1)
        _qmp_.close()
    except Exception as e:
        log.error(
            "%s: xcpng.qemudisk.Qemudisk.relink: Failed to relink chain for image in qemu_dp instance: "
            "uuid: %s pid %s" % (dbg, self.vdi_uuid, self.pid))
        try:
            _qmp_.close()
        except:
            pass
        raise Exception(e)
def run_coalesce(sr_type, uri):
    """GC/Coalesce main loop: daemonize, then repeatedly clean garbage,
    recover journals and coalesce the best candidate pair."""
    util.daemonize()
    callbacks = util.get_sr_callbacks(sr_type)
    this_host = callbacks.get_current_host()
    while True:
        try:
            remove_garbage_volumes(uri, callbacks)
            recover_journal(uri, this_host, callbacks)
            worked = False
            child, parent = _find_best_non_leaf_coalesceable(uri, callbacks)
            if (child, parent) != (None, None):
                non_leaf_coalesce(child, parent, uri, callbacks)
                worked = True
            elif _find_best_leaf_coalesceable(this_host, uri, callbacks):
                worked = True
            # Back off longer when there was nothing to do
            time.sleep(10 if worked else 30)
        except Exception:
            import traceback
            log.error("Exception in GC main loop {}, {}".format(
                sys.exc_info(), traceback.format_exc()))
            raise
def epc_close(dbg, uri, cb):
    """Close an epoch on [uri]: a non-persistent VDI is reset (truncated)
    and marked persistent again, all under the global lock."""
    log.debug("{}: Datapath.epc_close: uri == {}".format(dbg, uri))
    sr, key = _parse_uri(uri)
    opq = cb.volumeStartOperations(sr, 'w')
    database = VHDMetabase(cb.volumeMetadataGetPath(opq))
    try:
        with Lock(opq, 'gl', cb):
            with database.write_context():
                vdi = database.get_vdi_by_id(key)
                volume_path = cb.volumeGetPath(opq, str(vdi.vhd.id))
                if vdi.nonpersistent:
                    # truncate
                    VHDUtil.reset(dbg, volume_path)
                    database.update_vdi_nonpersistent(vdi.uuid, None)
    except:
        log.error(
            ("{}: Datapath.epc_close: failed to complete "
             "close, {}").format(dbg, sys.exc_info()[1])
        )
        raise
    finally:
        database.close()
    return None
def unmap_vol(self, dbg, uri, chained=False):
    """Drop one reference to the mapped volume at [uri]; unlink the
    mapping when the last reference goes away. With [chained], recurse
    up the parent chain afterwards."""
    path = self.gen_vol_path(dbg, uri)
    if not exists(path):
        return
    log.debug(
        "%s: xcpng.datapath.DatapathOperations.unmap_vol: uri: %s"
        % (dbg, uri))
    try:
        meta = self.MetadataHandler.get_vdi_meta(dbg, uri)
        if REF_COUNT_TAG in meta:
            refs = meta[REF_COUNT_TAG]
            if refs == 1:
                # Last user: clear the tag and remove the mapping
                update = {REF_COUNT_TAG: None}
                call(dbg, ['unlink', path])
            else:
                update = {REF_COUNT_TAG: refs - 1}
            self.MetadataHandler.update_vdi_meta(dbg, uri, update)
        if chained and PARENT_URI_TAG in meta:
            self.unmap_vol(dbg, meta[PARENT_URI_TAG][0], chained)
    except Exception as e:
        log.error(
            "%s: xcpng.datapath.DatapathOperations.unmap_vol: Failed to unmap volume: uri: %s"
            % (dbg, uri))
        raise Exception(e)
def create(self, dbg, uri):
    """Create the metadata database for the SR addressed by [uri],
    delegating to the configured MetaDBOpsHandler."""
    log.debug("%s: xcpng.meta.MetadataHandler.create: uri: %s " % (dbg, uri))
    handler = self.MetaDBOpsHandler
    try:
        handler.create(dbg, uri)
    except Exception as e:
        log.error("%s: xcpng.meta.MetadataHandler.create: Failed to create metadata database: uri: %s " % (dbg, uri))
        raise Exception(e)
def resize(dbg, sr, key, new_size, cb):
    """Grow volume [key] in [sr] to [new_size] bytes.

    Sharable VDIs and shrinking are rejected; block-device-backed
    volumes cannot be resized. The DB vsize is cleared before the
    resize and set to the new value afterwards.
    """
    size_mib, vsize = _get_size_mib_and_vsize(new_size)
    with VolumeContext(cb, sr, 'w') as opq:
        image_format = None
        with cb.db_context(opq) as db:
            vdi = db.get_vdi_by_id(key)
            image_format = ImageFormat.get_format(vdi.image_type)
            if vdi.sharable:
                # TODO: Report Storage error here.
                raise NotImplementedError(
                    "Sharable VDIs cannot be resized")
            if new_size < vdi.volume.vsize:
                # Only growing is supported
                log.error("Volume cannot be shrunk from {} to {}".format(
                    vdi.volume.vsize, new_size))
                raise util.create_storage_error(
                    "SR_BACKEND_FAILURE_79",
                    ["VDI Invalid size", "shrinking not allowed"])
            # Clear vsize in a first transaction before touching the data;
            # presumably this marks the resize as in-flight so a crash is
            # detectable — TODO confirm against the journal/recovery code.
            db.update_volume_vsize(vdi.volume.id, None)
        with cb.db_context(opq) as db:
            cb.volumeResize(opq, str(vdi.volume.id), vsize)
            vol_path = cb.volumeGetPath(opq, str(vdi.volume.id))
            if (util.is_block_device(vol_path)):
                raise util.create_storage_error(
                    'SR_BACKEND_FAILURE_110',
                    ['Cannot resize block device', ''])
            image_format.image_utils.resize(dbg, vol_path, size_mib)
            # Resize complete: record the final vsize
            db.update_volume_vsize(vdi.volume.id, vsize)
def map_vol(self, dbg, uri, chained=False):
    """Symlink the block device for [uri] into place (first use) or bump
    its reference count. With [chained], parents are mapped first."""
    if not self.blkdev:
        return
    log.debug(
        "%s: xcpng.datapath.DatapathOperations.map_vol: uri: %s"
        % (dbg, uri))
    _blkdev_ = self.blkdev
    try:
        meta = self.MetadataHandler.get_vdi_meta(dbg, uri)
        # Map the chain bottom-up before this volume
        if chained is True and PARENT_URI_TAG in meta:
            self.map_vol(dbg, meta[PARENT_URI_TAG][0], chained)
        if REF_COUNT_TAG in meta:
            update = {REF_COUNT_TAG: meta[REF_COUNT_TAG] + 1}
        else:
            update = {REF_COUNT_TAG: 1}
            call(dbg, ['ln', '-s', _blkdev_, self.gen_vol_path(dbg, uri)])
        self.MetadataHandler.update_vdi_meta(dbg, uri, update)
    except Exception as e:
        log.error(
            "%s: xcpng.datapath.DatapathOperations.map_vol: Failed to map volume: uri: %s device: %s"
            % (dbg, uri, _blkdev_))
        raise Exception(e)
def destroy(self, dbg, uri):
    """Destroy the metadata database for the SR addressed by [uri],
    delegating to the configured MetaDBOpsHandler."""
    log.debug("%s: xcpng.meta.MetadataHandler.destroy: uri: %s " % (dbg, uri))
    handler = self.MetaDBOpsHandler
    try:
        handler.destroy(dbg, uri)
    except Exception as e:
        log.error("%s: xcpng.meta.MetadataHandler.destroy: Failed to destroy metadata database: uri: %s " % (dbg, uri))
        raise Exception(e)
def _create(self, dbg, sr, name, description, size, sharable, image_meta):
    """Create the qcow2 image for a new volume: base-class bookkeeping,
    then `qemu-img create` on the temporarily mapped volume."""
    log.debug("%s: xcpng.volume.QCOW2Volume._create: SR: %s Name: %s Description: %s Size: %s"
              % (dbg, sr, name, description, size))
    uri = image_meta[URI_TAG][0]
    datapath = get_vdi_datapath_by_uri(dbg, uri)
    ops = self.Datapathes[datapath].DatapathOpsHandler
    try:
        super(QCOW2Volume, self)._create(dbg, sr, name, description,
                                         size, sharable, image_meta)
        # qemu-img needs a filesystem path, so map the volume around the call
        ops.map_vol(dbg, uri)
        call(dbg, ["/usr/lib64/qemu-dp/bin/qemu-img",
                   "create",
                   "-f", image_meta[TYPE_TAG],
                   ops.gen_vol_path(dbg, uri),
                   str(size)])
        ops.unmap_vol(dbg, uri)
        return image_meta
    except Exception as e:
        log.error("%s: xcpng.volume.QCOW2Volume._create: Failed to create volume: key %s: SR: %s"
                  % (dbg, image_meta[VDI_UUID_TAG], sr))
        # Best-effort cleanup of the mapping
        try:
            ops.unmap_vol(dbg, uri)
        except:
            pass
        raise Exception(e)
def __get_meta(self, dbg, uuid, table_name):
    """Fetch the metadata row for [uuid] from table [table_name]
    ('sr' or 'vdis'); raises on unknown table or missing row."""
    log.debug(
        "%s: xcpng.meta.MetadataHandler.__get_meta: uuid: %s table_name: %s"
        % (dbg, uuid, table_name))
    tag_by_table = {'sr': SR_UUID_TAG, 'vdis': VDI_UUID_TAG}
    if table_name not in tag_by_table:
        raise Exception('Incorrect table name')
    uuid_tag = tag_by_table[table_name]
    table = self.db.table(table_name)
    try:
        # presumably this fixed UUID is a sentinel meaning "the single SR
        # row regardless of its stored uuid" — TODO confirm with callers
        if uuid_tag == SR_UUID_TAG and uuid == '12345678-1234-1234-1234-123456789012':
            return table.all()[0]
        return table.search(where(uuid_tag) == uuid)[0]
    except Exception as e:
        log.error(
            "%s: xcpng.meta.MetadataHandler.__get_meta: Failed to get metadata"
            % dbg)
        log.error(traceback.format_exc())
        raise Exception(e)
def unlock(self, dbg, uri):
    """Release the metadata-DB lock for the SR addressed by [uri],
    delegating to the configured MetaDBOpsHandler."""
    log.debug("%s: xcpng.meta.MetadataHandler.unlock: uri: %s " % (dbg, uri))
    handler = self.MetaDBOpsHandler
    try:
        handler.unlock(dbg, uri)
    except Exception as e:
        log.error("%s: xcpng.meta.MetadataHandler.unlock: Failed to unlock metadata DB" % dbg)
        raise Exception(e)
def load(self, dbg, uri):
    """Read the serialized MetaDB blob from the '__meta__' rbd object:
    a 4-byte big-endian length prefix followed by the payload."""
    log.debug("%s: xcpng.libsbd.meta.MetaDBOpeations.load: uri: %s" % (dbg, uri))
    cluster = ceph_cluster(dbg, get_cluster_name_by_uri(dbg, uri))
    try:
        cluster.connect()
        sr_name = get_sr_name_by_uri(dbg, uri)
        # First 4 bytes give the payload length
        (length,) = unpack(
            '!I', rbd_read(dbg, cluster, sr_name, '__meta__', 0, 4))
        (data,) = unpack(
            '!%ss' % length,
            rbd_read(dbg, cluster, sr_name, '__meta__', 4, length))
        return data
    except Exception as e:
        log.error(
            "%s: xcpng.librbd.meta.MetaDBOpeations.load: Failed to load MetaDB: uri: %s"
            % (dbg, uri))
        log.error(traceback.format_exc())
        raise Exception(e)
    finally:
        cluster.shutdown()
def __update(self, dbg, uuid, table_name, meta):
    """Upsert metadata for [uuid] in [table_name] ('sr' or 'vdis').

    For an existing row, each tag in [meta] whose value is None is
    deleted and every other tag is set; a missing row is inserted
    wholesale. Marks the in-memory DB dirty via self.__updated.

    Fix: uses dict.items() instead of the Python-2-only iteritems(),
    which is equivalent here and keeps the code 2/3 compatible.
    """
    log.debug("%s: xcpng.meta.MetadataHandler.__update: uuid: %s table_name: %s meta: %s"
              % (dbg, uuid, table_name, meta))
    if table_name == 'sr':
        uuid_tag = SR_UUID_TAG
    elif table_name == 'vdis':
        uuid_tag = VDI_UUID_TAG
    else:
        raise Exception('Incorrect table name')
    table = self.db.table(table_name)
    try:
        if table.contains(Query()[uuid_tag] == uuid):
            for tag, value in meta.items():
                if value is None:
                    # None means "remove this tag from the row"
                    log.debug("%s: xcpng.meta.MetadataHandler.__update: tag: %s remove value" % (dbg, tag))
                    table.update(delete(tag), Query()[uuid_tag] == uuid)
                else:
                    log.debug("%s: xcpng.meta.MetadataHandler.__update: tag: %s set value: %s" % (dbg, tag, value))
                    table.update({tag: value}, Query()[uuid_tag] == uuid)
        else:
            table.insert(meta)
        self.__updated = True
    except Exception as e:
        log.error("%s: xcpng.meta.MetadataHandler._update: Failed to update metadata" % dbg)
        raise Exception(e)
def find_coalesceable_pairs(self, dbg, sr):
    """Breadth-first walk of the VDI tree returning (parent, child)
    pairs where the parent has exactly one child — i.e. candidates
    for coalescing."""
    log.debug("%s: xcpng.meta.MetadataHandler.find_coalesceable pairs: sr: %s" % (dbg, sr))
    # Lazily load the metadata DB on first use
    if self.__loaded is False:
        self.__load(dbg, sr)
    pairs = []
    table = self.db.table('vdis')
    try:
        # Roots: VDI rows with no parent URI recorded
        roots = table.search(~ (where(PARENT_URI_TAG).exists()))
        while len(roots) != 0:
            _roots_ = []
            for root in roots:
                # NOTE(review): children are found by comparing
                # PARENT_URI_TAG against the parent's KEY_TAG; elsewhere
                # PARENT_URI_TAG holds a list of URIs — verify the stored
                # format against the writers of PARENT_URI_TAG.
                children = table.search(where(PARENT_URI_TAG) == root[KEY_TAG])
                if len(children) == 1:
                    # Exactly one child -> coalesceable pair
                    pairs.append((root, children[0]))
                elif len(children) > 1:
                    # Branch point: keep descending into each child
                    _roots_.extend(children)
            roots = _roots_
        return pairs
    except Exception as e:
        log.error("%s: xcpng.meta.MetadataHandler.find_coalesceable_pairs: Failed to find "
                  "coalesceable pairs for sr: %s " % (dbg, sr))
        raise Exception(e)
def _resize(self, dbg, sr, key, new_size):
    """Resize the qcow2 image with `qemu-img resize` after delegating
    bookkeeping to the base class."""
    log.debug("%s: xcpng.volume.QCOW2Volume._resize: SR: %s Key: %s New_size: %s"
              % (dbg, sr, key, new_size))
    uri = "%s/%s" % (sr, key)
    datapath = get_vdi_datapath_by_uri(dbg, uri)
    ops = self.Datapathes[datapath].DatapathOpsHandler
    try:
        super(QCOW2Volume, self)._resize(dbg, sr, key, new_size)
        # qemu-img needs a filesystem path, so map the volume around the call
        ops.map_vol(dbg, uri)
        call(dbg, ["/usr/lib64/qemu-dp/bin/qemu-img",
                   "resize",
                   ops.gen_vol_path(dbg, uri),
                   str(new_size)])
        ops.unmap_vol(dbg, uri)
    except Exception as e:
        log.error("%s: xcpng.volume.QCOW2Volume._resize: Failed to resize volume: key %s: SR: %s"
                  % (dbg, key, sr))
        # Best-effort cleanup of the mapping
        try:
            ops.unmap_vol(dbg, uri)
        except:
            pass
        raise Exception(e)
def open(self, dbg):
    """Add this image as a blockdev in qemu-dp and export it over NBD
    (unix-socket server + writable export of the leaf node)."""
    log.debug("%s: xcpng.qemudisk.Qemudisk.open: vdi_uuid %s pid %d qmp_sock %s"
              % (dbg, self.vdi_uuid, self.pid, self.qmp_sock))
    log.debug("%s: xcpng.qemudisk.Qemudisk.open: args: %s"
              % (dbg, self.open_args))
    conn = qmp.QEMUMonitorProtocol(self.qmp_sock)
    try:
        conn.connect()
        conn.command("blockdev-add", **self.open_args)
        # Start an NBD server exposing this blockdev
        conn.command("nbd-server-start",
                     addr={'type': 'unix',
                           'data': {'path': self.nbd_sock}})
        conn.command("nbd-server-add",
                     device=LEAF_NODE_NAME, writable=True)
        log.debug("%s: xcpng.qemudisk.Qemudisk.open: Image opened: %s"
                  % (dbg, self.open_args))
    except Exception as e:
        log.error("%s: xcpng.qemudisk.Qemudisk.open: Failed to open image in qemu_dp instance: uuid: %s pid %s"
                  % (dbg, self.vdi_uuid, self.pid))
        try:
            conn.close()
        except:
            pass
        raise Exception(e)
def attach(self, dbg, configuration):
    """Attach the SR described by [configuration]: build its URI, create
    the mountpoint and import it; returns the SR URI."""
    log.debug("%s: xcpng.sr.SR.attach: configuration: %s" % (dbg, configuration))
    # Scheme: "<type>[+<image-format>[+<datapath>]]://"
    if IMAGE_FORMAT_TAG in configuration:
        uri = "%s+%s" % (self.sr_type, configuration[IMAGE_FORMAT_TAG])
        if DATAPATH_TAG in configuration:
            uri = "%s+%s://" % (uri, configuration[DATAPATH_TAG])
    else:
        uri = "%s://" % self.sr_type
    uri = self.SROpsHendler.extend_uri(dbg, uri, configuration)
    if SR_UUID_TAG in configuration:
        uri = "%s/%s" % (uri, configuration[SR_UUID_TAG])
    log.debug("%s: xcpng.sr.SR.attach: uri: %s" % (dbg, uri))
    configuration['mountpoint'] = "%s/%s" % (SR_PATH_PREFIX,
                                             get_sr_uuid_by_uri(dbg, uri))
    try:
        call(dbg, ['mkdir', '-p', configuration['mountpoint']])
        self.SROpsHendler.sr_import(dbg, uri, configuration)
    except Exception as e:
        log.error(
            "%s: xcpng.sr.SR.attach: Failed to attach SR - sr_uuid: %s"
            % (dbg, get_sr_uuid_by_uri(dbg, uri)))
        # Best-effort rollback of the partial attach
        try:
            self.SROpsHendler.sr_export(dbg, uri)
            call(dbg, ['rm', '-rf', configuration['mountpoint']])
        except:
            pass
        raise Exception(e)
    return uri
def create(dbg, qemudisk, uri, img_qemu_uri):
    """Spawn a qemu-dp process for the VDI at [uri] and return a
    [qemudisk] handle describing it.

    Sets up per-VDI qmp/nbd socket paths and a log file under
    QEMU_DP_SOCKET_DIR. Raises on unsupported VDI type or spawn failure.

    Fix: the parent's copy of the log file descriptor was never closed
    on the success path (the child holds its own dup after Popen), which
    leaked one fd per spawned qemu-dp instance.
    """
    log.debug("%s: xcpng.qemudisk.create: uri: %s " % (dbg, uri))
    vdi_uuid = utils.get_vdi_uuid_by_uri(dbg, uri)
    sr_uuid = utils.get_sr_uuid_by_uri(dbg, uri)
    vdi_type = utils.get_vdi_type_by_uri(dbg, uri)
    if vdi_type not in IMAGE_TYPES:
        raise Exception('Incorrect VDI type')
    utils.mkdir_p(QEMU_DP_SOCKET_DIR, 0o0700)
    nbd_sock = QEMU_DP_SOCKET_DIR + "/qemu-nbd.{}".format(vdi_uuid)
    qmp_sock = QEMU_DP_SOCKET_DIR + "/qmp_sock.{}".format(vdi_uuid)
    qmp_log = QEMU_DP_SOCKET_DIR + "/qmp_log.{}".format(vdi_uuid)
    log.debug("%s: xcpng.qemudisk.create: Spawning qemu process for VDI %s with qmp socket at %s"
              % (dbg, vdi_uuid, qmp_sock))
    cmd = [QEMU_DP, qmp_sock]
    log_fd = None
    try:
        log_fd = open(qmp_log, 'w+')
        p = subprocess.Popen(cmd, stdout=log_fd, stderr=log_fd)
    except Exception as e:
        log.error("%s: xcpng.qemudisk.create: Failed to create qemu_dp instance: uri %s"
                  % (dbg, uri))
        try:
            if log_fd is not None:
                log_fd.close()
        except:
            pass
        raise Exception(e)
    # The child keeps its own dup of the fd; close the parent's copy.
    log_fd.close()
    log.debug("%s: xcpng.qemudisk.create: New qemu process has pid %d" % (dbg, p.pid))
    return qemudisk(dbg, sr_uuid, vdi_uuid, vdi_type, img_qemu_uri,
                    p.pid, qmp_sock, nbd_sock, qmp_log)
def clone(self, dbg, sr, key, mode):
    """Clone/snapshot volume [key] in [sr].

    If the origin is itself a snapshot, the clone is based on its
    parent; refuses when the base volume is active on another host.

    Fix: the error log said 'xcpng.volume.Volume.set' — corrected to
    name this method, matching the logging pattern of the rest of the
    class.
    """
    log.debug("%s: xcpng.volume.Volume.clone: SR: %s Key: %s Mode: %s"
              % (dbg, sr, key, mode))
    orig_uri = "%s/%s" % (sr, key)
    try:
        orig_meta = self.MetadataHandler.get_vdi_meta(dbg, orig_uri)
        if SNAPSHOT_OF_TAG in orig_meta[CUSTOM_KEYS_TAG]:
            # Cloning a snapshot: base the clone on the snapshot's parent
            base_uri = orig_meta[PARENT_URI_TAG][0]
            base_meta = self.MetadataHandler.get_vdi_meta(dbg, base_uri)
        else:
            base_meta = deepcopy(orig_meta)
        if ACTIVE_ON_TAG in base_meta:
            current_host = get_current_host_uuid()
            if base_meta[ACTIVE_ON_TAG] != current_host:
                log.debug("%s: librbd.Volume.clone: SR: %s Key: %s Can not snapshot on %s as VDI already active on %s"
                          % (dbg, sr, base_meta[VDI_UUID_TAG],
                             current_host, base_meta[ACTIVE_ON_TAG]))
                raise Activated_on_another_host(base_meta[ACTIVE_ON_TAG])
        return self._clone(dbg, sr, key, mode, base_meta)
    except Exception as e:
        log.error("%s: xcpng.volume.Volume.clone: Failed to clone volume: key %s: SR: %s"
                  % (dbg, key, sr))
        raise Exception(e)
def snap(self, dbg, snap_uri):
    """Overlay a fresh qcow2 node on top of the leaf via QMP
    blockdev-add + blockdev-snapshot (qcow2 VDIs only)."""
    log.debug("%s: xcpng.qemudisk.Qemudisk.snap: vdi_uuid %s pid %d qmp_sock %s snap_uri %s"
              % (dbg, self.vdi_uuid, self.pid, self.qmp_sock, snap_uri))
    if self.vdi_type != 'qcow2':
        raise Exception('Incorrect VDI type')
    conn = qmp.QEMUMonitorProtocol(self.qmp_sock)
    try:
        conn.connect()
        add_args = {'driver': 'qcow2',
                    'cache': {'direct': True, 'no-flush': True},
                    # 'discard': 'unmap',
                    'file': self._parse_image_uri(dbg),
                    'node-name': SNAP_NODE_NAME,
                    'backing': ''}
        conn.command('blockdev-add', **add_args)
        snap_args = {'node': LEAF_NODE_NAME,
                     'overlay': SNAP_NODE_NAME}
        conn.command('blockdev-snapshot', **snap_args)
        conn.close()
    except Exception as e:
        log.error("%s: xcpng.qemudisk.Qemudisk.snap: Failed to set backing file for image in qemu_dp instance: "
                  "uuid: %s pid %s" % (dbg, self.vdi_uuid, self.pid))
        try:
            conn.close()
        except:
            pass
        raise Exception(e)
def attach(self, dbg, uri, domain):
    """Map the volume chain for [uri] and build the platform-specific
    attach response for XAPI.

    The response shape depends on the platform version:
    7.5.0 returns {'domain_uuid', 'implementation'}; 7.6.0/8.0.0 return
    {'implementations': ...}.
    """
    log.debug("%s: xcpng.datapath.Datapath.attach: uri: %s domain: %s"
              % (dbg, uri, domain))
    try:
        # Map the whole chain; parents must be mapped before the leaf
        self.DatapathOpsHandler.map_vol(dbg, uri, chained=True)
        if platform.linux_distribution()[1] == '7.5.0':
            protocol, params = self._attach(dbg, uri, domain)
            return {
                'domain_uuid': '0',
                'implementation': [protocol, params]
            }
        elif platform.linux_distribution(
        )[1] == '7.6.0' or platform.linux_distribution()[1] == '8.0.0':
            return {'implementations': self._attach(dbg, uri, domain)}
        # NOTE(review): any other platform version falls through and the
        # method implicitly returns None — confirm whether newer releases
        # (8.1.x/8.2.x, handled elsewhere in this codebase) belong here.
    except Exception as e:
        log.error(
            "%s: xcpng.datapath.Datapath.attach: Failed to attach datapath for volume: uri: %s"
            % (dbg, uri))
        # Best-effort rollback of the chain mapping
        try:
            self.DatapathOpsHandler.unmap_vol(dbg, uri, chained=True)
        except:
            pass
        raise Exception(e)
def _parse_image_uri(self, dbg):
    """Translate self.img_uri into a qemu blockdev 'file' driver spec.

    Supported schemes: file:, rbd: and sheepdog+unix:."""
    log.debug("%s: xcpng.qemudisk.Qemudisk.parse_image_uri: vdi_uuid %s uri %s"
              % (dbg, self.vdi_uuid, self.img_uri))
    scheme_match = re.compile('^([A-Za-z+]*):(.*)$').match(self.img_uri)
    driver = scheme_match.group(1)
    path = scheme_match.group(2)
    if driver == 'file':
        # file:/tmp/test.qcow2
        spec = {'driver': 'file', 'filename': path}
    elif driver == 'rbd':
        # rbd:pool/image:conf=/etc/ceph/ceph.conf
        rbd_match = re.compile(
            '^([A-Za-z0-9+_-]*)/([A-Za-z0-9+_-]*):conf=([A-Za-z0-9/.]*)$'
        ).match(path)
        spec = {'driver': 'rbd',
                'pool': rbd_match.group(1),
                'image': rbd_match.group(2),
                'conf': rbd_match.group(3)}
    elif driver == 'sheepdog+unix':
        # sheepdog+unix:///vdi?socket=socket_path
        sd_match = re.compile(
            '^///([A-Za-z0-9_-]*)\\?socket=([A-Za-z0-9/.-]*)$'
        ).match(path)
        spec = {'driver': 'sheepdog',
                'server': {'type': 'unix', 'path': sd_match.group(2)},
                'vdi': sd_match.group(1)}
    else:
        log.error("%s: xcpng.qemudisk.Qemudisk.parse_uri: Driver %s is not supported" % (dbg, driver))
        raise Exception("Qemu-dp driver %s is not supported" % driver)
    return spec
def close(self, dbg, uri):
    """Mark the VDI at [uri] persistent again (when needed) and close
    its datapath."""
    log.debug("%s: xcpng.datapath.Datapath.close: uri: %s" % (dbg, uri))
    try:
        image_meta = self.MetadataHandler.get_vdi_meta(dbg, uri)
        vdi_non_persistent = image_meta.get(NON_PERSISTENT_TAG, False)
        log.debug(
            "%s: xcpng.Datapath.close: uri: %s will be marked as persistent"
            % (dbg, uri))
        if vdi_non_persistent:
            # Unmark as non-persistent: None removes the tag
            self.MetadataHandler.update_vdi_meta(
                dbg, uri, {NON_PERSISTENT_TAG: None})
        self._close(dbg, uri)
    except Exception as e:
        log.error(
            "%s: xcpng.datapath.Datapath.close: Failed to close datapath for volume: uri: %s"
            % (dbg, uri))
        raise Exception(e)
def _attach(self, dbg, uri, domain):
    """Start a qemu-dp instance for [uri], record its endpoints in the
    VDI metadata and return the platform-specific implementation spec.

    7.5.0 returns (protocol, params); 7.6.0/8.0.0 return a list of
    [XenDisk, Nbd] implementation descriptors.
    """
    log.debug(
        "%s: xcpng.datapath.QdiskDatapath._attach: uri: %s domain: %s"
        % (dbg, uri, domain))
    protocol = 'Qdisk'
    try:
        image_qemu_uri = self.DatapathOpsHandler.gen_vol_uri(dbg, uri)
        qemu_dp = create(dbg, self.qemudisk, uri, image_qemu_uri)
        # Persist the qemu-dp endpoints so later operations can find them
        volume_meta = {
            QEMU_PID_TAG: qemu_dp.pid,
            QEMU_QMP_SOCK_TAG: qemu_dp.qmp_sock,
            QEMU_NBD_SOCK_TAG: qemu_dp.nbd_sock,
            QEMU_QMP_LOG_TAG: qemu_dp.qmp_log,
            QEMU_IMAGE_URI_TAG: image_qemu_uri
        }
        self.MetadataHandler.update_vdi_meta(dbg, uri, volume_meta)
        if platform.linux_distribution()[1] == '7.5.0':
            return (protocol, qemu_dp.params)
        elif platform.linux_distribution(
        )[1] == '7.6.0' or platform.linux_distribution()[1] == '8.0.0':
            implementations = [[
                'XenDisk',
                {
                    'backend_type': 'qdisk',
                    'params': "vdi:{}".format(qemu_dp.vdi_uuid),
                    'extra': {}
                }
            ], [
                'Nbd',
                {
                    'uri': 'nbd:unix:{}:exportname={}'.format(
                        qemu_dp.nbd_sock, LEAF_NODE_NAME)
                }
            ]]
            return (implementations)
        # NOTE(review): other platform versions implicitly return None —
        # confirm whether that is intended.
    except Exception as e:
        log.error(
            "%s: xcpng.datapath.QdiskDatapath._attach: Failed to attach datapath for volume: uri: %s"
            % (dbg, uri))
        # NOTE(review): qemu_dp is unbound when create() itself failed;
        # the inner try/except masks the resulting NameError.
        try:
            qemu_dp.quit(dbg)
            volume_meta = {
                QEMU_PID_TAG: None,
                QEMU_QMP_SOCK_TAG: None,
                QEMU_NBD_SOCK_TAG: None,
                QEMU_QMP_LOG_TAG: None,
                QEMU_IMAGE_URI_TAG: None
            }
            self.MetadataHandler.update_vdi_meta(dbg, uri, volume_meta)
        except:
            pass
        raise Exception(e)
def __init__(self, dbg, base_device):
    """Bind to the device-mapper device for [base_device], verifying
    that its current dm table matches the expected one."""
    self.name = name_of_device(base_device)
    expected = table(base_device)
    actual = call(dbg, ["dmsetup", "table", self.name]).strip()
    if actual != expected:
        message = "Device mapper device %s has table %s, expected %s" % (self.name, actual, expected)
        log.error("%s: %s" % (dbg, message))
        raise xapi.InternalError(message)
def __dump(self, dbg, uri):
    """Serialize the in-memory DB and persist it through the
    MetaDBOpsHandler, clearing the dirty flag on success."""
    log.debug("%s: xcpng.meta.MetadataHandler.__dump: uri: %s " % (dbg, uri))
    try:
        snapshot = dumps(self.db._storage.read())
        self.MetaDBOpsHandler.dump(dbg, uri, snapshot)
        self.__updated = False
    except Exception as e:
        log.error("%s: xcpng.meta.MetadataHandler.__dump: Failed to dump metadata" % dbg)
        raise Exception(e)
def close(self, dbg):
    """Tear down the qemu-dp block device: detach the xenstore plumbing
    (platform-version dependent), stop the NBD server and delete the
    leaf blockdev.

    Fix: the xen-unwatch-device QMP call previously passed `dbg` as the
    command name; the command string must be the first argument, as in
    every other `_qmp_.command` call in this class.
    """
    log.debug(
        "%s: xcpng.qemudisk.Qemudisk.close: vdi_uuid %s pid %d qmp_sock %s"
        % (dbg, self.vdi_uuid, self.pid, self.qmp_sock))
    _qmp_ = qmp.QEMUMonitorProtocol(self.qmp_sock)
    try:
        _qmp_.connect()
        if platform.linux_distribution()[1] == '7.5.0':
            try:
                path = "{}/{}".format(utils.VAR_RUN_PREFIX, self.vdi_uuid)
                with open(path, 'r') as f:
                    line = f.readline().strip()
                utils.call(dbg, ["/usr/bin/xenstore-write", line, "5"])
                os.unlink(path)
            except Exception:
                log.debug(
                    "%s: xcpng.qemudisk.Qemudisk.close: There was no xenstore setup"
                    % dbg)
        elif platform.linux_distribution()[1] in ('7.6.0', '8.0.0',
                                                  '8.1.0', '8.2.0',
                                                  '8.2.1'):
            path = "{}/{}".format(utils.VAR_RUN_PREFIX, self.vdi_uuid)
            try:
                with open(path, 'r') as f:
                    line = f.readline().strip()
                os.unlink(path)
                args = {
                    'type': 'qdisk',
                    'domid': int(re.search('domain/(\d+)/', line).group(1)),
                    'devid': int(re.search('vbd/(\d+)/', line).group(1))
                }
                # Command name first; `dbg` was erroneously passed here.
                _qmp_.command("xen-unwatch-device", **args)
            except Exception:
                log.debug(
                    "%s: xcpng.qemudisk.Qemudisk.close: There was no xenstore setup"
                    % dbg)
        # Stop the NBD server
        _qmp_.command("nbd-server-stop")
        # Remove the block device
        args = {"node-name": LEAF_NODE_NAME}
        _qmp_.command("blockdev-del", **args)
    except Exception as e:
        log.error(
            "%s: xcpng.qemudisk.Qemudisk.close: Failed to close image in qemu_dp instance: uuid: %s pid %s"
            % (dbg, self.vdi_uuid, self.pid))
        log.error(traceback.format_exc())
        try:
            _qmp_.close()
        except:
            pass
        raise Exception(e)
def _log_exceptions(*args, **kwargs):
    """Invoke the wrapped function, logging unexpected exceptions.

    Rpc_light_failure is an expected RPC-level failure and is only
    reported; anything else is logged with its traceback. Both are
    re-raised unchanged."""
    try:
        return function(*args, **kwargs)
    except Rpc_light_failure as failure:
        log.info('Reporting failure {} to caller'.format(failure))
        raise
    except Exception:
        log.error('Exception in xapi.storage.plugin', exc_info=True)
        raise
def __init__(self, dbg, base_device):
    """Attach to the existing dm device for [base_device]; fail when its
    live table differs from the one we expect."""
    self.name = name_of_device(base_device)
    wanted = table(base_device)
    current = call(dbg, ["dmsetup", "table", self.name]).strip()
    if current == wanted:
        return
    message = ("Device mapper device %s has table %s, expected %s"
               % (self.name, current, wanted))
    log.error("%s: %s" % (dbg, message))
    raise xapi.InternalError(message)
def pool_list(dbg, cluster):
    """Return the list of pools in the given ceph cluster."""
    log.debug("%s: xcpng.librbd.rbd_utils.pool_list: Cluster ID: %s"
              % (dbg, cluster.get_fsid()))
    try:
        return cluster.list_pools()
    except Exception as e:
        log.error("%s: xcpng.librbd.rbd_utils.get_pool_list: Failed to get list of pools for Cluster ID: %s"
                  % (dbg, cluster.get_fsid()))
        raise Exception(e)
def create(self, dbg, sr_uuid, configuration, name, description):
    """Create a new SR: build its URI, create the backing store and
    metadata DB, record the SR metadata, then export the SR again.

    Returns the (augmented) configuration. On creation failure the
    backing store is destroyed best-effort; a failure during the final
    export is only logged.
    """
    log.debug(
        "%s: xcpng.sr.SR.create: sr_uuid %s configuration %s name '%s' description: '%s'"
        % (dbg, sr_uuid, configuration, name, description))
    # Scheme: "<type>[+<image-format>[+<datapath>]]://"
    if IMAGE_FORMAT_TAG in configuration:
        uri = "%s+%s" % (self.sr_type, configuration[IMAGE_FORMAT_TAG])
        if DATAPATH_TAG in configuration:
            uri = "%s+%s://" % (uri, configuration[DATAPATH_TAG])
    else:
        uri = "%s://" % self.sr_type
    uri = self.SROpsHendler.extend_uri(dbg, uri, configuration)
    uri = "%s/%s" % (uri, sr_uuid)
    log.debug("%s: xcpng.sr.SR.create: uri %s" % (dbg, uri))
    configuration['mountpoint'] = "%s/%s" % (SR_PATH_PREFIX,
                                             get_sr_uuid_by_uri(dbg, uri))
    try:
        call(dbg, ['mkdir', '-p', configuration['mountpoint']])
        self.SROpsHendler.create(dbg, uri, configuration)
        self.MetadataHandler.create(dbg, uri)
        configuration['sr_uuid'] = sr_uuid
        sr_meta = {
            SR_UUID_TAG: sr_uuid,
            NAME_TAG: name,
            DESCRIPTION_TAG: description,
            #CONFIGURATION_TAG: json.dumps(configuration)
            CONFIGURATION_TAG: configuration
        }
        self.MetadataHandler.update_sr_meta(dbg, uri, sr_meta)
        self.MetadataHandler.dump(dbg, uri)
    except Exception as e:
        log.error(
            "%s: xcpng.sr.SR.create: Failed to create SR - sr_uuid: %s"
            % (dbg, sr_uuid))
        # Best-effort rollback of the partially created SR
        try:
            self.SROpsHendler.destroy(dbg, uri)
        except:
            pass
        raise Exception(e)
    try:
        # Creation leaves the SR imported; export it so attach() starts clean
        self.SROpsHendler.sr_export(dbg, uri)
    except Exception as e:
        # Deliberately non-fatal: the SR exists, the operator must export it
        log.error(
            "%s: xcpng.sr.SR.create: Created but failed to export SR after creation - sr_uuid: %s"
            "Please check and export SR manually before attaching the SR"
            % (dbg, sr_uuid))
    return configuration
def __load(self, dbg, uri):
    """Populate the in-memory DB from the persisted MetaDB blob for
    [uri], remembering the uri and debug tag for later dumps.

    Fix: the debug tag attribute was previously assigned `uri`
    (self.__dbg = uri); it now stores the `dbg` parameter.
    """
    log.debug("%s: xcpng.meta.MetadataHandler.__load: uri: %s " % (dbg, uri))
    self.__uri = uri
    self.__dbg = dbg
    try:
        self.db._storage.write(loads(self.MetaDBOpsHandler.load(dbg, uri)))
        self.__loaded = True
    except Exception as e:
        log.error("%s: xcpng.meta.MetadataHandler.load: Failed to load metadata" % dbg)
        raise Exception(e)
def epc_open(dbg, uri, persistent, cb):
    """Open an epoch on [uri], reconciling the requested persistence
    with the VDI's current state under the global lock.

    A VDI switching from non-persistent to persistent (or already
    non-persistent) is reset; a non-empty VDI going non-persistent gets
    a single clone.

    Fix: the failure log now reports sys.exc_info()[1] (the exception
    value) instead of [0] (just the type), matching epc_close.
    """
    log.debug("{}: Datapath.epc_open: uri == {}".format(dbg, uri))
    sr, key = _parse_uri(uri)
    opq = cb.volumeStartOperations(sr, 'w')
    meta_path = cb.volumeMetadataGetPath(opq)
    db = VHDMetabase(meta_path)
    try:
        with Lock(opq, 'gl', cb):
            try:
                with db.write_context():
                    vdi = db.get_vdi_by_id(key)
                    vol_path = cb.volumeGetPath(opq, str(vdi.vhd.id))
                    if (persistent):
                        log.debug(
                            ("{}: Datapath.epc_open: "
                             "{} is persistent").format(dbg, vol_path)
                        )
                        if vdi.nonpersistent:
                            # Truncate, etc
                            VHDUtil.reset(dbg, vol_path)
                            db.update_vdi_nonpersistent(vdi.uuid, 1)
                    elif vdi.nonpersistent:
                        log.debug(
                            ("{}: Datapath.epc_open: {} already "
                             "marked non-persistent").format(dbg, vol_path)
                        )
                        # truncate
                        VHDUtil.reset(dbg, vol_path)
                    else:
                        log.debug(
                            ("{}: Datapath.epc_open: {} is "
                             "non-persistent").format(dbg, vol_path)
                        )
                        db.update_vdi_nonpersistent(vdi.uuid, 1)
                        if not VHDUtil.is_empty(dbg, vol_path):
                            # Create single clone
                            VHDDatapath.create_single_clone(db, sr, key, cb)
            except:
                log.error(
                    ("{}: Datapath.epc_open: failed to complete "
                     "open, {}").format(dbg, sys.exc_info()[1])
                )
                raise
    finally:
        db.close()
    return None
def call(dbg, cmd_args, error=True, simple=True, expRc=0):
    """Run [cmd_args], raising xapi.InternalError when [error] is set
    and the exit code differs from [expRc]. Returns stdout, or the
    (stdout, stderr, returncode) triple when not [simple]."""
    log.debug("%s: Running cmd %s" % (dbg, cmd_args))
    proc = subprocess.Popen(
        cmd_args,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
        close_fds=True)
    out, err = proc.communicate()
    rc = proc.returncode
    if error and rc != expRc:
        log.error("%s: %s exitted with code %d: %s"
                  % (dbg, " ".join(cmd_args), rc, err))
        raise xapi.InternalError("%s exitted with non-zero code %d: %s"
                                 % (" ".join(cmd_args), rc, err))
    if simple:
        return out
    return out, err, rc
def call(dbg, cmd_args, error=True, simple=True, expRc=0):
    """Execute [cmd_args] capturing output; raise xapi.InternalError on
    an unexpected exit code when [error] is set. Returns stdout only
    when [simple], else (stdout, stderr, returncode)."""
    log.debug('{}: Running cmd {}'.format(dbg, cmd_args))
    child = subprocess.Popen(
        cmd_args,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
        close_fds=True)
    stdout, stderr = child.communicate()
    if error and child.returncode != expRc:
        joined = " ".join(cmd_args)
        log.error('{}: {} exitted with code {}: {}'.format(
            dbg, joined, child.returncode, stderr))
        raise xapi.InternalError('{} exitted with non-zero code {}: {}'.format(
            joined, child.returncode, stderr))
    if simple:
        return stdout
    return stdout, stderr, child.returncode
def query(self, dbg):
    """Return the static Plugin.Query record describing the
    vhd+tapdisk datapath plugin (name, vendor, versions, features)."""
    return {
        "plugin": "vhd+tapdisk",
        "name": "The vhd + tapdisk user-space datapath plugin",
        "description": ("This plugin manages and configures tapdisk"
                        " instances backend for vhd image format built"
                        " using libvhd, like file or lvm based Volume"
                        " plugins"),
        "vendor": "Citrix",
        "copyright": "(C) 2015 Citrix Inc",
        "version": "3.0",
        "required_api_version": "3.0",
        "features": [
            "NONPERSISTENT",
            # Retire this one
            "VDI_NONPERSISTENT"],
        "configuration": {},
        "required_cluster_stack": []}


if __name__ == "__main__":
    # CLI entry point: xapi invokes this script under per-operation
    # argv[0] names and we dispatch accordingly.
    try:
        log.log_call_argv()
        cmd = xapi.storage.api.plugin.Plugin_commandline(Implementation())
        base = os.path.basename(sys.argv[0])
        if base == "Plugin.Query":
            cmd.query()
        else:
            raise xapi.storage.api.plugin.Unimplemented(base)
    except:
        log.error("plugin:vhd+tapdisk: error {}".format(sys.exc_info()))
        raise
# NOTE(review): this chunk begins mid-method — the `def open(...)` header
# (of the Implementation class) is outside this view.
        VHDDatapath.epc_open(dbg, uri, persistent, cb)
        return None

    def close(self, dbg, uri):
        """Close the datapath for [uri] via VHDDatapath.epc_close."""
        cb = get_sr_callbacks(dbg, uri)
        VHDDatapath.epc_close(dbg, uri, cb)
        return None


if __name__ == "__main__":
    # CLI entry point: dispatch on the per-operation argv[0] name.
    try:
        log.log_call_argv()
        cmd = xapi.storage.api.datapath.Datapath_commandline(Implementation())
        base = os.path.basename(sys.argv[0])
        if base == "Datapath.activate":
            cmd.activate()
        elif base == "Datapath.attach":
            cmd.attach()
        elif base == "Datapath.close":
            cmd.close()
        elif base == "Datapath.deactivate":
            cmd.deactivate()
        elif base == "Datapath.detach":
            cmd.detach()
        elif base == "Datapath.open":
            cmd.open()
        else:
            raise xapi.storage.api.datapath.Unimplemented(base)
    except:
        log.error("datapath:vhd+tapdisk: error {}".format(sys.exc_info()))
        raise
"datasources": [], "clustered": True, "health": ["Healthy", ""] } if __name__ == "__main__": try: log.log_call_argv() cmd = xapi.storage.api.volume.SR_commandline(Implementation()) base = os.path.basename(sys.argv[0]) if base == 'SR.probe': cmd.probe() elif base == 'SR.attach': cmd.attach() elif base == 'SR.create': cmd.create() elif base == 'SR.destroy': cmd.destroy() elif base == 'SR.detach': cmd.detach() elif base == 'SR.ls': cmd.ls() elif base == 'SR.stat': cmd.stat() else: raise xapi.storage.api.volume.Unimplemented(base) except: log.error("gfs2 sr: error {}".format(sys.exc_info())) raise
"SR_CREATE", "VDI_CREATE", "VDI_DESTROY", "VDI_ATTACH", "VDI_ATTACH_OFFLINE", "VDI_DETACH", "VDI_ACTIVATE", "VDI_DEACTIVATE", "VDI_UPDATE", "VDI_CLONE", "VDI_SNAPSHOT", "VDI_RESIZE", "SR_METADATA"], "configuration": {}, "required_cluster_stack": ['corosync']} if __name__ == "__main__": try: log.log_call_argv() cmd = xapi.storage.api.plugin.Plugin_commandline(Implementation()) base = os.path.basename(sys.argv[0]) if base == 'Plugin.diagnostics': cmd.diagnostics() elif base == 'Plugin.Query': cmd.query() else: raise xapi.storage.api.plugin.Unimplemented(base) except: log.error("gfs2 volume plugin: error {}".format(sys.exc_info())) raise
# NOTE(review): this chunk begins mid-function — the def of this GC
# stop/wait helper is outside this view. It signals the GC loop to stop
# by removing the 'gc-running' flag file, then waits for 'gc-exited'.
    opq = cb.volumeStartOperations(uri, 'w')
    gc_running = os.path.join(
        '/var/run/sr-private',
        cb.getUniqueIdentifier(opq),
        'gc-running'
    )
    gc_exited = os.path.join(
        "/var/run/sr-private",
        cb.getUniqueIdentifier(opq),
        'gc-exited'
    )
    # Removing the flag tells the GC loop to shut down
    os.unlink(gc_running)
    while True:
        if os.path.exists(gc_exited):
            # GC acknowledged: consume the marker and return
            os.unlink(gc_exited)
            return
        else:
            time.sleep(1)


if __name__ == '__main__':
    try:
        sr_type = sys.argv[1]
        uri = sys.argv[2]
        run_coalesce(sr_type, uri)
    except:
        # NOTE(review): the error is logged but NOT re-raised, unlike the
        # other __main__ blocks in this codebase — confirm this is intended.
        log.error("libvhd:coalesce: error {}".format(sys.exc_info()))