def _create(ioctx):
    """Create the RBD image and apply the requested configuration.

    Relies on names from the enclosing scope: obj_size, features, name,
    size, stripe_unit, stripe_count, data_pool, namespace, configuration.
    """
    rbd_inst = rbd.RBD()

    # Translate object size (bytes) into the RBD "order" (log2 of size).
    order = None
    if obj_size and obj_size > 0:
        order = int(round(math.log(float(obj_size), 2)))

    # Collapse the feature name list into the bitmask librbd expects.
    feature_bits = format_features(features)

    rbd_inst.create(ioctx,
                    name,
                    size,
                    order=order,
                    old_format=False,
                    features=feature_bits,
                    stripe_unit=stripe_unit,
                    stripe_count=stripe_count,
                    data_pool=data_pool)

    RbdConfiguration(pool_ioctx=ioctx,
                     namespace=namespace,
                     image_name=name).set_configuration(configuration)
def _create_image(self, fsid, conn, ioctx, image_name, size, order, context=None):
    """
    Create an rbd image. If librbd supports it, make it a cloneable
    snapshot, so that copy-on-write volumes can be created from it.

    :param image_name: Image's name

    :retval: `glance_store.rbd.StoreLocation` object
    """
    rbd_inst = rbd.RBD()

    # Fall back to the layering feature when no (or an empty) default
    # feature set is configured.
    features = conn.conf_get('rbd_default_features')
    if features is None or int(features) == 0:
        features = rbd.RBD_FEATURE_LAYERING

    rbd_inst.create(ioctx, image_name, size, order, old_format=False,
                    features=int(features))

    location_spec = {
        'fsid': fsid,
        'pool': self.pool,
        'image': image_name,
        'snapshot': DEFAULT_SNAPNAME,
    }
    return StoreLocation(location_spec, self.conf)
def create_image(self, name: str, size: int, data_pool=None):
    """
    Create an rbd image.

    :param name: what the image is called
    :param size: how big the image is in bytes
    :param data_pool: if given, image data objects are stored in this pool
    :return:
        True    # success
        None    # image already exists
    :raises: RadosError
    """
    cluster = self.get_cluster()
    try:
        with cluster.open_ioctx(self.pool_name) as ioctx:
            rbd.RBD().create(ioctx=ioctx, name=name, size=size,
                             old_format=False, data_pool=data_pool)
    except rbd.ImageExists:
        return None
    # TypeError and rbd.InvalidArgument are subclasses of Exception, so
    # the original tuple (TypeError, rbd.InvalidArgument, Exception) was
    # redundant; a single handler is equivalent and clearer.
    except Exception as e:
        raise RadosError(f'create_image error:{str(e)}')
    return True
def _rbd_list(self, pool_name):
    """List all RBD images in *pool_name* with stat info, feature
    bitmask/names and (when present) the parent spec.

    Fix: the ioctx and every rbd.Image handle were previously leaked;
    both are now released via context managers.
    """
    with mgr.rados.open_ioctx(pool_name) as ioctx:
        self.rbd = rbd.RBD()
        names = self.rbd.list(ioctx)
        result = []
        for name in names:
            with rbd.Image(ioctx, name) as i:
                stat = i.stat()
                stat['name'] = name
                features = i.features()
                stat['features'] = features
                stat['features_name'] = self._format_bitmask(features)
                try:
                    # NOTE(review): parent_info() returns (pool, image,
                    # snap); the "pool@image" formatting below is kept
                    # as-is — confirm the intended spec format.
                    parent_info = i.parent_info()
                    parent = "{}@{}".format(parent_info[0], parent_info[1])
                    if parent_info[0] != pool_name:
                        parent = "{}/{}".format(parent_info[0], parent)
                    stat['parent'] = parent
                except rbd.ImageNotFound:
                    pass
                result.append(stat)
        return result
def create(self):
    """
    Create an rbd image compatible with exporting through LIO to multiple
    clients

    :return: status code and msg
    """
    with rados.Rados(conffile=settings.config.cephconf) as cluster, \
            cluster.open_ioctx(self.pool) as ioctx:
        try:
            rbd.RBD().create(ioctx,
                             self.image,
                             self.size_bytes,
                             features=RBDDev.supported_features(),
                             old_format=False)
        except (rbd.ImageExists, rbd.InvalidArgument) as err:
            # Record the failure on the object instead of raising.
            self.error = True
            self.error_msg = ("Failed to create rbd image {} in "
                              "pool {} : {}".format(self.image,
                                                    self.pool,
                                                    err))
def clone(self, image_location, dest_name, dest_pool=None):
    """Clone a parent snapshot into a new copy-on-write child image."""
    _fsid, src_pool, src_image, src_snap = \
        self.parse_url(image_location['url'])
    LOG.debug('cloning %(pool)s/%(img)s@%(snap)s to '
              '%(dest_pool)s/%(dest_name)s',
              {'pool': src_pool, 'img': src_image, 'snap': src_snap,
               'dest_pool': dest_pool, 'dest_name': dest_name})
    with RADOSClient(self, str(src_pool)) as src_client:
        with RADOSClient(self, dest_pool) as dest_client:
            try:
                rbd.RBD().clone(src_client.ioctx,
                                src_image.encode('utf-8'),
                                src_snap.encode('utf-8'),
                                dest_client.ioctx,
                                str(dest_name),
                                features=src_client.features)
            except rbd.PermissionError:
                raise exception.Forbidden(
                    _('no write permission on '
                      'storage pool %s') % dest_pool)
def _create_image(self, fsid, ioctx, image_name, size, order):
    """
    Create an rbd image. If librbd supports it, make it a cloneable
    snapshot, so that copy-on-write volumes can be created from it.

    :param image_name Image's name

    :retval `glance.store.rbd.StoreLocation` object
    """
    rbd_inst = rbd.RBD()
    if not hasattr(rbd, 'RBD_FEATURE_LAYERING'):
        # Old librbd without layering support: create a plain v1 image
        # and return the minimal location.
        rbd_inst.create(ioctx, image_name, size, order, old_format=True)
        return StoreLocation({'image': image_name})

    rbd_inst.create(ioctx, image_name, size, order, old_format=False,
                    features=rbd.RBD_FEATURE_LAYERING)
    return StoreLocation({
        'fsid': fsid,
        'pool': self.pool,
        'image': image_name,
        'snapshot': DEFAULT_SNAPNAME,
    })
def queue_trash_remove(self, image_id_spec: str) -> Tuple[int, str, str]:
    """Queue an asynchronous task that removes an image from the trash."""
    image_id_spec = self.extract_image_spec(image_id_spec)

    authorize_request(self.module, image_id_spec[0], image_id_spec[1])
    self.log.info("queue_trash_remove: {}".format(image_id_spec))

    refs = {
        TASK_REF_ACTION: TASK_REF_ACTION_TRASH_REMOVE,
        TASK_REF_POOL_NAME: image_id_spec[0],
        TASK_REF_POOL_NAMESPACE: image_id_spec[1],
        TASK_REF_IMAGE_ID: image_id_spec[2]
    }
    existing_task = self.find_task(refs)
    if existing_task:
        # An identical task is already queued; return it instead.
        return 0, existing_task.to_json(), ''

    # verify that image exists in trash
    with self.open_ioctx(image_id_spec[:2]) as ioctx:
        rbd.RBD().trash_get(ioctx, image_id_spec[2])
        message = "Removing image {} from trash".format(
            self.format_image_spec(image_id_spec))
        return 0, self.add_task(ioctx, message, refs), ''
def add(self, image_id, image_file, image_size):
    """
    Stores an image file with supplied identifier to the backend storage
    system and returns an `glance.store.ImageAddResult` object containing
    information about the stored image.

    :param image_id: The opaque image identifier
    :param image_file: The image data to write, as a file-like object
    :param image_size: The size of the image data to write, in bytes

    :retval `glance.store.ImageAddResult` object
    :raises `glance.common.exception.Duplicate` if the image already
            existed
    """
    location = StoreLocation({'image': image_id})
    checksum = hashlib.md5()  # content checksum only; not a security use
    image_name = str(image_id)
    with rados.Rados(conffile=self.conf_file, rados_id=self.user) as conn:
        with conn.open_ioctx(self.pool) as ioctx:
            order = int(math.log(self.chunk_size, 2))
            logger.debug('creating image %s with order %d', image_name,
                         order)
            try:
                rbd.RBD().create(ioctx, image_name, image_size, order)
            except rbd.ImageExists:
                raise exception.Duplicate(
                    _('RBD image %s already exists') % image_id)
            with rbd.Image(ioctx, image_name) as image:
                bytes_left = image_size
                while bytes_left > 0:
                    length = min(self.chunk_size, bytes_left)
                    data = image_file.read(length)
                    if not data:
                        # EOF before image_size bytes were supplied; stop
                        # instead of looping forever on b''.
                        break
                    image.write(data, image_size - bytes_left)
                    # Fix: read() may return fewer than `length` bytes;
                    # account for what was actually read so the write
                    # offset stays in sync.
                    bytes_left -= len(data)
                    checksum.update(data)
    return (location.get_uri(), image_size, checksum.hexdigest())
def clone_image(self, snap_image_name: str, snap_name: str,
                new_image_name: str, data_pool=None):
    """
    Clone an rbd image from a snapshot.

    :param snap_image_name: name of the snapshot's parent image
    :param snap_name: snapshot name
    :param new_image_name: name of the newly cloned image
    :param data_pool: if given, image data objects are stored in this pool
    :return:
        True             # success
        raise RadosError # failed
    :raise class: `RadosError`, ImageExistsError
    """
    if not snap_name:
        raise RadosError(f'clone_image error:invalid param "snap_name"')

    cluster = self.get_cluster()
    try:
        with cluster.open_ioctx(self.pool_name) as p_ioctx:
            # The clone's metadata lives in the same pool as the parent;
            # data_pool may redirect its data blocks to another pool.
            c_ioctx = p_ioctx
            rbd.RBD().clone(p_ioctx=p_ioctx, p_name=snap_image_name,
                            p_snapname=snap_name, c_ioctx=c_ioctx,
                            c_name=new_image_name, data_pool=data_pool)
    except rbd.ImageExists as e:
        raise ImageExistsError(f'clone_image error,image exists,{str(e)}')
    except Exception as e:
        raise RadosError(f'clone_image error:{str(e)}')
    return True
def cleanup_volumes(self, instance):
    """Remove every RBD volume in self.pool belonging to *instance*.

    Busy volumes (watchers or snapshots) are retried roughly once per
    second for ten attempts before the failure is propagated.
    """
    def _cleanup_vol(ioctx, volume, retryctx):
        # Fix: use the ioctx parameter instead of reaching into the
        # enclosing `client` closure, so the helper honors whatever
        # context it is handed.
        try:
            rbd.RBD().remove(ioctx, volume)
            raise loopingcall.LoopingCallDone(retvalue=False)
        except (rbd.ImageBusy, rbd.ImageHasSnapshots):
            LOG.warn(_LW('rbd remove %(volume)s in pool %(pool)s '
                         'failed'),
                     {'volume': volume, 'pool': self.pool})
        retryctx['retries'] -= 1
        if retryctx['retries'] <= 0:
            raise loopingcall.LoopingCallDone()

    with RADOSClient(self, self.pool) as client:
        def belongs_to_instance(disk):
            return disk.startswith(instance['uuid'])

        # pylint: disable=E1101
        volumes = rbd.RBD().list(client.ioctx)
        for volume in filter(belongs_to_instance, volumes):
            # NOTE(danms): We let it go for ten seconds
            retryctx = {'retries': 10}
            timer = loopingcall.FixedIntervalLoopingCall(
                _cleanup_vol, client.ioctx, volume, retryctx)
            timed_out = timer.start(interval=1).wait()
            if timed_out:
                # NOTE(danms): Run this again to propagate the error, but
                # if it succeeds, don't raise the loopingcall exception
                try:
                    _cleanup_vol(client.ioctx, volume, retryctx)
                except loopingcall.LoopingCallDone:
                    pass
def get_rbd_images(ceph_pool='rbd'):
    """
    Grab a dictionary of rbd images in a pool across all clusters

    :param ceph_pool: pool name to scan in every known cluster
    :return: dict mapping cluster name -> list of image dicts with
             'name', 'size' and 'monitors' keys
    """
    all_images = dict()
    # Fix: iteritems() only exists on Python 2; items() works on both.
    for cluster_name, cluster_config in get_ceph_clusters().items():
        all_images[cluster_name] = []
        ceph_config = parse_ceph_config(cluster_config['conffile'])
        ceph_monitors = get_ceph_config_monitors(ceph_config)
        with Rados(**cluster_config) as cluster:
            with cluster.open_ioctx(ceph_pool) as ioctx:
                rbd_inst = rbd.RBD()
                for rbd_image_name in rbd_inst.list(ioctx):
                    with rbd.Image(ioctx, rbd_image_name) as rbd_image:
                        # Size in decimal GB. NOTE(review): on Python 3
                        # true division yields a float here — confirm
                        # that is the desired representation.
                        rbd_size = (rbd_image.size() / 1000000000)
                        rbd_data = {
                            'name': rbd_image_name,
                            'size': rbd_size,
                            'monitors': ceph_monitors
                        }
                        all_images[cluster_name].append(rbd_data)
    return all_images
def rename_image(self, image_name: str, new_name: str):
    """
    Rename an rbd image.

    :param image_name: current name of the image
    :param new_name: new name for the image
    :return:
        True             # success
        raise RadosError # failed
    :raise class: `RadosError`
    """
    cluster = self.get_cluster()
    try:
        with cluster.open_ioctx(self.pool_name) as ioctx:
            rbd.RBD().rename(ioctx=ioctx, src=image_name, dest=new_name)
    except rbd.ImageNotFound:
        raise RadosError('rename_image error: image not found')
    except rbd.ImageExists:
        raise RadosError('rename_image error: A image with the same name already exists')
    except Exception as e:
        raise RadosError(f'rename_image error:{str(e)}')
    return True
def clone(dbg, cluster, parent, snapshot, clone):
    """
    Clone *snapshot* of image *parent* into new image *clone*, protecting
    the snapshot first when necessary.

    Fix: the parent rbd.Image handle is now closed in the finally block;
    previously only the two ioctxs were released.
    """
    log.debug(
        "%s: rbd_utils.clone: Cluster ID: %s Parent: %s Snapshot: %s Clone: %s"
        % (dbg, cluster.get_fsid(), parent, snapshot, clone))
    p_pool = _getPoolName(parent)
    p_name = _getImageName(parent)
    p_ioctx = cluster.open_ioctx(p_pool)
    p_image = rbd.Image(p_ioctx, p_name)
    c_pool = _getPoolName(clone)
    c_name = _getImageName(clone)
    c_ioctx = cluster.open_ioctx(c_pool)
    rbd_inst = rbd.RBD()
    log.debug(
        "%s: rbd_utils.clone: Cluster ID: %s p_pool: %s p_name: %s snap: %s c_pool: %s c_name %s"
        % (dbg, cluster.get_fsid(), p_pool, p_name, snapshot, c_pool, c_name))
    try:
        # librbd requires the source snapshot to be protected before
        # a clone can be made from it.
        if not p_image.is_protected_snap(snapshot):
            p_image.protect_snap(snapshot)
        rbd_inst.clone(p_ioctx, p_name, snapshot, c_ioctx, c_name)
    finally:
        p_image.close()
        p_ioctx.close()
        c_ioctx.close()
class RbdNamespace(RESTController):
    """REST endpoints for creating, deleting and listing RBD namespaces."""

    rbd_inst = rbd.RBD()

    def create(self, pool_name, namespace):
        """Create *namespace* in *pool_name*; fail if it already exists."""
        with mgr.rados.open_ioctx(pool_name) as ioctx:
            existing = self.rbd_inst.namespace_list(ioctx)
            if namespace in existing:
                raise DashboardException(
                    msg='Namespace already exists',
                    code='namespace_already_exists',
                    component='rbd')
            return self.rbd_inst.namespace_create(ioctx, namespace)

    def delete(self, pool_name, namespace):
        """Remove *namespace* from *pool_name* if it contains no images."""
        with mgr.rados.open_ioctx(pool_name) as ioctx:
            # pylint: disable=unbalanced-tuple-unpacking
            _, images = RbdService.rbd_pool_list(pool_name, namespace)
            if images:
                raise DashboardException(
                    msg='Namespace contains images which must be deleted first',
                    code='namespace_contains_images',
                    component='rbd')
            return self.rbd_inst.namespace_remove(ioctx, namespace)

    def list(self, pool_name):
        """Return each namespace in *pool_name* with its image count."""
        with mgr.rados.open_ioctx(pool_name) as ioctx:
            summaries = []
            for namespace in self.rbd_inst.namespace_list(ioctx):
                # pylint: disable=unbalanced-tuple-unpacking
                _, images = RbdService.rbd_pool_list(pool_name, namespace)
                summaries.append({
                    'namespace': namespace,
                    'num_images': len(images) if images else 0
                })
            return summaries
def queue_migration_commit(self, image_spec):
    """Queue a task that commits a completed image migration."""
    image_spec = self.extract_image_spec(image_spec)

    self.log.info("queue_migration_commit: {}".format(image_spec))

    refs = {TASK_REF_ACTION: TASK_REF_ACTION_MIGRATION_COMMIT,
            TASK_REF_POOL_NAME: image_spec[0],
            TASK_REF_POOL_NAMESPACE: image_spec[1],
            TASK_REF_IMAGE_NAME: image_spec[2]}
    pending = self.find_task(refs)
    if pending:
        # An identical task is already queued; hand it back.
        return 0, pending.to_json(), ''

    with self.open_ioctx(image_spec) as ioctx:
        self.validate_image_migrating(ioctx, image_spec)
        # Only a migration in EXECUTED state may be committed.
        status = rbd.RBD().migration_status(ioctx, image_spec[2])
        if status['state'] != rbd.RBD_IMAGE_MIGRATION_STATE_EXECUTED:
            raise rbd.InvalidArgument(
                "Image {} has not completed migration".format(
                    self.format_image_spec(image_spec)),
                errno=errno.EINVAL)

        message = "Committing image migration for {}".format(
            self.format_image_spec(image_spec))
        return 0, self.add_task(ioctx, message, refs), ''
def deleteImage(poolName, imageNames):
    """
    Remove every image listed in *imageNames* from pool *poolName*.

    Fix: the ioctx is now closed even when a removal raises.
    """
    ctx = cluster.open_ioctx(poolName)
    try:
        for imageName in imageNames:
            rbd.RBD().remove(ctx, imageName)
    finally:
        ctx.close()
def delete(self, pool_name, peer_uuid):
    """
    Remove mirror peer *peer_uuid* from *pool_name* and reset cached views.

    Fix: the ioctx is now released via a context manager instead of being
    leaked.
    """
    with mgr.rados.open_ioctx(pool_name) as ioctx:
        rbd.RBD().mirror_peer_remove(ioctx, peer_uuid)
    _reset_view_cache()
def createImage(poolName, images):
    """
    Create each image described in *images* (dicts with 'name' and 'size'
    in GiB) inside pool *poolName*.

    Fix: the ioctx is now closed even when a create call raises.
    """
    ctx = cluster.open_ioctx(poolName)
    try:
        for image in images:
            # 'size' is expressed in GiB; librbd expects bytes.
            rbd.RBD().create(ctx, image['name'], image['size'] * (1024**3))
    finally:
        ctx.close()
def create_token(self, pool_name):
    """
    Create a mirroring bootstrap token for *pool_name*.

    Fix: the ioctx is now released via a context manager instead of being
    leaked.
    """
    with mgr.rados.open_ioctx(pool_name) as ioctx:
        token = rbd.RBD().mirror_peer_bootstrap_create(ioctx)
    return {'token': token}
def list(self, pool_name):
    """
    Return the UUIDs of all mirror peers configured on *pool_name*.

    Fix: the ioctx is now released via a context manager instead of being
    leaked.
    """
    with mgr.rados.open_ioctx(pool_name) as ioctx:
        peer_list = rbd.RBD().mirror_peer_list(ioctx)
    return [peer['uuid'] for peer in peer_list]
def set(self, site_name):
    """Update the cluster's mirroring site name, then return the value
    as reported back by librbd."""
    rbd_inst = rbd.RBD()
    rbd_inst.mirror_site_name_set(mgr.rados, site_name)
    return self._get_site_name()
def _get_site_name(self):
    """Return the current mirroring site name wrapped in a dict payload."""
    site_name = rbd.RBD().mirror_site_name_get(mgr.rados)
    return {'site_name': site_name}
def get_pools(daemons):  # pylint: disable=R0912, R0915
    """Build per-pool mirroring stats for every replicated RBD pool.

    :param daemons: iterable of rbd-mirror daemon dicts; each carries a
        'status' mapping of pool data and a 'metadata' dict.
    :return: dict mapping pool name -> stats dict with 'mirror_mode',
        'peer_uuids' and (usually) 'health'/'health_color' keys.
    """
    # Only replicated pools (type 1) tagged for rbd are considered.
    pool_names = [pool['pool_name'] for pool in CephService.get_pool_list('rbd')
                  if pool.get('type', 1) == 1]
    pool_stats = {}
    rbdctx = rbd.RBD()
    for pool_name in pool_names:
        logger.debug("Constructing IOCtx %s", pool_name)
        try:
            ioctx = mgr.rados.open_ioctx(pool_name)
        except TypeError:
            logger.exception("Failed to open pool %s", pool_name)
            continue

        try:
            mirror_mode = rbdctx.mirror_mode_get(ioctx)
            peer_uuids = [x['uuid'] for x in rbdctx.mirror_peer_list(ioctx)]
        except:  # noqa pylint: disable=W0702
            # Best-effort: a pool whose mirror settings cannot be read is
            # reported with unknown mode and no peers.
            logger.exception("Failed to query mirror settings %s", pool_name)
            mirror_mode = None
            peer_uuids = []

        # Translate the librbd mode constant into a display string and
        # seed the health fields for the terminal states.
        stats = {}
        if mirror_mode == rbd.RBD_MIRROR_MODE_DISABLED:
            mirror_mode = "disabled"
            stats['health_color'] = "info"
            stats['health'] = "Disabled"
        elif mirror_mode == rbd.RBD_MIRROR_MODE_IMAGE:
            mirror_mode = "image"
        elif mirror_mode == rbd.RBD_MIRROR_MODE_POOL:
            mirror_mode = "pool"
        else:
            mirror_mode = "unknown"
            stats['health_color'] = "warning"
            stats['health'] = "Warning"

        pool_stats[pool_name] = dict(stats, **{
            'mirror_mode': mirror_mode,
            'peer_uuids': peer_uuids
        })

    # Merge per-daemon status into the pool stats gathered above.
    for daemon in daemons:
        for _, pool_data in daemon['status'].items():
            stats = pool_stats.get(pool_data['name'], None)
            if stats is None:
                continue

            if pool_data.get('leader', False):
                # leader instance stores image counts
                stats['leader_id'] = daemon['metadata']['instance_id']
                stats['image_local_count'] = pool_data.get('image_local_count', 0)
                stats['image_remote_count'] = pool_data.get('image_remote_count', 0)

            # Error counts dominate warnings; never downgrade an 'error'.
            if (stats.get('health_color', '') != 'error'
                    and pool_data.get('image_error_count', 0) > 0):
                stats['health_color'] = 'error'
                stats['health'] = 'Error'
            elif (stats.get('health_color', '') != 'error'
                    and pool_data.get('image_warning_count', 0) > 0):
                stats['health_color'] = 'warning'
                stats['health'] = 'Warning'
            elif stats.get('health', None) is None:
                stats['health_color'] = 'success'
                stats['health'] = 'OK'

    # Final pass: pools no daemon reported on, or with no leader, are
    # flagged as unhealthy (disabled pools are exempt).
    for _, stats in pool_stats.items():
        if stats['mirror_mode'] == 'disabled':
            continue
        if stats.get('health', None) is None:
            # daemon doesn't know about pool
            stats['health_color'] = 'error'
            stats['health'] = 'Error'
        elif stats.get('leader_id', None) is None:
            # no daemons are managing the pool as leader instance
            stats['health_color'] = 'warning'
            stats['health'] = 'Warning'
    return pool_stats
def _get_pool_datum(pool_name):
    """Collect the mirror image status list for one pool.

    :param pool_name: pool to inspect
    :return: dict with a sorted 'mirror_images' list (possibly absent if
        listing raised ImageNotFound), or None when the pool cannot be
        opened.
    """
    data = {}
    logger.debug("Constructing IOCtx %s", pool_name)
    try:
        ioctx = mgr.rados.open_ioctx(pool_name)
    except TypeError:
        logger.exception("Failed to open pool %s", pool_name)
        return None

    # Map each librbd mirror status state (plus the synthetic 'down'
    # key) to the health/state fields shown in the UI.
    mirror_state = {
        'down': {
            'health': 'issue',
            'state_color': 'warning',
            'state': 'Unknown',
            'description': None
        },
        rbd.MIRROR_IMAGE_STATUS_STATE_UNKNOWN: {
            'health': 'issue',
            'state_color': 'warning',
            'state': 'Unknown'
        },
        rbd.MIRROR_IMAGE_STATUS_STATE_ERROR: {
            'health': 'issue',
            'state_color': 'error',
            'state': 'Error'
        },
        rbd.MIRROR_IMAGE_STATUS_STATE_SYNCING: {
            'health': 'syncing'
        },
        rbd.MIRROR_IMAGE_STATUS_STATE_STARTING_REPLAY: {
            'health': 'ok',
            'state_color': 'success',
            'state': 'Starting'
        },
        rbd.MIRROR_IMAGE_STATUS_STATE_REPLAYING: {
            'health': 'ok',
            'state_color': 'success',
            'state': 'Replaying'
        },
        rbd.MIRROR_IMAGE_STATUS_STATE_STOPPING_REPLAY: {
            'health': 'ok',
            'state_color': 'success',
            'state': 'Stopping'
        },
        rbd.MIRROR_IMAGE_STATUS_STATE_STOPPED: {
            'health': 'ok',
            'state_color': 'info',
            'state': 'Primary'
        }
    }

    rbdctx = rbd.RBD()
    try:
        mirror_image_status = rbdctx.mirror_image_status_list(ioctx)
        # An image that is not 'up' always maps to the 'down' entry,
        # regardless of its reported state.
        data['mirror_images'] = sorted([
            dict({
                'name': image['name'],
                'description': image['description']
            }, **mirror_state['down' if not image['up'] else image['state']])
            for image in mirror_image_status
        ], key=lambda k: k['name'])
    except rbd.ImageNotFound:
        pass
    except:  # noqa pylint: disable=W0702
        logger.exception("Failed to list mirror image status %s", pool_name)
        raise

    return data
def from_name(cls, module: 'Module', name: str,
              namespace_validator: Optional[Callable] = None,
              image_validator: Optional[Callable] = None,
              allow_image_level: bool = True) -> 'LevelSpec':
    """Parse a pool/namespace/image name string into a LevelSpec.

    :param module: mgr module providing rados access
    :param name: spec such as '', 'rbd/', 'rbd/ns/', 'rbd//image',
        'rbd/image' or 'rbd/ns/image'
    :param namespace_validator: optional callback run with the ioctx once
        the namespace has been set
    :param image_validator: optional callback run with the opened image
    :param allow_image_level: reject image-level specs when False
    :raises ValueError: on unparsable names or nonexistent
        pool/namespace/image
    """
    # parse names like:
    # '', 'rbd/', 'rbd/ns/', 'rbd//image', 'rbd/image', 'rbd/ns/image'
    match = re.match(r'^(?:([^/]+)/(?:(?:([^/]*)/|)(?:([^/@]+))?)?)?$',
                     name)
    if not match:
        raise ValueError("failed to parse {}".format(name))
    if match.group(3) and not allow_image_level:
        raise ValueError(
            "invalid name {}: image level is not allowed".format(name))

    # `id` is built incrementally: "<pool_id>[/<namespace>[/<image_id>]]"
    id = ""
    pool_id = None
    namespace = None
    image_name = None
    image_id = None
    if match.group(1):
        pool_name = match.group(1)
        try:
            pool_id = module.rados.pool_lookup(pool_name)
            if pool_id is None:
                raise ValueError(
                    "pool {} does not exist".format(pool_name))
            if pool_id not in get_rbd_pools(module):
                raise ValueError("{} is not an RBD pool".format(pool_name))
            id += str(pool_id)
            if match.group(2) is not None or match.group(3):
                id += "/"
                with module.rados.open_ioctx(pool_name) as ioctx:
                    namespace = match.group(2) or ""
                    if namespace:
                        namespaces = rbd.RBD().namespace_list(ioctx)
                        if namespace not in namespaces:
                            raise ValueError(
                                "namespace {} does not exist".format(
                                    namespace))
                    id += namespace
                    ioctx.set_namespace(namespace)
                    if namespace_validator:
                        namespace_validator(ioctx)
                    if match.group(3):
                        image_name = match.group(3)
                        try:
                            with rbd.Image(ioctx, image_name,
                                           read_only=True) as image:
                                image_id = image.id()
                                id += "/" + image_id
                                if image_validator:
                                    image_validator(image)
                        except rbd.ImageNotFound:
                            raise ValueError(
                                "image {} does not exist".format(
                                    image_name))
                        except rbd.InvalidArgument:
                            raise ValueError(
                                "image {} is not in snapshot mirror mode".
                                format(image_name))
        except rados.ObjectNotFound:
            raise ValueError("pool {} does not exist".format(pool_name))

    # normalize possible input name like 'rbd//image'
    if not namespace and image_name:
        name = "{}/{}".format(pool_name, image_name)

    return LevelSpec(name, id, pool_id, namespace, image_id)
def main():
    """Forensic dump of a Ceph cluster: mon/osd/pg maps, pools, RBD
    images, snapshots and raw objects, recorded into a dataset DB and
    per-epoch JSON files.

    NOTE(review): an unconditional exit() sits after the PG section, so
    everything from the cluster/pool/object inventory onward is currently
    unreachable — presumably debug leftover; confirm before relying on it.
    """
    ## Create directory structure and chdir into root_dir
    os.mkdir(DIR_ROOT)
    os.mkdir(DIR_MAP_MON)
    os.mkdir(DIR_MAP_OSD)
    os.mkdir(DIR_MAP_PG)
    os.chdir(DIR_ROOT)

    ## Initiate _new_ database and tables ##
    db = dataset.connect(DATABASE_CONNECTION + DATABASE_FILE)
    table_ceph_cluster = db.create_table('ceph_cluster', primary_id=False)
    table_ceph_pools = db.create_table('ceph_pools', primary_id=False)
    table_rbd_images = db.create_table('ceph_rbd_images', primary_id=False)
    table_snapshots = db.create_table('ceph_snapshots', primary_id=False)
    table_ceph_objects = db.create_table('ceph_objects')
    table_mon_map = db.create_table('ceph_monmap', primary_id=False)
    table_osd_map = db.create_table('ceph_osdmap', primary_id=False)

    # MON: check current epoch and fill MON table
    monmap = Popen(['ceph', 'mon', 'dump', '-f', 'json-pretty'],
                   stdout=PIPE)
    monmap = monmap.stdout.read().decode('ascii')
    monmap = json.loads(monmap)
    mon_epoch = monmap['epoch']
    # Walk every mon map epoch down to 1, saving each as JSON + DB row.
    while (mon_epoch > 0):
        monmap = Popen(
            ['ceph', 'mon', 'dump', str(mon_epoch), '-f', 'json-pretty'],
            stdout=PIPE)
        monmap = monmap.stdout.read().decode('ascii')
        monmap_json = json.loads(monmap)
        # NOTE(review): ctime/mtime are parsed but never used.
        ctime = datetime.datetime.strptime(monmap_json['created'],
                                           '%Y-%m-%d %H:%M:%S.%f')
        mtime = datetime.datetime.strptime(monmap_json['modified'],
                                           '%Y-%m-%d %H:%M:%S.%f')
        monmap_data = dict(
            fsid=monmap_json['fsid'],
            epoch=monmap_json['epoch'],
            created=monmap_json['created'],
            modified=monmap_json['modified'],
            mons_amount=monmap_json['mons'].__len__(),
        )
        with open(
                DIR_MAP_MON + '/epoch_' +
                str(mon_epoch).rjust(INT_LENGTH, '0') + '.json',
                'w+') as mon_file:
            mon_file.write(monmap)
        table_mon_map.insert(monmap_data)
        mon_epoch -= 1

    # OSD: check current epoch
    osdmap = Popen(['ceph', 'osd', 'dump', '-f', 'json-pretty'],
                   stdout=PIPE)
    osdmap = osdmap.stdout.read().decode('ascii')
    osdmap = json.loads(osdmap)
    osd_epoch = osdmap['epoch']
    # Same walk as for the mon maps, one file + row per osd map epoch.
    while (osd_epoch > 0):
        osdmap = Popen(
            ['ceph', 'osd', 'dump', str(osd_epoch), '-f', 'json-pretty'],
            stdout=PIPE)
        osdmap = osdmap.stdout.read().decode('ascii')
        osdmap_json = json.loads(osdmap)
        # NOTE(review): ctime/mtime are parsed but never used.
        ctime = datetime.datetime.strptime(osdmap_json['created'],
                                           '%Y-%m-%d %H:%M:%S.%f')
        mtime = datetime.datetime.strptime(osdmap_json['modified'],
                                           '%Y-%m-%d %H:%M:%S.%f')
        osdmap_data = dict(fsid=osdmap_json['fsid'],
                           epoch=osdmap_json['epoch'],
                           created=osdmap_json['created'],
                           modified=osdmap_json['modified'],
                           flags=osdmap_json['flags'],
                           osds_amount=osdmap_json['osds'].__len__(),
                           pool_amount=osdmap_json['pools'].__len__())
        with open(
                DIR_MAP_OSD + '/epoch_' +
                str(osd_epoch).rjust(INT_LENGTH, '0') + '.json',
                'w+') as osd_file:
            osd_file.write(osdmap)
        table_osd_map.insert(osdmap_data)
        osd_epoch -= 1

    # PG: check current epoch
    pgmap = Popen(['ceph', 'pg', 'dump', '-f', 'json-pretty'], stdout=PIPE)
    pgmap = pgmap.stdout.read().decode('ascii')
    pgmap_json = json.loads(pgmap)
    pgmap_epoch = pgmap_json['version']
    with open(DIR_MAP_PG + '/epoch_' + str(pgmap_epoch) + '.json',
              'w+') as pg_file:
        pg_file.write(pgmap)
    # Save a detailed query dump for every placement group.
    for element in pgmap_json['pg_stats']:
        pgid = element['pgid']
        print(pgid)
        pgquery = Popen(
            ['ceph', 'pg', str(pgid), 'query', '-f', 'json-pretty'],
            stdout=PIPE)
        print(pgquery)
        pgquery = pgquery.stdout.read().decode('ascii')
        print(pgquery)
        with open(DIR_MAP_PG + '/pg_query_' + str(pgid) + '.json',
                  'w+') as pgquery_file:
            pgquery_file.write(pgquery)
    # NOTE(review): everything below never runs because of this exit().
    exit()

    ## Initiate new cluster and rbd instance and connect ##
    cluster = rados.Rados(conffile=CONF_FILE,
                          conf=dict(keyring=KEYRING_FILE))
    rbd_inst = rbd.RBD()
    cluster.connect()

    ## Cluster: list properties and save statistics ##
    cluster_stats = cluster.get_cluster_stats()
    cluster_data = {
        'rados_id': cluster.rados_id,
        # 'fsid': print(cluster.get_fsid()),
        'fsid': "Not implemented yet",
        'inconsistent_pgs': "Not implemented yet",
        'stats_kb': cluster_stats['kb'],
        'stats_kb_used': cluster_stats['kb_used'],
        'stats_kb_avail': cluster_stats['kb_avail'],
        'stats_num_objects': cluster_stats['num_objects'],
        # NOTE(review): print() returns None, so 'version' is stored as
        # None — probably meant to be cluster.version() directly.
        'version': print(cluster.version()),
        'state': cluster.state
    }
    for key, value in cluster_data.items():
        table_ceph_cluster.insert(dict(Key=key, Value=value))

    ## Pools: list and save statistics ##
    listpool = cluster.list_pools()
    for pool_name in listpool:
        # if pool_name != 'libvirt-pool':
        #     continue
        ## Open Input/Output context on pool name ##
        ioctx = cluster.open_ioctx(pool_name)
        pool_stats = ioctx.get_stats()
        # table_ceph_pools.insert(pool_data)

        ## RBD: List RADOS block devices ##
        listrbd = rbd_inst.list(ioctx)
        list_images = []
        list_image_id = []
        if listrbd.__len__() > 0:
            for rbdname in listrbd:
                image = rbd.Image(ioctx, rbdname, read_only=True)
                image_id = image.id()
                image_stats = image.stat()
                try:
                    parent_id = image.parent_id()
                except:
                    # No parent (not a clone).
                    parent_id = None
                image_data = dict(
                    pool=pool_name,
                    image_name=rbdname,
                    image_id=image_id,
                    image_stripe_count=image.stripe_count(),
                    image_block_name_prefix=image_stats['block_name_prefix'],
                    num_objs=image_stats['num_objs'],
                    obj_size=image_stats['obj_size'],
                    order=image_stats['order'],
                    parent_id=parent_id,
                    parent_name=image_stats['parent_name'],
                    parent_pool=str(image_stats['parent_pool']),
                    size=image_stats['size'],
                )
                ## Snapshot stuff
                snaplist = image.list_snaps()
                for snapshot in snaplist.__iter__():
                    snapshot_ctime = image.get_snap_timestamp(
                        snapshot['id'])
                    snapshot_data = dict(pool=pool_name,
                                         image_id=image_id,
                                         image_name=rbdname,
                                         snapshot_id=snapshot['id'],
                                         size=snapshot['size'],
                                         name=snapshot['name'],
                                         ctime=snapshot_ctime.timestamp())
                    table_snapshots.insert(snapshot_data)
                list_images.append(image_data)
                list_image_id.append(image_id)
                table_rbd_images.insert(image_data)
        logging.debug(pp.pprint(list_images))

        ## Object: list and export ##
        listobjects = ioctx.list_objects()
        for object in listobjects:
            ## Get object statistics and parse timestamp
            object_stats = object.stat()
            mtime = object_stats[1]
            object_mtime = datetime.datetime(
                mtime.tm_year,
                mtime.tm_mon,
                mtime.tm_mday,
                mtime.tm_hour,
                mtime.tm_min,
                mtime.tm_sec,
            )
            ## Xattrs: list and jsonify x-attributes ##
            xattrlist = object.get_xattrs()
            object_xattributes = [(str(key), str(value))
                                  for (key, value) in xattrlist]
            object_xattributes = json.dumps(object_xattributes)
            object_data = dict(
                pool_name=pool_name,
                key=object.key,
                namespace=object.nspace,
                size=object_stats[0],
                mtime=object_mtime.timestamp(),
                mtime_weekday=mtime.tm_wday,
                mtime_yearday=mtime.tm_yday,
                mtime_is_dst=mtime.tm_isdst,
                offset=object.offset,
                state=object.state,
                xattr=object_xattributes,
            )
            ## Parse additional RBD data
            if object.key[:3] == 'rbd':
                splitted_key = object.key.split('.')
                # splitted_key[0] rbd_object_map, rbd_info, rbd_header, rbd_data
                # splitted_key[1] rbd id
                # splitted_key[2] rbd offset
                # if splitted_key[1] in list_image_id:
                #     print(splitted_key[1])
                # Pad missing components with None so the indexing below
                # is always safe.
                try:
                    splitted_key[0]
                except IndexError:
                    splitted_key.append(None)
                try:
                    splitted_key[1]
                except IndexError:
                    splitted_key.append(None)
                try:
                    splitted_key[2]
                except IndexError:
                    splitted_key.append(None)
                object_data['rbd'] = True
                object_data['rbd_type'] = splitted_key[0]
                object_data['rbd_id'] = splitted_key[1]
                object_data['rbd_offset'] = splitted_key[2]
            table_ceph_objects.insert(object_data)
class RbdTrash(RESTController):
    """REST controller exposing the RBD trash: list, purge, restore and
    delete operations across pools and namespaces."""

    RESOURCE_ID = "image_id_spec"
    # Single shared librbd handle for all endpoints of this controller.
    rbd_inst = rbd.RBD()

    @ViewCache()
    def _trash_pool_list(self, pool_name):
        """Collect trash entries of one pool across all its namespaces."""
        with mgr.rados.open_ioctx(pool_name) as ioctx:
            result = []
            namespaces = self.rbd_inst.namespace_list(ioctx)
            # images without namespace
            namespaces.append('')
            for namespace in namespaces:
                ioctx.set_namespace(namespace)
                images = self.rbd_inst.trash_list(ioctx)
                for trash in images:
                    trash['pool_name'] = pool_name
                    trash['namespace'] = namespace
                    # Timestamps are serialized as ISO strings with a
                    # trailing 'Z' (UTC marker).
                    trash['deletion_time'] = "{}Z".format(
                        trash['deletion_time'].isoformat())
                    trash['deferment_end_time'] = "{}Z".format(
                        trash['deferment_end_time'].isoformat())
                    result.append(trash)
            return result

    def _trash_list(self, pool_name=None):
        """List trash for one pool, or every RBD pool when None."""
        if pool_name:
            pools = [pool_name]
        else:
            pools = [p['pool_name']
                     for p in CephService.get_pool_list('rbd')]

        result = []
        for pool in pools:
            # pylint: disable=unbalanced-tuple-unpacking
            status, value = self._trash_pool_list(pool)
            result.append({'status': status,
                           'value': value,
                           'pool_name': pool})
        return result

    @handle_rbd_error()
    @handle_rados_error('pool')
    @EndpointDoc("Get RBD Trash Details by pool name",
                 parameters={
                     'pool_name': (str, 'Name of the pool'),
                 },
                 responses={200: RBD_TRASH_SCHEMA})
    def list(self, pool_name=None):
        """List all entries from trash."""
        return self._trash_list(pool_name)

    @handle_rbd_error()
    @handle_rados_error('pool')
    @RbdTask('trash/purge', ['{pool_name}'], 2.0)
    @RESTController.Collection('POST', query_params=['pool_name'])
    @DeletePermission
    @allow_empty_body
    def purge(self, pool_name=None):
        """Remove all expired images from trash."""
        # Lexicographic comparison works because both sides are
        # ISO-8601 strings with a 'Z' suffix.
        now = "{}Z".format(datetime.utcnow().isoformat())
        pools = self._trash_list(pool_name)

        for pool in pools:
            for image in pool['value']:
                if image['deferment_end_time'] < now:
                    logger.info(
                        'Removing trash image %s (pool=%s, namespace=%s, name=%s)',
                        image['id'], pool['pool_name'], image['namespace'],
                        image['name'])
                    rbd_call(pool['pool_name'], image['namespace'],
                             self.rbd_inst.trash_remove, image['id'], 0)

    @RbdTask('trash/restore', ['{image_id_spec}', '{new_image_name}'], 2.0)
    @RESTController.Resource('POST')
    @CreatePermission
    @allow_empty_body
    def restore(self, image_id_spec, new_image_name):
        """Restore an image from trash."""
        pool_name, namespace, image_id = parse_image_spec(image_id_spec)
        return rbd_call(pool_name, namespace,
                        self.rbd_inst.trash_restore, image_id,
                        new_image_name)

    @RbdTask('trash/remove', ['{image_id_spec}'], 2.0)
    def delete(self, image_id_spec, force=False):
        """Delete an image from trash.

        If the image's deferment time has not expired it cannot be
        removed unless force is used. An image actively in use by clones
        or one that has snapshots cannot be removed at all.
        """
        pool_name, namespace, image_id = parse_image_spec(image_id_spec)
        return rbd_call(pool_name, namespace, self.rbd_inst.trash_remove,
                        image_id, int(str_to_bool(force)))
def __init__(self):
    # Initialize the parent class first, then keep a single shared
    # librbd handle on the instance for later calls.
    super().__init__()
    self.rbd_inst = rbd.RBD()
def __init__(self):
    # Wrap the librbd handle in a tpool proxy (presumably
    # eventlet.tpool — confirm the import) so blocking librbd calls
    # run in native threads instead of stalling the event loop.
    self._rbd = tpool.Proxy(rbd.RBD())