Example #1
 def prune_snapshots(self, sched: Schedule) -> None:
     try:
         log.debug('Pruning snapshots')
         ret = sched.retention
         path = sched.path
         prune_candidates = set()
         time = datetime.now(timezone.utc)
         with open_filesystem(self, sched.fs) as fs_handle:
             with fs_handle.opendir(f'{path}/.snap') as d_handle:
                 dir_ = fs_handle.readdir(d_handle)
                 while dir_:
                     if dir_.d_name.decode('utf-8').startswith(f'{SNAPSHOT_PREFIX}-'):
                         log.debug(f'add {dir_.d_name} to pruning')
                         ts = datetime.strptime(
                             snap_name_to_timestamp(dir_.d_name.decode('utf-8')), SNAPSHOT_TS_FORMAT)
                         prune_candidates.add((dir_, ts))
                     else:
                         log.debug(f'skipping dir entry {dir_.d_name}')
                     dir_ = fs_handle.readdir(d_handle)
             to_prune = get_prune_set(prune_candidates, ret)
             for k in to_prune:
                 dirname = k[0].d_name.decode('utf-8')
                 log.debug(f'rmdir on {dirname}')
                 fs_handle.rmdir(f'{path}/.snap/{dirname}')
             if to_prune:
                 with self.get_schedule_db(sched.fs) as conn_mgr:
                     db = conn_mgr.dbinfo.db
                     sched.update_pruned(time, db, len(to_prune))
     except Exception:
         self._log_exception('prune_snapshots')
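Pruning in Example #1 hinges on the timestamp embedded in each snapshot's directory name. Below is a minimal, self-contained sketch of that name-to-timestamp round trip; the concrete SNAPSHOT_PREFIX/SNAPSHOT_TS_FORMAT values and the snap_name_to_timestamp helper are assumptions for illustration (the real ones live in the snap_schedule module):

from datetime import datetime

# assumed values, mirroring the module constants used above
SNAPSHOT_PREFIX = 'scheduled'
SNAPSHOT_TS_FORMAT = '%Y-%m-%d-%H_%M_%S'

def snap_name_to_timestamp(snap_name: str) -> str:
    # drop the '<prefix>-' part, keeping only the timestamp portion
    return snap_name[len(SNAPSHOT_PREFIX) + 1:]

name = f'{SNAPSHOT_PREFIX}-2024-01-31-12_30_00'
ts = datetime.strptime(snap_name_to_timestamp(name), SNAPSHOT_TS_FORMAT)
print(ts)  # 2024-01-31 12:30:00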
Example #2
 def create_scheduled_snapshot(self, fs_name: str, path: str,
                               retention: str, start: str,
                               repeat: str) -> None:
     log.debug(f'Scheduled snapshot of {path} triggered')
     sched = None  # may remain unset if the schedule lookup below fails; checked in finally
     try:
         with self.get_schedule_db(fs_name) as conn_mgr:
             db = conn_mgr.dbinfo.db
             try:
                 sched = Schedule.get_db_schedules(path,
                                                   db,
                                                   fs_name,
                                                   repeat=repeat,
                                                   start=start)[0]
                 time = datetime.now(timezone.utc)
                 with open_filesystem(self, fs_name) as fs_handle:
                     snap_ts = time.strftime(SNAPSHOT_TS_FORMAT)
                     snap_name = f'{path}/.snap/{SNAPSHOT_PREFIX}-{snap_ts}'
                     fs_handle.mkdir(snap_name, 0o755)
                 log.info(f'created scheduled snapshot of {path}')
                 log.debug(f'created scheduled snapshot {snap_name}')
                 sched.update_last(time, db)
             except cephfs.Error:
                 self._log_exception('create_scheduled_snapshot')
                 sched.set_inactive(db)
             except Exception:
                 # catch all exceptions here; otherwise failures in this
                 # thread would go unnoticed
                 self._log_exception('create_scheduled_snapshot')
     finally:
         with self.get_schedule_db(fs_name) as conn_mgr:
             db = conn_mgr.dbinfo.db
             self.refresh_snap_timers(fs_name, path, db)
         if sched is not None:
             self.prune_snapshots(sched)
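The fs_handle.mkdir call in Example #2 is the entire snapshot mechanism: in CephFS, creating a directory under a path's hidden .snap directory snapshots that path. A standalone sketch against the cephfs Python binding follows; the config file, file system name, and paths are assumptions:

import cephfs

fs = cephfs.LibCephFS(conffile='')   # assumes a readable default ceph.conf
fs.mount(filesystem_name=b'cephfs')  # hypothetical file system name
try:
    # making a directory under <path>/.snap takes a snapshot of <path>
    fs.mkdir(b'/some/dir/.snap/manual-2024-01-31', 0o755)
finally:
    fs.shutdown()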
Example #3
    def verify_and_set_mirror_info(self, local_fs_name, remote_cluster_spec, remote_fs_name, remote_conf={}):
        log.debug(f'local fs={local_fs_name} remote={remote_cluster_spec}/{remote_fs_name}')

        client_name, cluster_name = FSSnapshotMirror.split_spec(remote_cluster_spec)
        remote_cluster, remote_fs = connect_to_filesystem(client_name, cluster_name, remote_fs_name,
                                                          'remote', conf_dct=remote_conf)
        try:
            local_cluster_id = self.rados.get_fsid()
            remote_cluster_id = remote_cluster.get_fsid()
            log.debug(f'local_cluster_id={local_cluster_id} remote_cluster_id={remote_cluster_id}')
            if 'fsid' in remote_conf:
                if remote_cluster_id != remote_conf['fsid']:
                    raise MirrorException(-errno.EINVAL, 'FSID mismatch between bootstrap token and remote cluster')

            local_fscid = remote_fscid = None
            with open_filesystem(self.local_fs, local_fs_name) as local_fsh:
                local_fscid = local_fsh.get_fscid()
                remote_fscid = remote_fs.get_fscid()
                log.debug(f'local_fscid={local_fscid} remote_fscid={remote_fscid}')
                mi = None
                try:
                    mi = FSSnapshotMirror.get_mirror_info(local_fsh)
                except MirrorException as me:
                    if me.args[0] != -errno.ENODATA:
                        raise Exception(-errno.EINVAL)
                if mi and mi['cluster_id'] == remote_cluster_id and mi['fs_id'] == remote_fscid:
                    raise MirrorException(-errno.EINVAL, f'file system is an active peer for file system: {remote_fs_name}')

            if local_cluster_id == remote_cluster_id and local_fscid == remote_fscid:
                raise MirrorException(-errno.EINVAL, "Source and destination cluster fsid and "
                                      "file-system name can't be the same")
            FSSnapshotMirror.set_mirror_info(local_cluster_id, local_fscid, remote_fs)
        finally:
            disconnect_from_filesystem(cluster_name, remote_fs_name, remote_cluster, remote_fs)
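Example #3 applies three guards before persisting mirror info: the bootstrap token's fsid must match the remote cluster, the local file system must not already be an active peer, and a file system must not mirror onto itself. Here is the last guard in isolation, assuming MirrorException carries (negative errno, message) as the raises above suggest:

import errno

class MirrorException(Exception):
    # assumed shape: args = (-errno.X, message), matching the raises above
    pass

def check_not_self(local_cluster_id, remote_cluster_id, local_fscid, remote_fscid):
    # mirroring a file system onto itself is never a valid peer setup
    if local_cluster_id == remote_cluster_id and local_fscid == remote_fscid:
        raise MirrorException(-errno.EINVAL,
                              "source and destination cluster fsid and "
                              "file-system name can't be the same")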
Example #4
@contextmanager  # from contextlib; without it the with-statement usage below fails
def open_volume_lockless(vc, volname):
    """
    open a volume with shared access. This API is to be used as a context
    manager.

    :param vc: volume client instance
    :param volname: volume name
    :return: yields a volume handle (ceph filesystem handle)
    """
    with open_filesystem(vc, volname) as fs_handle:
        yield fs_handle
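With the @contextmanager decorator in place, callers consume the yielded handle in a with block. A hypothetical call site (vc being a connected volume client is an assumption):

# hypothetical usage sketch
with open_volume_lockless(vc, 'vol1') as fs_handle:
    # any libcephfs call works on the yielded handle, e.g. stat the root
    print(fs_handle.stat(b'/'))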
Example #5
@contextmanager  # from contextlib
def open_volume(vc, volname):
    """
    open a volume for exclusive access. This API is to be used as a context
    manager.

    :param vc: volume client instance
    :param volname: volume name
    :return: yields a volume handle (ceph filesystem handle)
    """
    g_lock = GlobalLock()
    with g_lock.lock_op():
        with open_filesystem(vc, volname) as fs_handle:
            yield fs_handle
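Example #5 differs from #4 only in serializing access through GlobalLock before opening the file system. The real GlobalLock lives elsewhere in the volumes module; a minimal process-local sketch of the interface used here, offered as an assumption for illustration:

import threading
from contextlib import contextmanager

class GlobalLock:
    # one lock shared by all instances, so every lock_op() call serializes
    _lock = threading.Lock()

    @contextmanager
    def lock_op(self):
        with self._lock:
            yield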
Example #6
File: volume.py Project: xijiacun/ceph
@contextmanager  # from contextlib
def open_volume_lockless(vc, volname):
    """
    open a volume with shared access. This API is to be used as a context
    manager.

    :param vc: volume client instance
    :param volname: volume name
    :return: yields a volume handle (ceph filesystem handle)
    """
    try:
        with open_filesystem(vc, volname) as fs_handle:
            yield fs_handle
    except CephfsConnectionException as ce:
        raise VolumeException(ce.errno, ce.error_str)
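Examples #6 and #7 refine #4 and #5 by translating the low-level CephfsConnectionException into the module's VolumeException, so callers handle a single error type. What a call site might look like (the volume name and log object are assumptions):

# hypothetical caller; only VolumeException can escape the helper now
try:
    with open_volume_lockless(vc, 'does-not-exist') as fs_handle:
        fs_handle.stat(b'/')
except VolumeException as ve:
    log.error(f'failed to open volume: {ve}')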
Example #7
File: volume.py Project: xijiacun/ceph
@contextmanager  # from contextlib
def open_volume(vc, volname):
    """
    open a volume for exclusive access. This API is to be used as a context
    manager.

    :param vc: volume client instance
    :param volname: volume name
    :return: yields a volume handle (ceph filesystem handle)
    """
    g_lock = GlobalLock()
    with g_lock.lock_op():
        try:
            with open_filesystem(vc, volname) as fs_handle:
                yield fs_handle
        except CephfsConnectionException as ce:
            raise VolumeException(ce.errno, ce.error_str)