def task(ctx, config):
    """
    Run the TestClientLimits suite against a running CephFS cluster.

    Requires an outer mount task (ceph_fuse or kclient) providing at
    least two client mounts in ``ctx.mounts``.

    :param ctx: teuthology run context (provides ``mounts``)
    :param config: task configuration, passed through to run_tests
    :raises RuntimeError: if fewer than two clients are configured, or
        if kernel clients share a node (kclient kill() power cycles
        the node, so each kclient needs its own host)
    """
    fs = Filesystem(ctx)

    # Pick out the clients we will use from the configuration
    # =======================================================
    if len(ctx.mounts) < 2:
        raise RuntimeError("Need at least two clients")
    # dict.values() is a non-subscriptable view on Python 3, so
    # materialise it as a list before indexing.
    mounts = list(ctx.mounts.values())
    mount_a = mounts[0]
    mount_b = mounts[1]

    if not isinstance(mount_a, FuseMount) or not isinstance(mount_b, FuseMount):
        # kclient kill() power cycles nodes, so requires clients to each be on
        # their own node
        if mount_a.client_remote.hostname == mount_b.client_remote.hostname:
            raise RuntimeError("kclient clients must be on separate nodes")

    # Stash references on ctx so that we can easily debug in interactive mode
    # =======================================================================
    ctx.filesystem = fs
    ctx.mount_a = mount_a
    ctx.mount_b = mount_b

    run_tests(ctx, config, TestClientLimits, {
        'fs': fs,
        'mount_a': mount_a,
        'mount_b': mount_b
    })

    # Continue to any downstream tasks
    # ================================
    yield
def task(ctx, config):
    """
    Run the TestClusterFull suite against a running CephFS cluster.

    Requires an outer mount task providing at least two client mounts
    in ``ctx.mounts``.

    :param ctx: teuthology run context (provides ``mounts``)
    :param config: task configuration, passed through to run_tests
    :raises RuntimeError: if fewer than two clients are configured
    """
    fs = Filesystem(ctx)

    # Pick out the clients we will use from the configuration
    # =======================================================
    if len(ctx.mounts) < 2:
        raise RuntimeError("Need at least two clients")
    # dict.values() is a non-subscriptable view on Python 3, so
    # materialise it as a list before indexing.
    mounts = list(ctx.mounts.values())
    mount_a = mounts[0]
    mount_b = mounts[1]

    # Stash references on ctx so that we can easily debug in interactive mode
    # =======================================================================
    ctx.filesystem = fs
    ctx.mount_a = mount_a
    ctx.mount_b = mount_b

    run_tests(ctx, config, TestClusterFull, {
        'fs': fs,
        'mount_a': mount_a,
        'mount_b': mount_b
    })

    # Continue to any downstream tasks
    # ================================
    yield
def task(ctx, config):
    """
    Run the TestClientLimits suite against a running CephFS cluster.

    Requires an outer ceph_fuse task providing at least two FUSE client
    mounts in ``ctx.mounts`` — kernel clients are rejected because they
    do not support all the tricks these tests rely on.

    :param ctx: teuthology run context (provides ``mounts``)
    :param config: task configuration, also forwarded to Filesystem
    :raises RuntimeError: if fewer than two clients are configured, or
        the first client is not a FUSE mount
    """
    fs = Filesystem(ctx, config)

    # Pick out the clients we will use from the configuration
    # =======================================================
    if len(ctx.mounts) < 2:
        raise RuntimeError("Need at least two clients")
    # dict.values() is a non-subscriptable view on Python 3, so
    # materialise it as a list before indexing.
    mounts = list(ctx.mounts.values())
    mount_a = mounts[0]
    mount_b = mounts[1]

    if not isinstance(mount_a, FuseMount):
        # TODO: make kclient mount capable of all the same test tricks as ceph_fuse
        raise RuntimeError("Require FUSE clients")

    # Stash references on ctx so that we can easily debug in interactive mode
    # =======================================================================
    ctx.filesystem = fs
    ctx.mount_a = mount_a
    ctx.mount_b = mount_b

    run_tests(ctx, config, TestClientLimits, {
        'fs': fs,
        'mount_a': mount_a,
        'mount_b': mount_b
    })

    # Continue to any downstream tasks
    # ================================
    yield
def task(ctx, config):
    """
    Run the TestClientLimits suite against a running CephFS cluster.

    Requires an outer mount task (ceph_fuse or kclient) providing at
    least two client mounts in ``ctx.mounts``.

    :param ctx: teuthology run context (provides ``mounts``)
    :param config: task configuration, passed through to run_tests
    :raises RuntimeError: if fewer than two clients are configured, or
        if kernel clients share a node (kclient kill() power cycles
        the node, so each kclient needs its own host)
    """
    fs = Filesystem(ctx)

    # Pick out the clients we will use from the configuration
    # =======================================================
    if len(ctx.mounts) < 2:
        raise RuntimeError("Need at least two clients")
    # dict.values() is a non-subscriptable view on Python 3, so
    # materialise it as a list before indexing.
    mounts = list(ctx.mounts.values())
    mount_a = mounts[0]
    mount_b = mounts[1]

    if not isinstance(mount_a, FuseMount) or not isinstance(mount_b, FuseMount):
        # kclient kill() power cycles nodes, so requires clients to each be on
        # their own node
        if mount_a.client_remote.hostname == mount_b.client_remote.hostname:
            raise RuntimeError("kclient clients must be on separate nodes")

    # Stash references on ctx so that we can easily debug in interactive mode
    # =======================================================================
    ctx.filesystem = fs
    ctx.mount_a = mount_a
    ctx.mount_b = mount_b

    run_tests(ctx, config, TestClientLimits, {
        'fs': fs,
        'mount_a': mount_a,
        'mount_b': mount_b
    })

    # Continue to any downstream tasks
    # ================================
    yield
def task(ctx, config):
    """
    Execute CephFS client recovery test suite.

    Requires:
    - An outer ceph_fuse task with at least two clients
    - That the clients are on a separate host to the MDS

    :param ctx: teuthology run context (provides ``mounts``)
    :param config: task configuration, passed through to run_tests
    :raises RuntimeError: if fewer than two clients are configured,
        if kernel clients share a node, or if the first client runs
        on the same host as an MDS (network-dependent tests need a
        remote client)
    """
    fs = Filesystem(ctx)

    # Pick out the clients we will use from the configuration
    # =======================================================
    if len(ctx.mounts) < 2:
        raise RuntimeError("Need at least two clients")
    # dict.values() is a non-subscriptable view on Python 3, so
    # materialise it as a list before indexing.
    mounts = list(ctx.mounts.values())
    mount_a = mounts[0]
    mount_b = mounts[1]

    if not isinstance(mount_a, FuseMount) or not isinstance(mount_b, FuseMount):
        # kclient kill() power cycles nodes, so requires clients to each be on
        # their own node
        if mount_a.client_remote.hostname == mount_b.client_remote.hostname:
            raise RuntimeError("kclient clients must be on separate nodes")

    # Check we have at least one remote client for use with network-dependent tests
    # =============================================================================
    if mount_a.client_remote.hostname in fs.get_mds_hostnames():
        raise RuntimeError("Require first client to be on separate server from MDSs")

    # Stash references on ctx so that we can easily debug in interactive mode
    # =======================================================================
    ctx.filesystem = fs
    ctx.mount_a = mount_a
    ctx.mount_b = mount_b

    run_tests(ctx, config, TestClientRecovery, {
        # Read the daemon's live timeout/backoff settings via the admin
        # socket so the tests track whatever the cluster is configured with
        "mds_reconnect_timeout": int(fs.mds_asok(
            ['config', 'get', 'mds_reconnect_timeout']
        )['mds_reconnect_timeout']),
        "mds_session_timeout": int(fs.mds_asok(
            ['config', 'get', 'mds_session_timeout']
        )['mds_session_timeout']),
        "ms_max_backoff": int(fs.mds_asok(
            ['config', 'get', 'ms_max_backoff']
        )['ms_max_backoff']),
        "fs": fs,
        "mount_a": mount_a,
        "mount_b": mount_b
    })

    # Continue to any downstream tasks
    # ================================
    yield
def task(ctx, config):
    """
    Run the TestMDSAutoRepair suite against a running CephFS cluster.

    Requires an outer mount task providing at least one client mount
    in ``ctx.mounts``; only the first mount is used.

    :param ctx: teuthology run context (provides ``mounts``)
    :param config: task configuration, passed through to run_tests
    """
    fs = Filesystem(ctx)

    # dict.values() is a non-subscriptable view on Python 3, so
    # materialise it as a list before indexing.
    mount_a = list(ctx.mounts.values())[0]

    # Stash references on ctx so that we can easily debug in interactive mode
    # =======================================================================
    ctx.filesystem = fs
    ctx.mount_a = mount_a

    run_tests(ctx, config, TestMDSAutoRepair, {
        'fs': fs,
        'mount_a': mount_a,
    })

    # Continue to any downstream tasks
    # ================================
    yield
def task(ctx, config):
    """
    Execute CephFS client recovery test suite.

    Requires:
    - An outer ceph_fuse task with at least two clients
    - That the clients are on a separate host to the MDS

    :param ctx: teuthology run context (provides ``mounts``)
    :param config: task configuration, passed through to run_tests
    :raises RuntimeError: if fewer than two clients are configured,
        if kernel clients share a node, or if the first client runs
        on the same host as an MDS (network-dependent tests need a
        remote client)
    """
    fs = Filesystem(ctx)

    # Pick out the clients we will use from the configuration
    # =======================================================
    if len(ctx.mounts) < 2:
        raise RuntimeError("Need at least two clients")
    # dict.values() is a non-subscriptable view on Python 3, so
    # materialise it as a list before indexing.
    mounts = list(ctx.mounts.values())
    mount_a = mounts[0]
    mount_b = mounts[1]

    if not isinstance(mount_a, FuseMount) or not isinstance(mount_b, FuseMount):
        # kclient kill() power cycles nodes, so requires clients to each be on
        # their own node
        if mount_a.client_remote.hostname == mount_b.client_remote.hostname:
            raise RuntimeError("kclient clients must be on separate nodes")

    # Check we have at least one remote client for use with network-dependent tests
    # =============================================================================
    if mount_a.client_remote.hostname in fs.get_mds_hostnames():
        raise RuntimeError(
            "Require first client to be on separate server from MDSs")

    # Stash references on ctx so that we can easily debug in interactive mode
    # =======================================================================
    ctx.filesystem = fs
    ctx.mount_a = mount_a
    ctx.mount_b = mount_b

    run_tests(
        ctx, config, TestClientRecovery, {
            # Read the daemon's live timeout/backoff settings via the admin
            # socket so the tests track the cluster's actual configuration
            "mds_reconnect_timeout": int(
                fs.mds_asok(['config', 'get', 'mds_reconnect_timeout']
                            )['mds_reconnect_timeout']),
            "mds_session_timeout": int(
                fs.mds_asok(['config', 'get', 'mds_session_timeout']
                            )['mds_session_timeout']),
            "ms_max_backoff": int(
                fs.mds_asok(['config', 'get', 'ms_max_backoff']
                            )['ms_max_backoff']),
            "fs": fs,
            "mount_a": mount_a,
            "mount_b": mount_b
        })

    # Continue to any downstream tasks
    # ================================
    yield