def __init__(self): """Create a daos container update-acl command object.""" super(DaosCommand.ContainerSubCommand.UpdateAclSubCommand, self).__init__("update-acl") self.acl_file = FormattedParameter("--acl-file={}") self.entry = FormattedParameter("--entry={}")
def __init__(self): """Create a daos container delete-acl command object.""" super().__init__("delete-acl") self.principal = FormattedParameter("--principal={}")
def __init__(self): """Create a daos container destroy-snap command object.""" super().__init__("destroy-snap") self.epc = FormattedParameter("--epc={}") self.epcrange = FormattedParameter("--epcrange={}") self.snap = FormattedParameter("--snap={}")
def __init__(self): """Create a daos pool list-attr command object.""" super().__init__("list-attrs") self.sys_name = FormattedParameter("--sys-name={}") self.verbose = FormattedParameter("--verbose", False)
def __init__(self): """Create a daos container check command object.""" super(DaosCommandBase.ContainerSubCommand.CheckSubCommand, self).__init__("check") self.src = FormattedParameter("--epc={}")
def __init__(self, path): """Create a daos_perf command object. Args: path (str): path to the daos_perf command """ super().__init__("/run/daos_perf/*", "daos_perf", path) # daos_perf command line options: # # -P <number/string> # Pool SCM partition size, which can have M(megatbytes) or # G(gigabytes) as postfix of number. E.g. -P 512M, -P 8G. self.pool_scm_size = FormattedParameter("-P {}") # -N <number/string> # Pool NVMe partition size. self.pool_nvme_size = FormattedParameter("-N {}") # -T <vos|echo|daos> # Type of test, it can be 'vos' and 'daos'. # vos : run directly on top of Versioning Object Store (VOS). # echo : I/O traffic generated by the utility only goes # through the network stack and never lands to storage. # daos : I/O traffic goes through the full DAOS stack, # including both network and storage. # The default value is 'vos'. self.test_type = FormattedParameter("-T {}", "vos") # -C <number> # Credits for concurrently asynchronous I/O. It can be value # between 1 and 64. The utility runs in synchronous mode if # credits is set to 0. This option is ignored for mode 'vos'. self.credits = FormattedParameter("-C {}") # -c <TINY|LARGE|R2S|R3S|R4S|EC2P2|EC4P2|EC8P2> # Object class for DAOS full stack test. self.object_class = FormattedParameter("-c {}") # -o <number> # Number of objects are used by the utility. self.objects = FormattedParameter("-o {}") # -d <number/string> # Number of dkeys per object. The number can have 'k' or 'm' as # postfix which stands for kilo or million. self.dkeys = FormattedParameter("-d {}") # -a <number/string> # Number of akeys per dkey. The number can have 'k' or 'm' as # postfix which stands for kilo or million. self.akeys = FormattedParameter("-a {}") # -r <number/string> # Number of records per akey. The number can have 'k' or 'm' as # postfix which stands for kilo or million. self.records = FormattedParameter("-r {}") # -A # Use array value of akey, single value is selected by default. self.akey_use_array = FormattedParameter("-A", False) # -s <number/string> # Size of single value, or extent size of array value. The number # can have 'K' or 'M' as postfix which stands for kilobyte or # megabytes. self.value_size = FormattedParameter("-s {}") # -z # Use zero copy API, this option is only valid for 'vos' self.zero_copy_api = FormattedParameter("-z", False) # -t # Instead of using different indices and epochs, all I/Os land to # the same extent in the same epoch. This option can reduce usage # of storage space. self.same_extent = FormattedParameter("-t", False) # -U # Only run update performance test. self.update_test_only = FormattedParameter("-U", False) # -F # Only run fetch performance test. This does an update first, but # only measures the time for the fetch portion. self.fetch_test_only = FormattedParameter("-F", False) # -v # Verify fetch. Checks that what was read from the filesystem is # what was written to it. This verifcation is not part of timed # performance measurement. This is turned off by default. self.verify_fetch = FormattedParameter("-v", False) # -R # Only run rebuild performance test. self.rebuild_test_only = FormattedParameter("-R", False) # -B # Profile performance of both update and fetch. self.profile_performance = FormattedParameter("-B", False) # -I # Only run iterate performance test. Only runs in vos mode. self.iterate_test_only = FormattedParameter("-I", False) # -n # Only run iterate performance test but with nesting iterator # enable. This can only run in vos mode. 
self.nesting_iterate_test_only = FormattedParameter("-n", False) # -f <pathname> # Full path name of the VOS file. self.pathname = FormattedParameter("-f {}") # -w # Pause after initialization for attaching debugger or analysis # tool. self.pause_after_init = FormattedParameter("-w", False) # Environment variable names to export when running daos_perf self._env_names = ["D_LOG_FILE"]
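# Example usage (a minimal sketch, not taken from the source: it assumes
# this class is exposed as DaosPerfCommand, that parameter values are
# applied with the inherited update() method, and that the inherited
# run() method launches the command):
#
#     daos_perf = DaosPerfCommand("/usr/bin")
#     daos_perf.test_type.update("daos")      # -T daos: full DAOS stack
#     daos_perf.object_class.update("LARGE")  # -c LARGE
#     daos_perf.pool_scm_size.update("1G")    # -P 1G
#     daos_perf.run()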
def __init__(self, job, path="", subprocess=False): """Create a Srun object. Args: job (ExecutableCommand): command object to manage. path (str, optional): path to location of command binary file. Defaults to "". subprocess (bool, optional): whether the command is run as a subprocess. Defaults to False. """ super(Srun, self).__init__("/run/srun", "srun", job, path, subprocess) self.label = FormattedParameter("--label", True) self.mpi = FormattedParameter("--mpi={}", "pmi2") self.export = FormattedParameter("--export={}", "ALL") self.ntasks = FormattedParameter("--ntasks={}", None) self.distribution = FormattedParameter("--distribution={}", None) self.nodefile = FormattedParameter("--nodefile={}", None) self.nodelist = FormattedParameter("--nodelist={}", None) self.ntasks_per_node = FormattedParameter("--ntasks-per-node={}", None) self.nodes = FormattedParameter("--nodes={}", None) self.reservation = FormattedParameter("--reservation={}", None) self.partition = FormattedParameter("--partition={}", None) self.output = FormattedParameter("--output={}", None)
def __init__(self): """Create a dmg network scan command object.""" super(DmgCommandBase.NetworkSubCommand.ScanSubCommand, self).__init__("/run/dmg/network/scan/*", "scan") self.provider = FormattedParameter("-p {}", None) self.all = FormattedParameter("-a", False)
def __init__(self): """Create a dmg pool get-acl command object.""" super(DmgCommandBase.PoolSubCommand.GetAclSubCommand, self).__init__("/run/dmg/pool/get-acl/*", "get-acl") self.pool = FormattedParameter("--pool={}", None)
def __init__(self): """Create a daos container rollback command object.""" super(DaosCommand.ContainerSubCommand.RollbackSubCommand, self).__init__("rollback") self.snap = FormattedParameter("--snap={}") self.epc = FormattedParameter("--epc={}")
def __init__(self, namespace, command): """Create a dfuse Command object.""" super().__init__(namespace, command) # dfuse options self.puuid = FormattedParameter("--pool {}") self.cuuid = FormattedParameter("--container {}") self.mount_dir = FormattedParameter("--mountpoint {}") self.sys_name = FormattedParameter("--sys-name {}") self.thread_count = FormattedParameter("--thread-count {}") self.singlethreaded = FormattedParameter("--singlethread", False) self.foreground = FormattedParameter("--foreground", False) self.enable_caching = FormattedParameter("--enable-caching", False) self.enable_wb_cache = FormattedParameter("--enable-wb-cache", False) self.disable_caching = FormattedParameter("--disable-caching", False) self.disable_wb_cache = FormattedParameter("--disable-wb-cache", False) # Environment variable names to export when running dfuse self.update_env_names(["D_LOG_FILE"])
def __init__(self): """Create a daos container create-snap command object.""" super(DaosCommand.ContainerSubCommand.CreateSnapSubCommand, self).__init__("create-snap") self.snap = FormattedParameter("--snap={}")
def __init__(self): """Create a daos container set-attr command object.""" super(DaosCommand.ContainerSubCommand.SetAttrSubCommand, self).__init__("set-attr") self.attr = FormattedParameter("--attr={}") self.value = FormattedParameter("--value={}")
def __init__(self): """Create a daos container del-attrs command object.""" super(DaosCommand.ContainerSubCommand.DelAttrSubCommand, self).__init__("del-attrs") self.attr = FormattedParameter("--attr={}")
def __init__(self): """Create an MdtestCommand object.""" super(MdtestCommand, self).__init__("/run/mdtest/*", "mdtest") self.flags = FormattedParameter("{}") # mdtest flags # Optional arguments # -a=STRING API for I/O [POSIX|DUMMY] # -b=1 branching factor of hierarchical dir structure # -d=./out the directory in which the tests will run # -B=0 no barriers between phases # -e=0 bytes to read from each file # -f=1 first number of tasks on which test will run # -i=1 number of iterations the test will run # -I=0 number of items per directory in tree # -l=0 last number of tasks on which test will run # -n=0 every process will creat/stat/read/remove num # of directories and files # -N=0 stride num between neighbor tasks for file/dir # operation (local=0) # -p=0 pre-iteration delay (in seconds) # --random-seed=0 random seed for -R # -s=1 stride between number of tasks for each test # -V=0 verbosity value # -w=0 bytes to write each file after it is created # -W=0 number in seconds; stonewall timer, write as # many seconds and ensure all processes did the # same number of operations (currently only # stops during create phase) # -x=STRING StoneWallingStatusFile; contains the number # of iterations of the creation phase, can be # used to split phases across runs # -z=0 depth of hierarchical directory structure self.api = FormattedParameter("-a {}") self.branching_factor = FormattedParameter("-b {}") self.test_dir = FormattedParameter("-d {}") self.barriers = FormattedParameter("-B {}") self.read_bytes = FormattedParameter("-e {}") self.first_num_tasks = FormattedParameter("-f {}") self.iteration = FormattedParameter("-i {}") self.items = FormattedParameter("-I {}") self.last_num_tasks = FormattedParameter("-l {}") self.num_of_files_dirs = FormattedParameter("-n {}") self.pre_iter = FormattedParameter("-p {}") self.random_seed = FormattedParameter("--random-seed {}") self.stride = FormattedParameter("-s {}") self.verbosity_value = FormattedParameter("-V {}") self.write_bytes = FormattedParameter("-w {}") self.stonewall_timer = FormattedParameter("-W {}") self.stonewall_statusfile = FormattedParameter("-x {}") self.depth = FormattedParameter("-z {}") # Module DFS # Required arguments # --dfs.pool=STRING DAOS pool uuid # --dfs.cont=STRING DFS container uuid # Flags # --dfs.destroy Destroy DFS Container # Optional arguments # --dfs.group=STRING DAOS server group # --dfs.chunk_size=1048576 Chunk size # --dfs.oclass=STRING DAOS object class # --dfs.dir_oclass=STRING DAOS directory object class # --dfs.prefix=STRING Mount prefix self.dfs_pool_uuid = FormattedParameter("--dfs.pool {}") self.dfs_cont = FormattedParameter("--dfs.cont {}") self.dfs_group = FormattedParameter("--dfs.group {}") self.dfs_destroy = FormattedParameter("--dfs.destroy", True) self.dfs_chunk = FormattedParameter("--dfs.chunk_size {}", 1048576) self.dfs_oclass = FormattedParameter("--dfs.oclass {}", "SX") self.dfs_prefix = FormattedParameter("--dfs.prefix {}") self.dfs_dir_oclass = FormattedParameter("--dfs.dir_oclass {}", "SX") # A list of environment variable names to set and export with ior self._env_names = ["D_LOG_FILE"]
def __init__(self): """Create a dmg pool query command object.""" super(DmgCommandBase.PoolSubCommand.QuerySubCommand, self).__init__("/run/dmg/pool/query/*", "query") self.pool = FormattedParameter("--pool={}", None)
def __init__(self, path=""): """Create an MacsioCommand object. Args: path (str, optional): path to the macsio command. Defaults to "". """ super(MacsioCommand, self).__init__("/run/macsio/*", "macsio", path) # MACSio command parameters - defaults specified in square brackets: # --units_prefix_system %s ["binary"] # Specify which SI units prefix system to use both in reporting # performance data and in interpreting sizing modifiers to # arguments. The options are "binary" and "decimal". For "binary" # unit prefixes, sizes are reported in powers of 1024 and unit # symbols Ki, Mi, Gi, Ti, Pi are used. For "decimal", sizes are # reported in powers of 1000 and unit symbols are Kb, Mb, Gb, Tb, # Pb. See http://en.wikipedia.org/wiki/Binary_prefix. for more # information self.units_prefix_system = FormattedParameter( "--units_prefix_system {}") # --interface %s [miftmpl] # Specify the name of the interface to be tested. Use keyword # 'list' to print a list of all known interface names and then # exit. self.interface = FormattedParameter("--interface {}", "hdf5") # --parallel_file_mode %s %d [MIF 4] # Specify the parallel file mode. There are several choices. Use # 'MIF' for Multiple Independent File (Poor Man's) mode and then # also specify the number of files. Or, use 'MIFFPP' for MIF mode # and one file per processor or 'MIFOPT' for MIF mode and let the # test determine the optimum file count. Use 'SIF' for SIngle # shared File (Rich Man's) mode. If you also give a file count for # SIF mode, then MACSio will perform a sort of hybrid combination # of MIF and SIF modes. It will produce the specified number of # files by grouping ranks in the the same way MIF does, but I/O # within each group will be to a single, shared file using SIF # mode. # # Run macsio with SIF mode. MIF mode uses the HDF5 posix driver, # so it won't go through MPI-IO and hence not through the MPI-IO # DAOS driver. # # Note: Value should be specified as a string of a space- # separated string and integer value, e.g. 'SIF 1'. self.parallel_file_mode = FormattedParameter( "--parallel_file_mode {}", "SIF 1") # --avg_num_parts %f [1] # The average number of mesh parts per MPI rank. Non-integral # values are acceptable. For example, a value that is half-way # between two integers, K and K+1, means that half the ranks have # K mesh parts and half have K+1 mesh parts. As another example, # a value of 2.75 here would mean that 75% of the ranks get 3 # parts and 25% of the ranks get 2 parts. Note that the total # number of parts is this number multiplied by the MPI # communicator size. If the result of that product is # non-integral, it will be rounded and a warning message will be # generated. self.avg_num_parts = FormattedParameter("--avg_num_parts {}") # --mesh_decomp %d %d %d [] # The layout of parts in the mesh overriding the simple # decomposition e.g. 4 8 1 will decompose into 32 parts in the # structure (x y z). # # Note: Value should be specified as a string of three space- # separated integer values, e.g. '4 8 1'. self.mesh_decomp = FormattedParameter("--mesh_decomp {}") # --part_size %d [80000] # Mesh part size in bytes. This becomes the nominal I/O request # size used by each MPI rank when marshalling data. A following # B|K|M|G character indicates 'B'ytes, 'K'ilo-, 'M'ega- or 'G'iga- # bytes representing powers of either 1000 or 1024 according to # the selected units prefix system. With no size modifier # character, 'B' is assumed. Mesh and variable data is then sized # by MACSio to hit this target byte count. 
However, due to # constraints involved in creating valid mesh topology and # variable data with realistic variation in features (e.g. zone- # and node-centering), this target byte count is hit exactly for # only the most frequently dumped objects and approximately for # other objects. self.part_size = FormattedParameter("--part_size {}") # --part_mesh_dims %d %d %d [] # Specify the number of elements in each dimension per mesh part. # This overrides the part_size parameter and instead allows the # size of the mesh to be determined by dimensions. e.g. 300 300 2, # 300 300 0 (set final dimension to 0 for 2d # # Note: Value should be specified as a string of three space- # separated integer values, e.g. '300 300 2'. self.part_mesh_dims = FormattedParameter("--part_mesh_dims {}") # --part_dim %d [2] # Spatial dimension of parts; 1, 2, or 3 self.part_dim = FormattedParameter("--part_dim {}") # --part_type %s [rectilinear] # Options are 'uniform', 'rectilinear', 'curvilinear', # 'unstructured' and 'arbitrary' (currently, only rectilinear is # implemented) self.part_type = FormattedParameter("--part_type {}") # --part_map %s [] # Specify the name of an ascii file containing part assignments to # MPI ranks. The ith line in the file, numbered from 0, holds the # MPI rank to which the ith part is to be assigned. (currently # ignored) self.part_map = FormattedParameter("--part_map {}") # --vars_per_part %d [20] # Number of mesh variable objects in each part. The smallest this # can be depends on the mesh type. For rectilinear mesh it is 1. # For curvilinear mesh it is the number of spatial dimensions and # for unstructured mesh it is the number of spatial dimensions # plus 2^number of topological dimensions. self.vars_per_part = FormattedParameter("--vars_per_part {}") # --dataset_growth %f [] # The factor by which the volume of data will grow between dump # iterations If no value is given or the value is <1.0 no dataset # changes will take place. self.dataset_growth = FormattedParameter("--dataset_growth {}") # --topology_change_probability %f [0.0] # The probability that the topology of the mesh (e.g. something # fundamental about the mesh's structure) will change between # dumps. A value of 1.0 indicates it should be changed every dump. # A value of 0.0, the default, indicates it will never change. A # value of 0.1 indicates it will change about once every 10 dumps. # Note: at present MACSio will not actually compute/construct a # different topology. It will only inform a plugin that a given # dump should be treated as a change in topology. self.topology_change_probability = FormattedParameter( "--topology_change_probability {}") # --meta_type %s [tabular] # Specify the type of metadata objects to include in each main # dump. Options are 'tabular', 'amorphous'. For tabular type # data, MACSio will generate a random set of tables of somewhat # random structure and content. For amorphous, MACSio will # generate a random hierarchy of random type and sized objects. self.meta_type = FormattedParameter("--meta_type {}") # --meta_size %d %d [10000 50000] # Specify the size of the metadata objects on each processor and # separately, the root (or master) processor (MPI rank 0). The # size is specified in terms of the total number of bytes in the # metadata objects MACSio creates. 
For example, a type of tabular # and a size of 10K bytes might result in 3 random tables; one # table with 250 unnamed records where each record is an array of # 3 doubles for a total of 6000 bytes, another table of 200 # records where each record is a named integer value where each # name is length 8 chars for a total of 2400 bytes and a 3rd table # of 40 unnamed records where each record is a 40 byte struct # comprised of ints and doubles for a total of 1600 bytes. # # Note: Value should be specified as a string of two space- # separated integer values, e.g. '10000 50000'. self.meta_size = FormattedParameter("--meta_size {}") # --num_dumps %d [10] # The total number of dumps to marshal. self.num_dumps = FormattedParameter("--num_dumps {}") # --max_dir_size %d [] # The maximum number of filesystem objects (e.g. files or # subdirectories) that MACSio will create in any one subdirectory. # This is typically relevant only in MIF mode because MIF mode can # wind up generating many will continue to create output files in # the same directory until it has completed all dumps. Use a value # of zero to force MACSio to put each dump in a separate directory # but where the number of top-level directories is still # unlimited. The result will be a 2-level directory hierarchy with # dump directories at the top and individual dump files in each # directory. A value > 0 will cause MACSio to create a tree-like # directory structure where the files are the leaves and # encompassing dir tree is created such as to maintain the # max_dir_size constraint specified here. For example, if the # value is set to 32 and the MIF file count is 1024, then each # dump will involve a 3-level dir-tree; the top dir containing 32 # sub-dirs and each sub-dir containing 32 of the 1024 files for # the dump. If more than 32 dumps are performed, then the dir-tree # will really be 4 or more levels with the first 32 dumps' # dir-trees going into the first dir, etc. self.max_dir_size = FormattedParameter("--max_dir_size {}") # --compute_work_intensity %d [1] # Add some work in between I/O phases. There are three levels of # 'compute' that can be performed as follows: # Level 1: Perform a basic sleep operation # Level 2: Perform some simple FLOPS with randomly accessed # data # Level 3: Solves the 2D Poisson equation via the Jacobi # iterative method # This input is intended to be used in conjunction with # --compute_time which will roughly control how much time is spent # doing work between iops self.compute_work_intensity = FormattedParameter( "--compute_work_intensity {}") # --compute_time %f [] # A rough lower bound on the number of seconds spent doing work # between I/O phases. The type of work done is controlled by the # --compute_work_intensity input and defaults to Level 1 (basic # sleep). self.compute_time = FormattedParameter("--compute_time {}") # --alignment %d [] # Not currently documented self.alignment = FormattedParameter("--alignment {}") # --filebase %s [macsio] # Basename of generated file(s). self.filebase = FormattedParameter("--filebase {}") # --fileext %s [] # Extension of generated file(s). self.fileext = FormattedParameter("--fileext {}") # --read_path %s [] # Specify a path name (file or dir) to start reading for a read # test. self.read_path = FormattedParameter("--read_path {}") # --num_loads %d [] # Number of loads in succession to test. self.num_loads = FormattedParameter("--num_loads {}") # --no_validate_read [] # Don't validate data on read. 
self.no_validate_read = FormattedParameter("--no_validate_read", False) # --read_mesh %s [] # Specify mesh name to read. self.read_mesh = FormattedParameter("--read_mesh {}") # --read_vars %s [] # Specify variable names to read. "all" means all variables. If # listing more than one, be sure to either enclose space separated # list in quotes or use a comma-separated list with no spaces self.read_vars = FormattedParameter("--read_vars {}") # --time_randomize [] # Make randomness in MACSio vary from dump to dump and run to run # by using PRNGs seeded by time. self.time_randomize = FormattedParameter("--time_randomize", False) # --plugin_args %n [] # All arguments after this sentinel are passed to the I/O plugin # plugin. The '%n' is a special designator for the builtin 'argi' # value. self.plugin_args = FormattedParameter("--plugin_args {}") # --debug_level %d [0] # Set debugging level (1, 2 or 3) of log files. Higher numbers # mean more frequent and detailed output. A value of zero, the # default, turns all debugging output off. A value of 1 should not # adversely effect performance. A value of 2 may effect # performance and a value of 3 will almost certainly effect # performance. For debug level 3, MACSio will generate ascii json # files from each processor for the main dump object prior to # starting dumps. self.debug_level = FormattedParameter("--debug_level {}") # # Log File Options to control size and shape of log file: # # --log_file_name %s [macsio-log.log] # The name of the log file. self.log_file_name = FormattedParameter( "--log_file_name {}", "macsio-log.log") # --log_line_cnt %d %d [64 0] # Set number of lines per rank in the log file and number of extra # lines for rank 0. self.log_line_cnt = FormattedParameter("--log_line_cnt {}") # --log_line_length %d [128] # Set log file line length. self.log_line_length = FormattedParameter("--log_line_length {}") # --timings_file_name %s [macsio-timings.log] # Specify the name of the timings file. Passing an empty string, # "" will disable the creation of a timings file. self.timings_file_name = FormattedParameter( "--timings_file_name {}", "macsio-timings.log") # # Options specific to the "hdf5" I/O plugin # # --show_errors [] # Show low-level HDF5 errors self.show_errors = FormattedParameter("--show_errors", False) # --compression %s %s [] # The first string argument is the compression algorithm name. The # second string argument is a comma-separated set of params of the # form 'param1=val1,param2=val2,param3=val3. The various algorithm # names and their parameter meanings are described below. Note # that some parameters are not specific to any algorithm. Those # are described first followed by individual algorithm-specific # parameters for those algorithms available in the current build. # # minsize=%d [1024] # minimum size of dataset (in terms of a count of values) # upon which compression will even be attempted # # shuffle=<int> # Boolean (zero or non-zero) to indicate whether to use # HDF5's byte shuffling filter *prior* to compression. # Default depends on algorithm. By default, shuffling is # NOT used for zfp but IS used with all other algorithms. # # Available compression algorithms: # # "zfp" # Use Peter Lindstrom's ZFP compression ( # computation.llnl.gov/casc/zfp) Note: Whether this # compression is available is determined entirely at # run-time using the H5Z-ZFP compressor as a generic # filter. 
This means all that is necessary is to specify # the HDF5_PLUGIN_PATH environment variable with a path # to the shared lib for the filter. # # The following ZFP options are *mutually*exclusive*. # In any command-line specifying more than one of the # following options, only the last specified will be # honored. # # rate=%f [] # target # bits per compressed output datum. # Fractional values are permitted. 0 selects defaults: # 4 bits/flt or 8 bits/dbl. Use this option to hit a # target compressed size but where error varies. OTOH, # use one of the following two options for fixed error # but amount of compression, if any, varies. # # precision=%d [] # # bits of precision to preserve in each input datum. # # accuracy=%f [] # absolute error tolerance in each output datum. In # many respects, 'precision' represents a sort of # relative error tolerance while 'accuracy' represents # an absolute tolerance. See # http://en.wikipedia.org/wiki/Accuracy_and_precision. # # "gzip" # level=%d [9] # A value in the range [1,9], inclusive, trading off # time to compress with amount of compression. Level=1 # results in best speed but worst compression whereas # level=9 results in best compression but worst speed. # Values outside [1,9] are clamped. # # Examples: # --compression zfp rate=18.5 # --compression gzip minsize=1024,level=9 # --compression szip shuffle=0,options=nn,pixels_per_block=16 self.compression = FormattedParameter("--compression {}") # --no_collective [] # Use independent, not collective, I/O calls in SIF mode. self.no_collective = FormattedParameter("--no_collective", False) # --no_single_chunk [] # Do not single chunk the datasets (currently ignored). self.no_single_chunk = FormattedParameter("--no_single_chunk", False) # --sieve_buf_size %d [] # Specify sieve buffer size (see H5Pset_sieve_buf_size) self.sieve_buf_size = FormattedParameter("--sieve_buf_size {}") # --meta_block_size %d [] # Specify size of meta data blocks (see H5Pset_meta_block_size) self.meta_block_size = FormattedParameter("--meta_block_size {}") # --small_block_size %d [] # Specify threshold size for data blocks considered to be 'small' # (see H5Pset_small_data_block_size) self.small_block_size = FormattedParameter("--small_block_size {}") # --log [] # Use logging Virtual File Driver (see H5Pset_fapl_log) self.log_virtual_file_driver = FormattedParameter("--log {}") # DAOS parameters self.daos_pool = None self.daos_svcl = None self.daos_cont = None # Environment variable names required to be set when running the macsio # command. The values for these names are populated by the # set_environment() method. self._env_names = ["D_LOG_FILE"]
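# Example usage (a minimal sketch, not taken from the source: it assumes
# parameter values are applied with the inherited update() method and
# that the command is launched under an MPI job manager such as the
# Orterun wrapper shown elsewhere in this section):
#
#     macsio = MacsioCommand()
#     macsio.interface.update("hdf5")            # --interface hdf5
#     macsio.parallel_file_mode.update("SIF 1")  # single shared file
#     macsio.num_dumps.update(5)                 # --num_dumps 5
#     orterun = Orterun(macsio)
#     orterun.processes.update(8)                # --np 8
#     orterun.run()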
def __init__(self): """Create a dmg storage format command object.""" super(DmgCommandBase.StorageSubCommand.FormatSubCommand, self).__init__("/run/dmg/storage/format/*", "format") self.reformat = FormattedParameter("--reformat", False)
def __init__(self, job, subprocess=False):
    """Create an Orterun object.

    Args:
        job (ExecutableCommand): command object to manage.
        subprocess (bool, optional): whether the command is run as a
            subprocess. Defaults to False.
    """
    if not load_mpi("openmpi"):
        raise CommandFailure("Failed to load openmpi")

    path = os.path.dirname(find_executable("orterun"))
    super().__init__("/run/orterun/*", "orterun", job, path, subprocess)

    # Default mca values to avoid queue pair errors
    mca_default = {
        "btl_openib_warn_default_gid_prefix": "0",
        "btl": "tcp,self",
        "oob": "tcp",
        "pml": "ob1",
        "btl_tcp_if_include": "eth0",
    }

    self.hostfile = FormattedParameter("--hostfile {}", None)
    self.processes = FormattedParameter("--np {}", 1)
    self.display_map = FormattedParameter("--display-map", False)
    self.map_by = FormattedParameter("--map-by {}", "node")
    self.export = FormattedParameter("-x {}", None)
    self.enable_recovery = FormattedParameter("--enable-recovery", True)
    self.report_uri = FormattedParameter("--report-uri {}", None)
    self.allow_run_as_root = FormattedParameter("--allow-run-as-root", None)
    self.mca = FormattedParameter("--mca {}", mca_default)
    self.pprnode = FormattedParameter("--map-by ppr:{}:node", None)
    self.tag_output = FormattedParameter("--tag-output", True)
    self.ompi_server = FormattedParameter("--ompi-server {}", None)
    self.working_dir = FormattedParameter("-wdir {}", None)
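# Example usage (a minimal sketch, not taken from the source: it assumes
# the wrapped job is an ExecutableCommand and that parameter values are
# applied with the inherited update() method):
#
#     orterun = Orterun(job)
#     orterun.processes.update(32)           # --np 32
#     orterun.hostfile.update("/tmp/hosts")  # --hostfile /tmp/hosts
#     orterun.run()                          # runs: orterun ... <job>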
def __init__(self): """Create a dmg storage scan command object.""" super(DmgCommandBase.StorageSubCommand.ScanSubCommand, self).__init__("/run/dmg/storage/scan/*", "scan") self.nvme_health = FormattedParameter("--nvme-health", False) self.verbose = FormattedParameter("--verbose", False)
def __init__(self): """Create a daos_agent dump-attachinfo subcommand object.""" super().__init__("/run/daos_agent/dump-attachinfo/*", "dump-attachinfo") self.output = FormattedParameter("--output {}", None)
def __init__(self): """Create a dmg system query command object.""" super(DmgCommandBase.SystemSubCommand.QuerySubCommand, self).__init__("/run/dmg/system/query/*", "query") self.ranks = FormattedParameter("--ranks={}") self.verbose = FormattedParameter("--verbose", False)
def __init__(self): """Create a daos pool set-attr command object.""" super().__init__("set-attr") self.attr = PositionalParameter(2) self.value = PositionalParameter(3) self.sys_name = FormattedParameter("--sys-name={}")
def __init__(self): """Create a dmg system stop command object.""" super(DmgCommandBase.SystemSubCommand.StopSubCommand, self).__init__("/run/dmg/system/stop/*", "stop") self.force = FormattedParameter("--force", False) self.ranks = FormattedParameter("--ranks={}")
def __init__(self): """Create a daos container clone command object.""" super().__init__("/run/daos/container/clone/*", "clone") self.src = FormattedParameter("--src={}") self.dst = FormattedParameter("--dst={}")
def __init__(self, namespace, command): """Create a dcp Command object.""" super().__init__(namespace, command) # dcp options # IO buffer size in bytes (default 64MB) self.blocksize = FormattedParameter("--blocksize {}") # New versions use bufsize instead of blocksize self.bufsize = FormattedParameter("--bufsize {}") # work size per task in bytes (default 64MB) self.chunksize = FormattedParameter("--chunksize {}") # DAOS source pool self.daos_src_pool = FormattedParameter("--daos-src-pool {}") # DAOS destination pool self.daos_dst_pool = FormattedParameter("--daos-dst-pool {}") # DAOS source container self.daos_src_cont = FormattedParameter("--daos-src-cont {}") # DAOS destination container self.daos_dst_cont = FormattedParameter("--daos-dst-cont {}") # DAOS prefix for unified namespace path self.daos_prefix = FormattedParameter("--daos-prefix {}") # DAOS API in {DFS, DAOS} (default uses DFS for POSIX containers) self.daos_api = FormattedParameter("--daos-api {}") # read source list from file self.input_file = FormattedParameter("--input {}") # copy original files instead of links self.dereference = FormattedParameter("--dereference", False) # don't follow links in source self.no_dereference = FormattedParameter("--no-dereference", False) # preserve permissions, ownership, timestamps, extended attributes self.preserve = FormattedParameter("--preserve", False) # open files with O_DIRECT self.direct = FormattedParameter("--direct", False) # create sparse files when possible self.sparse = FormattedParameter("--sparse", False) # print progress every N seconds self.progress = FormattedParameter("--progress {}") # verbose output self.verbose = FormattedParameter("--verbose", False) # quiet output self.quiet = FormattedParameter("--quiet", False) # print help/usage self.print_usage = FormattedParameter("--help", False) # source path self.src_path = BasicParameter(None) # destination path self.dst_path = BasicParameter(None)
def __init__(self): """Create a daos container destroy command object.""" super().__init__("destroy") self.force = FormattedParameter("--force", False)
def __init__(self, namespace, command): """Create a dsync Command object.""" super().__init__(namespace, command) # dsync options # show differences, but do not synchronize files self.dryrun = FormattedParameter("--dryrun", False) # batch files into groups of N during copy self.batch_files = FormattedParameter("--batch-files {}") # IO buffer size in bytes (default 4MB) self.bufsize = FormattedParameter("--blocksize {}") # work size per task in bytes (default 4MB) self.chunksize = FormattedParameter("--chunksize {}") # DAOS prefix for unified namespace path self.daos_prefix = FormattedParameter("--daos-prefix {}") # DAOS API in {DFS, DAOS} (default uses DFS for POSIX containers) self.daos_api = FormattedParameter("--daos-api {}") # read and compare file contents rather than compare size and mtime self.contents = FormattedParameter("--contents", False) # delete extraneous files from target self.delete = FormattedParameter("--delete", False) # copy original files instead of links self.dereference = FormattedParameter("--dereference", False) # don't follow links in source self.no_dereference = FormattedParameter("--no-dereference", False) # open files with O_DIRECT self.direct = FormattedParameter("--direct", False) # hardlink to files in DIR when unchanged self.link_dest = FormattedParameter("--link-dest {}") # create sparse files when possible self.sparse = FormattedParameter("--sparse", False) # print progress every N seconds self.progress = FormattedParameter("--progress {}") # verbose output self.verbose = FormattedParameter("--verbose", False) # quiet output self.quiet = FormattedParameter("--quiet", False) # print help/usage self.print_usage = FormattedParameter("--help", False) # source path self.src_path = BasicParameter(None) # destination path self.dst_path = BasicParameter(None)
def __init__(self): """Create a daos container get-prop command object.""" super().__init__("get-prop") self.prop = FormattedParameter("--properties={}")
def __init__(self): """Create a daos container overwrite-acl command object.""" super(DaosCommand.ContainerSubCommand.OverwriteAclSubCommand, self).__init__("overwrite-acl") self.acl_file = FormattedParameter("--acl-file={}")