def __init__(
    self,
    level: Arg.Number('-l', bound=(0, 9),
        help='Specify a compression level between 0 and 9.') = 9,
    window: Arg.Number('-w', bound=(8, 15),
        help='Manually specify the window size between 8 and 15.') = 15,
    force: Arg.Switch('-f',
        help='Decompress as far as possible, even if all known methods fail.') = False,
    zlib_header: Arg.Switch('-z', group='MODE', help='Use a ZLIB header.') = False,
    gzip_header: Arg.Switch('-g', group='MODE', help='Use a GZIP header.') = False
):
    if zlib_header and gzip_header:
        raise ValueError('You can only specify one header type (ZLIB or GZIP).')
    return super().__init__(
        level=level,
        window=window,
        force=force,
        zlib_header=zlib_header,
        gzip_header=gzip_header,
    )
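# A minimal standalone sketch (not this unit's implementation) of how the
# window and header options plausibly map onto Python's zlib module: the
# wbits parameter selects a ZLIB header directly, adding 16 selects a GZIP
# header, and a negative value yields a raw DEFLATE stream. Note that zlib
# may reject a window of 8 when compressing.
import zlib

def deflate(data: bytes, level=9, window=15, zlib_header=False, gzip_header=False) -> bytes:
    if gzip_header:
        wbits = window + 16   # GZIP header and trailer
    elif zlib_header:
        wbits = window        # ZLIB header and Adler-32 checksum
    else:
        wbits = -window       # raw DEFLATE stream, no header
    engine = zlib.compressobj(level, zlib.DEFLATED, wbits)
    return engine.compress(data) + engine.flush()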
def __init__(
    self,
    key,
    nonce: Arg(help='The nonce. Default is the string {default}.') = B'REFINERY',
    magic: Arg('-m', help='The magic constant; depends on the key size by default.') = B'',
    offset: Arg.Number('-x', help='Optionally specify the stream index, default is {default}.') = 0,
    rounds: Arg.Number('-r', help='The number of rounds. Has to be an even number.') = 20,
):
    super().__init__(key=key, nonce=nonce, magic=magic, offset=offset, rounds=rounds)
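# Hedged sketch of the conventional defaults for the magic constant in
# Salsa20/ChaCha-style ciphers, which this constructor appears to model:
# the constant is chosen based on the key size. This helper is an
# illustration only, not part of the unit.
def default_magic(key: bytes) -> bytes:
    if len(key) == 16:
        return b'expand 16-byte k'
    if len(key) == 32:
        return b'expand 32-byte k'
    raise ValueError('No default constant for this key size; specify the magic explicitly.')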
def __init__(
    self,
    *filenames: Arg(metavar='FILEMASK', nargs='+', type=str, help=(
        'A list of file masks (with wildcard patterns). Each matching file will be read '
        'from disk and emitted. In addition to glob patterns, the file mask can include '
        'format string expressions which will be substituted from the current meta '
        'variables.')),
    list: Arg.Switch('-l',
        help='Only list matching files together with their metadata.') = False,
    meta: Arg.Switch('-m',
        help='Adds the atime, mtime, ctime, and size metadata variables.') = False,
    size: Arg.Number('-s', help=(
        'If specified, files will be read in chunks of size N and each chunk is emitted '
        'as one element in the output list.')) = 0,
    linewise: Arg.Switch('-w', help=(
        'Read the file linewise. By default, one line is read at a time. In line mode, '
        'the --size argument can be used to read the given number of lines in each '
        'chunk.')) = False
):
    super().__init__(size=size, list=list, meta=meta, linewise=linewise, filenames=filenames)
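# Rough sketch, under assumptions, of the chunked and linewise reading modes
# described by the help text above; the unit's real file handling may differ.
def read_chunks(path, size=0, linewise=False):
    with open(path, 'rb') as stream:
        if linewise:
            while True:
                # read `size` lines per chunk, one line by default
                chunk = b''.join(stream.readline() for _ in range(size or 1))
                if not chunk:
                    break
                yield chunk
        elif size > 0:
            yield from iter(lambda: stream.read(size), b'')
        else:
            yield stream.read()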
def __init__(
    self,
    spec: Arg(type=str, help='Structure format as explained above.'),
    *outputs: Arg(metavar='output', type=str, help='Output format as explained above.'),
    multi: Arg.Switch('-m', help=(
        'Read as many pieces of structured data as possible instead of just one.')) = False,
    count: Arg.Number('-n', help=(
        'A limit on the number of chunks to read in multi mode; default is {default}.')) = INF,
    until: Arg('-u', metavar='E', type=str, help=(
        'An expression evaluated on each chunk in multi mode. New chunks will be parsed '
        'only if the result is nonzero.')) = None,
):
    outputs = outputs or [F'{{{_SHARP}}}']
    super().__init__(spec=spec, outputs=outputs, until=until, count=count, multi=multi)
def __init__(
    self,
    addresses: Arg(type=sliceobj, nargs='+', metavar='start:count:align', help=(
        'Use Python slice syntax to describe an area of virtual memory to read. If a '
        'chunk size is specified, then the unit will always read a multiple of that '
        'number of bytes.')),
    ascii: Arg.Switch('-a', group='END',
        help='Read ASCII strings; equivalent to -th:00') = False,
    utf16: Arg.Switch('-u', group='END',
        help='Read UTF16 strings; equivalent to -th:0000 (also sets the chunk size to 2)') = False,
    until: Arg.Binary('-t', group='END',
        help='Read until sequence {varname} is read.') = B'',
    base: Arg.Number('-b', metavar='ADDR',
        help='Optionally specify a custom base address B.') = None,
):
    if sum(1 for t in (until, utf16, ascii) if t) > 1:
        raise ValueError('Only one of utf16, ascii, and until can be specified.')
    return super().__init__(addresses=addresses, utf16=utf16, ascii=ascii, until=until, base=base)
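# Standalone sketch (an assumption about the semantics, not the unit's code)
# of reading from a buffer until a terminator sequence: scanning in steps of
# the chunk size ensures that, e.g., UTF-16 strings terminate only on an
# aligned b'\x00\x00'.
def read_until(mem: bytes, start: int, until: bytes, step: int = 1) -> bytes:
    for cursor in range(start, len(mem) - len(until) + 1, step):
        if mem[cursor:cursor + len(until)] == until:
            return mem[start:cursor]
    return mem[start:]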
def __init__(
    self,
    indent: Arg.Number('-i', help=(
        'Controls the number of space characters used for indentation in the output. '
        'Default is {default}.')) = 4
):
    return super().__init__(indent=indent)
def __init__(
    self,
    chunk_size: Arg.Number('-c', help=(
        'Optionally specify the chunk size for compression, default is 0x1000.')) = 0x1000
):
    super().__init__(chunk_size=chunk_size)
def __init__(
    self,
    size: Arg.Number('size', help='Chop data into chunks of this size.'),
    truncate: Arg.Switch('-t', help=(
        'Truncate possible excess bytes at the end of the input; by default they are '
        'appended as a single chunk.')) = False,
    into: Arg.Switch('-i', help=(
        'If this flag is specified, the size parameter determines the number of blocks '
        'to be produced rather than the size of each block. In this case, truncation is '
        'performed before the data is split.')) = False
):
    return super().__init__(size=size, into=into, truncate=truncate)
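# Minimal sketch of the chopping logic described above, written as a plain
# function for illustration; the unit itself operates on chunk streams.
def chop(data: bytes, size: int, truncate=False, into=False):
    if into:
        count = size                      # size is reinterpreted as a block count
        size = len(data) // count or 1    # derive the block size (at least 1)
        data = data[:size * count]        # truncation happens before splitting
    blocks = [data[k:k + size] for k in range(0, len(data), size)]
    if truncate and blocks and len(blocks[-1]) != size:
        blocks.pop()                      # drop a trailing partial block
    return blocks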
def __init__(self, *count: Arg.Number(metavar='count', help=(
    'The number of times every byte should be repeated. By default, every byte is '
    'repeated once, i.e. it occurs twice in the output.'))
):
    count = count or (2,)
    if any(k <= 0 for k in count):
        raise ValueError('You cannot use a stretching factor of less than 1.')
    super().__init__(count=count)
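# Tiny standalone sketch of the stretching operation with a single repeat
# factor. Support for several factors (the *count signature above) likely
# cycles them over the input; that detail is assumed and omitted here.
def stretch(data: bytes, factor: int = 2) -> bytes:
    return bytes(b for b in data for _ in range(factor))

# stretch(b'ab') == b'aabb'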
def __init__(
    self,
    search: Arg(help='This is the search term.'),
    replace: Arg(help=(
        'The substitution string. Leave this empty to remove all occurrences of the '
        'search term.')) = B'',
    count: Arg.Number('-n', help='Only replace the given number of occurrences.') = -1
):
    super().__init__(search=search, replace=replace, count=count)
def __init__(
    self,
    indent: Arg.Number('-i', help=(
        'Controls the number of space characters used for indentation in the output. '
        'Default is {default}.')) = 4,
    header: Arg.Switch('-x', help='Add an XML header to the formatted output.') = False
):
    super().__init__(indent=indent, header=header)
def __init__(
    self,
    tabular: Arg.Switch('-t', group='OUT',
        help='Convert JSON input into a flattened table.') = False,
    indent: Arg.Number('-i', group='OUT',
        help='Number of spaces used for indentation. Default is {default}.') = 4
):
    return super().__init__(indent=indent, tabular=tabular)
def __init__(
    self,
    consecutive: Arg.Switch('-c',
        help='Assume that the repeating pattern is consecutive when observable.') = False,
    align: Arg.Switch('-d',
        help='Assume that the pattern occurs at offsets that are multiples of its length.') = False,
    min: Arg.Number('-n',
        help='Minimum size of the pattern to search for. Default is {default}.') = 1,
    max: Arg.Number('-N',
        help='Maximum size of the pattern to search for. Default is {default}.') = INF,
    len: Arg.Number('-l',
        help='Set the exact size of the pattern. This is equivalent to --min=N --max=N.') = None,
    all: Arg.Switch('-a',
        help='Produce one output for each repeating pattern that was detected.') = False,
    threshold: Arg.Number('-t',
        help='Patterns must meet this performance threshold in percent, lest they be discarded.') = 20,
    weight: Arg.Number('-w',
        help='Specifies how much longer patterns are favored over small ones. Default is {default}.') = 0,
    buffer: Arg.Number('-b', group='BFR',
        help='Maximum number of bytes to inspect at once. The default is {default}.') = 1024,
    chug: Arg.Switch('-g', group='BFR',
        help='Compute the prefix tree for the entire buffer instead of chunking it.') = False
):
    if len is not None:
        min = max = len
    super().__init__(
        min=min,
        max=max,
        all=all,
        consecutive=consecutive,
        align=align,
        weight=weight,
        buffer=buffer,
        chug=chug,
        threshold=threshold,
    )
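# Illustrative sketch of the simplest special case this unit covers: finding
# the length of a consecutive repeating pattern. The unit's actual search
# uses a prefix tree with scoring and thresholds; this brute force only
# demonstrates the underlying concept.
def consecutive_period(data: bytes) -> int:
    for size in range(1, len(data) + 1):
        if all(data[k] == data[k % size] for k in range(len(data))):
            return size
    return len(data)

# consecutive_period(b'XYZXYZXYZ') == 3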
def __init__(
    self,
    regex: Arg(type=regexp, help='Regular expression to match.'),
    multiline: Arg.Switch('-M', help=(
        'Caret and dollar match the beginning and end of a line; a dot does not match '
        'line breaks.')) = False,
    ignorecase: Arg.Switch('-I',
        help='Ignore capitalization for alphabetic characters.') = False,
    count: Arg.Number('-c',
        help='Specify the maximum number of operations to perform.') = 0,
    **keywords
):
    flags = re.MULTILINE if multiline else re.DOTALL
    if ignorecase:
        flags |= re.IGNORECASE
    super().__init__(regex=regex, flags=flags, count=count, **keywords)
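# Small demonstration, for reference, of the two flag regimes chosen above:
# by default re.DOTALL makes '.' match newlines; with the multiline switch,
# re.MULTILINE makes '^' and '$' anchor at line boundaries while '.' stops
# at line breaks.
import re

text = b'one\ntwo'
assert re.match(rb'o.*o', text, re.DOTALL)                          # '.' spans the newline
assert re.findall(rb'^\w+$', text, re.MULTILINE) == [b'one', b'two']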
def __init__(
    self,
    width: Arg('width', help=(
        'Optionally specify the width; by default, the current terminal width is used.')) = 0,
    delta: Arg.Number('-d',
        help='Subtract this number from the calculated width (0 by default).') = 0,
):
    super().__init__(width=width, delta=delta)
def __init__(
    self,
    padding: Arg('padding', help=(
        'This custom binary sequence is used (repeatedly, if necessary) to pad the '
        'input. The default is a zero byte.')) = B'\0',
    absolute: Arg.Number('-a', group='HOW',
        help='Pad inputs to be at least N bytes in size.') = 0,
    blocksize: Arg.Number('-b', group='HOW',
        help='Pad inputs to any even multiple of N.') = 0,
    left: Arg.Switch('-l', help='Pad on the left instead of the right.') = False
):
    if absolute and blocksize:
        raise ValueError('Cannot pad simultaneously to a given block size and absolutely.')
    self.superinit(super(), **vars())
    self._maxlen = None
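# Rough sketch, under assumptions, of the padding computation: the target
# length is either an absolute minimum size or the next multiple of the
# block size, and the padding sequence is repeated and trimmed to fit.
def pad(data: bytes, padding=b'\0', absolute=0, blocksize=0, left=False) -> bytes:
    if blocksize:
        target = -(-len(data) // blocksize) * blocksize  # round up to a multiple
    else:
        target = max(absolute, len(data))
    missing = target - len(data)
    fill = (padding * (missing // len(padding) + 1))[:missing]
    return fill + data if left else data + fill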
def __init__(
    self,
    min: Arg.Number('-n', help='Matches must have length at least N.') = 1,
    max: Arg.Number('-m', help='Matches must have length at most N.') = None,
    len: Arg.Number('-e', help='Matches must be of length N.') = None,
    stripspace: Arg.Switch('-x', help='Strip all whitespace from input data.') = False,
    duplicates: Arg.Switch('-r',
        help='Yield every (transformed) match, even when it was found before.') = False,
    longest: Arg.Switch('-l', help='Sort results by length.') = False,
    take: Arg.Number('-t',
        help='Return only the first N occurrences in order of appearance.') = None,
    **keywords
):
    keywords.setdefault('ascii', True)
    keywords.setdefault('utf16', True)
    super().__init__(
        min=min,
        max=max or INF,
        len=len or AST,
        stripspace=stripspace,
        duplicates=duplicates,
        longest=longest,
        take=take or INF,
        **keywords
    )
def __init__(
    self,
    bigendian: Arg.Switch('-E', help='Read chunks in big endian.') = False,
    blocksize: Arg.Number('-B',
        help='The size of each block in bytes, default is 1.') = 1,
    precision: Arg.Number('-P', help=(
        'The size of the variables used for computing the result. By default, this is '
        'equal to the block size. The value may be zero, indicating that arbitrary '
        'precision is required.')) = None,
    **keywords
):
    if blocksize < 1:
        raise ValueError('Block size cannot be less than 1.')
    if precision is None:
        precision = blocksize
    super().__init__(bigendian=bigendian, blocksize=blocksize, precision=precision, **keywords)
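# Standalone sketch of what a fixed precision plausibly means here: results
# of a block operation are reduced modulo 2**(8*precision), while zero
# precision leaves them untruncated. The operation below (addition) is only
# an example stand-in for whatever the derived unit computes.
def add_blocks(a: int, b: int, precision: int) -> int:
    if precision == 0:
        return a + b                      # arbitrary precision: no truncation
    mask = (1 << (8 * precision)) - 1     # e.g. 0xFF for one-byte precision
    return (a + b) & mask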
def __init__(
    self,
    size: Arg(help='The number of bytes to generate.', type=number),
    salt: Arg(help='Salt for the derivation.'),
    hash: Arg.Option(choices=HASH, metavar='hash', help=(
        'Specify one of these algorithms (default is {default}): {choices}')) = None,
    iter: Arg.Number(metavar='iter',
        help='Number of iterations; default is {default}.') = None,
    **kw
):
    if hash is not None:
        name = Arg.AsOption(hash, HASH)
        hash = importlib.import_module(F'Crypto.Hash.{name}')
    return super().__init__(salt=salt, size=size, iter=iter, hash=hash, **kw)
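# Hedged illustration: the constructor above resolves a PyCryptodome hash
# module, the kind of object a key derivation such as PBKDF2 consumes. The
# call below uses PyCryptodome's actual PBKDF2 API; treating this unit as
# PBKDF2 specifically is an assumption made for the example.
import importlib
from Crypto.Protocol.KDF import PBKDF2

sha256 = importlib.import_module('Crypto.Hash.SHA256')
key = PBKDF2(b'password', b'salt', dkLen=32, count=1000, hmac_hash_module=sha256)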
def __init__(
    self,
    prepend: Arg.Switch('-P', '--no-prepend', off=True, help=(
        'By default, if decompression fails, the unit attempts to prefix the data with '
        'all possible values of a single byte and decompress the result. This behavior '
        'can be disabled with this flag.')) = True,
    tolerance: Arg.Number('-t', help=(
        'Maximum number of bytes to strip from the beginning of the data; the default '
        'value is 12.')) = 12,
    min_ratio: Arg('-r', metavar='R', help=(
        'To determine whether a decompression algorithm was successful, the ratio of '
        'decompressed size to compressed size is required to be at least this number, '
        'a floating point value R; default value is 1.')) = 1,
):
    if min_ratio <= 0:
        raise ValueError('The compression ratio must be positive.')
    super().__init__(tolerance=tolerance, prepend=prepend, min_ratio=min_ratio)
    self.engines = [engine() for engine in [
        szdd, zl, lzma, aplib, jcalg, bz2, blz, lzjb, lz4, lzo, lznt1]]
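# Conceptual sketch of the prepend heuristic described in the help text,
# using zlib as a stand-in for the unit's many engines: when plain
# decompression fails, retry with every possible single-byte prefix.
import zlib

def decompress_with_prepend(data: bytes):
    try:
        return zlib.decompress(data)
    except zlib.error:
        pass
    for prefix in range(0x100):
        try:
            return zlib.decompress(bytes([prefix]) + data)
        except zlib.error:
            continue
    return None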
def __init__(
    self,
    amount: Arg.Number(help='Number of letters to rotate by; default is 13.') = 13
):
    super().__init__(amount=amount)
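# Minimal sketch of the letter rotation (a Caesar shift applied to each of
# the two ASCII alphabets); for the default amount of 13 this is ROT13.
def rot(data: bytes, amount: int = 13) -> bytes:
    out = bytearray(data)
    for k, b in enumerate(out):
        for base in (ord('a'), ord('A')):
            if base <= b < base + 26:
                out[k] = base + (b - base + amount) % 26
    return bytes(out)

# rot(b'Hello') == b'Uryyb'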
def __init__(self, size: Arg.Number(
    help='Size of each group; must be at least 2.', bound=(2, None))
):
    super().__init__(size=size)