def __init__(
    self,
    addresses: Arg(
        type=sliceobj, nargs='+', metavar='start:count:align', help=(
            'Use Python slice syntax to describe an area of virtual memory to read. If a chunksize is '
            'specified, then the unit will always read a multiple of that number of bytes')),
    ascii: Arg.Switch('-a', group='END',
        help='Read ASCII strings; equivalent to -th:00') = False,
    utf16: Arg.Switch('-u', group='END',
        help='Read UTF16 strings; equivalent to -th:0000 (also sets chunksize to 2)') = False,
    until: Arg.Binary('-t', group='END',
        help='Read until sequence {varname} is read.') = B'',
    base: Arg.Number('-b', metavar='ADDR',
        help='Optionally specify a custom base address B.') = None,
):
    """
    Parse the virtual memory read options and forward them to the parent
    initializer. The three termination modes (ascii, utf16, until) are
    mutually exclusive.
    """
    # Collect the termination modes that were actually enabled; at most one
    # of them may be active at a time.
    enabled = [mode for mode in (until, utf16, ascii) if mode]
    if len(enabled) > 1:
        raise ValueError('Only one of utf16, ascii, and until can be specified.')
    super().__init__(addresses=addresses, utf16=utf16, ascii=ascii, until=until, base=base)
def __init__(
    self,
    level: Arg.Number('-l', bound=(0, 0X9),
        help='Specify a compression level between 0 and 9.') = 9,
    window: Arg.Number('-w', bound=(8, 0XF),
        help='Manually specify the window size between 8 and 15.') = 15,
    force: Arg.Switch('-f',
        help='Decompress as far as possible, even if all known methods fail.') = False,
    zlib_header: Arg.Switch('-z', group='MODE', help='Use a ZLIB header.') = False,
    gzip_header: Arg.Switch('-g', group='MODE', help='Use a GZIP header.') = False
):
    """
    Validate the compression options and forward them to the parent
    initializer. The ZLIB and GZIP header options are mutually exclusive.
    """
    if zlib_header and gzip_header:
        raise ValueError('You can only specify one header type (ZLIB or GZIP).')
    super().__init__(
        level=level,
        window=window,
        force=force,
        zlib_header=zlib_header,
        gzip_header=gzip_header,
    )
def __init__(
    self,
    blocks: Arg.Number('-B', help=(
        'Group hexadecimal bytes in blocks of the given size; default is {default}.')) = 1,
    dense: Arg.Switch('-D', help='Do not insert spaces in hexdump.') = False,
    expand: Arg.Switch('-E', help=(
        'Do not compress sequences of identical lines in hexdump')) = False,
    narrow: Arg.Switch('-N', help='Do not show addresses in hexdump') = False,
    width: Arg.Number('-W', help=(
        'Specify the number of hexadecimal characters to use in preview.')) = 0,
    **kwargs
):
    """
    Collect the hexdump layout options and forward them, together with any
    extra keyword arguments, to the parent initializer.
    """
    super().__init__(
        blocks=blocks, dense=dense, expand=expand, narrow=narrow, width=width, **kwargs)
def __init__(
    self,
    consecutive: Arg.Switch('-c', help='Assume that the repeating pattern is consecutive when observable.') = False,
    align: Arg.Switch('-d', help='Assume that the pattern occurs at offsets that are multiples of its length.') = False,
    min: Arg.Number('-n', help='Minimum size of the pattern to search for. Default is {default}.') = 1,
    max: Arg.Number('-N', help='Maximum size of the pattern to search for. Default is {default}.') = INF,
    len: Arg.Number('-l', help='Set the exact size of the pattern. This is equivalent to --min=N --max=N.') = None,
    all: Arg.Switch('-a', help='Produce one output for each repeating pattern that was detected.') = False,
    threshold: Arg.Number('-t', help='Patterns must match this performance threshold in percent, lest they be discarded.') = 20,
    weight: Arg.Number('-w', help='Specifies how much longer patterns are favored over small ones. Default is {default}.') = 0,
    buffer: Arg.Number('-b', group='BFR', help='Maximum number of bytes to inspect at once. The default is {default}.') = 1024,
    chug: Arg.Switch('-g', group='BFR', help='Compute the prefix tree for the entire buffer instead of chunking it.') = False
):
    """
    Normalize the pattern size bounds and forward all options to the parent
    initializer. An exact length pins both the lower and upper bound.
    """
    if len is not None:
        min = max = len
    super().__init__(
        min=min,
        max=max,
        all=all,
        consecutive=consecutive,
        align=align,
        weight=weight,
        buffer=buffer,
        chug=chug,
        threshold=threshold,
    )
def __init__(
    self,
    *filenames: Arg(metavar='FILEMASK', nargs='+', type=str, help=(
        'A list of file masks (with wildcard patterns). Each matching '
        'file will be read from disk and emitted. In addition to glob '
        'patterns, the file mask can include format string expressions '
        'which will be substituted from the current meta variables.')),
    list: Arg.Switch('-l', help='Only lists files with metadata.') = False,
    meta: Arg.Switch('-m', help=(
        'Adds the atime, mtime, ctime, and size metadata variables.')) = False,
    size: Arg.Number('-s', help=(
        'If specified, files will be read in chunks of size N and each '
        'chunk is emitted as one element in the output list.')) = 0,
    linewise: Arg.Switch('-w', help=(
        'Read the file linewise. By default, one line is read at a time. '
        'In line mode, the --size argument can be used to read the given '
        'number of lines in each chunk.')) = False
):
    """
    Forward the file mask list and reading options to the parent initializer.
    """
    super().__init__(
        size=size, list=list, meta=meta, linewise=linewise, filenames=filenames)
def __init__(
    self,
    base: Arg(type=numseq, metavar='base|alphabet', help=(
        R'Either the base to be used or an alphabet. If an explicit alphabet is given, its length '
        R'determines the base. The default base 0 treats the input as a Python integer literal. If '
        F'a numeric base is given, digits from the alphabet "{_DEFAULT_ALPH_STR}" are used. ')) = 0,
    strip_padding: Arg.Switch('-s',
        help='Do not add leading zeros to the output.') = False,
    little_endian: Arg.Switch('-e',
        help='Use little endian byte order instead of big endian.') = False,
    strict_digits: Arg.Switch('-d',
        help='Check that all input digits are part of the alphabet.') = False,
):
    """
    Forward the base/alphabet selection and formatting switches to the
    parent initializer.
    """
    super().__init__(
        base=base,
        strip_padding=strip_padding,
        little_endian=little_endian,
        strict_digits=strict_digits,
    )
def __init__(
    self,
    size: Arg.Number('size', help='Chop data into chunks of this size.'),
    truncate: Arg.Switch('-t', help=(
        'Truncate possible excess bytes at the end of the input, by default they are appended as a single chunk.')) = False,
    into: Arg.Switch('-i', help=(
        'If this flag is specified, the size parameter determines the number of blocks to be produced rather than the size '
        'of each block. In this case, truncation is performed before the data is split.')) = False
):
    """
    Forward the chunking options to the parent initializer.
    """
    super().__init__(size=size, truncate=truncate, into=into)
def __init__(
    self,
    certificate: Arg.Switch('--no-cert', '-c',
        help='Do not include digital signatures for the size computation.') = True,
    directories: Arg.Switch('--no-dirs', '-d',
        help='Do not include any data directories for size computation (implies --no-cert).') = True,
    memdump: Arg.Switch('-m',
        help='Assume that the file data was a memory-mapped PE file.') = False,
):
    """
    Forward the PE size computation options to the parent initializer. The
    two --no-* switches default to True and are turned off by their flags.
    """
    super().__init__(
        certificate=certificate, directories=directories, memdump=memdump)
def __init__(
    self,
    regex: Arg(type=regexp, help='Regular expression to match.'),
    multiline: Arg.Switch('-M', help='Caret and dollar match the beginning and end of a line, a dot does not match line breaks.') = False,
    ignorecase: Arg.Switch('-I', help='Ignore capitalization for alphabetic characters.') = False,
    count: Arg.Number('-c', help='Specify the maximum number of operations to perform.') = 0,
    **keywords
):
    """
    Translate the multiline/ignorecase switches into a combined re flag
    value and forward everything to the parent initializer.
    """
    # Without -M, a dot matches line breaks (DOTALL); with -M, line anchors
    # become active instead.
    if multiline:
        flags = re.MULTILINE
    else:
        flags = re.DOTALL
    if ignorecase:
        flags |= re.IGNORECASE
    super().__init__(regex=regex, flags=flags, count=count, **keywords)
def __init__(
    self,
    url_only: Arg.Switch('-u', help='Only defang URLs, do not look for domains or IPs.') = False,
    url_protocol: Arg.Switch('-p', help='Escape the protocol in URLs.') = False,
    dot_only: Arg.Switch('-d', help='Do not escape the protocol colon in URLs.') = False,
    quote_md: Arg.Switch('-q', help='Wrap all indicators in backticks for markdown code.') = False
):
    # Forward every parameter to the parent initializer via vars(). Note
    # that vars() captures ALL local names in this frame, so no additional
    # local variables may be introduced in this body.
    self.superinit(super(), **vars())
def __init__(
    self,
    *junk: Arg(help='Binary strings to be removed, default are all whitespace characters.'),
    left: Arg.Switch('-r', '--right-only', group='SIDE', help='Do not trim left.') = True,
    right: Arg.Switch('-l', '--left-only', group='SIDE', help='Do not trim right.') = True
):
    """
    Forward the junk patterns and side selection to the parent initializer.
    The -r/-l switches disable trimming on the opposite side.
    """
    super().__init__(junk=junk, left=left, right=right)
def __init__(
    self,
    spec: Arg(type=str, help='Structure format as explained above.'),
    *outputs: Arg(metavar='output', type=str, help='Output format as explained above.'),
    multi: Arg.Switch('-m', help=(
        'Read as many pieces of structured data as possible intead of just one.')) = False,
    count: Arg.Number('-n', help=(
        'A limit on the number of chunks to read in multi mode; default is {default}.')) = INF,
    until: Arg('-u', metavar='E', type=str, help=(
        'An expression evaluated on each chunk in multi mode. New chunks will be parsed '
        'only if the result is nonzero.')) = None,
):
    """
    Forward the structure specification and multi-mode options to the parent
    initializer. When no output format is given, a default one is used.
    """
    if not outputs:
        outputs = [F'{{{_SHARP}}}']
    super().__init__(spec=spec, outputs=outputs, until=until, count=count, multi=multi)
def __init__(
    self,
    key,
    stateful: Arg.Switch('-s', help='Do not reset the key stream while processing the chunks of one frame.') = False,
    **keywords
):
    """
    Forward the key and statefulness flag to the parent initializer and
    initialize the key stream cache to an empty state.
    """
    super().__init__(key=key, stateful=stateful, **keywords)
    self._keystream = None
def __init__(
    self,
    user: Arg.Switch('-m', '--meta', off=True, group='HEAP',
        help='Only extract from #Strings.') = True,
    meta: Arg.Switch('-u', '--user', off=True, group='HEAP',
        help='Only extract from #US.') = True,
):
    """
    Select which .NET string heaps to extract from. Both heaps are enabled
    by default; the --meta switch turns off user strings (#US) and the
    --user switch turns off meta strings (#Strings). At least one heap must
    remain enabled.
    """
    if not meta and not user:
        # Bugfix: the previous message referred to "ascii or utf16", which
        # are not parameters of this unit (evidently copied from a strings
        # unit). The options here are the meta (#Strings) and user (#US)
        # string heaps.
        raise ValueError('Either meta or user strings must be enabled.')
    super().__init__(meta=meta, user=user)
def __init__(
    self,
    *commandline: Arg(nargs='...', type=str, metavar='(all remaining)', help=(
        'All remaining command line tokens form an arbitrary command line to be executed. Use format string syntax '
        'to insert meta variables and incoming data chunks.')),
    buffer: Arg.Switch('-b', help=(
        'Buffer the command output for one execution rather than streaming it.')) = False,
    noerror: Arg('-e', help=(
        'do not merge stdin and stderr; stderr will only be output if -v is also specified.')) = False,
    timeout: Arg('-t', metavar='T', help=(
        'Set an execution timeout as a floating point number in seconds, there is none by default.')) = 0.0
):
    """
    Validate that a command line was supplied and forward the execution
    options to the parent initializer.
    """
    if not commandline:
        raise ValueError('you need to provide a command line.')
    super().__init__(
        commandline=commandline, noerror=noerror, buffer=buffer, timeout=timeout)
def __init__(
    self,
    min=1,
    max=None,
    len=None,
    stripspace=False,
    duplicates=False,
    longest=False,
    take=None,
    ascii: Arg.Switch('-u', '--no-ascii', group='AvsU',
        help='Search for UTF16 encoded patterns only.') = True,
    utf16: Arg.Switch('-a', '--no-utf16', group='AvsU',
        help='Search for ASCII encoded patterns only.') = True,
    **keywords
):
    """
    Forward all pattern search options to the parent initializer. Both
    encodings are enabled by default; the --no-ascii/--no-utf16 switches
    disable one of them.
    """
    super().__init__(
        min=min,
        max=max,
        len=len,
        stripspace=stripspace,
        duplicates=duplicates,
        longest=longest,
        take=take,
        ascii=ascii,
        utf16=utf16,
        **keywords,
    )
def __init__(
    self, *slice,
    visible: Arg.Switch('-n', '--not', off=True, help=(
        'Hide the given chunks instead of making them the only ones visible.')) = True
):
    """
    Forward the slice arguments and visibility flag to the parent
    initializer, then order the stored slices for lazy evaluation.
    """
    super().__init__(*slice, visible=visible)

    def sort_key(bounds):
        # Slices with negative components sort to the back so they are
        # checked last; this delays potential consumption of the chunks
        # iterator as much as possible.
        return (bounds.start or 0, bounds.stop or 0)

    self.args.slice.sort(key=sort_key, reverse=True)
def __init__(
    self,
    *paths: Arg(metavar='path', nargs='*', default=(), type=pathspec, help=(
        'Wildcard pattern for the name of the item to be extracted. Each item is returned'
        ' as a separate output of this unit. Paths may contain wildcards. The default is '
        'a single wildcard, which means that every item will be extracted.')),
    list: Arg.Switch('-l',
        help='Return all matching paths as UTF8-encoded output chunks.') = False,
    join_path: Arg.Switch('-j', group='PATH',
        help='Join path names from container with previous path names.') = False,
    drop_path: Arg.Switch('-d', group='PATH',
        help='Do not modify the path variable for output chunks.') = False,
    regex: Arg.Switch('-r',
        help='Use regular expressions instead of wildcard patterns.') = False,
    path: Arg('-P', metavar='NAME', help=(
        'Name of the meta variable to receive the extracted path. The default value is "{default}".')) = b'path',
    **keywords
):
    """
    Forward the extraction options to the parent initializer. Note that the
    join_path/drop_path parameters are stored under the shorter names join
    and drop.
    """
    super().__init__(
        paths=paths,
        list=list,
        join=join_path,
        drop=drop_path,
        path=path,
        regex=regex,
        **keywords,
    )
def __init__(
    self,
    slices: Arg(help='Specify start:stop:step in Python slice syntax.') = [
        slice(None, None)
    ],
    remove: Arg.Switch('-r',
        help='Remove the slices from the input rather than selecting them.') = False
):
    """
    Forward the slice selection to the parent initializer.

    NOTE(review): the default is a shared mutable list; this is safe only
    as long as no caller mutates the stored default — confirm against the
    argument framework.
    """
    super().__init__(slices=slices, remove=remove)
def __init__(
    self,
    marker: Arg.Switch('-m', '--no-marker', off=True, help=(
        'Do not require magic marker when encoding and do not search for '
        'marker when decoding.')) = True
):
    """
    Forward the marker switch to the parent initializer; the --no-marker
    flag turns the (default-on) marker handling off.
    """
    super().__init__(marker=marker)
def __init__(
    self,
    public: Arg.Switch('-p',
        help='Force public key output even if the input is private.') = False,
    output: Arg(
        help='Select an output format (PEM/DER/XKMS/TEXT/JSON), default is PEM.'
    ) = RSAFormat.PEM
):
    """
    Normalize the output format to an RSAFormat option and forward the
    options to the parent initializer.
    """
    output_format = Arg.AsOption(output, RSAFormat)
    super().__init__(public=public, output=output_format)
def __init__(
    self,
    separator: Arg(help='Separator; the default is a line break.') = B'\n',
    scoped: Arg.Switch('-s', help=(
        'Maintain chunk scope; i.e. do not turn all input chunks visible.')) = False
):
    """
    Forward the separator options to the parent initializer and reset the
    separation state flag.
    """
    super().__init__(separator=separator, scoped=scoped)
    self.separate = False
def __init__(
    self,
    indent: Arg.Number('-i', help=(
        'Controls the amount of space characters used for indentation in the output. Default is 4.')) = 4,
    header: Arg.Switch('-x',
        help='Add an XML header to the formatted output.') = False
):
    """
    Forward the XML formatting options to the parent initializer.
    """
    super().__init__(indent=indent, header=header)
def __init__(
    self,
    key: Arg(type=str, help='The encryption key'),
    alphabet: Arg(
        help='The alphabet, by default the Latin one is used: "{default}"'
    ) = 'abcdefghijklmnopqrstuvwxyz',
    operator: Arg.Choice('-:', choices=['add', 'sub', 'xor'], metavar='OP', help=(
        'Choose the vigenere block operation. The default is {default}, and the available options are: {choices}'
    )) = 'add',
    case_sensitive: Arg.Switch('-c', help=(
        'Unless this option is set, the key will be case insensitive. Uppercase letters from the input are transformed '
        'using the same shift as would be the lowercase variant, but case is retained.'
    )) = False,
    ignore_unknown: Arg.Switch('-i', help=(
        'Unless this option is set, the key stream will be iterated even '
        'for letters that are not contained in the alphabet.'
    )) = False
):
    """
    Validate the vigenere options and forward them to the parent
    initializer via vars(). No additional local variables may be introduced
    in this body, because vars() captures every local name in this frame.
    """
    if not callable(operator):
        # Map the operator name to the corresponding binary operator
        # function. Bugfix: previously the name was overwritten with the
        # lookup result (None) before the error was raised, so the message
        # always reported "The value None is not valid". Using try/except
        # keeps the original string available for the message without
        # adding a local variable.
        try:
            operator = {
                'add': __add__,
                'sub': __sub__,
                'xor': __xor__,
            }[operator.lower()]
        except KeyError:
            raise ValueError(F'The value {operator!r} is not valid as an operator.')
    if not case_sensitive:
        key = key.lower()
        alphabet = alphabet.lower()
    if len(set(alphabet)) != len(alphabet):
        raise ValueError('Duplicate entries detected in alphabet.')
    if not set(key) <= set(alphabet):
        raise ValueError('key contains letters which are not from the given alphabet')
    self.superinit(super(), **vars())
def __init__(
    self,
    *files: Arg(metavar='file', type=str, help='Optionally formatted filename.'),
    tee: Arg.Switch('-t', help='Forward all inputs to STDOUT.') = False,
    stream: Arg.Switch('-s',
        help='Dump all incoming data to the same file.') = False,
    plain: Arg.Switch('-p',
        help='Never apply any formatting to file names.') = False,
    force: Arg.Switch('-f',
        help='Remove files if necessary to create dump path.') = False,
):
    """
    Validate the dump targets, forward the options to the parent
    initializer, and initialize the streaming/formatting state.
    """
    if stream and len(files) != 1:
        raise ValueError('Can only use exactly one file in stream mode.')
    super().__init__(files=files, tee=tee, stream=stream, force=force)
    self.stream = None
    # Formatting only matters when plain mode is off and at least one file
    # name contains a format expression.
    if plain:
        self._formatted = False
    else:
        self._formatted = any(map(self._has_format, files))
    self._reset()
def __init__(
    self,
    variable: Arg(help='The variable which is used as the accumulator') = 'count',
    relative: Arg.Switch('-r',
        help='Normalize the accumulator to a number between 0 and 1.') = False
):
    """
    Forward the accumulator options to the parent initializer and set up
    the internal counting state.
    """
    super().__init__(variable=variable, relative=relative)
    # _trunk starts unset; _store maps keys to running integer counts.
    self._trunk = None
    self._store = collections.defaultdict(int)
def __init__(
    self,
    tabular: Arg.Switch('-t', group='OUT',
        help='Convert JSON input into a flattened table.') = False,
    indent: Arg.Number('-i', group='OUT',
        help='Number of spaces used for indentation. Default is {default}.') = 4
):
    """
    Forward the JSON output formatting options to the parent initializer.
    """
    super().__init__(indent=indent, tabular=tabular)
def __init__(
    self,
    hex: Arg.Switch('-x', help='Hex encode everything, do not use C escape sequences.') = False,
    unicode: Arg.Switch('-u', help='Use unicode escape sequences and UTF-8 encoding.') = False,
    greedy: Arg.Switch('-g', help=
        'Replace \\x by x and \\u by u when not followed by two or four hex digits, respectively.') = False,
    quoted: Arg.Switch('-q', help=
        'Remove enclosing quotes while decoding and add them for encoding.') = False,
    bare: Arg.Switch('-b', help='Do not escape quote characters.') = False,
    expand: Arg.Switch('-p', help=
        'Decode sequences of the form \\uHHLL as two bytes when the upper byte is nonzero.') = False,
) -> Unit:
    # Signature-only initializer: the body is intentionally empty.
    # Presumably the framework's argument machinery consumes these
    # parameters automatically — TODO confirm against the base class.
    pass  # noqa
def __init__(
    self,
    key: Arg(help='RSA key in PEM, DER, or Microsoft BLOB format.'),
    swapkeys: Arg.Switch('-s', help='Swap public and private exponent.') = False,
    textbook: Arg.Switch('-t', group='PAD', help='Equivalent to --padding=NONE.') = False,
    padding: Arg.Option('-p', group='PAD', choices=PAD, help=(
        'Choose one of the following padding modes: {choices}. The default is AUTO.')) = PAD.AUTO,
    rsautl: Arg.Switch('-r', group='PAD', help=(
        'Act as rsautl from OpenSSL; This is equivalent to --swapkeys --padding=PKCS10')) = False,
):
    """
    Resolve the padding mode from the mutually exclusive PAD options and
    forward everything to the parent initializer. The --textbook and
    --rsautl shortcuts each imply a specific padding and conflict with an
    explicit --padding choice.

    Fixes: the --rsautl help text previously attributed rsautl to OpenSSH;
    it is an OpenSSL tool.
    """
    padding = Arg.AsOption(padding, PAD)
    if textbook:
        if padding != PAD.AUTO:
            raise ValueError('Conflicting padding options!')
        # Bugfix: previously written as padding.NONE, i.e. accessing one
        # enum member through another; that access pattern is deprecated
        # and removed in recent Python versions. Refer to the enum class.
        padding = PAD.NONE
    if rsautl:
        if padding and padding != PAD.PKCS10:
            raise ValueError('Conflicting padding options!')
        swapkeys = True
        padding = PAD.PKCS10
    super().__init__(key=key, textbook=textbook, padding=padding, swapkeys=swapkeys)
    # Cached key material, populated lazily elsewhere.
    self._key_hash = None
    self._key_data = None
def __init__(
    self,
    min: Arg.Number('-n', help='Matches must have length at least N.') = 1,
    max: Arg.Number('-m', help='Matches must have length at most N.') = None,
    len: Arg.Number('-e', help='Matches must be of length N.') = None,
    stripspace: Arg.Switch('-x', help='Strip all whitespace from input data.') = False,
    duplicates: Arg.Switch('-r', help='Yield every (transformed) Match, even when it was found before.') = False,
    longest: Arg.Switch('-l', help='Sort results by length.') = False,
    take: Arg.Number('-t', help='Return only the first N occurrences in order of appearance.') = None,
    **keywords
):
    """
    Normalize unset bounds to their sentinel values and forward the search
    options to the parent initializer. Unless the caller decided otherwise,
    both ASCII and UTF16 search are enabled.
    """
    keywords.setdefault('ascii', True)
    keywords.setdefault('utf16', True)
    super().__init__(
        min=min,
        max=max or INF,
        len=len or AST,
        stripspace=stripspace,
        duplicates=duplicates,
        longest=longest,
        take=take or INF,
        **keywords,
    )