def main():
    """zipdump entry point: scan each FILE/URL (or stdin) for PKZIP data."""
    import argparse
    parser = argparse.ArgumentParser(
        description='zipdump - scan file contents for PKZIP data',
        epilog='zipdump can quickly scan a zip from an URL without downloading the complete archive')
    parser.add_argument('--verbose', '-v', action='count')
    parser.add_argument('--cat', '-c', type=str, help='decompress file to stdout')
    parser.add_argument('--print', '-p', type=str, help='print raw file data to stdout')
    parser.add_argument('--recurse', '-r', action='store_true', help='recurse into directories')
    parser.add_argument('--skiplinks', '-L', action='store_true', help='skip symbolic links')
    parser.add_argument('--offset', '-o', type=int, help='start processing at offset')
    parser.add_argument('--length', '-l', type=int, help='max length of data to process')
    parser.add_argument('--quick', '-q', action='store_true',
                        help='Quick dir scan. This is quick with URLs as well.')
    parser.add_argument('--chunksize', type=int, default=1024 * 1024)
    parser.add_argument('FILES', type=str, nargs='*', help='Files or URLs')
    args = parser.parse_args()

    # No file arguments: process stdin instead.
    if not args.FILES:
        processfile(args, sys.stdin.buffer)
        return

    for fn in EnumeratePaths(args, args.FILES):
        print("==> ", fn, " <==")
        try:
            # "xxx://" with the separator at offset 3..5 -> treat as a URL
            # and read it lazily via urlstream.
            if fn.find("://") in (3, 4, 5):
                import urlstream
                with urlstream.open(fn) as handle:
                    processfile(args, handle)
            else:
                with open(fn, "rb") as handle:
                    processfile(args, handle)
        except Exception as e:
            print("ERROR: %s" % e)
            raise
def main():
    """fatdump entry point: open each FILE/URL as a FAT filesystem image and process it.

    BUGFIX: in the original, the ``fn == '-'`` branch referenced an undefined
    name ``fh``; '-' conventionally means "read from stdin", so the reader is
    now built from ``sys.stdin.buffer``.
    """
    def auto_int(x):
        # Accept decimal as well as 0x.. / 0o.. / 0b.. prefixed values.
        return int(x, 0)
    import argparse
    parser = argparse.ArgumentParser(description='fatdump')
    parser.add_argument('--verbose', '-v', action='count', default=0)
    parser.add_argument('--offset', '-o', type=auto_int, default=0)
    parser.add_argument('--recurse', '-r', action='store_true',
                        help='recurse into directories, when finding disk images')
    parser.add_argument('--skiplinks', '-L', action='store_true', help='ignore symlinks')
    parser.add_argument('--listfiles', '-l', action='store_true', help='list files')
    parser.add_argument('--badblocks', type=str, help='bad sector nrs')
    parser.add_argument('--blocksize', type=str, help='the blocksize')
    parser.add_argument('--cat', '-c', type=str, help='cat a file to stdout')
    parser.add_argument('--debug', action='store_true')
    parser.add_argument('FILES', type=str, nargs='+', help='Files or URLs')
    # todo: add partition selector
    # todo: extract files
    # todo: create image
    args = parser.parse_args()
    if args.FILES:
        for fn in EnumeratePaths(args, args.FILES):
            if fn == '-':
                import sys
                # '-' means: read the image from stdin.
                fs = FatFilesystem(args, makereader(args, sys.stdin.buffer))
                processfs(args, fs)
            else:
                print("==>", fn, "<==")
                try:
                    # Looks like a URL -> read it lazily via urlstream.
                    if fn.find("://") in (3, 4, 5):
                        import urlstream
                        with urlstream.open(fn) as fh:
                            fs = FatFilesystem(args, makereader(args, fh))
                            processfs(args, fs)
                    else:
                        with open(fn, "rb") as fh:
                            fs = FatFilesystem(args, makereader(args, fh))
                            processfs(args, fs)
                except Exception as e:
                    print("ERROR: %s" % e)
                    # Only re-raise when debugging; normally continue with
                    # the next file.
                    if args.debug:
                        raise
def main():
    """zipdump entry point with pkzip-decryption support.

    Normalizes the three mutually-exclusive key options (--hexpassword,
    --keys, --password) into ``args.password``, then scans each FILE/URL
    (or stdin when no FILES are given).

    Cleanup: removed the dead local ``use_raw`` (assigned, never read).
    """
    import argparse
    parser = argparse.ArgumentParser(
        description='zipdump - scan file contents for PKZIP data',
        epilog='zipdump can quickly scan a zip from an URL without downloading the complete archive')
    parser.add_argument('--verbose', '-v', action='count')
    parser.add_argument('--quiet', action='store_true')
    parser.add_argument('--cat', '-c', nargs='*', type=str, help='decompress file(s) to stdout')
    parser.add_argument('--raw', '-p', nargs='*', type=str, help='print raw compressed file(s) data to stdout')
    parser.add_argument('--save', '-s', nargs='*', type=str, help='extract file(s) to the output directory')
    parser.add_argument('--outputdir', '-d', type=str, help='the output directory, default = curdir', default='.')
    parser.add_argument('--quick', '-q', action='store_true',
                        help='Quick dir scan. This is quick with URLs as well.')
    parser.add_argument('--recurse', '-r', action='store_true', help='recurse into directories')
    parser.add_argument('--skiplinks', '-L', action='store_true', help='skip symbolic links')
    parser.add_argument('--offset', '-o', type=int, help='start processing at offset')
    parser.add_argument('--length', '-l', type=int, help='max length of data to process')
    parser.add_argument('--chunksize', type=int, default=1024 * 1024)
    parser.add_argument('--dumpraw', action='store_true', help='hexdump raw compressed data')
    parser.add_argument('--password', type=str, help="Password for pkzip decryption")
    parser.add_argument('--hexpassword', type=str, help="hexadecimal password for pkzip decryption")
    parser.add_argument('--keys', type=str, help="internal key representation for pkzip decryption")
    parser.add_argument('FILES', type=str, nargs='*', help='Files or URLs')
    args = parser.parse_args()

    # Reduce the various key-material options to a single args.password value:
    # bytes from hex, a list of ints for raw keys, or utf-8 encoded text.
    if args.hexpassword:
        args.password = binascii.a2b_hex(args.hexpassword)
    elif args.keys:
        args.password = list(int(_, 0) for _ in args.keys.split(","))
    elif args.password:
        args.password = args.password.encode('utf-8')

    if args.FILES:
        for fn in EnumeratePaths(args, args.FILES):
            if len(args.FILES) > 1 and not args.quiet:
                print("\n==> " + fn + " <==\n")
            try:
                if fn.find("://") in (3, 4, 5):
                    # when argument looks like a url, use urlstream to open
                    import urlstream
                    with urlstream.open(fn) as fh:
                        processfile(args, fh)
                else:
                    with open(fn, "rb") as fh:
                        processfile(args, fh)
            except Exception as e:
                print("ERROR: %s" % e)
                raise
    else:
        processfile(args, sys.stdin.buffer)
def main():
    """webdump entry point: hexdump byte ranges of web resources (or local files)."""
    parser = argparse.ArgumentParser(description='webdump - hexdump data via HTTP')
    parser.add_argument('-o', '--offset', type=str, help='Offset into web resource.')
    parser.add_argument('--debug', action='store_true')
    parser.add_argument('-l', '--length', type=str, help='number of bytes to output')
    parser.add_argument('-e', '--end', type=str, help='Offset after last offset to process')
    parser.add_argument('-s', '--step', type=str, help='Step size')
    parser.add_argument('-w', '--width', type=str, help='nr of items per line')
    parser.add_argument('-x', action='store_const', dest='outputmode', const='hexonly', help='only hex, no asc')
    parser.add_argument('-xx', action='store_const', dest='outputmode', const='asconly', help='only asc, no hex')
    # Element-size flags: -8/-4/-2/-1 select the word width of the dump.
    for size, desc in ((8, '64 bit qwords'), (4, '32 bit dwords'),
                       (2, '16 bit hwords'), (1, '8 bit bytes')):
        parser.add_argument('-%d' % size, action='store_const', dest='elemsize',
                            const=size, help='output as ' + desc)
    parser.add_argument('URLS', type=str, nargs='+', help='The web resources we are interested in.')
    args = parser.parse_args()

    if args.debug:
        urlstream.debuglog = True

    # Derive the two dump toggles from the single outputmode flag:
    # default (None) enables both, 'hexonly'/'asconly' disable the other one.
    args.with_hexdump = args.outputmode != 'asconly'
    args.with_ascdump = args.outputmode != 'hexonly'

    # convert 0xHEX or DEC strings to number
    for key in ('offset', 'length', 'end', 'step', 'width'):
        value = getattr(args, key, None)
        if value is not None:
            setattr(args, key, int(value, 0))

    for fn in args.URLS:
        if len(args.URLS) > 1:
            print("==> %s <==" % fn)
        if fn.find("://") in (3, 4, 5):
            with urlstream.open(fn) as fh:
                processstream(args, fh)
        else:
            with open(fn, "rb") as fh:
                processstream(args, fh)
def main():
    """zipdump entry point (full-featured variant: extraction, decryption, repeatable options)."""
    import argparse

    class MultipleOptions(argparse.Action):
        """Argparse action collecting repeated options into a list:
        ``--xyz ARG1 --xyz ARG2`` yields ``args.xyz == ['ARG1', 'ARG2']``.
        """
        def __init__(self, option_strings, dest, nargs=None, **kwargs):
            # 'nargs' is accepted but intentionally discarded.
            super(MultipleOptions, self).__init__(option_strings, dest, **kwargs)

        def __call__(self, parser, namespace, values, option_string=None):
            collected = getattr(namespace, self.dest)
            if collected is None:
                collected = []
                setattr(namespace, self.dest, collected)
            collected.append(values)

    parser = argparse.ArgumentParser(
        description='zipdump - scan file contents for PKZIP data',
        epilog='zipdump can quickly scan a zip from an URL without downloading the complete archive')
    parser.add_argument('--verbose', '-v', action='count')
    parser.add_argument('--quiet', action='store_true')
    parser.add_argument('--debug', action='store_true')
    parser.add_argument('--cat', '-c', type=str, action=MultipleOptions, help='decompress file to stdout')
    parser.add_argument('--raw', '-p', type=str, action=MultipleOptions, help='print raw compressed file data to stdout')
    parser.add_argument('--save', '-s', type=str, action=MultipleOptions, help='extract file to the output directory')
    parser.add_argument('--outputdir', '-d', type=str, help='the output directory, default = curdir', default='.')
    parser.add_argument('--extract', '-e', action='store_true', help='Extract all files')
    parser.add_argument('--strip', '-j', type=int, help='strip N initial parts from pathnames before saving')
    parser.add_argument('--quick', '-q', action='store_true',
                        help='Quick dir scan. This is quick with URLs as well.')
    parser.add_argument('--preserve', '-P', action='store_true', help="preserve permissions and timestamps")
    parser.add_argument('--recurse', '-r', action='store_true', help='recurse into directories')
    parser.add_argument('--skiplinks', '-L', action='store_true', help='skip symbolic links')
    parser.add_argument('--offset', '-o', type=int, help='start processing at offset')
    parser.add_argument('--length', '-l', type=int, help='max length of data to process')
    parser.add_argument('--chunksize', type=int, default=1024 * 1024)
    parser.add_argument('--pretty', action='store_true', help='make output easier to read')
    parser.add_argument('--dumpraw', action='store_true', help='hexdump raw compressed data')
    parser.add_argument('--limit', type=str, help='limit raw dump output')
    parser.add_argument('--password', type=str, help="Password for pkzip decryption")
    parser.add_argument('--hexpassword', type=str, help="hexadecimal password for pkzip decryption")
    parser.add_argument('--keys', type=str, help="internal key representation for pkzip decryption")
    parser.add_argument('FILES', type=str, nargs='*', help='Files or URLs')
    args = parser.parse_args()

    use_raw = args.cat or args.raw or args.save

    # --limit accepts decimal or 0x-prefixed hex.
    if args.limit:
        args.limit = int(args.limit, 0)

    # Normalize key material into args.password: raw bytes, key-int list,
    # or utf-8 encoded text, depending on which option was given.
    if args.hexpassword:
        args.password = binascii.a2b_hex(args.hexpassword)
    elif args.keys:
        args.password = list(int(_, 0) for _ in args.keys.split(","))
    elif args.password:
        args.password = args.password.encode('utf-8')

    if not args.FILES:
        processfile(args, sys.stdin.buffer)
        return

    for fn in EnumeratePaths(args, args.FILES):
        if len(args.FILES) > 1 and not args.quiet:
            print("\n==> " + fn + " <==\n")
        try:
            # when argument looks like a url, use urlstream to open
            if fn.find("://") in (3, 4, 5):
                import urlstream
                with urlstream.open(fn) as fh:
                    processfile(args, fh)
            else:
                with open(fn, "rb") as fh:
                    processfile(args, fh)
        except Exception as e:
            print("ERROR: %s" % e)
            if args.debug:
                raise
def main():
    """zipdump entry point: scan FILES/URLs (or stdin) for PKZIP structures."""
    import argparse
    parser = argparse.ArgumentParser(
        description='zipdump - scan file contents for PKZIP data',
        epilog='zipdump can quickly scan a zip from an URL without downloading the complete archive')
    parser.add_argument('--verbose', '-v', action='count')
    parser.add_argument('--cat', '-c', type=str, help='decompress file to stdout')
    parser.add_argument('--print', '-p', type=str, help='print raw file data to stdout')
    parser.add_argument('--recurse', '-r', action='store_true', help='recurse into directories')
    parser.add_argument('--skiplinks', '-L', action='store_true', help='skip symbolic links')
    parser.add_argument('--offset', '-o', type=int, help='start processing at offset')
    parser.add_argument('--length', '-l', type=int, help='max length of data to process')
    parser.add_argument('--quick', '-q', action='store_true',
                        help='Quick dir scan. This is quick with URLs as well.')
    parser.add_argument('--chunksize', type=int, default=1024 * 1024)
    parser.add_argument('--pretty', action='store_true', help='make output easier to read')
    parser.add_argument('FILES', type=str, nargs='*', help='Files or URLs')
    args = parser.parse_args()

    if args.FILES:
        for fn in EnumeratePaths(args, args.FILES):
            print("==> ", fn, " <==")
            try:
                # A "://" separator at index 3..5 marks the argument as a URL,
                # which is opened lazily through urlstream.
                is_url = fn.find("://") in (3, 4, 5)
                if is_url:
                    import urlstream
                    opener = urlstream.open(fn)
                else:
                    opener = open(fn, "rb")
                with opener as fh:
                    processfile(args, fh)
            except Exception as e:
                print("ERROR: %s" % e)
                raise
    else:
        # No FILES given -> read from stdin.
        processfile(args, sys.stdin.buffer)
def main():
    """zipdump entry point with pkzip-decryption options.

    Folds --hexpassword / --keys / --password into a single ``args.password``
    value, then scans each FILE/URL (or stdin when no FILES are given).
    """
    import argparse
    parser = argparse.ArgumentParser(
        description='zipdump - scan file contents for PKZIP data',
        epilog='zipdump can quickly scan a zip from an URL without downloading the complete archive')
    parser.add_argument('--verbose', '-v', action='count')
    parser.add_argument('--quiet', action='store_true')
    parser.add_argument('--cat', '-c', nargs='*', type=str, help='decompress file(s) to stdout')
    parser.add_argument('--raw', '-p', nargs='*', type=str, help='print raw compressed file(s) data to stdout')
    parser.add_argument('--save', '-s', nargs='*', type=str, help='extract file(s) to the output directory')
    parser.add_argument('--outputdir', '-d', type=str, help='the output directory, default = curdir', default='.')
    parser.add_argument('--quick', '-q', action='store_true',
                        help='Quick dir scan. This is quick with URLs as well.')
    parser.add_argument('--recurse', '-r', action='store_true', help='recurse into directories')
    parser.add_argument('--skiplinks', '-L', action='store_true', help='skip symbolic links')
    parser.add_argument('--offset', '-o', type=int, help='start processing at offset')
    parser.add_argument('--length', '-l', type=int, help='max length of data to process')
    parser.add_argument('--chunksize', type=int, default=1024 * 1024)
    parser.add_argument('--dumpraw', action='store_true', help='hexdump raw compressed data')
    parser.add_argument('--password', type=str, help="Password for pkzip decryption")
    parser.add_argument('--hexpassword', type=str, help="hexadecimal password for pkzip decryption")
    parser.add_argument('--keys', type=str, help="internal key representation for pkzip decryption")
    parser.add_argument('FILES', type=str, nargs='*', help='Files or URLs')
    args = parser.parse_args()

    use_raw = args.cat or args.raw or args.save

    # Normalize the key material: hex -> bytes, keys -> list of ints,
    # plain password -> utf-8 bytes.
    if args.hexpassword:
        args.password = binascii.a2b_hex(args.hexpassword)
    elif args.keys:
        args.password = list(int(_, 0) for _ in args.keys.split(","))
    elif args.password:
        args.password = args.password.encode('utf-8')

    if not args.FILES:
        processfile(args, sys.stdin.buffer)
        return

    for fn in EnumeratePaths(args, args.FILES):
        # Only label the output when dumping several files.
        if len(args.FILES) > 1 and not args.quiet:
            print("\n==> " + fn + " <==\n")
        try:
            # when argument looks like a url, use urlstream to open
            if fn.find("://") in (3, 4, 5):
                import urlstream
                with urlstream.open(fn) as fh:
                    processfile(args, fh)
            else:
                with open(fn, "rb") as fh:
                    processfile(args, fh)
        except Exception as e:
            print("ERROR: %s" % e)
            raise