def display(self):
    # Print the cube as a flat net: U face on top, the L/F/R/B faces as
    # three joined rows in the middle, and the D face at the bottom.
    # Python 2 code (bare `print` statements).
    colorama.init(autoreset=True)  # autoreset restores the style after every print
    # Upper face rendered as a standalone 3x3 grid.
    indices = self.top_line(Side.U) + self.middle_line(Side.U) + self.bottom_line(Side.U)
    print " {}{}{}\n {}{}{}\n {}{}{}".format(*[self.cchar(self.colors[c]) for c in indices])
    # Middle band, row by row: the top rows of L, F, R and B side by side...
    indices = self.top_line(Side.L) + self.top_line(Side.F) + self.top_line(Side.R) + self.top_line(Side.B)
    for c in indices:
        sys.stdout.write(self.cchar(self.colors[c]))
    print
    # ...then the middle rows...
    indices = self.middle_line(Side.L) + self.middle_line(Side.F) + self.middle_line(Side.R) + self.middle_line(Side.B)
    for c in indices:
        sys.stdout.write(self.cchar(self.colors[c]))
    print
    # ...then the bottom rows.
    indices = self.bottom_line(Side.L) + self.bottom_line(Side.F) + self.bottom_line(Side.R) + self.bottom_line(Side.B)
    for c in indices:
        sys.stdout.write(self.cchar(self.colors[c]))
    print
    # Down face rendered as a standalone 3x3 grid.
    indices = self.top_line(Side.D) + self.middle_line(Side.D) + self.bottom_line(Side.D)
    print " {}{}{}\n {}{}{}\n {}{}{}".format(*[self.cchar(self.colors[c]) for c in indices])
    colorama.deinit()
def setup():
    """ Setup the bot. :raises: SetupError, NoAccountsError """
    # convert=True forces ANSI-to-Win32 conversion on Windows consoles.
    init(convert=True)
    print(Fore.WHITE + ' 1: Log In?')
    print(Fore.WHITE + ' 2: Load Account?')
    choice = GetNum()
    if not choice:
        raise SetupError()
    if choice == 1:
        deinit()
        while True:
            # NOTE(review): this statement is corrupted — a credential-scrubbing
            # pass replaced a chunk of the original code with '******' (the
            # password prompt / login logic and the start of the account-load
            # branch, which printed 'Accounts:'). Recover the original from
            # version control; as written this is not valid Python.
            user = input('Username: '******'Accounts:')
            # Account-load branch: list saved accounts, then prompt until a
            # known account name is entered.
            for acc in accounts:
                print(Fore.YELLOW + '* ' + acc)
            while True:
                account = input('Log in to: ')
                if account in accounts:
                    LoadAccounts(account)
                    break
            deinit()
def runExecutable(exe, number, threads):
    """Run <exe> <number> times with randomised input parameters.

    Each execution is independent, so the runs are fanned out over
    <threads> concurrent workers.

    @param: Executable to run
    @param: Number of times to run <exe>
    @param: Number of threads to concurrently use
    @return: Nothing
    """
    # We could read this from ../includes/inputs.h
    MAX_Z = 118
    # Minimum possible Z range is one below the maximum.
    MAX_LOW_Z = MAX_Z - 1
    green = colorama.Fore.GREEN
    reset = colorama.Style.RESET_ALL
    colorama.init()
    print("\nUsing: " + green + exe + reset + " to create " + green + str(number) + reset + " chart(s)\n")
    colorama.deinit()
    jobs = (delayed(createSingleChart)(MAX_LOW_Z, MAX_Z) for _ in range(number))
    Parallel(threads)(jobs)
    print()
def main():
    # CLI entry point: build the argument parser, register one sub-parser
    # per drive sub-command, then dispatch to the selected command.
    global drive_commands
    parser = argparse.ArgumentParser(description='YMK google drive command line tool')
    parser.add_argument('-v', '--verbose', action='count', default=0, help='increse verbosity/logging level')
    parser.add_argument('-w', '--write-config', action='store_true', help='write a default config')
    parser.add_argument('-V', '--version', action='version', version="%s" % gm.version, help='show version infomation')
    subparser = parser.add_subparsers(help='drive sub command', dest='command_name')
    # Each Command* constructor registers its own arguments on the sub-parser.
    drive_commands = {'list': command_list.CommandList(subparser),
                      'push': command_push.CommandPush(subparser),
                      'pull': command_pull.CommandPull(subparser),
                      'mkdir': command_mkdir.CommandMkdir(subparser),
                      'search': command_search.CommandSearch(subparser),
                      'trash': command_trash.CommandTrash(subparser),
                      'url': command_url.CommandUrl(subparser),
                      'share': command_share.CommandShare(subparser),
                      'init': command_init.CommandInit(subparser)}
    args = parser.parse_args()
    set_logging_level(args.verbose)
    get_config(args)
    # Colourised output only for the duration of the command.
    colorama.init()
    drive_commands[args.command_name].do_command(args)
    colorama.deinit()
def _handle_soft_error(exc_type, exc_value, exc_tb):
    # Render a compact, colourised traceback for a "soft" (user-facing)
    # error: one header line, then one line per stack frame, innermost first.
    colorama.init()
    err(Fore.LIGHTRED_EX + exc_type.__name__ + ": " + str(exc_value) + Style.RESET_ALL)
    # Convert to the list of frames
    tb = exc_tb
    frames = []
    while tb:
        frames.append(tb.tb_frame)
        tb = tb.tb_next
    # Honour an optional `skip_frames` attribute on the exception so helpers
    # can hide their own frames from the report.
    i0 = len(frames) - 1 - getattr(exc_value, "skip_frames", 0)
    indent = " " * (len(exc_type.__name__) + 2)
    for i in range(i0, 0, -1):
        co = frames[i].f_code
        # Map the code object back to the callable via the caller's frame.
        func = _find_function_from_code(frames[i - 1], co)
        fullname = _get_method_full_name(func) if func else "???." + co.co_name
        # Only the innermost frame highlights the offending variable name.
        highlight = getattr(exc_value, "var_name", None) if i == i0 else None
        args_str = _get_args_str(func, highlight=highlight)
        indent_len = len(exc_type.__name__) + len(fullname) + 6
        line = Fore.LIGHTBLACK_EX + indent + ("in " if i == i0 else " ")
        line += (Fore.CYAN + fullname + Fore.LIGHTBLACK_EX if i == i0 else fullname) + "("
        line += _wrap(args_str + ") line %d" % frames[i].f_lineno, indent=indent_len)
        line += Style.RESET_ALL
        err(line)
    err()
    colorama.deinit()
def main():
    # Top-level wrapper: run the application and translate known exception
    # types into red, user-facing error messages (full tracebacks go to the
    # debug log only).
    # Initialize color support
    colorama.init()
    try:
        run()
    except util.DeviceException as e:
        typ, val, tb = sys.exc_info()
        logging.debug("".join(traceback.format_exception(typ, val, tb)))
        print(colorama.Fore.RED + "There is a problem with your device" " configuration:")
        # NOTE(review): `e.message` is a Python 2 idiom; on Python 3 this
        # attribute does not exist on standard exceptions — confirm these
        # project exceptions define it.
        print(colorama.Fore.RED + e.message)
    except ConfigError as e:
        typ, val, tb = sys.exc_info()
        logging.debug("".join(traceback.format_exception(typ, val, tb)))
        print(colorama.Fore.RED + "There is a problem with your configuration file(s):")
        print(colorama.Fore.RED + e.message)
    except util.MissingDependencyException as e:
        typ, val, tb = sys.exc_info()
        logging.debug("".join(traceback.format_exception(typ, val, tb)))
        print(colorama.Fore.RED + "You are missing a dependency for one of your enabled plugins:")
        print(colorama.Fore.RED + e.message)
    except KeyboardInterrupt:
        # Ctrl-C: restore the terminal and exit non-zero without a traceback.
        colorama.deinit()
        sys.exit(1)
    except Exception as e:
        # Unknown failure: show the full traceback to the user.
        typ, val, tb = sys.exc_info()
        print(colorama.Fore.RED + "spreads encountered an error:")
        print(colorama.Fore.RED + "".join(traceback.format_exception(typ, val, tb)))
    # Deinitialize color support
    colorama.deinit()
def print_queue(sonos):
    """Print the Sonos queue, one numbered line per track, with the
    currently playing entry rendered in bold."""
    queue = sonos.get_queue()
    bold, plain = '\033[1m', '\033[0m'
    # colorama.init() takes over stdout/stderr to give cross-platform colors
    if colorama:
        colorama.init()
    now_playing = int(sonos.get_current_track_info()['playlist_position'])
    # Right-align the track numbers to the width of the largest one.
    width = len(str(len(queue)))
    for position, track in enumerate(queue, 1):
        style = bold if position == now_playing else plain
        label = str(position).rjust(width)
        print(
            "%s%s: %s - %s. From album %s." % (
                style, label, track['artist'], track['title'], track['album']
            )
        )
    # Release stdout/stderr from colorama
    if colorama:
        colorama.deinit()
def main(): """ Entry point for `spread` command-line application. """ # Initialize color support colorama.init() print_error = lambda x: print(util.colorize(x, colorama.Fore.RED), file=sys.stderr) try: run() except util.DeviceException as e: typ, val, tb = sys.exc_info() logging.debug("".join(traceback.format_exception(typ, val, tb))) print_error("There is a problem with your device configuration:") print_error(e.message) except ConfigError as e: typ, val, tb = sys.exc_info() logging.debug("".join(traceback.format_exception(typ, val, tb))) print_error("There is a problem with your configuration file(s):") print_error(e.message) except util.MissingDependencyException as e: typ, val, tb = sys.exc_info() logging.debug("".join(traceback.format_exception(typ, val, tb))) print_error("You are missing a dependency for one of your " "enabled plugins:") print_error(e.message) except KeyboardInterrupt: colorama.deinit() sys.exit(1) except Exception as e: typ, val, tb = sys.exc_info() print_error("spreads encountered an error:") print_error("".join(traceback.format_exception(typ, val, tb))) # Deinitialize color support colorama.deinit()
def printMessage(text, textColor, backgroundColor, textStyle):
    """Print *text* via termcolor's ``colored()``, honouring optional styling.

    Each of the three style parameters may be the string ``"default"``,
    meaning "do not pass that argument to ``colored()``".

    :param text: the message to print
    :param textColor: foreground colour name (lower-cased before use) or "default"
    :param backgroundColor: background colour name without the "on_" prefix,
        or "default"
    :param textStyle: a single termcolor attribute (e.g. "bold") or "default"
    """
    init()
    # Build the argument list instead of enumerating all eight
    # colour/background/style combinations in nested if/else blocks.
    # The positional order (colour first, then background) reproduces the
    # original calls exactly, including the case where only a background is
    # given and "on_<bg>" lands in the second positional slot.
    args = []
    kwargs = {}
    if textColor != "default":
        args.append(textColor.lower())
    if backgroundColor != "default":
        args.append("on_%s" % backgroundColor)
    if textStyle != "default":
        kwargs["attrs"] = [textStyle]
    print(colored(text, *args, **kwargs))
    deinit()
def main():
    """Run the route finder"""
    init()
    print("Eve Route finder")
    # Static universe data: jump gates as [from_id, to_id] pairs, and a
    # per-system description of [name, region, security_status].
    gates = [[int(rows[0]), int(rows[1])] for rows in dict_from_csvqfile("resources/database/system_jumps.csv")]
    system_desc = {
        int(rows[0]): [rows[1], rows[2], float(rows[3])]
        for rows in dict_from_csvqfile("resources/database/system_description.csv")
    }
    eve_db = EveDB(gates, system_desc)
    solar_map = eve_db.get_solar_map()
    # user_input: (source, destination, comma-separated systems to avoid).
    user_input = get_input(system_desc)
    avoid_sys = user_input[2].split(",")
    avoid_list = []
    for system_name in avoid_sys:
        avoid_list.append(EveDB.name2id(system_desc, str(system_name.strip())))
    counter = itertools.count()
    solar_map.build_list(counter)
    # Compute and print the same route under three risk preferences.
    print()
    route = solar_map.djikstra(user_input[0], user_input[1], avoid_list, SolarMap.PREFER_SHORTER, 50, counter)
    print_route(route, system_desc)
    print()
    route = solar_map.djikstra(user_input[0], user_input[1], avoid_list, SolarMap.PREFER_SAFER, 50, counter)
    print_route(route, system_desc)
    print()
    route = solar_map.djikstra(user_input[0], user_input[1], avoid_list, SolarMap.PREFER_DANGEROUS, 100, counter)
    print_route(route, system_desc)
    deinit()
def set(self, **kwargs):
    """Merge *kwargs* into the progress variables, refresh the status
    colour according to the 'done' flag, and redraw."""
    self.pv.update(kwargs)
    finished = self.pv['done']
    self.statusfore = self.done_status_color if finished else self.in_progress_status_color
    deinit()  # release colorama's stream wrapping before redrawing
    self.update()
def closing():
    """Tear down colour output, then wait for a keypress before exiting."""
    deinit()
    Pause()
def main():
    # Exercise every configured RDS server with colourised output.
    # Python 2 code (bare `print` statement); `srvr` is a module-level list.
    init(autoreset=True)
    print
    for s in srvr:
        testRDS(s)
    deinit()
def print_encoded(string, nl=True):
    """Print *string*, working around legacy Windows cp1252 consoles.

    On a cp1252 stdout the text is transcoded so UTF-8 byte sequences
    survive the console; otherwise colorama wraps stdout for the duration
    of the print.

    :param string: text to emit
    :param nl: append a trailing newline when True
    """
    terminator = "\n" if nl else ""
    if sys.stdout.encoding == "cp1252":
        payload = string.encode("UTF-8").decode("ISO-8859-1")
        print(payload, end=terminator, flush=True)
    else:
        colorama.init()
        print(string, end=terminator, flush=True)
        colorama.deinit()
def __exit__(self, exc_type: Optional[Type], exc_val: Optional[Any], exc_tb: Optional[Any], ) -> None:
    """Emit a style-reset sequence on stderr so no colour state leaks past
    the context, then detach colorama."""
    print(Style.RESET_ALL, end='', file=sys.stderr)
    sys.stderr.flush()
    colorama.deinit()
def printWARN(msg):
    """Print *msg* in red when colorama is installed; fall back to a plain
    print otherwise."""
    try:
        import colorama
        from colorama import Fore, Style
    except ImportError:
        # No colorama available: emit the message without colour.
        print(msg)
        return
    colorama.init()
    print(Fore.RED + msg + Style.RESET_ALL)
    colorama.deinit()
def fail(message):
    """Print *message* as an ERROR line and return the shell exit status 1."""
    print('%sERROR: %s' % (fail_color, message))
    if fail_color:
        # An ANSI colour was emitted; switch the terminal back to normal.
        print(Fore.RESET)
    deinit()
    return 1
def warning(message):
    """Build (not print) a WARNING line, appending a colour reset when a
    colour prefix is in effect. Returns the assembled string."""
    output = '%sWARNING: %s' % (warning_color, message)
    if warning_color:
        # Close the coloured span so later output is unaffected.
        output += Fore.RESET
    deinit()
    return output
def print_tips(msg):
    """Display *msg* on the console as a green tip.

    :param msg: text to show
    :return:
    """
    colorama.init()
    tinted = colored(msg, 'green')
    print(tinted)
    colorama.deinit()
def print_warning(msg):
    """Display *msg* on the console in red, prefixed with 'Warning: '.

    :param msg: text to show
    :return:
    """
    colorama.init()
    flagged = 'Warning: ' + msg
    print(colored(flagged, 'red'))
    colorama.deinit()
def test_colorama():
    """Test 'colorama' module as a reference (more features/different featurset). """
    _banner('colorama')
    import colorama
    colorama.init()
    # Python 2 print statement. The stray space in "Fore .WHITE" is legal
    # attribute access and is kept byte-identical here.
    print colorama.Fore.RED + "hello " + colorama.Fore .WHITE + colorama.Back.GREEN + "world" + colorama.Style.DIM + "!"
    colorama.deinit()
def main():
    # Smoke-test every configured MongoDB server.
    # Python 2 code (bare `print` statements); `srvr` is a module-level list.
    init(autoreset=True)
    print
    for s in srvr:
        print(" MongoDB server : %s" %s)
        basicTest(s)
        print
    deinit()
def main():
    # Smoke-test every configured PostgreSQL server.
    # Python 2 code (bare `print` statements); `srvr` is a module-level list.
    init(autoreset=True)
    print
    for s in srvr:
        print(" PostgreSQL server : %s" %s)
        basicTest(s)
        print
    deinit()
def undoColoredOutput():
    """Switch the module back to plain (uncoloured) output.

    Restores the builtin exception classes and rebinds every message
    formatter to its non-coloured (N-suffixed) variant.
    """
    global MyNameError, MyValueError, makeWarning, makeInfo, makeKeyMessage,makePrompt,makeHeader
    colorama.deinit()
    MyNameError, MyValueError = NameError, ValueError
    makeWarning, makeInfo = makeWarningN, makeInfoN
    makeKeyMessage, makePrompt, makeHeader = makeKeyMessageN, makePromptN, makeHeaderN
def main():
    """CLI entry point; with no arguments at all, behave as if the
    'status' subcommand had been typed."""
    colorama.init()
    parser = make_parser()
    supplied = sys.argv[1:]
    args = parser.parse_args(args=['status']) if not supplied else parser.parse_args()
    # Each subcommand registered its handler as `func` on the namespace.
    args.func(args)
    colorama.deinit()
def colorama_init():
    """Generator body for a context manager that activates colorama around
    its block.

    Init/deinit are skipped entirely when running inside PyCharm, whose
    console already renders ANSI escape sequences natively.
    """
    if not IS_RUNNING_IN_PYCHARM:
        colorama.init()
    try:
        yield
    finally:
        # The original had a bare `except: raise` before this clause; it was
        # a no-op (an uncaught exception propagates through try/finally
        # anyway), so it has been removed.
        if not IS_RUNNING_IN_PYCHARM:
            colorama.deinit()
def main():
    # Run the data-gathering and test suites against every ELS HTTP node.
    # Python 2 code (bare `print` statements); `clnts` is a module-level list.
    init(autoreset=True)
    print
    for c in clnts:
        print(" ELS HTTP Node: %s" %c)
        gatherData(c)
        basicTest(c)
        extendedTest(c)
        print
    deinit()
def deinit_colors():
    """
    De-initialize colorama, undoing a previous L{init_colors} call.

    This is a no-op unless colorama was actually inited through
    L{init_colors}. See the colorama docs for why de-initialization
    can matter.

    @see: http://pypi.python.org/pypi/colorama
    """
    global __colorama_inited
    if not __colorama_inited:
        return
    from colorama import deinit
    deinit()
    __colorama_inited = False
def __call__(self, poller):
    # Wait on an Azure long-running operation: poll until done while driving
    # the progress controller, surface the ARM correlation id, and (in
    # verbose mode) report template deployment progress every ~10 seconds.
    import colorama
    from msrest.exceptions import ClientException
    # https://github.com/azure/azure-cli/issues/3555
    colorama.init()
    correlation_message = ''
    self.cli_ctx.get_progress_controller().begin()
    correlation_id = None
    cli_logger = get_logger()  # get CLI logger which has the level set through command lines
    is_verbose = any(handler.level <= logs.INFO for handler in cli_logger.handlers)
    while not poller.done():
        self.cli_ctx.get_progress_controller().add(message='Running')
        try:
            # pylint: disable=protected-access
            # Best-effort peek into the raw response for the correlation id.
            correlation_id = json.loads(poller._response.__dict__['_content'].decode())['properties']['correlationId']
            correlation_message = 'Correlation ID: {}'.format(correlation_id)
        except:  # pylint: disable=bare-except
            pass
        current_time = datetime.datetime.now()
        # Throttle verbose template progress to one report per 10 seconds.
        if is_verbose and current_time - self.last_progress_report >= datetime.timedelta(seconds=10):
            self.last_progress_report = current_time
            try:
                self._generate_template_progress(correlation_id)
            except Exception as ex:  # pylint: disable=broad-except
                logger.warning('%s during progress reporting: %s', getattr(type(ex), '__name__', type(ex)), ex)
        try:
            self._delay()
        except KeyboardInterrupt:
            # User cancelled the wait; stop the spinner and propagate.
            self.cli_ctx.get_progress_controller().stop()
            logger.error('Long-running operation wait cancelled. %s', correlation_message)
            raise
    try:
        result = poller.result()
    except ClientException as client_exception:
        from azure.cli.core.commands.arm import handle_long_running_operation_exception
        self.cli_ctx.get_progress_controller().stop()
        # NOTE(review): if this handler ever returns normally, `result` is
        # unbound below — it appears to be expected to raise/exit; confirm.
        handle_long_running_operation_exception(client_exception)
    self.cli_ctx.get_progress_controller().end()
    colorama.deinit()
    return result
def run(self):
    # Parse CLI options, build the target domain list (from -d or -f), then
    # run zone-transfer, wildcard and subdomain brute-force checks per domain.
    parser = argparse.ArgumentParser(usage='dnsbruter.py [options]', add_help=False)
    group = parser.add_mutually_exclusive_group()
    #anonGroup = parser.add_mutually_exclusive_group()
    group.add_argument('-f', '--file', dest='file')
    group.add_argument('-d', '--domain', dest='domain', type=str, nargs='+')
    parser.add_argument("-w", "--wordlist", default='wordlists/subdomains.txt')
    #anonGroup.add_argument('--tor', help='using only TOR for connections', action='store_true')
    #anonGroup.add_argument('--privoxy', help='using only Privoxy for connections', action='store_true')
    #anonGroup.add_argument('--tp', help='using TOR and Privoxy for connections', action='store_true')
    parser.add_argument("-h", "--help", action="help")
    args = parser.parse_args()
    try:
        if args.domain:
            for dom in args.domain:
                self.__domain_list.append(Domain(dom))
        elif args.file:
            if not os.path.isfile(args.file):
                print(Fore.RED + "\n[x] File not found: " + args.file + "\n | Aborting..." + Fore.RESET)
                sys.exit(-2)
            else:
                with open(args.file, 'r') as f:
                    for line in f:
                        self.__domain_list.append(Domain(line.strip('\n')))
        for domain in self.__domain_list:
            print('\n\n' + Fore.CYAN + Style.BRIGHT + '[ Checking ' + domain.get_name() + ' ]' + '\n' + "-"* 73 + Fore.RESET + Style.RESET_ALL)
            # Try a DNS zone transfer first; brute-force only when it fails.
            zonetransfer = Zonetransfer()
            zonetransfer.run(domain)
            if not domain.get_zonetransfer():
                wildcard = Wildcard()
                wildcard.test(domain)
                # Lazily load the wordlist once, on first use.
                if not self.__wordlist:
                    with open(args.wordlist, 'r') as wordlist:
                        for line in wordlist:
                            self.__wordlist.append(line.strip('\n'))
                bruteforcer = Bruteforcer()
                bruteforcer.run(domain, self.__wordlist)
    except KeyboardInterrupt:
        print("\nReceived keyboard interrupt.\nPress ctrl+c again to quit...")
        sys.exit(-1)
    finally:
        # Always restore the terminal and report the finish time.
        deinit()
        now = datetime.datetime.now()
        print('\n\n' + __program__ + ' finished at ' + now.strftime("%Y-%m-%d %H:%M:%S") + '\n')
def main(argv=None):
    # capa CLI entry point: parse arguments, load the rule set, pick a
    # feature extractor for the sample, find capabilities, and render them.
    # Returns a process exit code (0 on success, -1 on any handled failure).
    if sys.version_info < (3, 6):
        raise UnsupportedRuntimeError("This version of capa can only be used with Python 3.6+")
    if argv is None:
        argv = sys.argv[1:]
    desc = "The FLARE team's open-source tool to identify capabilities in executable files."
    # NOTE(review): the epilog's original line layout was lost when this file
    # was collapsed; the indentation below is a reconstruction.
    epilog = textwrap.dedent("""
        By default, capa uses a default set of embedded rules.
        You can see the rule set here:
          https://github.com/fireeye/capa-rules

        To provide your own rule set, use the `-r` flag:
          capa --rules /path/to/rules suspicious.exe
          capa -r /path/to/rules suspicious.exe

        examples:
          identify capabilities in a binary
            capa suspicious.exe

          identify capabilities in 32-bit shellcode, see `-f` for all supported formats
            capa -f sc32 shellcode.bin

          report match locations
            capa -v suspicious.exe

          report all feature match details
            capa -vv suspicious.exe

          filter rules by meta fields, e.g. rule name or namespace
            capa -t "create TCP socket" suspicious.exe
    """)
    parser = argparse.ArgumentParser(description=desc, epilog=epilog, formatter_class=argparse.RawDescriptionHelpFormatter)
    install_common_args(parser, {"sample", "format", "backend", "signatures", "rules", "tag"})
    parser.add_argument("-j", "--json", action="store_true", help="emit JSON instead of text")
    args = parser.parse_args(args=argv)
    handle_common_args(args)
    try:
        taste = get_file_taste(args.sample)
    except IOError as e:
        # per our research there's not a programmatic way to render the IOError with non-ASCII filename unless we
        # handle the IOError separately and reach into the args
        logger.error("%s", e.args[0])
        return -1
    try:
        rules = get_rules(args.rules, disable_progress=args.quiet)
        rules = capa.rules.RuleSet(rules)
        logger.debug(
            "successfully loaded %s rules",
            # during the load of the RuleSet, we extract subscope statements into their own rules
            # that are subsequently `match`ed upon. this inflates the total rule count.
            # so, filter out the subscope rules when reporting total number of loaded rules.
            len([i for i in filter(lambda r: "capa/subscope-rule" not in r.meta, rules.rules.values())]),
        )
        if args.tag:
            rules = rules.filter_rules_by_meta(args.tag)
            logger.debug("selected %d rules", len(rules))
            for i, r in enumerate(rules.rules, 1):
                # TODO don't display subscope rules?
                logger.debug(" %d. %s", i, r)
    except (IOError, capa.rules.InvalidRule, capa.rules.InvalidRuleSet) as e:
        logger.error("%s", str(e))
        return -1
    if args.format == "pe" or (args.format == "auto" and taste.startswith(b"MZ")):
        # this pefile file feature extractor is pretty light weight: it doesn't do any code analysis.
        # so we can fairly quickly determine if the given PE file has "pure" file-scope rules
        # that indicate a limitation (like "file is packed based on section names")
        # and avoid doing a full code analysis on difficult/impossible binaries.
        try:
            from pefile import PEFormatError
            file_extractor = capa.features.extractors.pefile.PefileFeatureExtractor(args.sample)
        except PEFormatError as e:
            logger.error("Input file '%s' is not a valid PE file: %s", args.sample, str(e))
            return -1
        pure_file_capabilities, _ = find_file_capabilities(rules, file_extractor, {})
        # file limitations that rely on non-file scope won't be detected here.
        # nor on FunctionName features, because pefile doesn't support this.
        if has_file_limitation(rules, pure_file_capabilities):
            # bail if capa encountered file limitation e.g. a packed binary
            # do show the output in verbose mode, though.
            if not (args.verbose or args.vverbose or args.json):
                logger.debug("file limitation short circuit, won't analyze fully.")
                return -1
    try:
        sig_paths = get_signatures(args.signatures)
    except (IOError) as e:
        logger.error("%s", str(e))
        return -1
    # Choose the feature extractor: a frozen feature file loads directly,
    # anything else goes through the backend-specific extractor.
    if (args.format == "freeze") or (args.format == "auto" and capa.features.freeze.is_freeze(taste)):
        format = "freeze"
        with open(args.sample, "rb") as f:
            extractor = capa.features.freeze.load(f.read())
    else:
        format = args.format
        should_save_workspace = os.environ.get("CAPA_SAVE_WORKSPACE") not in ("0", "no", "NO", "n", None)
        try:
            extractor = get_extractor(args.sample, format, args.backend, sig_paths, should_save_workspace, disable_progress=args.quiet)
        except UnsupportedFormatError:
            logger.error("-" * 80)
            logger.error(" Input file does not appear to be a PE file.")
            logger.error(" ")
            logger.error(" capa currently only supports analyzing PE files (or shellcode, when using --format sc32|sc64).")
            logger.error(" If you don't know the input file type, you can try using the `file` utility to guess it.")
            logger.error("-" * 80)
            return -1
    meta = collect_metadata(argv, args.sample, args.rules, format, extractor)
    capabilities, counts = find_capabilities(rules, extractor, disable_progress=args.quiet)
    meta["analysis"].update(counts)
    if has_file_limitation(rules, capabilities):
        # bail if capa encountered file limitation e.g. a packed binary
        # do show the output in verbose mode, though.
        if not (args.verbose or args.vverbose or args.json):
            return -1
    # Render in the most specific requested format.
    if args.json:
        print(capa.render.json.render(meta, rules, capabilities))
    elif args.vverbose:
        print(capa.render.vverbose.render(meta, rules, capabilities))
    elif args.verbose:
        print(capa.render.verbose.render(meta, rules, capabilities))
    else:
        print(capa.render.default.render(meta, rules, capabilities))
    colorama.deinit()
    logger.debug("done.")
    return 0
def run(self):
    # Scan every configured domain for Typo3: detect the install, core
    # version and login page, then brute-force known extensions.
    # Pick the User-Agent: CLI override, else a random one from the DB.
    if (args.user_agent):
        user_agent = args.user_agent
    else:
        database = os.path.join(self.__path, 'lib', 'typo3scan.db')
        conn = sqlite3.connect(database)
        c = conn.cursor()
        c.execute('SELECT * FROM UserAgents ORDER BY RANDOM() LIMIT 1;')
        user_agent = c.fetchone()[0]
        c.close()
    # Persist per-run settings for the worker modules to read back.
    config = {'threads': args.threads, 'timeout': args.timeout, 'cookie': args.cookie, 'auth': args.auth, 'User-Agent': user_agent}
    json.dump(config, open(os.path.join(self.__path, 'lib', 'config.json'), 'w'))
    try:
        # Build the target list from -d values or a file of domains.
        if args.domain:
            for dom in args.domain:
                self.__domain_list.append(dom)
        elif args.file:
            if not os.path.isfile(args.file):
                print(Fore.RED + '\n[x] File not found: {}\n | Aborting...'.format(args.file) + Fore.RESET)
                sys.exit(-1)
            else:
                with open(args.file, 'r') as f:
                    for line in f:
                        self.__domain_list.append(line.strip())
        for domain in self.__domain_list:
            print(Fore.CYAN + Style.BRIGHT + '\n\n[ Checking {} ]\n'.format(domain) + '-' * 73 + Fore.RESET + Style.RESET_ALL)
            check = Domain(domain)
            check.check_root()
            default_files = check.check_default_files()
            if not default_files:
                check_404 = check.check_404()
                if not check.is_typo3():
                    print(Fore.RED + '\n[x] It seems that Typo3 is not used on this domain\n' + Fore.RESET)
                else:
                    # check for typo3 information
                    print('\n [+] Core Information')
                    print(' --------------------')
                    check.search_login()
                    check.search_typo3_version()
                    # Search extensions
                    print('\n [+] Extension Search')
                    # Lazily load the extension key list (vulnerable-only
                    # with --vuln, otherwise all known extensions).
                    if not self.__extensions:
                        database = os.path.join(self.__path, 'lib', 'typo3scan.db')
                        conn = sqlite3.connect(database)
                        c = conn.cursor()
                        if args.vuln:
                            for row in c.execute('SELECT extensionkey FROM extension_vulns'):
                                self.__extensions.append(row[0])
                            self.__extensions = set(self.__extensions)
                        else:
                            for row in c.execute('SELECT extensionkey FROM extensions'):
                                self.__extensions.append(row[0])
                        conn.close()
                    print(' \u251c Brute-Forcing {} Extensions'.format(len(self.__extensions)))
                    extensions = Extensions()
                    ext_list = extensions.search_extension(check.get_path(), self.__extensions, args.threads)
                    if ext_list:
                        print('\n \u251c Found {} extensions'.format(len(ext_list)))
                        print(' \u251c Brute-Forcing Version Information'.format(len(self.__extensions)))
                        ext_list = extensions.search_ext_version(ext_list, args.threads)
                        json_ext = extensions.output(ext_list, database)
                    else:
                        print('\n [!] No extensions found.\n')
                    if args.json:
                        # NOTE(review): `json_ext` (and `database` with a CLI
                        # user-agent) may be unbound here when no extensions
                        # were found — confirm against upstream.
                        json_log = {}
                        json_log[check.get_name()] = {'Backend': check.get_backend(), 'Version': check.get_typo3_version(), 'Vulnerabilities': check.get_typo3_vulns(), 'Extensions': json_ext}
                        json.dump(json_log, open('typo3scan.json', 'w'))
    except KeyboardInterrupt:
        print('\nReceived keyboard interrupt.\nQuitting...')
        exit(-1)
    finally:
        deinit()
def __exit__(self, *exc_detail):
    # NOTE(review): this bare attribute access is a no-op — it evaluates
    # Style.RESET_ALL and discards the value. To actually reset terminal
    # styling it would need to be written to a stream
    # (e.g. print(Style.RESET_ALL, end='')). Confirm intent before fixing.
    Style.RESET_ALL
    # Detach colorama; exceptions are not suppressed (implicit None return).
    deinit()
def __exit__(self, exc_type, exc_val, exc_tb): #@UnusedVariable
    """Detach colorama when the context ends; never suppress exceptions."""
    deinit()
    # A falsy return tells the runtime to re-raise any active exception.
    return False
def __exit__(self, exc_type, exc_val, exc_tb):
    # Undo colorama's stdout/stderr wrapping when the context ends.
    # Exceptions are not suppressed (implicit None return).
    colorama.deinit()
time.sleep(0.1 / self.text_speed) sys.stdout.write('\n') sys.stdout.flush() GAME = Game() try: # Entry point GAME.typeit('Welcome to CLI BAG') GAME.wait(3) PLAYER_NAME = GAME.ask('Name yourself: ', Fore.WHITE) # Decision Tree demo GAME.typeit('Welcome to the game, ' + PLAYER_NAME) GAME.wait(3) # Ask the first question RESULT = GAME.ask_question(GAME.get_question('0')) # Start the game loop while RESULT != 'null': RESULT = GAME.ask_question(GAME.get_question(int(RESULT) - 1)) except (KeyboardInterrupt): GAME.clear() GAME.typeit('Thanks for playing!', 5) deinit() pass
def parse(self, args, initial_invocation_data=None, out_file=None):
    """ Invoke a command.

    :param args: The arguments that represent the command
    :type args: list, tuple
    :param initial_invocation_data: Prime the in memory collection of key-value data for this invocation.
    :type initial_invocation_data: dict
    :param out_file: The file to send output to. If not used, we use out_file for knack.cli.CLI instance
    :type out_file: file-like object
    :return: The exit code of the invocation
    :rtype: int
    """
    from knack.util import CommandResultItem
    if not isinstance(args, (list, tuple)):
        raise TypeError('args should be a list or tuple.')
    exit_code = 0
    try:
        if self.enable_color:
            import colorama
            colorama.init()
            if self.out_file == sys.__stdout__:
                # point out_file to the new sys.stdout which is overwritten by colorama
                self.out_file = sys.stdout
        # Shell completion may substitute its own argument list.
        args = self.completion.get_completion_args() or args
        out_file = out_file or self.out_file
        self.logging.configure(args)
        logger.debug('Command arguments: %s', args)
        self.raise_event(EVENT_CLI_PRE_EXECUTE)
        if CLI._should_show_version(args):
            self.show_version()
            self.result = CommandResultItem(None)
        else:
            # Full invocation: build the command invoker, execute, and
            # format the result for the selected output type.
            self.invocation = self.invocation_cls(
                cli_ctx=self,
                parser_cls=self.parser_cls,
                commands_loader_cls=self.commands_loader_cls,
                help_cls=self.help_cls,
                initial_data=initial_invocation_data)
            cmd_result = self.invocation.execute(args)
            self.result = cmd_result
            exit_code = self.result.exit_code
            output_type = self.invocation.data['output']
            if cmd_result and cmd_result.result is not None:
                formatter = self.output.get_formatter(output_type)
                self.output.out(cmd_result, formatter=formatter, out_file=out_file)
            # print(self.invocation.expanded_arg, self.invocation.cmd_copy)
    except KeyboardInterrupt as ex:
        exit_code = 1
        self.result = CommandResultItem(None, error=ex, exit_code=exit_code)
    except Exception as ex:  # pylint: disable=broad-except
        exit_code = self.exception_handler(ex)
        self.result = CommandResultItem(None, error=ex, exit_code=exit_code)
    except SystemExit as ex:
        # Reachable despite following `except Exception`: SystemExit derives
        # from BaseException, not Exception. Record the code, then re-raise.
        exit_code = ex.code
        self.result = CommandResultItem(None, error=ex, exit_code=exit_code)
        raise ex
    finally:
        self.raise_event(EVENT_CLI_POST_EXECUTE)
        if self.enable_color:
            colorama.deinit()
    return exit_code
def tearDown(self):
    # Undo colorama's stream wrapping after each test case.
    colorama.deinit()
def __del__(self) -> None:
    # Best-effort cleanup: detach colorama when the object is collected.
    # NOTE(review): __del__ runs at unpredictable times (or not at all at
    # interpreter shutdown); relying on it for deinit is fragile.
    deinit()
def _combine_image(image_paths: List[str], out_dir: str, filename: str, fps: float, extension: str = "gif", reverse: bool = False, transparent: bool = True, flip_horizontal: bool = False, flip_vertical: bool = False):
    """Combine still images into a single animated GIF or APNG.

    :param image_paths: candidate frame paths; missing files and
        non-static-image extensions are dropped
    :param out_dir: output directory (must already exist)
    :param filename: output base name; a supplied extension is stripped
    :param fps: playback speed, converted to a per-frame duration in ms
    :param extension: 'gif' or 'apng'
    :param reverse: reverse the frame order
    :param transparent: GIF only — run transparency handling (disposal=2)
    :param flip_horizontal: mirror each frame left/right (GIF only)
    :param flip_vertical: mirror each frame top/bottom (GIF only)
    :return: absolute path of the file that was written
    """
    abs_image_paths = [os.path.abspath(ip) for ip in image_paths if os.path.exists(ip)]
    img_paths = [f for f in abs_image_paths if str.lower(os.path.splitext(f)[1][1:]) in STATIC_IMG_EXTS]
    # workpath = os.path.dirname(img_paths[0])
    init()
    # Test if inputted filename has extension, then remove it from the filename
    fname, ext = os.path.splitext(filename)
    if ext:
        filename = fname
    if not out_dir:
        raise Exception("No output folder selected, please select it first")
    out_dir = os.path.abspath(out_dir)
    if not os.path.exists(out_dir):
        raise Exception("The specified absolute out_dir does not exist!")
    duration = round(1000 / fps)
    if reverse:
        img_paths.reverse()
    if extension == 'gif':
        # NOTE(review): the f-strings below contain the literal text
        # "(unknown)" with no placeholders — the original interpolation
        # (presumably of `filename`) appears to have been lost when this
        # file was mangled; recover it from version control.
        out_full_path = os.path.join(out_dir, f"(unknown).gif")
        frames = [Image.open(i) for i in img_paths]
        if flip_horizontal:
            for index, frame in enumerate(frames):
                frames[index] = frame.transpose(Image.FLIP_LEFT_RIGHT)
        if flip_vertical:
            for index, frame in enumerate(frames):
                frames[index] = frame.transpose(Image.FLIP_TOP_BOTTOM)
        # if scale != 1.0:
        #     frames = [f.resize((round(f.width * scale), round(f.height * scale))) for f in frames]
        # pprint(frames[0].filename)
        filename = f"(unknown).gif"
        disposal = 0
        frames = gify_images(frames, transparent=transparent)
        if transparent:
            # disposal=2 restores the background between frames so
            # transparent regions don't accumulate.
            disposal = 2
        frames[0].save(out_full_path, optimize=False, save_all=True, append_images=frames[1:], duration=duration, loop=0, disposal=disposal)
    elif extension == 'apng':
        out_full_path = os.path.join(out_dir, f"(unknown).png")
        APNG.from_files(img_paths, delay=duration).save(out_full_path)
    deinit()
    return out_full_path
def _split_image(image_path: str, out_dir: str):
    """Split an animated GIF or APNG into per-frame PNG files.

    :param image_path: path to the animated image
    :param out_dir: directory to write zero-padded `<name>_NNN.png` frames to
        (created if missing)
    :return: True on success; None when the extension is not an animated type
    :raises Exception: on a directory path, a missing extension, or an
        invalid/non-animated GIF
    """
    abspath = os.path.abspath(image_path)
    init()
    if not os.path.isfile(image_path):
        raise Exception("Oi skrubman the path here seems to be a bloody directory, should've been a file")
    filename = str(os.path.basename(abspath))
    workpath = os.path.dirname(abspath)
    # Operate from the image's own directory so the bare-filename opens
    # below resolve correctly.
    if os.getcwd() != workpath:
        os.chdir(workpath)
    # Custom output dirname and frame names if specified on the cli
    if '.' not in filename:
        raise Exception('Where the fuk is the extension mate?!')
    fname, ext = os.path.splitext(filename)
    ext = str.lower(ext[1:])
    # raise Exception(fname, ext)
    if ext not in ANIMATED_IMG_EXTS:
        return
        # raise ClickException('Only supported extensions are gif and apng. Sry lad')
    # Create directory to contain all the frames if does not exist
    if not os.path.exists(out_dir):
        os.mkdir(out_dir)
        print(f"Creating directory {out_dir}...")
    else:
        print(f"Directory {out_dir} already exists, replacing the PNGs inside it...")
    # Image processing
    if ext == 'gif':
        try:
            gif: Image = Image.open(filename)
        except Exception:
            raise Exception(filename, "M8 I don't even think this file is even an image file in the first place")
        if gif.format != 'GIF' or not gif.is_animated:
            raise Exception(filename, "Sorry m9, the image you specified is not a valid animated GIF")
        # click.secho(f"(unknown) ({gif.n_frames} frames). Splitting GIF...", fg='cyan')
        # Zero-pad frame numbers to at least three digits.
        pad_count = max(len(str(gif.n_frames)), 3)
        frame_nums = list(range(0, gif.n_frames))
        # with click.progressbar(frame_nums, empty_char=" ", fill_char="█", show_percent=True, show_pos=True) as frames:
        for f in frame_nums:
            gif.seek(f)
            gif.save(os.path.join(out_dir, f"{fname}_{str.zfill(str(f), pad_count)}.png"), 'PNG')
    elif ext == 'png':
        img: APNG = APNG.open(filename)
        iframes = img.frames
        pad_count = max(len(str(len(iframes))), 3)
        # print('frames', [(png, control.__dict__) for (png, control) in img.frames][0])
        # with click.progressbar(iframes, empty_char=" ", fill_char="█", show_percent=True, show_pos=True) as frames:
        for i, (png, control) in enumerate(iframes):
            png.save(os.path.join(out_dir, f"{fname}_{str.zfill(str(i), pad_count)}.png"))
    deinit()
    return True
def _init_colorama(force):
    """(Re)initialise colorama.

    With force=True, any previous wrapping is dropped first and ANSI codes
    are kept (strip=False) so colours survive redirected output.
    """
    if not force:
        colorama.init()
        return
    colorama.deinit()
    colorama.init(strip=False)
def run_siscom(t1, interictal, ictal, out, siscom_threshold, mask_threshold,
               mripanel, mripanel_type, mripanel_orientation,
               mripanel_thickness, mripanel_transparency, mripanel_t1window,
               mripanel_spectwindow, mripanel_siscomwindow, glassbrain,
               skipcoreg):
    """
    Command line tool for computing subtraction ictal SPECT coregistered to MRI (SISCOM).\n
    For research use only!\n\n

    Author: Jeremy Moreau ([email protected])\n
    Version: 0.4.0 (2020-01-02)
    """
    init()  # start colorama

    ## Prompt for settings if not yet saved
    settings = load_settings()

    # Get user agreement (idiom fix: `x == None or x == ''` -> `x in (None, '')`)
    if settings.get('agreed_to_license') in (None, ''):
        # prompt for acceptance
        accept = click.confirm(license_text + '\n I accept the agreement')
        if accept:
            settings['agreed_to_license'] = 'yes'
        else:
            print('You must agree to the license in order to use MNI SISCOM.')
            sys.exit(0)

    # Get SPM12 path
    if settings.get('spm12_path') in (None, ''):
        if platform.system() == 'Windows':
            spm12_path = click.prompt(
                '\nEnter the SPM12 standalone installation path (e.g. C:\\path\\to\\spm12_win64.exe)',
                type=click.Path(exists=True))
        else:
            spm12_path = click.prompt(
                '\nEnter the SPM12 standalone installation path (e.g. /path/to/run_spm12.sh)',
                type=click.Path(exists=True))
        settings['spm12_path'] = spm12_path
    else:
        spm12_path = settings['spm12_path']

    # Get MCR path (on Mac/Linux); the standalone SPM build needs the MATLAB runtime
    if platform.system() != 'Windows':
        if settings.get('mcr_path') in (None, ''):
            mcr_path_message = '\nEnter the MATLAB Compiler Runtime installation path. The selected folder name should ' \
                               '\nstart with "v" (e.g. /path/to/v95) and contain a subfolder called "mcr"'
            mcr_path = click.prompt(mcr_path_message,
                                    type=click.Path(exists=True))
            settings['mcr_path'] = mcr_path
        else:
            mcr_path = settings['mcr_path']
    else:
        mcr_path = ''

    # Save settings
    save_settings(settings)

    # Create output directory
    siscom_dir = siscom.create_output_dir(out)

    # Copy original T1, interictal, and ictal volumes to siscom_dir,
    # preserving all suffixes (.nii vs .nii.gz).
    t1_nii = shutil.copy(t1, join(siscom_dir,
                                  'T1' + ''.join(Path(t1).suffixes)))
    interictal_nii = shutil.copy(
        interictal,
        join(siscom_dir, 'interictal' + ''.join(Path(interictal).suffixes)))
    ictal_nii = shutil.copy(
        ictal, join(siscom_dir, 'ictal' + ''.join(Path(ictal).suffixes)))

    if not skipcoreg:
        # Coregister i/ii to t1, then coregister ri to rii (for better alignment)
        print(
            Fore.GREEN +
            'Coregistering interictal/ictal SPECT images to T1 with SPM (~1-5 minutes)...'
        )
        print(Style.RESET_ALL)
        siscom.spm_coregister(t1_nii, [interictal_nii, ictal_nii], spm12_path,
                              mcr_path)
        rinterictal_nii = join(siscom_dir, 'rinterictal.nii')
        rictal_nii = join(siscom_dir, 'rictal.nii')
        siscom.spm_coregister(rinterictal_nii, [rictal_nii], spm12_path,
                              mcr_path)
        rrictal_nii = join(siscom_dir, 'rrictal.nii')
    else:
        # Caller asserts the inputs are already aligned; use them as-is.
        rinterictal_nii = interictal_nii
        rrictal_nii = ictal_nii
        t1_nii = t1

    # Run SISCOM
    print(Fore.GREEN + 'Computing SISCOM images (~5-30s)...')
    print(Style.RESET_ALL)
    siscom.compute_siscom(rinterictal_nii,
                          rrictal_nii,
                          siscom_dir,
                          threshold=siscom_threshold,
                          mask_cutoff=mask_threshold)

    # Get paths of result nii files
    interictal_z = join(siscom_dir, 'interictal_z.nii.gz')
    ictal_z = join(siscom_dir, 'ictal_z.nii.gz')
    siscom_z = join(siscom_dir, 'siscom_z.nii.gz')
    mask = join(siscom_dir, 'interictal_mask.nii.gz')

    # Make MRI panels
    if mripanel:
        print(Fore.GREEN + 'Plotting MRI panel results (~30s-1 minute)...')
        print(Style.RESET_ALL)
        # Create list of slice orientations if 'all' option is selected
        if mripanel_orientation == 'all':
            panel_slices = ['ax', 'cor', 'sag']
        else:
            panel_slices = [mripanel_orientation]
        for panel_slice in panel_slices:
            siscom.make_mri_panel(t1_nii,
                                  interictal_z,
                                  ictal_z,
                                  siscom_z,
                                  mask,
                                  siscom_dir,
                                  slice_orientation=panel_slice,
                                  slice_thickness=mripanel_thickness,
                                  alpha=mripanel_transparency,
                                  panel_type=mripanel_type,
                                  t1_window=mripanel_t1window,
                                  spect_window=mripanel_spectwindow,
                                  siscom_window=mripanel_siscomwindow)

    # Make glass brain
    if glassbrain:
        print(Fore.GREEN + 'Plotting glass brain results (~30s-2 minutes)...')
        print(Style.RESET_ALL)
        siscom.make_glass_brain(t1_nii, siscom_z, siscom_dir, spm12_path,
                                mcr_path)

    # Clean output dir
    print(Fore.GREEN + 'Cleaning up result files... (~30s)')
    print(Style.RESET_ALL)
    siscom.clean_output_dir(siscom_dir)

    print(Fore.GREEN + 'Done!')
    print(Style.RESET_ALL)
    deinit()  # stop colorama
def main(arguments):
    """Configure, build, and test every example of every recipe matching a glob.

    ``arguments`` is a docopt-style mapping; keys read here are ``'<regex>'``
    (recipe glob pattern relative to the top directory) and ``'--verbose'``.
    Accumulates the return codes of all steps and exits the process with the sum.
    """
    topdir = get_topdir()
    buildflags = get_buildflags()
    generator = get_generator()

    # glob recipes (idiom fix: dropped the redundant `[r for r in sorted(...)]`)
    recipes = sorted(glob.glob(os.path.join(topdir, arguments['<regex>'])))

    ci_environment = get_ci_environment()

    # Set NINJA_STATUS environment variable
    os.environ['NINJA_STATUS'] = '[Built edge %f of %t in %e sec]'

    # extract global menu
    menu_file = os.path.join(topdir, '.scripts', 'menu.yml')
    expect_failure_global, env_global, definitions_global = extract_menu_file(
        menu_file, generator, ci_environment)

    colorama.init(autoreset=True)

    # Named predicates instead of re-created lambdas (E731) inside the loop.
    def _never_skip(stdout, stderr):
        """Default skip predicate: never skip a step."""
        return False

    def _skip_without_tests(stdout, stderr):
        """Skip the test step when the project defines no tests."""
        return 'No test configuration file found!' in stderr

    return_code = 0
    for recipe in recipes:
        # extract title from README.md (prints every level-1 heading found)
        with open(os.path.join(recipe, 'README.md'), 'r') as f:
            for line in f.read().splitlines():
                if line[0:2] == '# ':
                    print(colorama.Back.BLUE +
                          '\nrecipe: {0}'.format(line[2:]))

        # Glob examples
        examples = sorted(glob.glob(os.path.join(recipe, '*example')))

        # TODO we need to get rid of this
        # Remove Fortran examples if generator is Visual Studio
        # (materialize to a list; the py3 filter iterator was single-shot)
        if generator == 'Visual Studio 14 2015':
            examples = [x for x in examples if 'fortran' not in x]

        for example in examples:
            sys.stdout.write('\n {}\n'.format(example))

            # extract local menu
            menu_file = os.path.join(recipe, example, 'menu.yml')
            expect_failure_local, env_local, definitions_local = extract_menu_file(
                menu_file, generator, ci_environment)

            expect_failure = expect_failure_global or expect_failure_local

            # local env vars override global ones
            env = env_global.copy()
            env.update(env_local)

            # local definitions override global ones
            definitions = definitions_global.copy()
            definitions.update(definitions_local)

            env_string = ' '.join('{0}={1}'.format(entry, env[entry])
                                  for entry in env)
            definitions_string = ' '.join(
                '-D{0}={1}'.format(entry, definitions[entry])
                for entry in definitions)

            # we append a time stamp to the build directory
            # to avoid it being re-used when running tests multiple times
            # when debugging on a laptop
            time_stamp = datetime.datetime.fromtimestamp(
                time.time()).strftime('%Y-%m-%d-%H-%M-%S')
            build_directory = os.path.join(recipe, example,
                                           'build-{0}'.format(time_stamp))

            cmakelists_path = os.path.join(recipe, example)

            # configure step
            command = '{0} cmake -H{1} -B{2} -G"{3}" {4}'.format(
                env_string, cmakelists_path, build_directory, generator,
                definitions_string)
            return_code += run_command(step='configuring',
                                       command=command,
                                       expect_failure=expect_failure,
                                       skip_predicate=_never_skip,
                                       verbose=arguments['--verbose'])

            os.chdir(build_directory)

            # build step
            command = 'cmake --build . -- {0}'.format(buildflags)
            return_code += run_command(step='building',
                                       command=command,
                                       expect_failure=expect_failure,
                                       skip_predicate=_never_skip,
                                       verbose=arguments['--verbose'])

            # test step
            return_code += run_command(step='testing',
                                       command='ctest',
                                       expect_failure=expect_failure,
                                       skip_predicate=_skip_without_tests,
                                       verbose=arguments['--verbose'])

            os.chdir(topdir)

    colorama.deinit()
    sys.exit(return_code)
def main(argv=None):
    """capa CLI entry point: load rules, extract features from the sample, and
    render detected capabilities grouped by function.

    :param argv: argument list (defaults to ``sys.argv[1:]``).
    :return: 0 on success, -1 on any error.
    """
    if argv is None:
        argv = sys.argv[1:]

    parser = argparse.ArgumentParser(
        description="detect capabilities in programs.")
    capa.main.install_common_args(
        parser,
        wanted={"format", "backend", "sample", "signatures", "rules", "tag"})
    args = parser.parse_args(args=argv)
    capa.main.handle_common_args(args)

    try:
        taste = get_file_taste(args.sample)
    except IOError as e:
        logger.error("%s", str(e))
        return -1

    try:
        rules = capa.main.get_rules(args.rules)
        rules = capa.rules.RuleSet(rules)
        logger.info("successfully loaded %s rules", len(rules))
        if args.tag:
            rules = rules.filter_rules_by_meta(args.tag)
            logger.info("selected %s rules", len(rules))
    except (IOError, capa.rules.InvalidRule, capa.rules.InvalidRuleSet) as e:
        logger.error("%s", str(e))
        return -1

    try:
        # idiom fix: `except (IOError)` -> `except IOError`
        sig_paths = capa.main.get_signatures(args.signatures)
    except IOError as e:
        logger.error("%s", str(e))
        return -1

    if (args.format == "freeze") or (args.format == "auto" and
                                     capa.features.freeze.is_freeze(taste)):
        # Previously-frozen feature set: load it directly.
        # (Removed the dead local `format` that shadowed the builtin and was
        # never read afterwards.)
        with open(args.sample, "rb") as f:
            extractor = capa.features.freeze.load(f.read())
    else:
        should_save_workspace = os.environ.get("CAPA_SAVE_WORKSPACE") not in (
            "0", "no", "NO", "n", None)
        try:
            extractor = capa.main.get_extractor(args.sample, args.format,
                                                args.backend, sig_paths,
                                                should_save_workspace)
        except capa.main.UnsupportedFormatError:
            logger.error("-" * 80)
            logger.error(" Input file does not appear to be a PE file.")
            logger.error(" ")
            logger.error(
                " capa currently only supports analyzing PE files (or shellcode, when using --format sc32|sc64)."
            )
            logger.error(
                " If you don't know the input file type, you can try using the `file` utility to guess it."
            )
            logger.error("-" * 80)
            return -1
        except capa.main.UnsupportedRuntimeError:
            logger.error("-" * 80)
            logger.error(" Unsupported runtime or Python interpreter.")
            logger.error(" ")
            logger.error(
                " capa supports running under Python 2.7 using Vivisect for binary analysis."
            )
            logger.error(
                " It can also run within IDA Pro, using either Python 2.7 or 3.5+."
            )
            logger.error(" ")
            logger.error(
                " If you're seeing this message on the command line, please ensure you're running Python 2.7."
            )
            logger.error("-" * 80)
            return -1

    meta = capa.main.collect_metadata(argv, args.sample, args.rules, extractor)
    capabilities, counts = capa.main.find_capabilities(rules, extractor)
    meta["analysis"].update(counts)
    meta["analysis"]["layout"] = capa.main.compute_layout(
        rules, extractor, capabilities)

    if capa.main.has_file_limitation(rules, capabilities):
        # bail if capa encountered file limitation e.g. a packed binary
        # do show the output in verbose mode, though.
        # NOTE(review): args.verbose/vverbose/json are read here but are not in
        # the `wanted` set passed to install_common_args above -- confirm they
        # are always installed, otherwise this raises AttributeError.
        if not (args.verbose or args.vverbose or args.json):
            return -1

    # colorama will detect:
    #  - when on Windows console, and fixup coloring, and
    #  - when not an interactive session, and disable coloring
    # renderers should use coloring and assume it will be stripped out if necessary.
    colorama.init()
    doc = capa.render.result_document.convert_capabilities_to_result_document(
        meta, rules, capabilities)
    print(render_matches_by_function(doc))
    colorama.deinit()

    return 0
def main():
    # Slack Watchman (v1) CLI entry point: parse flags, pick the search
    # timeframe, validate config, then run the selected audit scans.
    # All exceptions are caught at the bottom and printed in red.
    try:
        init()  # enable colorama ANSI handling
        parser = argparse.ArgumentParser(
            description='Slack Watchman: Monitoring you Slack workspaces'
            ' for sensitive information')
        parser.add_argument(
            '--timeframe',
            choices=['d', 'w', 'm', 'a'],
            dest='time',
            help=
            'How far back to search: d = 24 hours w = 7 days, m = 30 days, a = all time',
            required=True)
        parser.add_argument('--version',
                            action='version',
                            version='slack-watchman {}'.format(a.__version__))
        parser.add_argument('--all',
                            dest='everything',
                            action='store_true',
                            help='Find everything')
        parser.add_argument('-U',
                            '--users',
                            dest='users',
                            action='store_true',
                            help='Find all users, including admins')
        parser.add_argument(
            '-C',
            '--channels',
            dest='channels',
            action='store_true',
            help='Find all channels, including external shared channels')
        parser.add_argument('-a',
                            dest='aws',
                            action='store_true',
                            help='Look for AWS keys')
        parser.add_argument('-g',
                            dest='gcp',
                            action='store_true',
                            help='Look for GCP keys')
        parser.add_argument('-s',
                            dest='slack',
                            action='store_true',
                            help='Look for Slack tokens')
        parser.add_argument('-p',
                            dest='priv',
                            action='store_true',
                            help='Look for private keys')
        parser.add_argument('-c',
                            dest='card',
                            action='store_true',
                            help='Look for card details')
        parser.add_argument('-t',
                            dest='cert',
                            action='store_true',
                            help='Look for certificate files')
        parser.add_argument('-f',
                            dest='files',
                            action='store_true',
                            help='Look for interesting files')
        parser.add_argument('-P',
                            dest='passwords',
                            action='store_true',
                            help='Look for passwords')

        args = parser.parse_args()
        # NOTE(review): local `time` shadows any file-level `time` module import.
        time = args.time
        everything = args.everything
        users = args.users
        channels = args.channels
        aws = args.aws
        gcp = args.gcp
        slack = args.slack
        priv = args.priv
        card = args.card
        cert = args.cert
        files = args.files
        passwords = args.passwords

        # Map the timeframe flag to the corresponding constant (module `d`).
        if time == 'd':
            tf = d.DAY_TIMEFRAME
        elif time == 'w':
            tf = d.WEEK_TIMEFRAME
        elif time == 'm':
            tf = d.MONTH_TIMEFRAME
        else:
            tf = d.ALL_TIME

        # ASCII-art banner. NOTE(review): the exact whitespace layout of this
        # runtime string was mangled in the available source; layout below is a
        # best-effort reconstruction -- verify against the original file.
        print(
            colored(
                '''
      _            _     __        ___  _____ ____ _   _ __  __    _    _   _
 ___| | __ _  ___| | __  \ \      / / \|_   _/ ___| | | |  \/  |  / \  | \ | |
/ __| |/ _` |/ __| |/ /   \ \ /\ / / _ \ | || |   | |_| | |\/| | / _ \ |  \| |
\__ \ | (_| | (__|   <     \ V  V / ___ \| || |___|  _  | |  | |/ ___ \| |\  |
|___/_|\__,_|\___|_|\_\     \_/\_/_/   \_\_| \____|_| |_|_|  |_/_/   \_\_| \_|
''', 'yellow'))

        # Config lives at ~/slack_watchman.conf; bail with a red error if absent.
        conf_path = '{}/slack_watchman.conf'.format(os.path.expanduser('~'))
        if not validate_conf(conf_path):
            raise Exception(
                colored(
                    'slack_watchman.conf file not detected.'
                    '\nEnsure a valid file is located in your home directory: {}',
                    'red').format(os.path.expanduser('~')))
        else:
            validate_token(conf_path)

        if everything:
            # --all: run every scan in sequence.
            print('You want everything? I like you...')
            print(colored('+++++++++++++++++++++', 'yellow'))
            print(colored('Getting users\n+++++++++++++++++++++', 'yellow'))
            user_list = audit.get_users()
            print(colored('Getting channels\n+++++++++++++++++++++', 'yellow'))
            channel_list = audit.get_channels()
            print(
                colored('Getting admin users\n+++++++++++++++++++++',
                        'yellow'))
            audit.get_admins(user_list)
            print(
                colored('Outputting all channels\n+++++++++++++++++++++',
                        'yellow'))
            audit.output_all_channels(channel_list, tf)
            print(
                colored('Outputting all users\n+++++++++++++++++++++',
                        'yellow'))
            audit.output_all_users(user_list)
            print(
                colored(
                    'Outputting all externally shared channels\n+++++++++++++++++++++',
                    'yellow'))
            audit.get_external_shared(channel_list, tf)
            print(
                colored('Getting AWS credentials\n+++++++++++++++++++++',
                        'yellow'))
            audit.find_aws_credentials(tf)
            print(
                colored('Getting GCP credentials\n+++++++++++++++++++++',
                        'yellow'))
            audit.find_gcp_credentials(tf)
            print(
                colored('Getting private keys\n+++++++++++++++++++++',
                        'yellow'))
            audit.find_keys(tf)
            print(
                colored('Getting bank card details\n+++++++++++++++++++++',
                        'yellow'))
            audit.find_card_details(tf)
            print(
                colored('Getting certificate files\n+++++++++++++++++++++',
                        'yellow'))
            audit.find_certificates(tf)
            print(
                colored('Getting Slack tokens\n+++++++++++++++++++++',
                        'yellow'))
            audit.find_slack_tokens(tf)
            print(
                colored('Finding passwords\n+++++++++++++++++++++', 'yellow'))
            audit.find_passwords(tf)
            print(
                colored('Finding interesting files\n+++++++++++++++++++++',
                        'yellow'))
            audit.find_malicious_files(tf)
        else:
            # Individual flags: each scan only runs if its flag was given.
            if users:
                print(
                    colored('Getting users\n+++++++++++++++++++++', 'yellow'))
                user_list = audit.get_users()
                print(
                    colored('Getting admin users\n+++++++++++++++++++++',
                            'yellow'))
                audit.get_admins(user_list)
                print(
                    colored('Outputting all users\n+++++++++++++++++++++',
                            'yellow'))
                audit.output_all_users(user_list)
            if channels:
                print(
                    colored('Getting channels\n+++++++++++++++++++++',
                            'yellow'))
                channel_list = audit.get_channels()
                print(
                    colored('Outputting all channels\n+++++++++++++++++++++',
                            'yellow'))
                audit.output_all_channels(channel_list, tf)
                print(
                    colored(
                        'Outputting all externally shared channels\n+++++++++++++++++++++',
                        'yellow'))
                audit.get_external_shared(channel_list, tf)
            if aws:
                print(
                    colored('Getting AWS credentials\n+++++++++++++++++++++',
                            'yellow'))
                audit.find_aws_credentials(tf)
            if gcp:
                print(
                    colored('Getting GCP credentials\n+++++++++++++++++++++',
                            'yellow'))
                audit.find_gcp_credentials(tf)
            if slack:
                print(
                    colored('Getting Slack tokens\n+++++++++++++++++++++',
                            'yellow'))
                audit.find_slack_tokens(tf)
            if priv:
                print(
                    colored('Getting private keys\n+++++++++++++++++++++',
                            'yellow'))
                audit.find_keys(tf)
            if card:
                print(
                    colored('Getting bank card details\n+++++++++++++++++++++',
                            'yellow'))
                audit.find_card_details(tf)
            if cert:
                print(
                    colored('Getting certificate files\n+++++++++++++++++++++',
                            'yellow'))
                audit.find_certificates(tf)
            if files:
                print(
                    colored('Finding interesting files\n+++++++++++++++++++++',
                            'yellow'))
                audit.find_malicious_files(tf)
            if passwords:
                print(
                    colored('Finding passwords\n+++++++++++++++++++++',
                            'yellow'))
                audit.find_passwords(tf)

        print(colored('++++++Audit completed++++++', 'green'))
        deinit()  # restore stdout/stderr
    except Exception as e:
        # Broad catch: report any failure in red instead of a traceback.
        print(colored(e, 'red'))
def main():
    # Slack Watchman (v2) CLI entry point: parse flags, compute the timeframe
    # cutoff date, validate config/token, select an output logger, then run the
    # rule-driven scans. Rebinds the local name `print` to route status
    # messages either to stdout or to the chosen logger -- the rebinding order
    # below is load-bearing, keep it intact.
    global OUTPUT_LOGGER, WORKSPACE_NAME
    try:
        init()  # enable colorama ANSI handling
        parser = argparse.ArgumentParser(description=a.__summary__)
        required = parser.add_argument_group('required arguments')
        required.add_argument(
            '--timeframe',
            choices=['d', 'w', 'm', 'a'],
            dest='time',
            help=
            'How far back to search: d = 24 hours w = 7 days, m = 30 days, a = all time',
            required=True)
        parser.add_argument('--output',
                            choices=['csv', 'file', 'stdout', 'stream'],
                            dest='logging_type',
                            help='Where to send results')
        parser.add_argument('--version',
                            action='version',
                            version='slack-watchman {}'.format(a.__version__))
        parser.add_argument('--all',
                            dest='everything',
                            action='store_true',
                            help='Find everything')
        parser.add_argument('--users',
                            dest='users',
                            action='store_true',
                            help='Find all users')
        parser.add_argument('--channels',
                            dest='channels',
                            action='store_true',
                            help='Find all channels')
        parser.add_argument(
            '--pii',
            dest='pii',
            action='store_true',
            help=
            'Find personal data: Passwords, DOB, passport details, drivers licence, ITIN, SSN'
        )
        parser.add_argument(
            '--financial',
            dest='financial',
            action='store_true',
            help=
            'Find financial data: Card details, PayPal Braintree tokens, IBAN numbers,'
            ' CUSIP numbers')
        parser.add_argument(
            '--tokens',
            dest='tokens',
            action='store_true',
            help=
            'Find tokens: Private keys, AWS, GCP, Google API, Slack, Slack webhooks,'
            ' Facebook, Twitter, GitHub')
        parser.add_argument(
            '--files',
            dest='files',
            action='store_true',
            help='Find files: Certificates, interesting/malicious files')
        parser.add_argument(
            '--custom',
            dest='custom',
            action='store_true',
            help=
            'Search for user defined custom search queries that you have created rules for'
        )

        args = parser.parse_args()
        tm = args.time
        everything = args.everything
        users = args.users
        channels = args.channels
        pii = args.pii
        financial = args.financial
        tokens = args.tokens
        files = args.files
        custom = args.custom
        logging_type = args.logging_type

        # Convert the timeframe flag into a YYYY-MM-DD cutoff date string.
        if tm == 'd':
            now = int(time.time())
            tf = time.strftime('%Y-%m-%d',
                               time.localtime(now - cfg.DAY_TIMEFRAME))
        elif tm == 'w':
            now = int(time.time())
            tf = time.strftime('%Y-%m-%d',
                               time.localtime(now - cfg.WEEK_TIMEFRAME))
        elif tm == 'm':
            now = int(time.time())
            tf = time.strftime('%Y-%m-%d',
                               time.localtime(now - cfg.MONTH_TIMEFRAME))
        else:
            now = int(time.time())
            tf = time.strftime('%Y-%m-%d', time.localtime(now - cfg.ALL_TIME))

        # Config lives at ~/watchman.conf (or env var); validate_conf returns
        # the parsed config when valid, falsy otherwise.
        conf_path = '{}/watchman.conf'.format(os.path.expanduser('~'))
        if not validate_conf(conf_path):
            raise Exception(
                colored(
                    'SLACK_WATCHMAN_TOKEN environment variable or slack_watchman.conf file not detected. '
                    '\nEnsure environment variable is set or a valid file is located in your home '
                    'directory: {} ', 'red').format(os.path.expanduser('~')))
        else:
            config = validate_conf(conf_path)
            slack_con = slack.initiate_slack_connection()
            slack_con.validate_token()

        WORKSPACE_NAME = slack_con.get_workspace_name()
        print = builtins.print  # local rebinding; switched to the logger later

        # Choose the output logger based on --output (default: CSV).
        if logging_type:
            if logging_type == 'file':
                # File logging: env var overrides config path; fall back to home.
                if os.environ.get('SLACK_WATCHMAN_LOG_PATH'):
                    OUTPUT_LOGGER = logger.FileLogger(
                        os.environ.get('SLACK_WATCHMAN_LOG_PATH'))
                elif config.get('logging').get('file_logging').get('path') and \
                        os.path.exists(config.get('logging').get('file_logging').get('path')):
                    OUTPUT_LOGGER = logger.FileLogger(log_path=config.get(
                        'logging').get('file_logging').get('path'))
                else:
                    print(
                        'No config given, outputting slack_watchman.log file to home path'
                    )
                    OUTPUT_LOGGER = logger.FileLogger(
                        log_path=os.path.expanduser('~'))
            elif logging_type == 'stdout':
                OUTPUT_LOGGER = logger.StdoutLogger()
            elif logging_type == 'stream':
                # JSON-over-TCP stream: env vars override config host/port.
                if os.environ.get('SLACK_WATCHMAN_HOST') and os.environ.get(
                        'SLACK_WATCHMAN_PORT'):
                    OUTPUT_LOGGER = logger.SocketJSONLogger(
                        os.environ.get('SLACK_WATCHMAN_HOST'),
                        os.environ.get('SLACK_WATCHMAN_PORT'))
                elif config.get('logging').get('json_tcp').get('host') and \
                        config.get('logging').get('json_tcp').get('port'):
                    OUTPUT_LOGGER = logger.SocketJSONLogger(
                        config.get('logging').get('json_tcp').get('host'),
                        config.get('logging').get('json_tcp').get('port'))
                else:
                    raise Exception("JSON TCP stream selected with no config")
            else:
                OUTPUT_LOGGER = logger.CSVLogger()
        else:
            print('No logging option selected, defaulting to CSV')
            OUTPUT_LOGGER = logger.CSVLogger()

        if not isinstance(OUTPUT_LOGGER, logger.StdoutLogger):
            print = builtins.print
            # ASCII-art banner. NOTE(review): the exact whitespace layout of
            # this runtime string was mangled in the available source; the
            # layout below is a best-effort reconstruction -- verify against
            # the original file.
            print(
                colored(
                    '''
 #####  #          #     #####  #    #     #     #    #    #####   #####  #     # #     #    #    #     #
#     # #         # #   #     # #   #      #     #   # #      #   #     # #     # ##   ##   # #   ##    #
#       #        #   #  #       #  #       #  #  #  #   #     #   #       #     # # # # #  #   #  # #   #
 #####  #       #     # #       ###        #  #  # #     #    #   #       ####### #  #  # #     # #  #  #
      # #       ####### #       #  #       #  #  # #######    #   #       #     # #     # ####### #   # #
#     # #       #     # #     # #   #      #  #  # #     #    #   #     # #     # #     # #     # #    ##
 #####  ####### #     #  #####  #    #      ## ##  #     #    #    #####  #     # #     # #     # #     #''',
                    'yellow'))
            print('Version: {}\n'.format(a.__version__))
            print('Searching workspace: {}'.format(WORKSPACE_NAME))
            print('Workspace URL: {}\n'.format(
                slack_con.get_workspace_domain()))
            print('Importing rules...')
            rules_list = load_rules()
            print('{} rules loaded'.format(len(rules_list)))
        else:
            # Stdout logger: route all status output through the logger, with
            # no banner (it would pollute machine-readable output).
            OUTPUT_LOGGER.log_info('Slack Watchman started execution')
            OUTPUT_LOGGER.log_info(
                'Searching workspace: {}'.format(WORKSPACE_NAME))
            OUTPUT_LOGGER.log_info('Workspace URL: {}'.format(
                slack_con.get_workspace_domain()))
            OUTPUT_LOGGER.log_info('Importing rules...')
            rules_list = load_rules()
            OUTPUT_LOGGER.log_info('{} rules loaded'.format(len(rules_list)))
            print = OUTPUT_LOGGER.log_info

        if everything:
            # --all: run every rule category.
            # NOTE(review): the first two category loops open a fresh Slack
            # connection per scope while the later ones reuse `slack_con` --
            # looks inconsistent; confirm whether this is intentional.
            print('Getting everything...')
            print(colored('Getting users', 'yellow'))
            user_list = slack.get_users(slack.initiate_slack_connection())
            print(colored('Getting channels', 'yellow'))
            channel_list = slack.get_channels(
                slack.initiate_slack_connection())
            print(colored('Outputting all channels', 'yellow'))
            all_channels = slack.get_all_channels(OUTPUT_LOGGER, channel_list,
                                                  tf)
            if all_channels:
                log(all_channels, 'channels')
            print(colored('Outputting all users', 'yellow'))
            all_users = slack.get_all_users(OUTPUT_LOGGER, user_list)
            if all_users:
                log(all_users, 'users')
            print(colored('Searching tokens', 'yellow'))
            for rule in rules_list:
                if 'tokens' in rule.get('category'):
                    for scope in rule.get('scope'):
                        search(slack.initiate_slack_connection(), rule, tf,
                               scope)
            print(colored('Searching financial data', 'yellow'))
            for rule in rules_list:
                if 'financial' in rule.get('category'):
                    for scope in rule.get('scope'):
                        search(slack.initiate_slack_connection(), rule, tf,
                               scope)
            print(colored('Searching files', 'yellow'))
            for rule in rules_list:
                if 'files' in rule.get('category'):
                    for scope in rule.get('scope'):
                        search(slack_con, rule, tf, scope)
            print(colored('Searching PII/Personal Data', 'yellow'))
            for rule in rules_list:
                if 'pii' in rule.get('category'):
                    for scope in rule.get('scope'):
                        search(slack_con, rule, tf, scope)
            print(colored('Searching custom strings', 'yellow'))
            for rule in rules_list:
                if 'custom' in rule.get('category'):
                    for scope in rule.get('scope'):
                        search(slack_con, rule, tf, scope)
        else:
            # Individual flags: each scan only runs if its flag was given.
            if users:
                print(colored('Getting users', 'yellow'))
                user_list = slack.get_users(slack_con)
                print(colored('Outputting all users', 'yellow'))
                all_users = slack.get_all_users(OUTPUT_LOGGER, user_list)
                if all_users:
                    log(all_users, 'users')
            if channels:
                print(colored('Getting channels', 'yellow'))
                channel_list = slack.get_channels(slack_con)
                print(colored('Outputting all channels', 'yellow'))
                all_channels = slack.get_all_channels(OUTPUT_LOGGER,
                                                      channel_list, tf)
                if all_channels:
                    log(all_channels, 'channels')
            if tokens:
                print(colored('Searching tokens', 'yellow'))
                for rule in rules_list:
                    if 'tokens' in rule.get('category'):
                        for scope in rule.get('scope'):
                            search(slack_con, rule, tf, scope)
            if financial:
                print(colored('Searching financial data', 'yellow'))
                for rule in rules_list:
                    if 'financial' in rule.get('category'):
                        for scope in rule.get('scope'):
                            search(slack_con, rule, tf, scope)
            if files:
                print(colored('Searching files', 'yellow'))
                for rule in rules_list:
                    if 'files' in rule.get('category'):
                        for scope in rule.get('scope'):
                            search(slack_con, rule, tf, scope)
            if pii:
                print(colored('Searching PII/Personal Data', 'yellow'))
                for rule in rules_list:
                    if 'pii' in rule.get('category'):
                        for scope in rule.get('scope'):
                            search(slack_con, rule, tf, scope)
            if custom:
                print(colored('Searching custom strings', 'yellow'))
                for rule in rules_list:
                    if 'custom' in rule.get('category'):
                        for scope in rule.get('scope'):
                            search(slack_con, rule, tf, scope)

        print(colored('++++++Audit completed++++++', 'green'))
        deinit()  # restore stdout/stderr
    except Exception as e:
        # Route the error through the active logger when one is in use.
        if isinstance(OUTPUT_LOGGER, logger.StdoutLogger):
            print = OUTPUT_LOGGER.log_info
        else:
            print = builtins.print
        print(colored(e, 'red'))
def supportTerminalColorsInWindows(): # Filter and replace ANSI escape sequences on Windows with equivalent Win32 # API calls. This code does nothing on non-Windows systems. colorama.init() yield colorama.deinit()
elif sys.platform == "darwin": return "macos" import termcolor, \ colorama if get_os() == "windows": import win_unicode_console, \ win32api, \ win32con #from path1 import path1 #win_unicode_console.enable() colorama.init() colorama.deinit() from termcolor import colored, cprint #print_green_on_cyan = lambda x: cprint(x, 'green', 'on_cyan') newline = '\n' ruble = u"\u20bd" # \u20bd is ₽ backslash = "\ "[:1] def isPython3(): PY3K = sys.version_info >= (3, 0) return PY3K def path_full(path): return os.path.abspath(path)
def shutdown(self): #Shutdown Colorama. colorama.deinit()
def nocolor(): if sys.platform == 'win32' and colorama is not None: colorama.deinit() codes.clear()
def _run_demo(body_fn, interactive, echo, testing):
    """
    Execute the demo, echoing commands and pausing for user input.

    :param body_fn: function that contains the sequence of demo's commands.
    :param interactive: If True, the user will be prompted to continue the demonstration after every segment.
    :param echo: If True, the python commands that are executed will be displayed.
    :param testing: Used for pyunit testing. h2o.init() will not be called if set to True.

    :type body_fn: function
    """
    import colorama
    from colorama import Style, Fore
    colorama.init()

    class StopExecution(Exception):
        """Helper class for cancelling the demo."""

    assert_is_type(body_fn, type(_run_demo))

    # Reformat description by removing extra spaces; then print it.
    if body_fn.__doc__:
        desc_lines = body_fn.__doc__.split("\n")
        while desc_lines[0].strip() == "":
            desc_lines = desc_lines[1:]
        while desc_lines[-1].strip() == "":
            desc_lines = desc_lines[:-1]
        # Common leading indent, measured over all non-blank lines after the first.
        strip_spaces = min(
            len(line) - len(line.lstrip(" ")) for line in desc_lines[1:]
            if line.strip() != "")
        maxlen = max(len(line) for line in desc_lines)
        print(Fore.CYAN)
        print("-" * maxlen)
        for line in desc_lines:
            print(line[strip_spaces:].rstrip())
        print("-" * maxlen)
        print(Style.RESET_ALL, end="")

    # Prepare the executor function
    def controller():
        """Print to console the next block of commands, and wait for keypress."""
        # Raising and catching locally gives access to the *caller's* frame via
        # the traceback, which is how the next source segment is located.
        try:
            raise RuntimeError("Catch me!")
        except RuntimeError:
            print()
            # Extract and print lines that will be executed next
            if echo:
                tb = sys.exc_info()[2]
                fr = tb.tb_frame.f_back  # frame of the go() call site in body_fn
                filename = fr.f_code.co_filename
                linecache.checkcache(filename)
                line = linecache.getline(filename, fr.f_lineno,
                                         fr.f_globals).rstrip()
                indent_len = len(line) - len(line.lstrip(" "))
                assert line[indent_len:] == "go()"
                i = fr.f_lineno
                output_lines = []
                n_blank_lines = 0
                # Collect source lines between this go() call and the next one.
                while True:
                    i += 1
                    line = linecache.getline(filename, i,
                                             fr.f_globals).rstrip()
                    # Detect dedent
                    if line[:indent_len].strip() != "": break
                    line = line[indent_len:]
                    if line == "go()": break
                    style = Fore.LIGHTBLACK_EX if line.lstrip().startswith(
                        "#") else Style.BRIGHT
                    prompt = "... " if line.startswith(" ") else ">>> "
                    output_lines.append(Fore.CYAN + prompt + Fore.RESET +
                                        style + line + Style.RESET_ALL)
                    del style  # Otherwise exception print-outs may get messed-up...
                    if line.strip() == "":
                        n_blank_lines += 1
                        if n_blank_lines > 5: break  # Just in case we hit file end or something
                    else:
                        n_blank_lines = 0
                # NOTE(review): when n_blank_lines == 0, `[:-0]` slices to an
                # empty list so no lines are echoed at all -- looks like a bug;
                # confirm intent.
                for line in output_lines[:-n_blank_lines]:
                    print(line)
            # Prompt for user input
            if interactive:
                print("\n" + Style.DIM + "(press any key)" + Style.RESET_ALL,
                      end="")
                key = _wait_for_keypress()
                # NOTE(review): the run of spaces in this carriage-return wipe
                # string may have been collapsed by whitespace mangling in the
                # available source -- verify its original width.
                print("\r \r", end="")
                if key.lower() == "q":
                    raise StopExecution()

    # Replace h2o.init() with a stub when running in "test" mode
    _h2o_init = h2o.init
    if testing:
        h2o.init = lambda *args, **kwargs: None

    # Run the test
    try:
        body_fn(controller)
        print("\n" + Fore.CYAN + "---- End of Demo ----" + Style.RESET_ALL)
    except (StopExecution, KeyboardInterrupt):
        print("\n" + Fore.RED + "---- Demo aborted ----" + Style.RESET_ALL)

    # Clean-up: restore the real h2o.init and tear down colorama.
    if testing:
        h2o.init = _h2o_init
    print()
    colorama.deinit()
def Main():
    # Entry point: parse CLI flags, train the sentence-structure and vocabulary
    # networks on a text corpus, then optionally generate text from a seed
    # sentence.
    # NOTE(review): `_TrainRangeSS` and `_TrainRangeV` are read below but not
    # defined in this function -- presumably module-level globals; confirm.
    _isUnitTestingSS = False
    _isUnitTestingV = False
    _recursiveInput = False
    _TrainingDataInputFile = "Datasets/Sstt.utf8.txt"
    _TestSentence = ""
    _TestSequenceGenSize = 30
    _OutputFile = None

    consoleInArgs = sys.argv[1:]
    # check input arguments
    # NOTE(review): for value-taking flags (-td x, -ts x, ...), the value token
    # itself is visited on the next iteration and falls through to the final
    # `else`, which raises ValueError('Un-recognized console argument') --
    # looks like a parsing bug; confirm against actual usage.
    for index, val in enumerate(consoleInArgs):
        # Runs the unit testing module on initiation
        if(val == "-utss"):
            _isUnitTestingSS = True
        # Unit testing for the vocabulary network
        elif(val == "-utv"):
            _isUnitTestingV = True
        elif(len(consoleInArgs) >= index+1):
            # specify training data location
            if(val == "-td"):
                _TrainingDataInputFile = consoleInArgs[index+1]
                ConsoleOutput.printGreen("Training data load locaiton changed to: \"" + _TrainingDataInputFile + "\"")
            # give a generation sentence input
            elif(val == "-ts"):
                _TestSentence = consoleInArgs[index+1]
                if(len(_TestSentence.split()) != _TrainRangeSS):
                    raise ValueError('Test sequence must be the same length as the vector training size. (' + str(_TrainRangeSS) + ')')
            # set the amount of words generated after input
            elif(val == "-tsc"):
                _TestSequenceGenSize = int(consoleInArgs[index+1])
                ConsoleOutput.printGreen("Test sequence generation size changed to: " + str(_TestSequenceGenSize))
            # set the output file for the generated data to be printed to
            elif(val == "-of"):
                _OutputFile = str(consoleInArgs[index+1])
                ConsoleOutput.printGreen("Output generation location changed to: (" + consoleInArgs[index+1]+ ")")
            else:
                raise ValueError('Un-recognized console argument: ' + str(val))

    # Initialise colorama cross-platform console logging
    init()

    MLNetworkSS = NNSentenceStructure()
    MLNetworkV = NNVocabulary()

    # Network trainer converts text data into normalized vectors that
    # can be passed into the networks
    networkTrainer = NetworkTrainer(_TrainRangeSS, _TrainRangeV)
    networkTrainer.loadTextFromFile(_TrainingDataInputFile)

    # Trainer parses the structure into vector normal arrays of size (_TrainRangeSS)
    # the next word of the squence is used as the target, example
    # ["Harry", "sat", "on", "his"] - ["broomstick"] <-- target
    networkTrainer.loadSentenceStructureNormals()
    networkTrainer.loadVocabularyNormals(MLNetworkV)

    # Pass the vectors into the network
    MLNetworkSS.loadVectorsIntoNetwork(networkTrainer._TrainingSequenceSS,
                                       networkTrainer._TrainingTargetsSS)
    # Passs into vocab network here ****

    # Fit data
    MLNetworkSS.FitNetwork()
    MLNetworkV.FitNetwork()
    # Fit to vocab network here ****

    # Use console argument "-utss" to activate
    #testing
    uTester = None
    if(_isUnitTestingSS):
        #if(uTester == None):
            #uTester = UnitTester(MLNetworkSS, MLNetworkV, _TrainRangeSS, _TrainRangeV)
        #uTester.TestSentenceStructuring()
        print("_isUnitTestingSS is true")

    # use console argument "-utv" to activate
    if(_isUnitTestingV):
        #if(uTester == None):
            #uTester = UnitTester(MLNetworkSS, MLNetworkV, _TrainRangeSS, _TrainRangeV)
        #uTester.TestVocabulary()
        print("_isUnitTestingV is true")

    if(_TestSentence != ""):
        print("_TestSentence is true")
        printToFile = False
        f = None
        # user has specified output location
        if(_OutputFile != None):
            printToFile = True
            f = open(_OutputFile,'w')

        genSize = _TestSequenceGenSize  # size of the target text to generate
        initialInput = _TestSentence
        if(printToFile):
            f.write(initialInput + " ")
        else:
            print(initialInput + " ", end="")

        initialInput = initialInput.split()  # split the input keywords
        # generate a sentence of genSize
        for index in range(0, genSize):
            #print(initialInput)
            nlo = NaturalLanguageObject(initialInput)
            # Work around ambiguous Chinese word segmentation: so the test
            # data's dimensionality matches the training data's, drop the
            # surplus (word, tag) tuples from the head of the list
            # [('word', tag), ('word', tag), ...]; otherwise the KNN
            # classifier raises an error.
            diff = len(nlo.sentenceNormalised) - _TrainRangeSS
            if(diff > 0):
                nlo.sentenceNormalised = nlo.sentenceNormalised[diff:]

            # since nlo will always be the right size, we can use that variable
            predToke = MLNetworkSS.getPrediction([nlo.sentenceNormalised])
            nextToke = nlo.tokeniseNormals([predToke])

            # now we have the next toke in the sentence, convert that to word
            word = MLNetworkV.getPredictedWord(nlo.sentenceNormalised[-1], nextToke[0])

            # decide whether to print to file or console
            if(printToFile):
                f.write(str(word) + " ")
            else:
                print(str(word) + " ", end="")

            initialInput.append(word)
            # maintain a size of 'genSize'
            del initialInput[0]
        print("\n")

    # Reset console back to original state
    deinit()
def _exit(msg): sys.stderr.write(msg) # stop colorama deinit() sys.exit(-1)
def main(argv=None):
    """Command-line entry point: detect capabilities in a sample and render
    the matches grouped by function.

    Loads rules (embedded by default), extracts features from the sample
    (PE, shellcode, or previously frozen features), finds capabilities, and
    prints a colorized per-function report.

    :param argv: argument vector to parse; defaults to sys.argv[1:]
    :return: 0 on success, -1 on any error
    """
    if argv is None:
        argv = sys.argv[1:]

    # supported input formats as (name, description) pairs
    formats = [
        ("auto", "(default) detect file type automatically"),
        ("pe", "Windows PE file"),
        ("sc32", "32-bit shellcode"),
        ("sc64", "64-bit shellcode"),
        ("freeze", "features previously frozen by capa"),
    ]
    format_help = ", ".join(["%s: %s" % (f[0], f[1]) for f in formats])

    parser = argparse.ArgumentParser(description="detect capabilities in programs.")
    parser.add_argument("sample", type=str, help="Path to sample to analyze")
    parser.add_argument(
        "-r",
        "--rules",
        type=str,
        default="(embedded rules)",
        help="Path to rule file or directory, use embedded rules by default",
    )
    parser.add_argument("-t", "--tag", type=str, help="Filter on rule meta field values")
    parser.add_argument("-d", "--debug", action="store_true", help="Enable debugging output on STDERR")
    parser.add_argument("-q", "--quiet", action="store_true", help="Disable all output but errors")
    parser.add_argument(
        "-f",
        "--format",
        choices=[f[0] for f in formats],
        default="auto",
        help="Select sample format, %s" % format_help,
    )
    args = parser.parse_args(args=argv)

    # -q and -d override the default INFO verbosity
    if args.quiet:
        logging.basicConfig(level=logging.ERROR)
        logging.getLogger().setLevel(logging.ERROR)
    elif args.debug:
        logging.basicConfig(level=logging.DEBUG)
        logging.getLogger().setLevel(logging.DEBUG)
    else:
        logging.basicConfig(level=logging.INFO)
        logging.getLogger().setLevel(logging.INFO)

    # disable vivisect-related logging, it's verbose and not relevant for capa users
    capa.main.set_vivisect_log_level(logging.CRITICAL)

    try:
        taste = get_file_taste(args.sample)
    except IOError as e:
        logger.error("%s", str(e))
        return -1

    # py2 doesn't know about cp65001, which is a variant of utf-8 on windows
    # tqdm bails when trying to render the progress bar in this setup.
    # because cp65001 is utf-8, we just map that codepage to the utf-8 codec.
    # see #380 and: https://stackoverflow.com/a/3259271/87207
    import codecs

    codecs.register(lambda name: codecs.lookup("utf-8") if name == "cp65001" else None)

    if args.rules == "(embedded rules)":
        logger.info("-" * 80)
        logger.info(" Using default embedded rules.")
        logger.info(" To provide your own rules, use the form `capa.exe -r ./path/to/rules/ /path/to/mal.exe`.")
        logger.info(" You can see the current default rule set here:")
        logger.info(" https://github.com/fireeye/capa-rules")
        logger.info("-" * 80)

        logger.debug("detected running from source")
        args.rules = os.path.join(os.path.dirname(__file__), "..", "rules")
        logger.debug("default rule path (source method): %s", args.rules)
    else:
        logger.info("using rules path: %s", args.rules)

    try:
        rules = capa.main.get_rules(args.rules)
        rules = capa.rules.RuleSet(rules)
        logger.info("successfully loaded %s rules", len(rules))
        if args.tag:
            rules = rules.filter_rules_by_meta(args.tag)
            logger.info("selected %s rules", len(rules))
    except (IOError, capa.rules.InvalidRule, capa.rules.InvalidRuleSet) as e:
        logger.error("%s", str(e))
        return -1

    # either load previously-frozen features or extract fresh ones from the sample
    # (local name `fmt` avoids shadowing the builtin `format`)
    if (args.format == "freeze") or (args.format == "auto" and capa.features.freeze.is_freeze(taste)):
        fmt = "freeze"
        with open(args.sample, "rb") as f:
            extractor = capa.features.freeze.load(f.read())
    else:
        fmt = args.format
        try:
            extractor = capa.main.get_extractor(args.sample, args.format)
        except capa.main.UnsupportedFormatError:
            logger.error("-" * 80)
            logger.error(" Input file does not appear to be a PE file.")
            logger.error(" ")
            logger.error(" capa currently only supports analyzing PE files (or shellcode, when using --format sc32|sc64).")
            logger.error(" If you don't know the input file type, you can try using the `file` utility to guess it.")
            logger.error("-" * 80)
            return -1
        except capa.main.UnsupportedRuntimeError:
            logger.error("-" * 80)
            logger.error(" Unsupported runtime or Python interpreter.")
            logger.error(" ")
            logger.error(" capa supports running under Python 2.7 using Vivisect for binary analysis.")
            logger.error(" It can also run within IDA Pro, using either Python 2.7 or 3.5+.")
            logger.error(" ")
            logger.error(" If you're seeing this message on the command line, please ensure you're running Python 2.7.")
            logger.error("-" * 80)
            return -1

    meta = capa.main.collect_metadata(argv, args.sample, args.rules, fmt, extractor)

    capabilities, counts = capa.main.find_capabilities(rules, extractor)
    meta["analysis"].update(counts)

    if capa.main.has_file_limitation(rules, capabilities):
        # bail if capa encountered file limitation e.g. a packed binary
        # do show the output in verbose mode, though.
        # FIX: this parser defines no --verbose/--vverbose/--json options, so the
        # original `args.verbose or args.vverbose or args.json` raised
        # AttributeError here; getattr() keeps the intended "bail unless verbose"
        # behavior without crashing.
        if not (getattr(args, "verbose", False) or getattr(args, "vverbose", False) or getattr(args, "json", False)):
            return -1

    # colorama will detect:
    #  - when on Windows console, and fixup coloring, and
    #  - when not an interactive session, and disable coloring
    # renderers should use coloring and assume it will be stripped out if necessary.
    colorama.init()
    doc = capa.render.convert_capabilities_to_result_document(meta, rules, capabilities)
    print(render_matches_by_function(doc))
    colorama.deinit()

    logger.info("done.")
    return 0
def _exit(): # Exit tidily colorama.deinit() return False
def color_print(color, text, no_newline=False): init(autoreset=True) print(color + text, end='') if no_newline else print(color + text) deinit()
def main():
    """Interactively measure straw resistances, re-measuring straws that have
    not yet passed until every straw passes (or the operator stops), then
    display and save the results.

    Relies on module-level configuration and helpers: straw_nums, avg_method,
    meas_cycles, calib_file, dataFile, measure_resistance, display_resistance,
    save_resistance, check_repeat.
    """
    colorama.init() #turn colorama ANSII conversion on
    #wrkr, wrkst, temp, humid, str_start, str_end = gather_info()
    # Hard-coded worker/workstation/environment values (the commented-out
    # gather_info() call above would normally prompt for these).
    wrkr = 'wb'
    wrkst = 'wsb-001'
    temp = 72.0
    humid = 20.0
    str_start = 'ST00001'
    str_end = 'ST00024'
    counter = 0  # number of measurement cycles performed
    straw_dict = {}
    save_dict = {}
    # One entry per straw for each inner/outer end combination (ii/io/oi/oo);
    # the inline comment names only three of the seven slots — NOTE(review):
    # full slot layout not documented here, confirm with measure_resistance().
    for value in straw_nums:
        save_dict[value+'ii'] = [0,0,'',0,0,0,'fail'] #[% error, device resistance, pass/fail]
        save_dict[value+'io'] = [0,0,'',0,0,0,'fail'] #[% error, device resistance, pass/fail]
        save_dict[value+'oi'] = [0,0,'',0,0,0,'fail'] #[% error, device resistance, pass/fail]
        save_dict[value+'oo'] = [0,0,'',0,0,0,'fail'] #[% error, device resistance, pass/fail]
    #print('Using first calibration file...\n')
    repeat = True
    while(repeat == True):
        counter += 1
        input("Press enter to measure resistance...")
        straw_dict = measure_resistance(avg_method, meas_cycles, str_start, str_end,calib_file)
        i = 0  # counts straws that were still failing before this cycle
        for key, value in sorted(save_dict.items()):
            # only overwrite results for straws that have not passed yet
            if value[6] != 'pass': #and straw_dict[key][6] == 'pass':
                value[0] = straw_dict[key][0]
                value[1] = straw_dict[key][1]
                value[2] = straw_dict[key][2]
                value[3] = straw_dict[key][3]
                value[4] = straw_dict[key][4]
                value[5] = straw_dict[key][5]
                value[6] = straw_dict[key][6]
                i += 1
        if i == 0:
            # every straw has already passed; stop re-measuring
            repeat = False
        display_resistance(save_dict)
        print('Number of measurement cycles: ' + str(counter) + '\n')
        # NOTE(review): this unconditionally supersedes the `i == 0` assignment
        # above — the original indentation was lost in this view; confirm the
        # intended nesting against the upstream source.
        repeat = check_repeat()
    save_resistance(wrkr, wrkst, temp, humid, save_dict,dataFile,str_start,str_end,counter)
    input('Press enter to exit...')
    ''' This section was used in testing different calibration methods
    print('Now using adjusted calibration file...\n')
    straw_dict = {}
    save_dict = {}
    for value in straw_nums:
        save_dict[value+'ii'] = [0,0,'',0,0,0,'fail'] #[% error, device resistance, pass/fail]
        save_dict[value+'io'] = [0,0,'',0,0,0,'fail'] #[% error, device resistance, pass/fail]
        save_dict[value+'oi'] = [0,0,'',0,0,0,'fail'] #[% error, device resistance, pass/fail]
        save_dict[value+'oo'] = [0,0,'',0,0,0,'fail'] #[% error, device resistance, pass/fail]
    repeat = True
    while(repeat == True):
        input("Press enter to measure resistance...")
        straw_dict = measure_resistance(avg_method, meas_cycles, str_start, str_end,calib_file_adjusted)
        i = 0
        for key, value in sorted(save_dict.items()):
            if value[6] != 'pass': #and straw_dict[key][6] == 'pass':
                value[0] = straw_dict[key][0]
                value[1] = straw_dict[key][1]
                value[2] = straw_dict[key][2]
                value[3] = straw_dict[key][3]
                value[4] = straw_dict[key][4]
                value[5] = straw_dict[key][5]
                value[6] = straw_dict[key][6]
                i += 1
        if i == 0:
            repeat = False
        display_resistance(save_dict)
        repeat = check_repeat()
        save_resistance(wrkr, wrkst, temp, humid, save_dict,dataFile_adjusted)
    '''
    colorama.deinit() #turn colorama ANSII conversion off
def support_windows_colors(): """Only for windows terminal""" init() yield deinit()
def close(): """Terminate 'colorama' module.""" deinit()