def test_report_multiple_failed(self):
    """Two failing tests must each render a banner, their output lines and a
    location footer, followed by one shared red summary line."""
    f = io.StringIO()
    r = TerminalReporter(watch_path='/path', build_path=None,
                         terminal=Terminal(stream=f))
    # Each failure entry is [test name, output lines, extra output, status].
    results = {
        'total_runtime': 2.09,
        'total_passed': 0,
        'total_failed': 2,
        'failures': [
            [
                'fail1',
                [
                    '/path/to/file:12: blah',
                    'results line 2',
                    'results line 3',
                    'results line 4',
                ],
                [],
                FAILED
            ],
            [
                'fail2',
                [
                    '/path/to/file:102: blah',
                    'results line 2',
                    'results line 3',
                    'results line 4',
                ],
                [],
                FAILED
            ],
        ]
    }
    r.report_results(results)
    expected = [
        '================================== FAILURES ==================================',  # noqa
        termstyle.bold(termstyle.red(
            '___________________________________ fail1 ____________________________________'  # noqa
        )),
        '/path/to/file:12: blah',
        'results line 2',
        'results line 3',
        'results line 4',
        # Footer repeats the failure location with the watch path stripped.
        termstyle.bold(termstyle.red(
            '_________________________________ to/file:12 _________________________________'  # noqa
        )),
        termstyle.bold(termstyle.red(
            '___________________________________ fail2 ____________________________________'  # noqa
        )),
        '/path/to/file:102: blah',
        'results line 2',
        'results line 3',
        'results line 4',
        termstyle.bold(termstyle.red(
            '________________________________ to/file:102 _________________________________'  # noqa
        )),
        termstyle.bold(termstyle.red(
            '===================== 2 failed, 0 passed in 2.09 seconds ====================='  # noqa
        )),
    ]
    actual = f.getvalue().splitlines()
    assert actual == expected
def dump_info(fname):
    """Return a one-line, human-readable summary of a dump file.

    The summary is either the number of dumped entries in the JSON file or a
    red-colored error description (empty file, unreadable file, broken JSON,
    or a top-level JSON value that is not an object).
    """
    # Bug fixes: default message had a typo ('unknwon error') and was
    # unreachable anyway; manual open/close with a NameError guard replaced
    # by a context manager.
    if not os.path.getsize(fname):
        return red('empty file')
    try:
        with open(fname, 'rt') as handle:
            content = json.load(handle)
    except OSError as exc:
        return red('unable to open file: {!s}'.format(exc))
    except ValueError as exc:
        return red('unable to load JSON: {!s}'.format(exc))
    try:
        return "{!s} dumps".format(len(content.keys()))
    except AttributeError as exc:
        # Top-level JSON value is not an object (no .keys()).
        return red("unable to count dumps: {!s}".format(exc))
def test_path_stripping_in_test_failure_last_line(self):
    """The trailing failure banner must show the failing file's location
    relative to the watch path (the '/path/to/watch/' prefix is stripped)."""
    f = io.StringIO()
    r = TerminalReporter(watch_path='/path/to/watch',
                         build_path='/path/to/build',
                         terminal=Terminal(stream=f))
    failures = [[
        'core.ok',
        [
            '/path/to/watch/test/test_core.cc:12: Failure',
            'Value of: 2',
            'Expected: ok()',
            'Which is: 42',
        ],
        [],
        FAILED
    ]]
    r.report_failures(failures)
    expected = [
        '================================== FAILURES ==================================',  # noqa
        termstyle.bold(termstyle.red(
            '__________________________________ core.ok ___________________________________'  # noqa
        )),
        '/path/to/watch/test/test_core.cc:12: Failure',
        'Value of: 2',
        'Expected: ok()',
        'Which is: 42',
        termstyle.bold(termstyle.red(
            '____________________________ test/test_core.cc:12 ____________________________',  # noqa
        )),
    ]
    actual = f.getvalue().splitlines()
    assert actual == expected
def do_start_and_return (self, line):
    """Start TRex run and once in 'Running' mode, return to cmd prompt"""
    # Kick off TRex with the stored run parameters; control returns to the
    # interactive prompt without waiting for the run to finish.
    print termstyle.green("*** Starting TRex run, wait until in 'Running' state ***")
    try:
        ret = self.trex.start_trex(**self.run_params)
        print termstyle.green("*** End of scenario (TRex is probably still running!) ***")
    except TRexException as inst:
        # Server-side failures are reported in red instead of raising.
        print termstyle.red(inst)
def do_stop_trex (self, line):
    """Try to stop TRex run (if TRex is currently running)"""
    print termstyle.green("*** Starting TRex termination ***")
    try:
        ret = self.trex.stop_trex()
        print termstyle.green("*** End of scenario (TRex is not running now) ***")
    except TRexException as inst:
        # Termination errors (e.g. nothing to stop) are shown in red.
        print termstyle.red(inst)
def do_reserve_trex (self, user):
    """Reserves the usage of TRex to a certain user"""
    try:
        if not user:
            # No user supplied: reserve with the server-side default.
            ret = self.trex.reserve_trex()
        else:
            # Only the first whitespace-separated token is used as the name.
            ret = self.trex.reserve_trex(user.split(' ')[0])
        print termstyle.green("*** TRex reserved successfully ***")
    except TRexException as inst:
        print termstyle.red(inst)
def do_kill_indiscriminately (self, line):
    """Force killing of running TRex process (if exists) on the server."""
    print termstyle.green("*** Starting TRex termination ***")
    ret = self.trex.force_kill()
    # Three-way result: truthy = killed, None = aborted, other falsy = failed.
    if ret:
        print termstyle.green("*** End of scenario (TRex is not running now) ***")
    elif ret is None:
        print termstyle.magenta("*** End of scenario (TRex termination aborted) ***")
    else:
        print termstyle.red("*** End of scenario (TRex termination failed) ***")
def do_cancel_reservation (self, user):
    """Cancels a current reservation of TRex to a certain user"""
    try:
        if not user:
            ret = self.trex.cancel_reservation()
        else:
            # Only the first whitespace-separated token is used as the name.
            ret = self.trex.cancel_reservation(user.split(' ')[0])
        print termstyle.green("*** TRex reservation canceled successfully ***")
    except TRexException as inst:
        print termstyle.red(inst)
def do_poll_once (self, line): """Performs a single poll of TRex current data dump (if TRex is running) and prompts and short version of latest result_obj""" print termstyle.green("*** Trying TRex single poll ***") try: last_res = dict() if self.trex.is_running(dump_out = last_res): obj = self.trex.get_result_obj() print obj else: print termstyle.magenta("TRex isn't currently running.") print termstyle.green("*** End of scenario (TRex is posssibly still running!) ***") except TRexException as inst: print termstyle.red(inst)
def do_run_until_finish (self, sample_rate):
    """Starts TRex and sample server until run is done."""
    print termstyle.green("*** Starting TRex run_until_finish scenario ***")
    if not sample_rate: # use default sample rate if not passed
        sample_rate = 5
    try:
        sample_rate = int(sample_rate)
        ret = self.trex.start_trex(**self.run_params)
        # Block, polling the server every `sample_rate` seconds, until done.
        self.trex.sample_to_run_finish(sample_rate)
        print termstyle.green("*** End of TRex run ***")
    except ValueError as inst:
        # int() conversion of the user-supplied rate failed.
        print termstyle.magenta("Provided illegal sample rate value. Please try again.\n[", inst,"]")
    except TRexException as inst:
        print termstyle.red(inst)
def _summarize(self):
    """summarize all tests - the number of failures, errors and successes"""
    # Separator line, then "N tests run in X seconds".
    self._line(termstyle.black)
    self._out("%s test%s run in %0.1f seconds" % (
        self.total, self._plural(self.total),
        time.time() - self.start_time))
    if self.total > self.success:
        # At least one non-success: list failures/errors/skips, color-coded.
        self._outln(". ")
        additionals = []
        if self.failure > 0:
            additionals.append(termstyle.red("%s FAILED" % (
                self.failure,)))
        if self.error > 0:
            additionals.append(termstyle.yellow("%s error%s" % (
                self.error, self._plural(self.error) )))
        if self.skip > 0:
            additionals.append(termstyle.blue("%s skipped" % (
                self.skip)))
        self._out(', '.join(additionals))
    # Always append the green passed-count tail.
    self._out(termstyle.green(" (%s test%s passed)" % (
        self.success, self._plural(self.success) )))
    self._outln()
def test_report_with_stdout_and_stderr_in_additional_output(self):
    """Output lines preceding the failure location must be reported after the
    results, under an 'Additional output' banner."""
    f = io.StringIO()
    r = TerminalReporter(watch_path='/path', build_path=None,
                         terminal=Terminal(stream=f))
    results = {
        'total_runtime': 2.09,
        'total_passed': 0,
        'total_failed': 1,
        'failures': [
            [
                'fail1',
                [
                    'extra line 1',
                    'extra line 2',
                    '/path/to/file:12: blah',
                    'results line 1',
                    'results line 2',
                    'results line 3',
                ],
                [],
                FAILED
            ],
        ]
    }
    r.report_results(results)
    expected = [
        '================================== FAILURES ==================================',  # noqa
        termstyle.bold(termstyle.red(
            '___________________________________ fail1 ____________________________________'  # noqa
        )),
        '/path/to/file:12: blah',
        'results line 1',
        'results line 2',
        'results line 3',
        '----------------------------- Additional output ------------------------------',  # noqa
        'extra line 1',
        'extra line 2',
        termstyle.bold(termstyle.red(
            '_________________________________ to/file:12 _________________________________'  # noqa
        )),
        termstyle.bold(termstyle.red(
            '===================== 1 failed, 0 passed in 2.09 seconds ====================='  # noqa
        )),
    ]
    actual = f.getvalue().splitlines()
    assert actual == expected
def test_writeln_decorator(self):
    """writeln() must apply each decorator in order; with several decorators
    the later ones wrap the earlier ones."""
    cases = (
        ([bold], bold('hello')),
        ([bold, red], red(bold('hello'))),
    )
    for decorators, expected in cases:
        stream = io.StringIO()
        term = Terminal(stream=stream)
        term.writeln('hello', decorator=decorators)
        assert stream.getvalue() == expected + linesep
def do_run_until_condition (self, sample_rate):
    """Starts TRex and sample server until condition is satisfied."""
    print termstyle.green("*** Starting TRex run until condition is satisfied scenario ***")

    def condition (result_obj):
        # Stop sampling once the TX rate exceeds 200k packets per second.
        return result_obj.get_current_tx_rate()['m_tx_pps'] > 200000

    if not sample_rate: # use default sample rate if not passed
        sample_rate = 5
    try:
        sample_rate = int(sample_rate)
        ret = self.trex.start_trex(**self.run_params)
        # Poll every `sample_rate` seconds until `condition` returns True.
        ret_val = self.trex.sample_until_condition(condition, sample_rate)
        print ret_val
        print termstyle.green("*** End of TRex run ***")
    except ValueError as inst:
        print termstyle.magenta("Provided illegal sample rate value. Please try again.\n[", inst,"]")
    except TRexException as inst:
        print termstyle.red(inst)
def start_master_daemon():
    """Spawn the master daemon in a background process and wait up to ~5s
    (50 polls at 0.1s) for it to report itself running; True on success."""
    if master_daemon.is_running():
        raise Exception('Master daemon is already running')
    daemon_proc = multiprocessing.Process(target=start_master_daemon_func)
    daemon_proc.daemon = True
    daemon_proc.start()
    for _attempt in range(50):
        if master_daemon.is_running():
            return True
        sleep(0.1)
    fail(termstyle.red('Master daemon failed to run. Please look in log: %s' % logging_file))
def do_run_and_poll (self, sample_rate):
    """Starts TRex and sample server manually until run is done."""
    print termstyle.green("*** Starting TRex run and manually poll scenario ***")
    if not sample_rate: # use default sample rate if not passed
        sample_rate = 5
    try:
        sample_rate = int(sample_rate)
        ret = self.trex.start_trex(**self.run_params)
        last_res = dict()
        # Manual polling loop: fetch a result object every `sample_rate`
        # seconds while the server still reports the run as active.
        while self.trex.is_running(dump_out = last_res):
            obj = self.trex.get_result_obj()
            if (self.verbose):
                print obj
            # do WHATEVER here
            time.sleep(sample_rate)
        print termstyle.green("*** End of TRex run ***")
    except ValueError as inst:
        print termstyle.magenta("Provided illegal sample rate value. Please try again.\n[", inst,"]")
    except TRexException as inst:
        print termstyle.red(inst)
def start_master_daemon():
    """Launch the master daemon in a detached child process; poll for up to
    ~5 seconds, exiting the process with status 0 once it is up."""
    if master_daemon.is_running():
        raise Exception('Master daemon is already running')
    worker = multiprocessing.Process(target=start_master_daemon_func)
    worker.daemon = True
    worker.start()
    attempts_left = 50
    while attempts_left:
        if master_daemon.is_running():
            print(termstyle.green('Master daemon is started'))
            os._exit(0)
        sleep(0.1)
        attempts_left -= 1
    fail(termstyle.red('Master daemon failed to run'))
def dump_info(fname, file_descriptor=None):
    """Return a one-line summary of a dump file: the number of dumps it
    contains, or a red-colored error description.

    When `file_descriptor` is given it is used as an already-open handle and
    is left open for the caller; otherwise the file is opened here, locked
    exclusively (non-blocking), and closed again.
    """
    info = red('unknown error')
    if not os.path.getsize(fname):
        info = red('empty file')
    else:
        try:
            if file_descriptor is None:
                handle = open(fname, 'rt')
                # Fail fast if a running bot currently holds the lock.
                fcntl.flock(handle, fcntl.LOCK_EX | fcntl.LOCK_NB)
            else:
                handle = file_descriptor
        except BlockingIOError:
            info = red('Dump file is locked.')
        except OSError as exc:
            info = red('unable to open file: {!s}'.format(exc))
        else:
            try:
                content = json.load(handle)
            except ValueError as exc:
                info = red('unable to load JSON: {!s}'.format(exc))
            else:
                try:
                    info = "{!s} dumps".format(len(content.keys()))
                except AttributeError as exc:
                    # Top-level JSON value is not an object (no .keys()).
                    info = red("unable to count dumps: {!s}".format(exc))
        finally:
            try:
                # Close only handles opened here; `handle` may be unbound
                # when open() itself failed, hence the NameError guard.
                if file_descriptor is None:
                    handle.close()
            except NameError:
                pass
    return info
def find_webserver_user():
    """Return the first of the usual webserver account names that exists on
    this system; exit with a red error message when none is found."""
    for name in ('www-data', 'wwwrun', 'httpd', 'apache'):
        try:
            getpwnam(name)
        except KeyError:
            continue
        print(f'Detected Apache username {name!r}.')
        return name
    sys.exit(red("Unable to detect Apache user name. "
                 "Please re-run this program and give the Apache user name with '--webserver-user'."))
def intelmqsetup_api(ownership: bool = True, webserver_user: Optional[str] = None):
    """Set up intelmq-api: configuration directories, api-config.json, the
    sudoers file and the manager positions file.

    Parameters:
        ownership: when True, chown created directories/files to 'intelmq'.
        webserver_user: account the webserver runs as; autodetected when
            not given.
    """
    intelmq_group = getgrnam('intelmq')
    webserver_user = webserver_user or find_webserver_user()

    create_directory(ETC_INTELMQ, 0o40775)
    if ownership:
        change_owner(CONFIG_DIR, group='intelmq')
        change_owner(ETC_INTELMQ, owner='intelmq', group='intelmq')

    # Manager configuration directory
    create_directory(ETC_INTELMQ_MANAGER, 0o40775)
    if ownership:
        change_owner(ETC_INTELMQ_MANAGER, group='intelmq')

    # Source files shipped with the installed intelmq_api package.
    base = Path(pkg_resources.resource_filename('intelmq_api', '')).parent
    api_config = base / 'etc/intelmq/api-config.json'
    etc_intelmq_config = ETC_INTELMQ / 'api-config.json'
    api_sudoers = base / 'etc/intelmq/api-sudoers.conf'
    etc_sudoers_api = Path('/etc/sudoers.d/01_intelmq-api')  # same path as used in the packages
    api_manager_positions = base / 'etc/intelmq/manager/positions.conf'
    etc_intelmq_manager_positions = ETC_INTELMQ_MANAGER / 'positions.conf'

    if not base.as_posix().startswith('/usr/'):
        # Paths differ in editable installations
        print(red("Detected an editable (egg-link) pip-installation of 'intelmq-api'. Some feature of this program may not work."))

    # api-config.json: copy the packaged default unless already installed.
    if api_config.exists() and not etc_intelmq_config.exists():
        shutil.copy(api_config, etc_intelmq_config)
        print(f'Copied {api_config!s} to {ETC_INTELMQ!s}.')
    elif not api_config.exists() and not etc_intelmq_config.exists():
        print(red(f'Unable to install api-config.json: Neither {api_config!s} nor {etc_intelmq_config!s} exists.'))

    # Sudoers file: substitute the detected webserver user, validate the
    # result with visudo, then install with root:root 0440 permissions.
    if api_sudoers.exists() and not etc_sudoers_api.exists():
        with open(api_sudoers) as sudoers:
            original_sudoers = sudoers.read()
        sudoers = original_sudoers.replace('www-data', webserver_user)
        with NamedTemporaryFile(mode='w') as tmp_file:
            tmp_file.write(sudoers)
            tmp_file.flush()
            try:
                run(('visudo', '-c', tmp_file.name))
            except CalledProcessError:
                sys.exit(red('Fatal error: Validation of adapted sudoers-file failed. '
                             'Please report this bug.'))
            change_owner(tmp_file.name, owner='root', group='root', log=False)
            Path(tmp_file.name).chmod(0o440)
            shutil.copy(tmp_file.name, etc_sudoers_api)
        print(f'Copied {api_sudoers!s} to {etc_sudoers_api!s}.')
    elif not api_sudoers.exists() and not etc_sudoers_api.exists():
        print(red(f'Unable to install api-sudoers.conf: Neither {api_sudoers!s} nor {etc_sudoers_api!s} exists.'))

    # Manager positions.conf: install writable by the intelmq group.
    if api_manager_positions.exists() and not etc_intelmq_manager_positions.exists():
        shutil.copy(api_manager_positions, etc_intelmq_manager_positions)
        print(f'Copied {api_manager_positions!s} to {etc_intelmq_manager_positions!s}.')
        etc_intelmq_manager_positions.chmod(0o664)
        change_owner(etc_intelmq_manager_positions, owner='intelmq', group='intelmq', log=False)
    elif not api_manager_positions.exists() and not etc_intelmq_manager_positions.exists():
        print(red(f'Unable to install positions.conf: Neither {api_manager_positions!s} nor {etc_intelmq_manager_positions!s} exists.'))

    # The webserver account must be able to act as part of the intelmq group.
    if webserver_user not in intelmq_group.gr_mem:
        sys.exit(red(f"Webserver user {webserver_user} is not a member of the 'intelmq' group. "
                     f"Please add it with: 'usermod -aG intelmq {webserver_user}'."))
def start_master_daemon():
    """Start the master daemon as a background process and poll until it is
    running (up to 50 checks, 0.1s apart); True on success, fail() otherwise."""
    if master_daemon.is_running():
        raise Exception('Master daemon is already running')

    background = multiprocessing.Process(target=start_master_daemon_func)
    background.daemon = True
    background.start()

    checks = 0
    while checks < 50:
        if master_daemon.is_running():
            return True
        sleep(0.1)
        checks += 1
    fail(termstyle.red(
        'Master daemon failed to run. Please look in log: %s' % logging_file))
def basic_checks(skip_ownership):
    """Abort with a red error message unless the preconditions for setup
    hold: run as root (unless ownership changes are skipped), installed via
    pip, and the 'intelmq' user and group exist."""
    if os.geteuid() != 0 and not skip_ownership:
        sys.exit(red('You need to run this program as root for setting file ownership!'))
    if not ROOT_DIR:
        sys.exit(red('Not a pip-installation of IntelMQ, nothing to initialize.'))
    if skip_ownership:
        # Ownership is not touched, so the account checks are unnecessary.
        return
    account_checks = (
        (getpwnam, "User 'intelmq' does not exist. Please create it and then re-run this program."),
        (getgrnam, "Group 'intelmq' does not exist. Please create it and then re-run this program."),
    )
    for lookup, error_message in account_checks:
        try:
            lookup('intelmq')
        except KeyError:
            sys.exit(red(error_message))
def do_run_and_poll(self, sample_rate):
    """Starts TRex and sample server manually until run is done."""
    print termstyle.green(
        "*** Starting TRex run and manually poll scenario ***")
    if not sample_rate:  # use default sample rate if not passed
        sample_rate = 5
    try:
        sample_rate = int(sample_rate)
        ret = self.trex.start_trex(**self.run_params)
        last_res = dict()
        # Poll loop: is_running() fills last_res with the newest dump; fetch
        # a result object each round while the run is still active.
        while self.trex.is_running(dump_out=sample_rate is None and None or last_res):
            obj = self.trex.get_result_obj()
            if (self.verbose):
                print obj
            # do WHATEVER here
            time.sleep(sample_rate)
        print termstyle.green("*** End of TRex run ***")
    except ValueError as inst:
        # int() conversion of the user-supplied rate failed.
        print termstyle.magenta(
            "Provided illegal sample rate value. Please try again.\n[", inst,
            "]")
    except TRexException as inst:
        print termstyle.red(inst)
def intelmqsetup_api_webserver_configuration(webserver_configuration_directory: Optional[str] = None):
    """Install the intelmq-api Apache configuration snippet.

    Copies the packaged 'api-apache.conf' into the webserver configuration
    directory (autodetected when not given), activates it on Debian, and
    flags that the webserver needs a reload.
    """
    webserver_configuration_dir = webserver_configuration_directory or find_webserver_configuration_directory()
    api_config = Path(pkg_resources.resource_filename('intelmq_api', '')).parent / 'etc/intelmq/api-apache.conf'
    apache_api_config = webserver_configuration_dir / 'api-apache.conf'
    if api_config.exists() and not apache_api_config.exists():
        shutil.copy(api_config, apache_api_config)
        # Bug fix: report the actual destination (the webserver config
        # directory), not ETC_INTELMQ -- consistent with the manager setup.
        print(f'Copied {api_config!s} to {apache_api_config!s}.')
        debian_activate_apache_config('api-apache.conf')
        global NOTE_WEBSERVER_RELOAD
        NOTE_WEBSERVER_RELOAD = True
    elif not api_config.exists() and not apache_api_config.exists():
        print(red(f'Unable to install webserver configuration api-config.conf: Neither {api_config!s} nor {apache_api_config!s} exists.'))
    print('Setup of intelmq-api successful.')
def test_report_watchstate(self):
    """Watch-state changes are reported one per line, color-coded by kind
    (green=created, yellow=modified, red=deleted), plus the scan time."""
    f = io.StringIO()
    r = TerminalReporter(watch_path=None, build_path=None,
                         terminal=Terminal(stream=f))
    r.report_watchstate(
        WatchState(['create'], ['delete'], ['modify'], 1.0)
    )
    assert f.getvalue() == os.linesep.join([
        termstyle.green('# CREATED create'),
        termstyle.yellow('# MODIFIED modify'),
        termstyle.red('# DELETED delete'),
        '### Scan time: 1.000s',
    ]) + os.linesep
def s(*a, **kw):
    # Trace wrapper: print the decorated call (name plus bold-highlighted
    # positional and keyword arguments) to stderr -- green on success, red
    # plus a traceback on any exception. The return value passes through.
    desc = "\t%s %s" % (prefix, ' '.join([
        name,
        termstyle.bold(' '.join(map(repr, a))),
        ' '.join(["%s=%r" % (k, termstyle.bold(v)) for k, v in kw.items()])
    ]))
    try:
        ret = func(*a, **kw)
        print >> sys.stderr, termstyle.green(desc)
        return ret
    except:
        # Log in red, then re-raise so callers still see the original error.
        print >> sys.stderr, termstyle.red(desc)
        import traceback
        traceback.print_exc(file=sys.stderr)
        raise
def intelmqsetup_manager_webserver_configuration(webserver_configuration_directory: Optional[str] = None):
    """Install the intelmq-manager Apache configuration snippet into the
    webserver configuration directory (autodetected when not given)."""
    webserver_configuration_dir = webserver_configuration_directory or find_webserver_configuration_directory()
    manager_config_1 = Path(pkg_resources.resource_filename('intelmq_manager', '')).parent / 'etc/intelmq/manager-apache.conf'
    # IntelMQ Manager >= 3.1.0
    manager_config_2 = Path(pkg_resources.resource_filename('intelmq_manager', '')) / 'manager-apache.conf'
    # Prefer the new (>= 3.1.0) in-package location, fall back to the old one.
    manager_config = manager_config_2 if manager_config_2.exists() else manager_config_1
    apache_manager_config = webserver_configuration_dir / 'manager-apache.conf'
    if manager_config.exists() and not apache_manager_config.exists():
        shutil.copy(manager_config, apache_manager_config)
        print(f'Copied {manager_config!s} to {apache_manager_config!s}.')
        debian_activate_apache_config('manager-apache.conf')
        # Signal the caller that the webserver needs a reload.
        global NOTE_WEBSERVER_RELOAD
        NOTE_WEBSERVER_RELOAD = True
    elif not manager_config.exists() and not apache_manager_config.exists():
        print(red(f'Unable to install webserver configuration manager-config.conf: Neither {manager_config_1!s} nor {manager_config_2!s} nor {apache_manager_config!s} exist.'))
def find_webserver_configuration_directory():
    """Detect the Apache configuration directory, caching the result in the
    module-level WEBSERVER_CONFIG_DIR; exit with an error when none of the
    known candidate paths exists."""
    global WEBSERVER_CONFIG_DIR
    if WEBSERVER_CONFIG_DIR:
        return WEBSERVER_CONFIG_DIR
    webserver_configuration_dir_candidates = (Path('/etc/apache2/conf-available/'),
                                              Path('/etc/apache2/conf.d/'),
                                              Path('/etc/httpd/conf.d/'))
    for candidate in webserver_configuration_dir_candidates:
        if candidate.exists():
            print(f'Detected Apache configuration directory {candidate!s}.')
            WEBSERVER_CONFIG_DIR = candidate
            # Bug fix: removed a stray no-op `…as_posix` attribute access
            # (the method was referenced but never called).
            return candidate
    sys.exit(red("Unable to detect Apache configuration directory. "
                 "Please re-run this program and give the Apache configuration directory with '--webserver-configuration-directory'."))
def test_report_watchstate(self):
    """report_watchstate() writes one color-coded line per change kind plus
    the scan time, separated by the platform line separator."""
    stream = io.StringIO()
    reporter = TerminalReporter(
        watch_path=None, build_path=None, terminal=Terminal(stream=stream)
    )
    reporter.report_watchstate(WatchState(["create"], ["delete"], ["modify"], 1.0))
    expected_lines = [
        termstyle.green("# CREATED create"),
        termstyle.yellow("# MODIFIED modify"),
        termstyle.red("# DELETED delete"),
        "### Scan time: 1.000s",
    ]
    assert stream.getvalue() == os.linesep.join(expected_lines) + os.linesep
def _summarize(self):
    """summarize all tests - the number of failures, errors and successes"""
    # Separator line, then "N tests run in X seconds".
    self._line(termstyle.black)
    self._out("%s test%s run in %0.1f seconds" % (self.total, self._plural(
        self.total), time.time() - self.start_time))
    if self.total > self.success:
        # At least one non-success: list failures/errors/skips, color-coded.
        self._outln(". ")
        additionals = []
        if self.failure > 0:
            additionals.append(
                termstyle.red("%s FAILED" % (self.failure, )))
        if self.error > 0:
            additionals.append(
                termstyle.yellow("%s error%s" % (self.error,
                                                 self._plural(self.error))))
        if self.skip > 0:
            additionals.append(termstyle.blue("%s skipped" % (self.skip)))
        self._out(', '.join(additionals))
    # Always append the green passed-count tail.
    self._out(
        termstyle.green(" (%s test%s passed)" % (self.success,
                                                 self._plural(self.success))))
    self._outln()
def red(self, text):
    """Reset any pending color state, then return *text* colored red."""
    self._restoreColor()
    colored = termstyle.red(text)
    return colored
def red(self, text):
    """Reset any pending color state, then return *text* marked up in red --
    as an HTML span in HTML mode, otherwise with terminal escape codes."""
    self._restoreColor()
    if not self.html:
        return termstyle.red(text)
    return '<span style="color: rgb(237,73,62)">{}</span>'.format(text)
:: `:. .:' :: `;..``::::''..;' ``::,,,,::'' ___ ___ __________ / _ \/ _ | / __/ __/ / / ___/ __ |_\ \_\ \/_/ /_/ /_/ |_/___/___(_) """)) sys.exit(0) else: print(termstyle.red(""" /\_/\ ( o.o ) > ^ < This cat is sad, test failed. """)) sys.exit(-1)
def test_wraps_arg_as_line_in_colour(self):
    # writeln() must apply the given colour function to its argument and
    # append a newline. The assertion uses the `should` DSL.
    stream = ColourWritelnDecorator(StringIO())
    stream.writeln('{*}', colour=termstyle.red)
    stream.stream.getvalue() |should| be_equal_to(termstyle.red('{*}') + '\n')
def main():
    """Interactive command-line tool to inspect, recover or delete intelmq
    dump files.

    Lists the dump files (or uses the botid given on the command line) and
    then loops over user actions: (r)ecover entries into a queue, recover
    (a)ll, (e)rase entries, (s)how entries, (d)elete the file, or (q)uit.
    """
    parser = argparse.ArgumentParser(
        prog=APPNAME,
        formatter_class=argparse.RawDescriptionHelpFormatter,
        usage=USAGE,
        description=DESCRIPTION,
        epilog=EPILOG,
    )
    parser.add_argument('botid', metavar='botid', nargs='?',
                        default=None, help='botid to inspect dumps of')
    args = parser.parse_args()
    if args.botid is None:
        # No bot given: list all dump files and let the user pick one.
        filenames = glob.glob(os.path.join(DEFAULT_LOGGING_PATH, '*.dump'))
        if not len(filenames):
            print(green('Nothing to recover from, no dump files found!'))
            exit(0)
        filenames = [(fname, fname[len(DEFAULT_LOGGING_PATH):-5])
                     for fname in sorted(filenames)]
        length = max([len(value[1]) for value in filenames])
        print(bold("{c:>3}: {s:{l}} {i}".format(c='id', s='name (bot id)',
                                                i='content', l=length)))
        for count, (fname, shortname) in enumerate(filenames):
            info = dump_info(fname)
            print("{c:3}: {s:{l}} {i}".format(c=count, s=shortname, i=info,
                                              l=length))
        botid = input(inverted('Which dump file to process (id or name)? '))
        botid = botid.strip()
        if botid == 'q' or not botid:
            exit(0)
        try:
            fname, botid = filenames[int(botid)]
        except ValueError:
            # Not a numeric id: treat the input as a bot id.
            fname = os.path.join(DEFAULT_LOGGING_PATH, botid) + '.dump'
    else:
        botid = args.botid
        fname = os.path.join(DEFAULT_LOGGING_PATH, botid) + '.dump'
    if not os.path.isfile(fname):
        print(bold('Given file does not exist: {}'.format(fname)))
        exit(1)
    while True:
        info = dump_info(fname)
        print('Processing {}: {}'.format(bold(botid), info))
        try:
            with io.open(fname, 'rt') as handle:
                content = json.load(handle)
                meta = load_meta(content)
        except ValueError:
            # Broken dump: only offer the always-available actions.
            available_opts = [item[0] for item in ACTIONS.values() if item[2]]
            print(bold('Could not load file:') + '\n{}\nRestricted actions.'
                  ''.format(traceback.format_exc()))
        else:
            available_opts = [item[0] for item in ACTIONS.values()]
            for count, line in enumerate(meta):
                print('{:3}: {} {}'.format(count, *line))
        answer = input(inverted(', '.join(available_opts) + '? ')).split()
        if not answer:
            continue
        # Bug fix: an id-taking action typed without ids used to raise
        # IndexError on answer[1]; ids now defaults to an empty list.
        if any([answer[0] == char for char in AVAILABLE_IDS]) and len(answer) > 1:
            ids = [int(item) for item in answer[1].split(',')]
        else:
            ids = []
        queue_name = None
        if answer[0] == 'a':
            # recover all -> recover all by ids
            answer[0] = 'r'
            ids = range(len(meta))
            if len(answer) > 1:
                queue_name = answer[1]
        if answer[0] == 'q':
            break
        elif answer[0] == 'e':
            # Delete entries
            for entry in ids:
                del content[meta[entry][0]]
            save_file(fname, content)
        elif answer[0] == 'r':
            # recover entries
            for key, entry in [item for (count, item)
                               in enumerate(content.items()) if count in ids]:
                if type(entry['message']) is dict:
                    if '__type' in entry['message']:
                        msg = json.dumps(entry['message'])
                    # backwards compat: dumps had no type info
                    elif '-parser' in entry['bot_id']:
                        msg = message.Report(entry['message']).serialize()
                    else:
                        msg = message.Event(entry['message']).serialize()
                elif issubclass(type(entry['message']),
                                (six.binary_type, six.text_type)):
                    msg = entry['message']
                elif entry['message'] is None:
                    print(bold('No message here, deleting directly.'))
                    del content[key]
                    save_file(fname, content)
                    continue
                else:
                    print(bold('Unhandable type of message: {!r}'
                               ''.format(type(entry['message']))))
                    continue
                print(entry['source_queue'])
                default = utils.load_configuration(DEFAULTS_CONF_FILE)
                runtime = utils.load_configuration(RUNTIME_CONF_FILE)
                params = utils.load_parameters(default, runtime)
                pipe = pipeline.PipelineFactory.create(params)
                if queue_name is None:
                    # Bug fix (off-by-one): an explicit queue name is the
                    # *third* token ('r <ids> <queue>'), so it is present
                    # when len(answer) == 3, not 2.
                    if len(answer) == 3:
                        queue_name = answer[2]
                    else:
                        queue_name = entry['source_queue']
                try:
                    pipe.set_queues(queue_name, 'destination')
                    pipe.connect()
                    pipe.send(msg)
                except exceptions.PipelineError:
                    print(red('Could not reinject into queue {}: {}'
                              ''.format(queue_name, traceback.format_exc())))
                else:
                    # Only drop the entry once it was sent successfully.
                    del content[key]
                    save_file(fname, content)
        elif answer[0] == 'd':
            # delete dumpfile
            os.remove(fname)
            print('Deleted file {}'.format(fname))
            break
        elif answer[0] == 's':
            # Show entries by id
            for count, (key, value) in enumerate(content.items()):
                if count not in ids:
                    continue
                print('=' * 100, '\nShowing id {} {}\n'.format(count, key),
                      '-' * 50)
                if isinstance(value['message'],
                              (six.binary_type, six.text_type)):
                    value['message'] = json.loads(value['message'])
                if ('raw' in value['message']
                        and len(value['message']['raw']) > 1000):
                    value['message']['raw'] = value['message'][
                        'raw'][:1000] + '...[truncated]'
                value['traceback'] = value['traceback'].splitlines()
                pprint.pprint(value)
def format_unknown(r, parts):
    """Log an error for a record with an unrecognized type and render
    nothing for it."""
    unknown_type = r.get('type')
    logging.error(red('Unknown type: "%s"' % unknown_type))
    return ''
def main():
    """Interactive tool to inspect, recover or delete intelmq dump files.

    Presents a menu of dump files (or uses the botid from the command line),
    takes an exclusive lock on the selected file, then loops over user
    actions: (r)ecover entries into a pipeline queue, recover (a)ll,
    (e)rase entries, (s)how entries, (d)elete the file, or (q)uit.
    """
    parser = argparse.ArgumentParser(
        prog=APPNAME,
        formatter_class=argparse.RawDescriptionHelpFormatter,
        usage=USAGE,
        description=DESCRIPTION,
        epilog=EPILOG,
    )
    parser.add_argument('botid', metavar='botid', nargs='?', default=None,
                        help='botid to inspect dumps of')
    args = parser.parse_args()

    # Try to get log_level from defaults_configuration, else use default
    try:
        log_level = utils.load_configuration(DEFAULTS_CONF_FILE)['logging_level']
    except Exception:
        log_level = DEFAULT_LOGGING_LEVEL
    try:
        logger = utils.log('intelmqdump', log_level=log_level)
    except (FileNotFoundError, PermissionError) as exc:
        # Fall back to console-only logging, e.g. when the log file is not writable.
        logger = utils.log('intelmqdump', log_level=log_level, log_path=False)
        logger.error('Not logging to file: %s', exc)

    ctl = intelmqctl.IntelMQController()
    # Tab completion for bot ids and actions.
    readline.parse_and_bind("tab: complete")
    readline.set_completer_delims('')

    # Map source queue name -> bot id, used to auto-convert messages on recovery.
    pipeline_config = utils.load_configuration(PIPELINE_CONF_FILE)
    pipeline_pipes = {}
    for bot, pipes in pipeline_config.items():
        pipeline_pipes[pipes.get('source-queue', '')] = bot

    if args.botid is None:
        # No bot given: list all dump files and let the user choose.
        filenames = glob.glob(os.path.join(DEFAULT_LOGGING_PATH, '*.dump'))
        if not len(filenames):
            print(green('Nothing to recover from, no dump files found!'))
            sys.exit(0)
        filenames = [(fname, fname[len(DEFAULT_LOGGING_PATH):-5])
                     for fname in sorted(filenames)]
        length = max([len(value[1]) for value in filenames])
        print(bold("{c:>3}: {s:{length}} {i}".format(c='id', s='name (bot id)',
                                                     i='content', length=length)))
        for count, (fname, shortname) in enumerate(filenames):
            info = dump_info(fname)
            print("{c:3}: {s:{length}} {i}".format(c=count, s=shortname, i=info,
                                                   length=length))
        try:
            bot_completer = Completer(possible_values=[f[1] for f in filenames])
            readline.set_completer(bot_completer.complete)
            botid = input(inverted('Which dump file to process (id or name)?') + ' ')
        except EOFError:
            sys.exit(0)
        else:
            botid = botid.strip()
            if botid == 'q' or not botid:
                exit(0)
        try:
            fname, botid = filenames[int(botid)]
        except ValueError:
            # Not a numeric id: treat the input as a bot id.
            fname = os.path.join(DEFAULT_LOGGING_PATH, botid) + '.dump'
    else:
        botid = args.botid
        fname = os.path.join(DEFAULT_LOGGING_PATH, botid) + '.dump'

    if not os.path.isfile(fname):
        print(bold('Given file does not exist: {}'.format(fname)))
        exit(1)

    answer = None
    delete_file = False
    while True:
        with open(fname, 'r+') as handle:
            try:
                # Take an exclusive, non-blocking lock so a running bot
                # cannot write the dump while we are editing it.
                fcntl.flock(handle, fcntl.LOCK_EX | fcntl.LOCK_NB)
            except BlockingIOError:
                print(red('Dump file is currently locked. Stopping.'))
                break
            info = dump_info(fname, file_descriptor=handle)
            handle.seek(0)
            available_answers = ACTIONS.keys()
            print('Processing {}: {}'.format(bold(botid), info))

            # A red-prefixed info string signals a broken/unreadable dump;
            # presumably str(red) yields the ANSI prefix -- TODO confirm.
            if info.startswith(str(red)):
                available_opts = [item[0] for item in ACTIONS.values() if item[2]]
                available_answers = [k for k, v in ACTIONS.items() if v[2]]
                print('Restricted actions.')
            else:
                # don't display list after 'show' and 'recover' command
                if not (answer and isinstance(answer, list) and answer[0] in ['s', 'r']):
                    content = json.load(handle)
                    handle.seek(0)
                    content = OrderedDict(sorted(content.items(), key=lambda t: t[0]))  # sort by key here, #1280
                    meta = load_meta(content)
                    available_opts = [item[0] for item in ACTIONS.values()]
                    for count, line in enumerate(meta):
                        print('{:3}: {} {}'.format(count, *line))

            # Determine bot status
            try:
                bot_status = ctl.bot_status(botid)
                if bot_status[1] == 'running':
                    print(red('This bot is currently running, the dump file is now locked and '
                              'the bot can\'t write it.'))
            except KeyError:
                bot_status = 'error'
                print(red('Attention: This bot is not defined!'))
                available_opts = [item[0] for item in ACTIONS.values() if item[2]]
                available_answers = [k for k, v in ACTIONS.items() if v[2]]
                print('Restricted actions.')

            try:
                # Offer completion with a trailing space after id-taking actions.
                possible_answers = list(available_answers)
                for id_action in ['r', 'a']:
                    if id_action in possible_answers:
                        possible_answers[possible_answers.index(id_action)] = id_action + ' '
                action_completer = Completer(possible_answers, queues=pipeline_pipes.keys())
                readline.set_completer(action_completer.complete)
                answer = input(inverted(', '.join(available_opts) + '?') + ' ').split()
            except EOFError:
                break
            else:
                if not answer:
                    continue
            if len(answer) == 0 or answer[0] not in available_answers:
                print('Action not allowed.')
                continue
            # Parse the comma-separated entry ids following an id-taking action.
            if any([answer[0] == char for char in AVAILABLE_IDS]) and len(answer) > 1:
                ids = [int(item) for item in answer[1].split(',')]
            else:
                ids = []
            queue_name = None
            if answer[0] == 'a':
                # recover all -> recover all by ids
                answer[0] = 'r'
                ids = range(len(meta))
                if len(answer) > 1:
                    queue_name = answer[1]
            if answer[0] == 'q':
                break
            elif answer[0] == 'e':
                # Delete entries
                for entry in ids:
                    del content[meta[entry][0]]
                save_file(handle, content)
            elif answer[0] == 'r':
                # recover entries
                default = utils.load_configuration(DEFAULTS_CONF_FILE)
                runtime = utils.load_configuration(RUNTIME_CONF_FILE)
                params = utils.load_parameters(default, runtime)
                pipe = pipeline.PipelineFactory.create(params, logger)
                try:
                    for i, (key, entry) in enumerate([item for (count, item)
                                                      in enumerate(content.items())
                                                      if count in ids]):
                        if entry['message']:
                            msg = copy.copy(entry['message'])  # otherwise the message field gets converted
                            if isinstance(msg, dict):
                                msg = json.dumps(msg)
                        else:
                            print('No message here, deleting entry.')
                            del content[key]
                            continue

                        if queue_name is None:
                            if len(answer) == 3:
                                queue_name = answer[2]
                            else:
                                queue_name = entry['source_queue']
                        if queue_name in pipeline_pipes:
                            # Parsers consume Reports; convert if the dump holds an Event.
                            if runtime[pipeline_pipes[queue_name]]['group'] == 'Parser' and json.loads(msg)['__type'] == 'Event':
                                print('Event converted to Report automatically.')
                                msg = message.Report(message.MessageFactory.unserialize(msg)).serialize()
                        try:
                            pipe.set_queues(queue_name, 'destination')
                            pipe.connect()
                            pipe.send(msg)
                        except exceptions.PipelineError:
                            print(red('Could not reinject into queue {}: {}'
                                      ''.format(queue_name, traceback.format_exc())))
                        else:
                            del content[key]
                            print(green('Recovered dump {}.'.format(i)))
                finally:
                    # Persist whatever was recovered even if sending failed midway.
                    save_file(handle, content)
                if not content:
                    delete_file = True
                    print('Deleting empty file {}'.format(fname))
                    break
            elif answer[0] == 'd':
                # delete dumpfile
                delete_file = True
                # NOTE(review): the file need not be empty on explicit delete;
                # message looks copy-pasted from the branch above -- confirm wording.
                print('Deleting empty file {}'.format(fname))
                break
            elif answer[0] == 's':
                # Show entries by id
                for count, (key, orig_value) in enumerate(content.items()):
                    value = copy.copy(orig_value)  # otherwise the raw field gets truncated
                    if count not in ids:
                        continue
                    print('=' * 100, '\nShowing id {} {}\n'.format(count, key),
                          '-' * 50)
                    if isinstance(value['message'], (bytes, str)):
                        value['message'] = json.loads(value['message'])
                    if ('raw' in value['message'] and
                            len(value['message']['raw']) > 1000):
                        value['message']['raw'] = value['message'][
                            'raw'][:1000] + '...[truncated]'
                    if type(value['traceback']) is not list:
                        value['traceback'] = value['traceback'].splitlines()
                    pprint.pprint(value)
    # Deferred deletion: the lock (held via the open handle) is released first.
    if delete_file:
        os.remove(fname)
def main():
    """Interactively inspect, recover or delete intelmq dump files.

    Without a ``botid`` argument, lists all ``*.dump`` files found in
    ``DEFAULT_LOGGING_PATH`` and asks which one to process.  Then loops
    over the ACTIONS menu (quit / delete entries / recover / delete file /
    show entries) until the user quits.
    """
    parser = argparse.ArgumentParser(
        prog=APPNAME,
        formatter_class=argparse.RawDescriptionHelpFormatter,
        usage=USAGE,
        description=DESCRIPTION,
        epilog=EPILOG,
    )
    parser.add_argument('botid', metavar='botid', nargs='?',
                        default=None, help='botid to inspect dumps of')
    args = parser.parse_args()
    if args.botid is None:
        # No bot given: present a numbered list of all dump files.
        filenames = glob.glob(os.path.join(DEFAULT_LOGGING_PATH, '*.dump'))
        if not len(filenames):
            print(green('Nothing to recover from, no dump files found!'))
            exit(0)
        # (path, bot id) pairs; bot id = file name minus path and '.dump'.
        filenames = [(fname, fname[len(DEFAULT_LOGGING_PATH):-5])
                     for fname in sorted(filenames)]
        length = max([len(value[1]) for value in filenames])
        print(bold("{c:>3}: {s:{l}} {i}".format(c='id', s='name (bot id)',
                                                i='content', l=length)))
        for count, (fname, shortname) in enumerate(filenames):
            info = dump_info(fname)
            print("{c:3}: {s:{l}} {i}".format(c=count, s=shortname, i=info,
                                              l=length))
        botid = input(inverted('Which dump file to process (id or name)? '))
        botid = botid.strip()
        if botid == 'q' or not botid:
            exit(0)
        try:
            # A numeric answer selects by position in the listing ...
            fname, botid = filenames[int(botid)]
        except ValueError:
            # ... anything else is taken as a bot id.
            fname = os.path.join(DEFAULT_LOGGING_PATH, botid) + '.dump'
    else:
        botid = args.botid
        fname = os.path.join(DEFAULT_LOGGING_PATH, botid) + '.dump'

    if not os.path.isfile(fname):
        print(bold('Given file does not exist: {}'.format(fname)))
        exit(1)

    while True:
        info = dump_info(fname)
        print('Processing {}: {}'.format(bold(botid), info))
        try:
            with io.open(fname, 'rt') as handle:
                content = json.load(handle)
            meta = load_meta(content)
        except ValueError:
            # Unparseable file: restrict the menu to actions flagged safe.
            available_opts = [item[0] for item in ACTIONS.values() if item[2]]
            print(bold('Could not load file:') + '\n{}\nRestricted actions.'
                  ''.format(traceback.format_exc()))
        else:
            available_opts = [item[0] for item in ACTIONS.values()]
            for count, line in enumerate(meta):
                print('{:3}: {} {}'.format(count, *line))
        answer = input(inverted(', '.join(available_opts) + '? ')).split()
        if not answer:
            continue
        # Fix: only parse ids when a second argument is actually given, and
        # always define `ids` so later branches cannot raise a NameError.
        if any([answer[0] == char for char in AVAILABLE_IDS]) and len(answer) > 1:
            ids = [int(item) for item in answer[1].split(',')]
        else:
            ids = []
        queue_name = None
        if answer[0] == 'a':
            # recover all -> recover all by ids
            answer[0] = 'r'
            ids = range(len(meta))
            if len(answer) > 1:
                queue_name = answer[1]
        if answer[0] == 'q':
            break
        elif answer[0] == 'e':
            # Delete entries
            for entry in ids:
                del content[meta[entry][0]]
            save_file(fname, content)
        elif answer[0] == 'r':
            # recover entries: serialize each message and push it back into
            # its (or the explicitly given) source queue.
            # Load configuration and build the pipeline once, not per entry.
            default = utils.load_configuration(DEFAULTS_CONF_FILE)
            runtime = utils.load_configuration(RUNTIME_CONF_FILE)
            params = utils.load_parameters(default, runtime)
            pipe = pipeline.PipelineFactory.create(params)
            for key, entry in [item for (count, item)
                               in enumerate(content.items()) if count in ids]:
                if type(entry['message']) is dict:
                    if '__type' in entry['message']:
                        msg = json.dumps(entry['message'])
                    # backwards compat: dumps had no type info
                    elif '-parser' in entry['bot_id']:
                        msg = message.Report(entry['message']).serialize()
                    else:
                        msg = message.Event(entry['message']).serialize()
                elif issubclass(type(entry['message']),
                                (six.binary_type, six.text_type)):
                    msg = entry['message']
                elif entry['message'] is None:
                    print(bold('No message here, deleting directly.'))
                    del content[key]
                    save_file(fname, content)
                    continue
                else:
                    print(bold('Unhandable type of message: {!r}'
                               ''.format(type(entry['message']))))
                    continue
                print(entry['source_queue'])
                if queue_name is None:
                    # Fix: the optional queue override is the *third* token
                    # (answer[2]); the original tested `len(answer) == 2`
                    # and then raised an IndexError on answer[2].
                    if len(answer) == 3:
                        queue_name = answer[2]
                    else:
                        queue_name = entry['source_queue']
                try:
                    pipe.set_queues(queue_name, 'destination')
                    pipe.connect()
                    pipe.send(msg)
                except exceptions.PipelineError:
                    print(red('Could not reinject into queue {}: {}'
                              ''.format(queue_name, traceback.format_exc())))
                else:
                    # Only drop the entry after a successful send.
                    del content[key]
                    save_file(fname, content)
        elif answer[0] == 'd':
            # delete dumpfile
            os.remove(fname)
            print('Deleted file {}'.format(fname))
            break
        elif answer[0] == 's':
            # Show entries by id
            for count, (key, value) in enumerate(content.items()):
                if count not in ids:
                    continue
                print('=' * 100, '\nShowing id {} {}\n'.format(count, key),
                      '-' * 50)
                if isinstance(value['message'],
                              (six.binary_type, six.text_type)):
                    value['message'] = json.loads(value['message'])
                # Truncate huge raw payloads for readability.
                if ('raw' in value['message'] and
                        len(value['message']['raw']) > 1000):
                    value['message']['raw'] = value['message'][
                        'raw'][:1000] + '...[truncated]'
                value['traceback'] = value['traceback'].splitlines()
                pprint.pprint(value)
def test_report_multiple_failed(self): f = io.StringIO() r = TerminalReporter( watch_path="/path", build_path=None, terminal=Terminal(stream=f) ) results = { "total_runtime": 2.09, "total_passed": 0, "total_failed": 2, "failures": [ [ "fail1", [ "/path/to/file:12: blah", "results line 2", "results line 3", "results line 4", ], [], FAILED, ], [ "fail2", [ "/path/to/file:102: blah", "results line 2", "results line 3", "results line 4", ], [], FAILED, ], ], } r.report_results(results) expected = [ "================================== FAILURES ==================================", # noqa termstyle.bold( termstyle.red( "___________________________________ fail1 ____________________________________" # noqa ) ), "/path/to/file:12: blah", "results line 2", "results line 3", "results line 4", termstyle.bold( termstyle.red( "_________________________________ to/file:12 _________________________________" # noqa ) ), termstyle.bold( termstyle.red( "___________________________________ fail2 ____________________________________" # noqa ) ), "/path/to/file:102: blah", "results line 2", "results line 3", "results line 4", termstyle.bold( termstyle.red( "________________________________ to/file:102 _________________________________" # noqa ) ), termstyle.bold( termstyle.red( "===================== 2 failed, 0 passed in 2.09 seconds =====================" # noqa ) ), ] actual = f.getvalue().splitlines() assert actual == expected
def test_wraps_arg_as_line_in_colour(self): stream = ColourWritelnDecorator(StringIO()) stream.writeln('{*}', colour=termstyle.red) stream.stream.getvalue() | should | be_equal_to( termstyle.red('{*}') + '\n')
def main():
    """Entry point of intelmqdump: interactively manage a bot's dump file.

    Lists the dump files found in DEFAULT_LOGGING_PATH (or uses the bot
    given on the command line), then loops over the ACTIONS menu letting
    the user show, recover, edit or delete dumped messages.  The dump
    file is held under an exclusive flock while it is processed.
    """
    parser = argparse.ArgumentParser(
        prog=APPNAME,
        formatter_class=argparse.RawDescriptionHelpFormatter,
        usage=USAGE,
        description=DESCRIPTION,
        epilog=EPILOG,
    )
    parser.add_argument('botid', metavar='botid', nargs='?',
                        default=None, help='botid to inspect dumps of')
    args = parser.parse_args()

    # Try to get log_level from defaults_configuration, else use default
    try:
        log_level = utils.load_configuration(DEFAULTS_CONF_FILE)['logging_level']
    except Exception:
        log_level = DEFAULT_LOGGING_LEVEL
    try:
        logger = utils.log('intelmqdump', log_level=log_level)
    except (FileNotFoundError, PermissionError) as exc:
        # Fall back to console-only logging when the log file is unavailable.
        logger = utils.log('intelmqdump', log_level=log_level, log_path=False)
        logger.error('Not logging to file: %s', exc)
    ctl = intelmqctl.IntelMQController()
    readline.parse_and_bind("tab: complete")
    readline.set_completer_delims('')
    # Map each bot's source queue name to its bot id; used for queue tab
    # completion and for the Parser/Event conversion during recovery.
    pipeline_config = utils.load_configuration(PIPELINE_CONF_FILE)
    pipeline_pipes = {}
    for bot, pipes in pipeline_config.items():
        pipeline_pipes[pipes.get('source-queue', '')] = bot

    if args.botid is None:
        filenames = glob.glob(os.path.join(DEFAULT_LOGGING_PATH, '*.dump'))
        if not len(filenames):
            print(green('Nothing to recover from, no dump files found!'))
            sys.exit(0)
        # (path, bot id) pairs; bot id = file name minus path and '.dump'.
        filenames = [(fname, fname[len(DEFAULT_LOGGING_PATH):-5])
                     for fname in sorted(filenames)]
        length = max([len(value[1]) for value in filenames])
        print(bold("{c:>3}: {s:{length}} {i}".format(c='id', s='name (bot id)',
                                                     i='content', length=length)))
        for count, (fname, shortname) in enumerate(filenames):
            info = dump_info(fname)
            print("{c:3}: {s:{length}} {i}".format(c=count, s=shortname,
                                                   i=info, length=length))
        try:
            bot_completer = Completer(possible_values=[f[1] for f in filenames])
            readline.set_completer(bot_completer.complete)
            botid = input(inverted('Which dump file to process (id or name)?') + ' ')
        except EOFError:
            sys.exit(0)
        else:
            botid = botid.strip()
            if botid == 'q' or not botid:
                exit(0)
        try:
            # A numeric answer selects by position in the listing ...
            fname, botid = filenames[int(botid)]
        except ValueError:
            # ... anything else is taken as a bot id.
            fname = os.path.join(DEFAULT_LOGGING_PATH, botid) + '.dump'
    else:
        botid = args.botid
        fname = os.path.join(DEFAULT_LOGGING_PATH, botid) + '.dump'

    if not os.path.isfile(fname):
        print(bold('Given file does not exist: {}'.format(fname)))
        exit(1)

    answer = None
    delete_file = False
    while True:
        with open(fname, 'r+') as handle:
            try:
                # Exclusive, non-blocking lock: fail fast if someone
                # (e.g. the running bot) already holds the file.
                fcntl.flock(handle, fcntl.LOCK_EX | fcntl.LOCK_NB)
            except BlockingIOError:
                print(red('Dump file is currently locked. Stopping.'))
                break
            info = dump_info(fname, file_descriptor=handle)
            handle.seek(0)
            available_answers = ACTIONS.keys()
            print('Processing {}: {}'.format(bold(botid), info))
            if info.startswith(str(red)):
                # dump_info returned red (error) text: restrict the menu to
                # the actions flagged as allowed on broken files.
                available_opts = [item[0] for item in ACTIONS.values() if item[2]]
                available_answers = [k for k, v in ACTIONS.items() if v[2]]
                print('Restricted actions.')
            else:
                # don't display list after 'show', 'recover' & edit commands
                if not (answer and isinstance(answer, list) and
                        answer[0] in ['s', 'r', 'v']):
                    content = json.load(handle)
                    handle.seek(0)
                    content = OrderedDict(sorted(content.items(),
                                                 key=lambda t: t[0]))  # sort by key here, #1280
                    meta = load_meta(content)
                    available_opts = [item[0] for item in ACTIONS.values()]
                    for count, line in enumerate(meta):
                        print('{:3}: {} {}'.format(count, *line))

            # Determine bot status
            try:
                bot_status = ctl.bot_status(botid)
                if bot_status[1] == 'running':
                    print(red('This bot is currently running, the dump file is now locked and '
                              'the bot can\'t write it.'))
            except KeyError:
                bot_status = 'error'
                print(red('Attention: This bot is not defined!'))
                available_opts = [item[0] for item in ACTIONS.values() if item[2]]
                available_answers = [k for k, v in ACTIONS.items() if v[2]]
                print('Restricted actions.')

            try:
                # Tab completion for the prompt; 'r' and 'a' get a trailing
                # space appended since they take further arguments.
                possible_answers = list(available_answers)
                for id_action in ['r', 'a']:
                    if id_action in possible_answers:
                        possible_answers[possible_answers.index(id_action)] = id_action + ' '
                action_completer = Completer(possible_answers,
                                             queues=pipeline_pipes.keys())
                readline.set_completer(action_completer.complete)
                answer = input(inverted(', '.join(available_opts) + '?') + ' ').split()
            except EOFError:
                break
            else:
                if not answer:
                    continue
            if len(answer) == 0 or answer[0] not in available_answers:
                print('Action not allowed.')
                continue
            # Id-taking actions expect a comma-separated list as 2nd token.
            if any([answer[0] == char for char in AVAILABLE_IDS]) and len(answer) > 1:
                ids = [int(item) for item in answer[1].split(',')]
            else:
                ids = []
            queue_name = None
            if answer[0] == 'a':
                # recover all -> recover all by ids
                answer[0] = 'r'
                ids = range(len(meta))
                if len(answer) > 1:
                    queue_name = answer[1]
            if answer[0] == 'q':
                break
            elif answer[0] == 'e':
                # Delete entries
                for entry in ids:
                    del content[meta[entry][0]]
                save_file(handle, content)
            elif answer[0] == 'r':
                # recover entries
                default = utils.load_configuration(DEFAULTS_CONF_FILE)
                runtime = utils.load_configuration(RUNTIME_CONF_FILE)
                params = utils.load_parameters(default, runtime)
                pipe = pipeline.PipelineFactory.create(params, logger)
                try:
                    for i, (key, entry) in enumerate([item for (count, item)
                                                      in enumerate(content.items())
                                                      if count in ids]):
                        if entry['message']:
                            msg = copy.copy(entry['message'])  # otherwise the message field gets converted
                            if isinstance(msg, dict):
                                msg = json.dumps(msg)
                        else:
                            print('No message here, deleting entry.')
                            del content[key]
                            continue
                        if queue_name is None:
                            # Optional third token overrides the target queue.
                            if len(answer) == 3:
                                queue_name = answer[2]
                            else:
                                queue_name = entry['source_queue']
                        if queue_name in pipeline_pipes:
                            # An Event headed for a Parser's queue is wrapped
                            # into a Report before reinjection.
                            if runtime[pipeline_pipes[queue_name]]['group'] == 'Parser' and json.loads(msg)['__type'] == 'Event':
                                print('Event converted to Report automatically.')
                                msg = message.Report(message.MessageFactory.unserialize(msg)).serialize()
                        try:
                            pipe.set_queues(queue_name, 'destination')
                            pipe.connect()
                            pipe.send(msg)
                        except exceptions.PipelineError:
                            print(red('Could not reinject into queue {}: {}'
                                      ''.format(queue_name, traceback.format_exc())))
                        else:
                            del content[key]
                            print(green('Recovered dump {}.'.format(i)))
                finally:
                    # Persist partial progress even if a reinjection failed.
                    save_file(handle, content)
                if not content:
                    delete_file = True
                    print('Deleting empty file {}'.format(fname))
                    break
            elif answer[0] == 'd':
                # delete dumpfile
                delete_file = True
                print('Deleting empty file {}'.format(fname))
                break
            elif answer[0] == 's':
                # Show entries by id
                for count, (key, orig_value) in enumerate(content.items()):
                    value = copy.copy(orig_value)  # otherwise the raw field gets truncated
                    if count not in ids:
                        continue
                    print('=' * 100, '\nShowing id {} {}\n'.format(count, key),
                          '-' * 50)
                    if value.get('message_type') == 'base64':
                        # Binary message: truncate the base64 text itself.
                        if len(value['message']) > 1000:
                            value['message'] = value['message'][:1000] + '...[truncated]'
                    else:
                        if isinstance(value['message'], (bytes, str)):
                            value['message'] = json.loads(value['message'])
                        if ('raw' in value['message'] and
                                len(value['message']['raw']) > 1000):
                            value['message']['raw'] = value['message'][
                                'raw'][:1000] + '...[truncated]'
                    if type(value['traceback']) is not list:
                        value['traceback'] = value['traceback'].splitlines()
                    pprint.pprint(value)
            elif answer[0] == 'v':
                # edit given id
                if not ids:
                    print(red('Edit mode needs an id'))
                    continue
                for entry in ids:
                    if content[meta[entry][0]].get('message_type') == 'base64':
                        # Base64 message: edit the decoded bytes in a temp file.
                        with tempfile.NamedTemporaryFile(mode='w+b', suffix='.txt') as tmphandle:
                            filename = tmphandle.name
                            tmphandle.write(base64.b64decode(content[meta[entry][0]]['message']))
                            tmphandle.flush()
                            proc = subprocess.call(['sensible-editor', filename])
                            if proc != 0:
                                print(red('Calling editor failed.'))
                            else:
                                tmphandle.seek(0)
                                new_content = tmphandle.read()
                                try:
                                    new_content = new_content.decode()
                                except UnicodeDecodeError as exc:
                                    print(red("Could not write the new message because of the following error:"))
                                    print(red(exceptions.DecodingError(exception=exc)))
                                else:
                                    del content[meta[entry][0]]['message_type']
                                    content[meta[entry][0]]['message'] = new_content
                                    save_file(handle, content)
                    else:
                        # JSON message: edit it pretty-printed in a temp file.
                        with tempfile.NamedTemporaryFile(mode='w+t', suffix='.json') as tmphandle:
                            filename = tmphandle.name
                            utils.write_configuration(configuration_filepath=filename,
                                                      content=json.loads(content[meta[entry][0]]['message']),
                                                      new=True, backup=False)
                            proc = subprocess.call(['sensible-editor', filename])
                            if proc != 0:
                                print(red('Calling editor failed.'))
                            else:
                                tmphandle.seek(0)
                                content[meta[entry][0]]['message'] = tmphandle.read()
                                save_file(handle, content)
    # Deferred removal: the file must be unlocked/closed before unlinking.
    if delete_file:
        os.remove(fname)
os.makedirs(args.trex_dir) os.chmod(args.trex_dir, 0o777) elif args.allow_update: os.chmod(args.trex_dir, 0o777) if not os.path.exists(tmp_dir): os.makedirs(tmp_dir) if args.daemon_type not in daemons.keys(): # not supposed to happen raise Exception('Error in daemon type , should be one of following: %s' % daemon.keys()) daemon = vars().get(args.daemon_type) if not daemon: raise Exception('Daemon %s does not exist' % args.daemon_type) if args.action != 'show': func = getattr(daemon, args.action) if not func: raise Exception('%s does not have function %s' % (daemon.name, args.action)) try: func() except Exception as e: print(termstyle.red(e)) sys.exit(1) # prints running status if daemon.is_running(): print(termstyle.green('%s is running' % daemon.name)) else: print(termstyle.red('%s is NOT running' % daemon.name))
if args.daemon_type not in daemons.keys(): # not supposed to happen raise Exception('Error in daemon type , should be one of following: %s' % daemon.keys()) daemon = vars().get(args.daemon_type) if not daemon: raise Exception('Daemon %s does not exist' % args.daemon_type) if args.action != 'show': func = getattr(daemon, args.action) if not func: raise Exception('%s does not have function %s' % (daemon.name, args.action)) try: func() except: try: # give it another try sleep(1) func() except Exception as e: print(termstyle.red(e)) sys.exit(1) passive = {'start': 'started', 'restart': 'restarted', 'stop': 'stopped', 'show': 'running'} if args.action in ('show', 'start', 'restart') and daemon.is_running() or \ args.action == 'stop' and not daemon.is_running(): print(termstyle.green('%s is %s' % (daemon.name, passive[args.action]))) os._exit(0) else: print(termstyle.red('%s is NOT %s' % (daemon.name, passive[args.action]))) os._exit(-1)
def main():
    """Entry point of intelmqdump: interactively manage a bot's dump file.

    Lists the dump files found in DEFAULT_LOGGING_PATH (or uses the bot
    given on the command line), then loops over the ACTIONS menu letting
    the user show, recover or delete dumped messages.
    """
    parser = argparse.ArgumentParser(
        prog=APPNAME,
        formatter_class=argparse.RawDescriptionHelpFormatter,
        usage=USAGE,
        description=DESCRIPTION,
        epilog=EPILOG,
    )
    parser.add_argument('botid', metavar='botid', nargs='?',
                        default=None, help='botid to inspect dumps of')
    args = parser.parse_args()
    ctl = intelmqctl.IntelMQController()
    if args.botid is None:
        # No bot given: present a numbered list of all dump files.
        filenames = glob.glob(os.path.join(DEFAULT_LOGGING_PATH, '*.dump'))
        if not len(filenames):
            print(green('Nothing to recover from, no dump files found!'))
            exit(0)
        # (path, bot id) pairs; bot id = file name minus path and '.dump'.
        filenames = [(fname, fname[len(DEFAULT_LOGGING_PATH):-5])
                     for fname in sorted(filenames)]
        length = max([len(value[1]) for value in filenames])
        print(bold("{c:>3}: {s:{l}} {i}".format(c='id', s='name (bot id)',
                                                i='content', l=length)))
        for count, (fname, shortname) in enumerate(filenames):
            info = dump_info(fname)
            print("{c:3}: {s:{l}} {i}".format(c=count, s=shortname, i=info,
                                              l=length))
        try:
            botid = input(inverted('Which dump file to process (id or name)?') + ' ')
        except EOFError:
            exit(0)
        else:
            botid = botid.strip()
            if botid == 'q' or not botid:
                exit(0)
        try:
            # A numeric answer selects by position in the listing ...
            fname, botid = filenames[int(botid)]
        except ValueError:
            # ... anything else is taken as a bot id.
            fname = os.path.join(DEFAULT_LOGGING_PATH, botid) + '.dump'
    else:
        botid = args.botid
        fname = os.path.join(DEFAULT_LOGGING_PATH, botid) + '.dump'
    if not os.path.isfile(fname):
        print(bold('Given file does not exist: {}'.format(fname)))
        exit(1)
    answer = None
    while True:
        info = dump_info(fname)
        available_answers = ACTIONS.keys()
        print('Processing {}: {}'.format(bold(botid), info))
        if info.startswith(str(red)):
            # dump_info returned red (error) text: restrict the menu to the
            # actions flagged as allowed on broken files.
            available_opts = [item[0] for item in ACTIONS.values() if item[2]]
            available_answers = [k for k, v in ACTIONS.items() if v[2]]
            print('Restricted actions.')
        else:
            # don't display list after 'show' and 'recover' command
            if not (answer and isinstance(answer, list) and
                    answer[0] in ['s', 'r']):
                with open(fname, 'rt') as handle:
                    content = json.load(handle)
                meta = load_meta(content)
                available_opts = [item[0] for item in ACTIONS.values()]
                for count, line in enumerate(meta):
                    print('{:3}: {} {}'.format(count, *line))
        # Determine bot status
        bot_status = ctl.bot_status(botid)
        if bot_status == 'running':
            print(red('Attention: This bot is currently running!'))
        elif bot_status == 'error':
            print(red('Attention: This bot is not defined!'))
        try:
            answer = input(inverted(', '.join(available_opts) + '?') + ' ').split()
        except EOFError:
            break
        else:
            if not answer:
                continue
        if len(answer) == 0 or answer[0] not in available_answers:
            print('Action not allowed.')
            continue
        # Id-taking actions expect a comma-separated list as 2nd token.
        if any([answer[0] == char for char in AVAILABLE_IDS]) and len(answer) > 1:
            ids = [int(item) for item in answer[1].split(',')]
        else:
            ids = []
        queue_name = None
        if answer[0] == 'a':
            # recover all -> recover all by ids
            answer[0] = 'r'
            ids = range(len(meta))
            if len(answer) > 1:
                queue_name = answer[1]
        if answer[0] == 'q':
            break
        elif answer[0] == 'e':
            # Delete entries
            for entry in ids:
                del content[meta[entry][0]]
            save_file(fname, content)
        elif answer[0] == 'r':
            if bot_status == 'running':
                # See https://github.com/certtools/intelmq/issues/574
                print(red('Recovery for running bots not possible.'))
                continue
            # recover entries
            default = utils.load_configuration(DEFAULTS_CONF_FILE)
            runtime = utils.load_configuration(RUNTIME_CONF_FILE)
            params = utils.load_parameters(default, runtime)
            pipe = pipeline.PipelineFactory.create(params)
            try:
                for i, (key, entry) in enumerate([item for (count, item)
                                                  in enumerate(content.items())
                                                  if count in ids]):
                    if entry['message']:
                        msg = entry['message']
                    else:
                        print('No message here, deleting entry.')
                        del content[key]
                        continue
                    if queue_name is None:
                        # Optional third token overrides the target queue.
                        if len(answer) == 3:
                            queue_name = answer[2]
                        else:
                            queue_name = entry['source_queue']
                    try:
                        pipe.set_queues(queue_name, 'destination')
                        pipe.connect()
                        pipe.send(msg)
                    except exceptions.PipelineError:
                        print(red('Could not reinject into queue {}: {}'
                                  ''.format(queue_name, traceback.format_exc())))
                    else:
                        del content[key]
                        print(green('Recovered dump {}.'.format(i)))
            finally:
                # Persist partial progress even if a reinjection failed.
                save_file(fname, content)
            if not content:
                os.remove(fname)
                print('Deleted empty file {}'.format(fname))
                break
        elif answer[0] == 'd':
            # delete dumpfile
            os.remove(fname)
            print('Deleted file {}'.format(fname))
            break
        elif answer[0] == 's':
            # Show entries by id
            for count, (key, value) in enumerate(content.items()):
                if count not in ids:
                    continue
                print('=' * 100, '\nShowing id {} {}\n'.format(count, key),
                      '-' * 50)
                if isinstance(value['message'], (bytes, str)):
                    value['message'] = json.loads(value['message'])
                # Truncate huge raw payloads for readability.
                if ('raw' in value['message'] and
                        len(value['message']['raw']) > 1000):
                    value['message']['raw'] = value['message'][
                        'raw'][:1000] + '...[truncated]'
                if type(value['traceback']) is not list:
                    value['traceback'] = value['traceback'].splitlines()
                pprint.pprint(value)
def main():
    """Entry point of intelmqdump: interactively manage a bot's dump file.

    Lists the dump files found in DEFAULT_LOGGING_PATH (or uses the bot
    given on the command line), then loops over the ACTIONS menu letting
    the user show, recover or delete dumped messages.
    """
    parser = argparse.ArgumentParser(
        prog=APPNAME,
        formatter_class=argparse.RawDescriptionHelpFormatter,
        usage=USAGE,
        description=DESCRIPTION,
        epilog=EPILOG,
    )
    parser.add_argument('botid', metavar='botid', nargs='?',
                        default=None, help='botid to inspect dumps of')
    args = parser.parse_args()
    # NOTE(review): 'IntelMQContoller' looks misspelled — presumably it
    # matches the class name in this intelmqctl version; confirm before
    # renaming.
    ctl = intelmqctl.IntelMQContoller()
    if args.botid is None:
        # No bot given: present a numbered list of all dump files.
        filenames = glob.glob(os.path.join(DEFAULT_LOGGING_PATH, '*.dump'))
        if not len(filenames):
            print(green('Nothing to recover from, no dump files found!'))
            exit(0)
        # (path, bot id) pairs; bot id = file name minus path and '.dump'.
        filenames = [(fname, fname[len(DEFAULT_LOGGING_PATH):-5])
                     for fname in sorted(filenames)]
        length = max([len(value[1]) for value in filenames])
        print(bold("{c:>3}: {s:{l}} {i}".format(c='id', s='name (bot id)',
                                                i='content', l=length)))
        for count, (fname, shortname) in enumerate(filenames):
            info = dump_info(fname)
            print("{c:3}: {s:{l}} {i}".format(c=count, s=shortname, i=info,
                                              l=length))
        try:
            botid = input(inverted('Which dump file to process (id or name)?') + ' ')
        except EOFError:
            exit(0)
        else:
            botid = botid.strip()
            if botid == 'q' or not botid:
                exit(0)
        try:
            # A numeric answer selects by position in the listing ...
            fname, botid = filenames[int(botid)]
        except ValueError:
            # ... anything else is taken as a bot id.
            fname = os.path.join(DEFAULT_LOGGING_PATH, botid) + '.dump'
    else:
        botid = args.botid
        fname = os.path.join(DEFAULT_LOGGING_PATH, botid) + '.dump'
    if not os.path.isfile(fname):
        print(bold('Given file does not exist: {}'.format(fname)))
        exit(1)
    answer = None
    while True:
        info = dump_info(fname)
        print('Processing {}: {}'.format(bold(botid), info))
        if info.startswith(str(red)):
            # dump_info returned red (error) text: restrict the menu to the
            # actions flagged as allowed on broken files.
            available_opts = [item[0] for item in ACTIONS.values() if item[2]]
            print('Restricted actions.')
        else:
            # don't display list after 'show' and 'recover' command
            if not (answer and isinstance(answer, list) and
                    answer[0] in ['s', 'r']):
                with open(fname, 'rt') as handle:
                    content = json.load(handle)
                meta = load_meta(content)
                available_opts = [item[0] for item in ACTIONS.values()]
                for count, line in enumerate(meta):
                    print('{:3}: {} {}'.format(count, *line))
        # Determine bot status
        bot_status = ctl.bot_status(botid)
        if bot_status == 'running':
            print(red('Attention: This bot is currently running!'))
        elif bot_status == 'error':
            print(red('Attention: This bot is not defined!'))
        try:
            answer = input(inverted(', '.join(available_opts) + '?') + ' ').split()
        except EOFError:
            break
        else:
            if not answer:
                continue
        # Id-taking actions expect a comma-separated list as 2nd token.
        if any([answer[0] == char for char in AVAILABLE_IDS]) and len(answer) > 1:
            ids = [int(item) for item in answer[1].split(',')]
        else:
            ids = []
        queue_name = None
        if answer[0] == 'a':
            # recover all -> recover all by ids
            answer[0] = 'r'
            ids = range(len(meta))
            if len(answer) > 1:
                queue_name = answer[1]
        if answer[0] == 'q':
            break
        elif answer[0] == 'e':
            # Delete entries
            for entry in ids:
                del content[meta[entry][0]]
            save_file(fname, content)
        elif answer[0] == 'r':
            if bot_status == 'running':
                # See https://github.com/certtools/intelmq/issues/574
                print(red('Recovery for running bots not possible.'))
                continue
            # recover entries
            default = utils.load_configuration(DEFAULTS_CONF_FILE)
            runtime = utils.load_configuration(RUNTIME_CONF_FILE)
            params = utils.load_parameters(default, runtime)
            pipe = pipeline.PipelineFactory.create(params)
            try:
                for i, (key, entry) in enumerate([
                        item for (count, item) in enumerate(content.items())
                        if count in ids]):
                    if entry['message']:
                        msg = entry['message']
                    else:
                        print('No message here, deleting entry.')
                        del content[key]
                        continue
                    if queue_name is None:
                        # Optional third token overrides the target queue.
                        if len(answer) == 3:
                            queue_name = answer[2]
                        else:
                            queue_name = entry['source_queue']
                    try:
                        pipe.set_queues(queue_name, 'destination')
                        pipe.connect()
                        pipe.send(msg)
                    except exceptions.PipelineError:
                        print(red('Could not reinject into queue {}: {}'
                                  ''.format(queue_name, traceback.format_exc())))
                    else:
                        del content[key]
                        print(green('Recovered dump {}.'.format(i)))
            finally:
                # Persist partial progress even if a reinjection failed.
                save_file(fname, content)
            if not content:
                os.remove(fname)
                print('Deleted empty file {}'.format(fname))
                break
        elif answer[0] == 'd':
            # delete dumpfile
            os.remove(fname)
            print('Deleted file {}'.format(fname))
            break
        elif answer[0] == 's':
            # Show entries by id
            for count, (key, value) in enumerate(content.items()):
                if count not in ids:
                    continue
                print('=' * 100, '\nShowing id {} {}\n'.format(count, key),
                      '-' * 50)
                if isinstance(value['message'], (bytes, str)):
                    value['message'] = json.loads(value['message'])
                # Truncate huge raw payloads for readability.
                if ('raw' in value['message'] and
                        len(value['message']['raw']) > 1000):
                    value['message']['raw'] = value['message'][
                        'raw'][:1000] + '...[truncated]'
                if type(value['traceback']) is not list:
                    value['traceback'] = value['traceback'].splitlines()
                pprint.pprint(value)