def setUpClass(cls):
    """ Prepare the fixtures shared by all tests of this case: a dummy
    configuration wired to an on-disk SQLite database and a sample. """
    cls.conf = PeekabooDummyConfig()
    cls.test_db = os.path.abspath('./test.db')
    database = PeekabooDatabase('sqlite:///' + cls.test_db)
    cls.conf.set_db_con(database)
    _set_config(cls.conf)
    # this very source file serves as the file under analysis
    cls.sample = Sample(os.path.realpath(__file__))
def setUpClass(cls):
    """ Prepare shared fixtures: dummy config, SQLite test database, a
    sample factory and a sample produced by it. """
    cls.conf = PeekabooDummyConfig()
    cls.test_db = os.path.abspath('./test.db')
    cls.db_con = PeekabooDatabase('sqlite:///' + cls.test_db)
    # factory wired to the test database but without a cuckoo backend
    cls.factory = SampleFactory(
        cuckoo=None,
        db_con=cls.db_con,
        connection_map=None,
        base_dir=cls.conf.sample_base_dir,
        job_hash_regex=cls.conf.job_hash_regex,
        keep_mail_data=False)
    # this very source file serves as the file under analysis
    cls.sample = cls.factory.make_sample(os.path.realpath(__file__))
def setUpClass(cls):
    """ Set up common test case resources: config, test database and a
    factory-created sample file. """
    cls.test_db = os.path.abspath('./test.db')
    cls.db_con = PeekabooDatabase('sqlite:///' + cls.test_db)
    cls.conf = CreatingPeekabooConfig()
    # no cuckoo backend and no processing info dumping in unit tests
    cls.factory = CreatingSampleFactory(
        cuckoo=None,
        base_dir=cls.conf.sample_base_dir,
        job_hash_regex=cls.conf.job_hash_regex,
        keep_mail_data=False,
        processing_info_dir=None)
    # small on-disk python file to act as the sample under test
    cls.sample = cls.factory.create_sample('test.py', 'test')
def setUpClass(cls):
    """ Set up common test case resources: a cluster-aware and a
    non-cluster database handle plus a sample carrying a failed rule
    result. """
    cls.test_db = os.path.abspath('./test.db')
    cls.conf = CreatingPeekabooConfig()
    db_url = 'sqlite:///' + cls.test_db
    # cluster instance 1; in-flight markers go stale after 10 seconds
    cls.db_con = PeekabooDatabase(db_url, instance_id=1,
                                  stale_in_flight_threshold=10)
    # instance id 0 disables cluster awareness
    cls.no_cluster_db = PeekabooDatabase(db_url, instance_id=0)
    cls.factory = CreatingSampleFactory(
        cuckoo=None,
        base_dir=cls.conf.sample_base_dir,
        job_hash_regex=cls.conf.job_hash_regex,
        keep_mail_data=False,
        processing_info_dir=None)
    cls.sample = cls.factory.create_sample('test.py', 'test')
    # attach a final (no further analysis) failed verdict to the sample
    verdict = RuleResult('Unittest', Result.failed,
                         'This is just a test case.',
                         further_analysis=False)
    cls.sample.add_rule_result(verdict)
def setUpClass(cls):
    """ Prepare shared fixtures: dummy config, test database and a sample
    whose overall result has already been determined. """
    cls.conf = PeekabooDummyConfig()
    cls.test_db = os.path.abspath('./test.db')
    database = PeekabooDatabase('sqlite:///' + cls.test_db)
    cls.conf.set_db_con(database)
    _set_config(cls.conf)
    # this very source file serves as the file under analysis
    cls.sample = Sample(os.path.realpath(__file__))
    # non-final unknown verdict, then fold it into the overall result
    verdict = RuleResult('Unittest', Result.unknown,
                         'This is just a test case.',
                         further_analysis=True)
    cls.sample.add_rule_result(verdict)
    cls.sample.determine_result()
def setUpClass(cls):
    """ Prepare shared fixtures: config, database, factory and a sample
    carrying an unknown rule result with its overall result determined. """
    cls.conf = PeekabooDummyConfig()
    cls.test_db = os.path.abspath('./test.db')
    cls.db_con = PeekabooDatabase('sqlite:///' + cls.test_db)
    # factory wired to the test database but without a cuckoo backend
    cls.factory = SampleFactory(
        cuckoo=None,
        db_con=cls.db_con,
        connection_map=None,
        base_dir=cls.conf.sample_base_dir,
        job_hash_regex=cls.conf.job_hash_regex,
        keep_mail_data=False)
    # this very source file serves as the file under analysis
    cls.sample = cls.factory.make_sample(os.path.realpath(__file__))
    # non-final unknown verdict, then fold it into the overall result
    verdict = RuleResult('Unittest', Result.unknown,
                         'This is just a test case.',
                         further_analysis=True)
    cls.sample.add_rule_result(verdict)
    cls.sample.determine_result()
async def async_main():
    """ Run the Peekaboo daemon inside the asyncio event loop.

    Parses command line arguments, loads configuration and translations,
    connects to the database, starts the job queue and server and then
    waits for their awaitables to finish (or fail).  Terminates the
    process via sys.exit() on fatal startup errors (exit code 1) and on
    orderly shutdown (exit code 0). """
    arg_parser = ArgumentParser(
        description='Peekaboo Extended Email Attachment Behavior Observation Owl'
    )
    arg_parser.add_argument(
        '-c', '--config',
        action='store',
        help='The configuration file for Peekaboo.'
    )
    arg_parser.add_argument(
        '-d', '--debug',
        action='store_true',
        help="Run Peekaboo in debug mode regardless of what's specified in the configuration."
    )
    arg_parser.add_argument(
        '-D', '--daemon',
        action='store_true',
        help='Run Peekaboo in daemon mode (suppresses the logo to be written to STDOUT).'
    )
    args = arg_parser.parse_args()

    print('Starting Peekaboo %s.' % __version__)
    if not args.daemon:
        print(PEEKABOO_OWL)

    # Check if CLI arguments override the configuration
    log_level = None
    if args.debug:
        log_level = logging.DEBUG

    try:
        config = PeekabooConfig(config_file=args.config, log_level=log_level)
        logger.debug(config)
    except PeekabooConfigException as error:
        # logging may not be configured yet - use the root logger
        logging.critical(error)
        sys.exit(1)

    # find localisation in our package directory
    locale_domain = 'peekaboo'
    locale_dir = os.path.join(os.path.dirname(__file__), 'locale')
    languages = None
    if config.report_locale:
        logger.debug('Looking for translations for preconfigured locale "%s"',
                     config.report_locale)
        languages = [config.report_locale]
        if not gettext.find(locale_domain, locale_dir, languages):
            logger.warning('Translation file not found - falling back to '
                           'system configuration.')
            languages = None

    logger.debug('Installing report message translations')
    # fallback=True installs NullTranslations if no catalog is found
    translation = gettext.translation(locale_domain, locale_dir, languages,
                                      fallback=True)
    translation.install()

    # establish a connection to the database
    try:
        db_con = PeekabooDatabase(
            db_url=config.db_url, instance_id=config.cluster_instance_id,
            stale_in_flight_threshold=config.cluster_stale_in_flight_threshold,
            log_level=config.db_log_level)
        await db_con.start()
    except PeekabooDatabaseError as error:
        logging.critical(error)
        sys.exit(1)
    except SQLAlchemyError as dberr:
        logger.critical('Failed to establish a connection to the database '
                        'at %s: %s', config.db_url, dberr)
        sys.exit(1)

    # initialize the daemon infrastructure such as PID file and dropping
    # privileges, automatically cleans up after itself when going out of scope
    daemon_infrastructure = PeekabooDaemonInfrastructure(
        config.pid_file, config.user, config.group)
    daemon_infrastructure.init()

    # clear all our in flight samples and all instances' stale in flight
    # samples
    await db_con.clear_in_flight_samples()
    await db_con.clear_stale_in_flight_samples()

    # a cluster duplicate interval of 0 disables the handler thread which is
    # what we want if we don't have an instance_id and therefore are alone
    cldup_check_interval = 0
    if config.cluster_instance_id > 0:
        cldup_check_interval = config.cluster_duplicate_check_interval
        if cldup_check_interval < 5:
            # enforce a sane lower bound to avoid hammering the database
            cldup_check_interval = 5
            logger.warning("Raising excessively low cluster duplicate check "
                           "interval to %d seconds.",
                           cldup_check_interval)

    loop = asyncio.get_running_loop()
    sig_handler = SignalHandler(loop)

    # separate threadpool for CPU- and I/O-bound blocking tasks (hashlib,
    # oletools, magic, requests, processing info dumping). This effectively
    # gives each of our asyncio Worker tasks an OS thread to execute blocking
    # operations on. The Queue might use them as well for stuff like
    # calculating samples' sha256sums, speeding up sample reception and
    # submission from the server. So maybe we should have some more threads
    # here...
    threadpool = concurrent.futures.ThreadPoolExecutor(
        config.worker_count, 'ThreadPool-')

    # collect a list of awaitables from started subsystems from which to gather
    # unexpected error conditions such as exceptions
    awaitables = []

    # read in the analyzer and ruleset configuration and start the job queue
    try:
        ruleset_config = PeekabooConfigParser(config.ruleset_config)
        analyzer_config = PeekabooAnalyzerConfig(config.analyzer_config)
        job_queue = JobQueue(
            worker_count=config.worker_count, ruleset_config=ruleset_config,
            db_con=db_con, analyzer_config=analyzer_config,
            cluster_duplicate_check_interval=cldup_check_interval,
            threadpool=threadpool)
        sig_handler.register_listener(job_queue)
        awaitables.extend(await job_queue.start())
    except PeekabooConfigException as error:
        logging.critical(error)
        sys.exit(1)

    # Factory producing almost identical samples providing them with global
    # config values and references to other objects they need, such as the
    # shared threadpool.
    sample_factory = SampleFactory(
        config.processing_info_dir, threadpool)

    try:
        server = PeekabooServer(
            host=config.host, port=config.port,
            job_queue=job_queue, sample_factory=sample_factory,
            request_queue_size=100, db_con=db_con)
        sig_handler.register_listener(server)
        # the server runs completely inside the event loop and does not expose
        # any awaitable to extract exceptions from.
        await server.start()
    except Exception as error:
        logger.critical('Failed to start Peekaboo Server: %s', error)
        # tear down what was already started before bailing out
        job_queue.shut_down()
        await job_queue.close_down()
        sys.exit(1)

    # abort startup if shutdown was requested meanwhile
    if sig_handler.shutdown_requested:
        sys.exit(0)

    SystemdNotifier().notify("READY=1")

    try:
        # block here until a subsystem awaitable finishes or raises
        await asyncio.gather(*awaitables)
    # CancelledError is derived from BaseException, not Exception
    except asyncio.exceptions.CancelledError as error:
        # cancellation is expected in the case of shutdown via signal handler
        pass
    except Exception:
        logger.error("Shutting down due to unexpected exception")

    # trigger shutdowns of other components if not already ongoing triggered
    # by the signal handler
    if not sig_handler.shutdown_requested:
        server.shut_down()
        job_queue.shut_down()

    # close down components after they've shut down
    await server.close_down()
    await job_queue.close_down()

    # do a final cleanup pass through the database
    try:
        await db_con.clear_in_flight_samples()
        await db_con.clear_stale_in_flight_samples()
    except PeekabooDatabaseError as dberr:
        logger.error(dberr)

    sys.exit(0)
def run():
    """ Run the Peekaboo daemon (legacy synchronous/cuckoo variant).

    Parses command line arguments, loads configuration and translations,
    connects to the database, verifies the ruleset, starts the job queue,
    cuckoo backend and server and finally blocks in cuckoo.do().
    Terminates the process via sys.exit(): 1 on fatal startup errors,
    otherwise with the exit code returned by the cuckoo main loop. """
    arg_parser = ArgumentParser(
        description=
        'Peekaboo Extended Email Attachment Behavior Observation Owl')
    arg_parser.add_argument('-c', '--config', action='store',
                            help='The configuration file for Peekaboo.')
    arg_parser.add_argument(
        '-d', '--debug', action='store_true',
        help=
        "Run Peekaboo in debug mode regardless of what's specified in the configuration."
    )
    arg_parser.add_argument(
        '-D', '--daemon', action='store_true',
        help=
        'Run Peekaboo in daemon mode (suppresses the logo to be written to STDOUT).'
    )
    args = arg_parser.parse_args()

    print('Starting Peekaboo %s.' % __version__)
    if not args.daemon:
        print(PEEKABOO_OWL)

    # Check if CLI arguments override the configuration
    log_level = None
    if args.debug:
        log_level = logging.DEBUG

    try:
        config = PeekabooConfig(config_file=args.config, log_level=log_level)
        logger.debug(config)
    except PeekabooConfigException as error:
        # logging may not be configured yet - use the root logger
        logging.critical(error)
        sys.exit(1)

    # find localisation in our package directory
    locale_domain = 'peekaboo'
    locale_dir = os.path.join(os.path.dirname(__file__), 'locale')
    languages = None
    if config.report_locale:
        logger.debug('Looking for translations for preconfigured locale "%s"',
                     config.report_locale)
        languages = [config.report_locale]
        if not gettext.find(locale_domain, locale_dir, languages):
            logger.warning('Translation file not found - falling back to '
                           'system configuration.')
            languages = None

    logger.debug('Installing report message translations')
    translation = gettext.translation(locale_domain, locale_dir, languages,
                                      fallback=True)
    # python2's gettext needs to be told explicitly to return unicode strings
    loc_kwargs = {}
    if sys.version_info[0] < 3:
        loc_kwargs = {'unicode': True}
    # FIX: the kwargs dict must be unpacked - passing it positionally handed
    # it to install() as the 'unicode' (py2) / 'names' (py3) parameter and
    # only worked by accident
    translation.install(**loc_kwargs)

    # establish a connection to the database
    try:
        db_con = PeekabooDatabase(
            db_url=config.db_url, instance_id=config.cluster_instance_id,
            stale_in_flight_threshold=config.cluster_stale_in_flight_threshold,
            log_level=config.db_log_level)
    except PeekabooDatabaseError as error:
        logging.critical(error)
        sys.exit(1)
    except SQLAlchemyError as dberr:
        logger.critical(
            'Failed to establish a connection to the database '
            'at %s: %s', config.db_url, dberr)
        sys.exit(1)

    # Import debug module if we are in debug mode
    debugger = None
    if config.use_debug_module:
        from peekaboo.debug import PeekabooDebugger
        debugger = PeekabooDebugger()
        debugger.start()

    # initialize the daemon infrastructure such as PID file and dropping
    # privileges, automatically cleans up after itself when going out of scope
    daemon_infrastructure = PeekabooDaemonInfrastructure(
        config.pid_file, config.sock_file, config.user, config.group)
    daemon_infrastructure.init()

    systemd = SystemdNotifier()

    # clear all our in flight samples and all instances' stale in flight
    # samples
    db_con.clear_in_flight_samples()
    db_con.clear_stale_in_flight_samples()

    # a cluster duplicate interval of 0 disables the handler thread which is
    # what we want if we don't have an instance_id and therefore are alone
    cldup_check_interval = 0
    if config.cluster_instance_id > 0:
        cldup_check_interval = config.cluster_duplicate_check_interval
        if cldup_check_interval < 5:
            # enforce a sane lower bound to avoid hammering the database
            cldup_check_interval = 5
            logger.warning(
                "Raising excessively low cluster duplicate check "
                "interval to %d seconds.", cldup_check_interval)

    # workers of the job queue need the ruleset configuration to create the
    # ruleset engine with it
    try:
        ruleset_config = PeekabooConfigParser(config.ruleset_config)
    except PeekabooConfigException as error:
        logging.critical(error)
        sys.exit(1)

    # verify the ruleset configuration by spawning a ruleset engine and having
    # it verify it
    try:
        engine = RulesetEngine(ruleset_config, db_con)
    except (KeyError, ValueError, PeekabooConfigException) as error:
        logging.critical('Ruleset configuration error: %s', error)
        sys.exit(1)
    except PeekabooRulesetConfigError as error:
        logging.critical(error)
        sys.exit(1)

    job_queue = JobQueue(worker_count=config.worker_count,
                         ruleset_config=ruleset_config,
                         db_con=db_con,
                         cluster_duplicate_check_interval=cldup_check_interval)

    if config.cuckoo_mode == "embed":
        cuckoo = CuckooEmbed(job_queue, config.cuckoo_exec,
                             config.cuckoo_submit, config.cuckoo_storage,
                             config.interpreter)
    # otherwise it's the new API method and default
    else:
        cuckoo = CuckooApi(job_queue, config.cuckoo_url,
                           config.cuckoo_api_token,
                           config.cuckoo_poll_interval)

    sig_handler = SignalHandler()
    sig_handler.register_listener(cuckoo)

    # Factory producing almost identical samples providing them with global
    # config values and references to other objects they need, such as cuckoo,
    # database connection and connection map.
    sample_factory = SampleFactory(cuckoo, config.sample_base_dir,
                                   config.job_hash_regex,
                                   config.keep_mail_data,
                                   config.processing_info_dir)

    # We only want to accept 2 * worker_count connections.
    try:
        server = PeekabooServer(sock_file=config.sock_file,
                                job_queue=job_queue,
                                sample_factory=sample_factory,
                                request_queue_size=config.worker_count * 2)
    except Exception as error:
        logger.critical('Failed to start Peekaboo Server: %s', error)
        job_queue.shut_down()
        if debugger is not None:
            debugger.shut_down()
        sys.exit(1)

    exit_code = 1
    try:
        systemd.notify("READY=1")
        # If this dies Peekaboo dies, since this is the main thread. (legacy)
        exit_code = cuckoo.do()
    except Exception as error:
        logger.critical('Main thread aborted: %s', error)
    finally:
        # orderly teardown regardless of how the main loop ended
        server.shutdown()
        job_queue.shut_down()
        try:
            db_con.clear_in_flight_samples()
            db_con.clear_stale_in_flight_samples()
        except PeekabooDatabaseError as dberr:
            logger.error(dberr)
        if debugger is not None:
            debugger.shut_down()
    sys.exit(exit_code)
def run():
    """ Run the Peekaboo daemon (oldest twisted/stream-server variant).

    Parses command line arguments, reads the configuration, connects to
    the database, drops privileges when started as root, writes a PID
    file and then runs the stream server thread and the embedded cuckoo
    process under the twisted reactor.  Exits with status 1 on startup
    errors. """
    arg_parser = ArgumentParser(
        description=
        'Peekaboo Extended Email Attachment Behavior Observation Owl')
    arg_parser.add_argument('-c', '--config', action='store', required=False,
                            # os.path.join() with a single component was a
                            # no-op; the plain relative path is equivalent
                            default='./peekaboo.conf',
                            help='The configuration file for Peekaboo.')
    arg_parser.add_argument(
        '-d', '--debug', action='store_true', required=False, default=False,
        help=
        "Run Peekaboo in debug mode regardless of what's specified in the configuration."
    )
    arg_parser.add_argument(
        '-D', '--daemon', action='store_true', required=False, default=False,
        help=
        'Run Peekaboo in daemon mode (suppresses the logo to be written to STDOUT).'
    )
    args = arg_parser.parse_args()

    if not args.daemon:
        print(_owl)
    else:
        print('Starting Peekaboo %s.' % __version__)

    # read configuration
    if not os.path.isfile(args.config):
        # FIX: grammar of the user-facing error message ("files" -> "file")
        print('Failed to read config, file does not exist.'
              )  # logger doesn't exist here
        sys.exit(1)
    config = parse_config(args.config)

    # Check if CLI arguments override the configuration
    if args.debug:
        config.change_log_level('DEBUG')

    # Log the configuration options if we are in debug mode
    if config.log_level == logging.DEBUG:
        logger.debug(config.__str__())

    # establish a connection to the database
    try:
        db_con = PeekabooDatabase(config.db_url)
        config.add_db_con(db_con)
    except PeekabooDatabaseError as e:
        logging.exception(e)
        sys.exit(1)
    except Exception as e:
        logger.critical('Failed to establish a connection to the database.')
        logger.exception(e)
        sys.exit(1)

    # Import debug module if we are in debug mode
    if config.use_debug_module:
        from peekaboo.debug import peekaboo_debugger
        peekaboo_debugger()

    if os.getuid() == 0:
        logger.warning('Peekaboo should not run as root.')
        # drop privileges to user
        os.setgid(grp.getgrnam(config.group)[2])
        os.setuid(pwd.getpwnam(config.user)[2])
        # set $HOME to the users home directory
        # (VirtualBox must access the configs)
        os.environ['HOME'] = pwd.getpwnam(config.user)[5]
        logger.info("Dropped privileges to user %s and group %s"
                    % (config.user, config.group))
        logger.debug('$HOME is ' + os.environ['HOME'])

    # write PID file
    pid = str(os.getpid())
    with open(config.pid_file, "w") as pidfile:
        pidfile.write("%s\n" % pid)

    systemd = SystemdNotifier()

    server = PeekabooStreamServer(config.sock_file,
                                  PeekabooStreamRequestHandler)
    runner = Thread(target=server.serve_forever)
    runner.daemon = True

    try:
        runner.start()
        logger.info('Peekaboo server is listening on %s'
                    % server.server_address)

        # FIX: S_IWOTH was listed twice in the mask (a behavioral no-op);
        # deduplicated. NOTE(review): the mode grants other-write but not
        # other-read (no S_IROTH) - looks intentional for a drop-only
        # socket but worth confirming.
        os.chmod(
            config.sock_file,
            stat.S_IWOTH | stat.S_IREAD | stat.S_IWRITE | stat.S_IRGRP |
            stat.S_IWGRP)

        # Run Cuckoo sandbox, parse log output, and report back of Peekaboo.
        # If this dies Peekaboo dies, since this is the main thread.
        srv = CuckooServer()
        reactor.spawnProcess(srv, config.interpreter,
                             [config.interpreter, '-u', config.cuckoo_exec])
        systemd.notify("READY=1")
        reactor.run()
    except Exception as e:
        logger.exception(e)
    finally:
        server.shutdown()
def run():
    """ Run the Peekaboo daemon (synchronous socket-server variant).

    Parses command line arguments, loads configuration and translations,
    connects to the database, starts the job queue and unix-socket
    server, then blocks in server.serve() until shutdown.  Terminates
    the process via sys.exit(): 1 on fatal startup errors, 0 on orderly
    shutdown. """
    arg_parser = ArgumentParser(
        description='Peekaboo Extended Email Attachment Behavior Observation Owl'
    )
    arg_parser.add_argument(
        '-c', '--config',
        action='store',
        help='The configuration file for Peekaboo.'
    )
    arg_parser.add_argument(
        '-d', '--debug',
        action='store_true',
        help="Run Peekaboo in debug mode regardless of what's specified in the configuration."
    )
    arg_parser.add_argument(
        '-D', '--daemon',
        action='store_true',
        help='Run Peekaboo in daemon mode (suppresses the logo to be written to STDOUT).'
    )
    args = arg_parser.parse_args()

    print('Starting Peekaboo %s.' % __version__)
    if not args.daemon:
        print(PEEKABOO_OWL)

    # Check if CLI arguments override the configuration
    log_level = None
    if args.debug:
        log_level = logging.DEBUG

    try:
        config = PeekabooConfig(config_file=args.config, log_level=log_level)
        logger.debug(config)
    except PeekabooConfigException as error:
        # logging may not be configured yet - use the root logger
        logging.critical(error)
        sys.exit(1)

    # find localisation in our package directory
    locale_domain = 'peekaboo'
    locale_dir = os.path.join(os.path.dirname(__file__), 'locale')
    languages = None
    if config.report_locale:
        logger.debug('Looking for translations for preconfigured locale "%s"',
                     config.report_locale)
        languages = [config.report_locale]
        if not gettext.find(locale_domain, locale_dir, languages):
            logger.warning('Translation file not found - falling back to '
                           'system configuration.')
            languages = None

    logger.debug('Installing report message translations')
    # fallback=True installs NullTranslations if no catalog is found
    translation = gettext.translation(locale_domain, locale_dir, languages,
                                      fallback=True)
    translation.install()

    # establish a connection to the database
    try:
        db_con = PeekabooDatabase(
            db_url=config.db_url, instance_id=config.cluster_instance_id,
            stale_in_flight_threshold=config.cluster_stale_in_flight_threshold,
            log_level=config.db_log_level)
    except PeekabooDatabaseError as error:
        logging.critical(error)
        sys.exit(1)
    except SQLAlchemyError as dberr:
        logger.critical('Failed to establish a connection to the database '
                        'at %s: %s', config.db_url, dberr)
        sys.exit(1)

    # initialize the daemon infrastructure such as PID file and dropping
    # privileges, automatically cleans up after itself when going out of scope
    daemon_infrastructure = PeekabooDaemonInfrastructure(
        config.pid_file, config.sock_file, config.user, config.group)
    daemon_infrastructure.init()

    # clear all our in flight samples and all instances' stale in flight
    # samples
    db_con.clear_in_flight_samples()
    db_con.clear_stale_in_flight_samples()

    # a cluster duplicate interval of 0 disables the handler thread which is
    # what we want if we don't have an instance_id and therefore are alone
    cldup_check_interval = 0
    if config.cluster_instance_id > 0:
        cldup_check_interval = config.cluster_duplicate_check_interval
        if cldup_check_interval < 5:
            # enforce a sane lower bound to avoid hammering the database
            cldup_check_interval = 5
            logger.warning("Raising excessively low cluster duplicate check "
                           "interval to %d seconds.", cldup_check_interval)

    sig_handler = SignalHandler()

    # read in the analyzer and ruleset configuration and start the job queue
    try:
        ruleset_config = PeekabooConfigParser(config.ruleset_config)
        analyzer_config = PeekabooAnalyzerConfig(config.analyzer_config)
        job_queue = JobQueue(
            worker_count=config.worker_count, ruleset_config=ruleset_config,
            db_con=db_con, analyzer_config=analyzer_config,
            cluster_duplicate_check_interval=cldup_check_interval)
        sig_handler.register_listener(job_queue)
        job_queue.start()
    except PeekabooConfigException as error:
        logging.critical(error)
        sys.exit(1)

    # Factory producing almost identical samples providing them with global
    # config values and references to other objects they need, such as database
    # connection and connection map.
    sample_factory = SampleFactory(
        config.sample_base_dir, config.job_hash_regex, config.keep_mail_data,
        config.processing_info_dir)

    # We only want to accept 2 * worker_count connections.
    try:
        server = PeekabooServer(
            sock_file=config.sock_file, job_queue=job_queue,
            sample_factory=sample_factory,
            request_queue_size=config.worker_count * 2,
            sock_group=config.sock_group, sock_mode=config.sock_mode)
    except Exception as error:
        logger.critical('Failed to start Peekaboo Server: %s', error)
        # tear down what was already started before bailing out
        job_queue.shut_down()
        job_queue.close_down()
        sys.exit(1)

    # abort startup if shutdown was requested meanwhile
    if sig_handler.shutdown_requested:
        sys.exit(0)

    sig_handler.register_listener(server)
    SystemdNotifier().notify("READY=1")
    # blocks until the server stops serving
    server.serve()

    # trigger shutdowns of other components (if not already ongoing triggered
    # by e.g. the signal handler), server will already be shut down at this
    # point signaled by the fact that serve() above returned
    job_queue.shut_down()

    # close down components after they've shut down
    job_queue.close_down()

    # do a final cleanup pass through the database
    try:
        db_con.clear_in_flight_samples()
        db_con.clear_stale_in_flight_samples()
    except PeekabooDatabaseError as dberr:
        logger.error(dberr)

    sys.exit(0)