def parallel_scan_process(options, targets, scan_unique_id, process_number):
    """Fan out one scanner thread per (target, module) pair for this process.

    Threads are throttled through ``wait_for_threads_to_finish`` using the
    user-configured ``parallel_module_scan`` limit; a final unbounded wait
    drains whatever is still running before returning.

    Returns True when every scan thread was dispatched, False if the
    throttling wait reported failure part-way through.
    """
    running_threads = []
    verbose_event_info(
        messages("single_process_started").format(process_number))
    # Total work items = every module run against every target.
    module_total = len(targets) * len(options.selected_modules)
    scan_pairs = [
        (host, module)
        for host in targets
        for module in options.selected_modules
    ]
    for pair_number, (host, module) in enumerate(scan_pairs, start=1):
        worker = Thread(
            target=perform_scan,
            args=(options, host, module, scan_unique_id, process_number,
                  pair_number, module_total),
        )
        worker.name = f"{host} -> {module}"
        worker.start()
        verbose_event_info(
            messages("start_parallel_module_scan").format(
                process_number, module, host, pair_number, module_total))
        running_threads.append(worker)
        # Block here whenever the configured concurrency ceiling is hit.
        if not wait_for_threads_to_finish(
                running_threads, options.parallel_module_scan, True):
            return False
    # Drain any remaining threads with no concurrency cap.
    wait_for_threads_to_finish(running_threads, maximum=None, terminable=True)
    return True
def start(self):
    """Run every sub-step of this module's payloads in terminable threads.

    Aborts early (returning None) if any payload uses a library this module
    does not support; otherwise dispatches one request thread per sub-step,
    pacing them by the configured sleep and capping concurrency at
    ``thread_per_host``, then blocks until all request threads finish.
    """
    from terminable_thread import Thread
    from core.utility import wait_for_threads_to_finish
    from core.alert import warn
    from core.alert import verbose_event_info
    from core.alert import messages

    # Refuse to run if any payload targets a library we cannot drive.
    for payload in self.module_content['payloads']:
        if payload['library'] not in self.libraries:
            warn(
                messages("library_not_supported").format(
                    payload['library']))
            return None

    # Total request count across all payloads/steps, used for progress logs.
    total_requests = sum(
        len(step)
        for payload in self.module_content['payloads']
        for step in payload['steps']
    )

    request_counter = 0
    pending_threads = []
    for payload in self.module_content['payloads']:
        # Resolve the protocol Engine class for this payload's library.
        engine = getattr(
            __import__(
                'core.module_protocols.{library}'.format(
                    library=payload['library']),
                fromlist=['Engine'],
            ),
            'Engine',
        )
        for step in payload['steps']:
            for sub_step in step:
                request_thread = Thread(
                    target=engine.run,
                    args=(sub_step, self.module_name, self.target,
                          self.scan_unique_id, self.module_inputs,
                          self.process_number, self.module_thread_number,
                          self.total_module_thread_number,
                          request_counter, total_requests),
                )
                request_thread.name = (
                    f"{self.target} -> {self.module_name} -> {sub_step}"
                )
                request_counter += 1
                verbose_event_info(
                    messages("sending_module_request").format(
                        self.process_number, self.module_name, self.target,
                        self.module_thread_number,
                        self.total_module_thread_number,
                        request_counter, total_requests))
                request_thread.start()
                # Pace requests so we do not flood the target host.
                time.sleep(self.module_inputs['time_sleep_between_requests'])
                pending_threads.append(request_thread)
                # Throttle to the configured per-host thread ceiling.
                wait_for_threads_to_finish(
                    pending_threads,
                    maximum=self.module_inputs['thread_per_host'],
                    terminable=True)
    # Final unbounded wait so every in-flight request completes.
    wait_for_threads_to_finish(pending_threads, maximum=None, terminable=True)
def run_modules_processors(configuration):
    """
    Spawn one ModuleProcessor thread per configured module.

    Each thread runs the module's ``module_processor.processor`` callable
    and is named after the module's container. Started threads are tracked
    in the module-level ``processor_threads`` list so they can be stopped
    later.

    :param configuration: user final configuration
    :return: None
    """
    for module, settings in configuration.items():
        worker = Thread(
            target=settings["module_processor"].processor,
            name=virtual_machine_name_to_container_name(
                settings["virtual_machine_name"], module
            ),
        )
        worker.start()
        processor_threads.append(worker)
def __call__(self):
    """Worker loop: forever pull (priority, location) items off the queue
    and fetch each one in a short-lived daemon thread, interrupting the
    fetch once ``self.timeout`` has elapsed.
    """
    while True:
        priority, location = self.locations.get()
        _trace(f"____ got new location: priority: {priority}; location: {location}")
        fetcher = Thread(target=self._fetch_and_put, args=(priority, location))
        fetcher.daemon = True
        fetcher.start()
        _trace('__call__: thread started. joining...')
        # Give the fetch up to `timeout` seconds to finish on its own.
        fetcher.join(self.timeout)
        _trace('__call__: ...joined.')
        try:
            # Interrupt the fetch if it is still running past the timeout.
            _trace('__call__: raising FetchInterruptError...')
            fetcher.raise_exc(FetchInterruptError)
            _trace('__call__: ...raised. joining...')
            fetcher.join()
            _trace('__call__: ...joined.')
        except threading.ThreadError:
            # Raised when the thread already completed before the timeout.
            _trace('__call__ got ThreadError.')
        finally:
            # Mark the queue item handled whether the fetch finished or was
            # cut short, so queue.join() callers are not blocked forever.
            self.locations.task_done()
def __init__(self, fetch, threadcount, results=None, timeout=0.1,
             Worker=Worker, success=bool, adjust_priority=reduce_priority,
             **worker_kwargs):
    """Build the fetch pool: a priority queue of locations, a results
    queue, and ``threadcount`` daemon threads all running one shared
    Worker instance. Threads are created here but not started.

    :param fetch: callable that retrieves a single location
    :param threadcount: number of worker threads to create
    :param results: optional results queue; a fresh Queue is used if None
    :param timeout: per-fetch timeout handed to the Worker
    :param Worker: worker factory (injectable, e.g. for testing)
    :param success: predicate deciding whether a fetch result succeeded
    :param adjust_priority: callable used to reprioritise locations
    :param worker_kwargs: extra keyword arguments forwarded to the Worker
    """
    self.locations = PriorityQueue()
    if results is None:
        results = Queue()
    self.results = results
    # One Worker instance is shared by every thread in the pool.
    shared_worker = Worker(self.locations, fetch, self.results,
                           timeout=timeout, success=success,
                           adjust_priority=adjust_priority, **worker_kwargs)
    pool = []
    for _ in range(threadcount):
        worker_thread = Thread(target=shared_worker, args=())
        # Daemonize so the pool never blocks interpreter shutdown.
        worker_thread.daemon = True
        pool.append(worker_thread)
    self.threads = tuple(pool)
def load_honeypot_engine():
    """
    Load and run the OHP (OWASP Honeypot) engine end-to-end.

    Parses argv, validates selected/excluded modules, builds the container
    configuration, (re)creates Docker images/containers/networks, starts the
    network-event and bulk-insert threads plus module processors, waits for
    interrupt (unless running as a test), then tears everything down.

    Returns:
        True
    """
    # print logo
    logo()
    # parse argv
    parser, argv_options = argv_parser()
    #########################################
    # argv rules apply
    #########################################
    # check help menu
    if argv_options.show_help_menu:
        parser.print_help()
        exit_success()
    # check for requirements before start
    check_for_requirements(argv_options.start_api_server)
    # check api server flag; API mode starts the server and exits, never
    # reaching the container orchestration below
    if argv_options.start_api_server:
        start_api_server()
        exit_success()
    # check selected modules
    # NOTE(review): `selected_modules` is only bound inside this branch but is
    # used unconditionally further down — if no --selected-modules flag is
    # given this presumably relies on argv defaults; confirm with argv_parser.
    if argv_options.selected_modules:
        selected_modules = list(set(argv_options.selected_modules.rsplit(",")))
        if "all" in selected_modules:
            selected_modules = load_all_modules()
        if "" in selected_modules:
            selected_modules.remove("")
        # if selected modules are zero
        if not len(selected_modules):
            exit_failure(messages("en", "zero_module_selected"))
        # if module not found
        for module in selected_modules:
            if module not in load_all_modules():
                exit_failure(messages("en", "module_not_found").format(module))
    # check excluded modules
    if argv_options.excluded_modules:
        excluded_modules = list(set(argv_options.excluded_modules.rsplit(",")))
        if "all" in excluded_modules:
            exit_failure("you cannot exclude all modules")
        if "" in excluded_modules:
            excluded_modules.remove("")
        # remove excluded modules
        for module in excluded_modules:
            if module not in load_all_modules():
                exit_failure(messages("en", "module_not_found").format(module))
            # ignore if module not selected, it will remove anyway
            # (list.remove raises ValueError when absent — swallowed here)
            try:
                selected_modules.remove(module)
            except Exception as _:
                del _
        # if selected modules are zero
        if not len(selected_modules):
            exit_failure(messages("en", "zero_module_selected"))
    virtual_machine_container_reset_factory_time_seconds = argv_options. \
        virtual_machine_container_reset_factory_time_seconds
    run_as_test = argv_options.run_as_test
    #########################################
    # argv rules apply
    #########################################
    # build configuration based on selected modules
    configuration = honeypot_configuration_builder(selected_modules)
    info(messages("en", "honeypot_started"))
    info(messages("en", "loading_modules").format(", ".join(selected_modules)))
    # check for conflict in real machine ports and pick new ports
    info("checking for conflicts in ports")
    configuration = conflict_ports(configuration)
    # stop old containers (in case they are not stopped)
    stop_containers(configuration)
    # remove old containers (in case they are not updated)
    remove_old_containers(configuration)
    # remove old images (in case they are not updated)
    remove_old_images(configuration)
    # create new images based on selected modules
    create_new_images(configuration)
    # create OWASP Honeypot networks in case not exist
    create_ohp_networks()
    # start containers based on selected modules
    configuration = start_containers(configuration)
    # start network monitoring thread
    new_network_events_thread = Thread(target=new_network_events,
                                       args=(configuration,),
                                       name="new_network_events_thread")
    new_network_events_thread.start()
    info("all selected modules started: {0}".format(
        ", ".join(selected_modules)))
    # thread that periodically flushes buffered events to the database
    bulk_events_thread = Thread(target=insert_bulk_events_from_thread,
                                args=(),
                                name="insert_events_in_bulk_thread")
    bulk_events_thread.start()
    # run module processors
    run_modules_processors(configuration)
    # check if it's not a test
    if not run_as_test:
        # wait forever! in case user can send ctrl + c to interrupt
        wait_until_interrupt(
            virtual_machine_container_reset_factory_time_seconds,
            configuration,
            new_network_events_thread)
    # kill the network events thread
    terminate_thread(new_network_events_thread)
    terminate_thread(bulk_events_thread)
    # flush if in case any events that were not inserted from thread
    insert_events_in_bulk()
    # stop created containers
    stop_containers(configuration)
    # stop module processor
    stop_modules_processors(configuration)
    # remove created containers
    remove_old_containers(configuration)
    # remove created images
    remove_old_images(configuration)
    # remove_tmp_directories() error: access denied!
    # kill all missed threads (skip index 0, the main thread)
    for thread in threading.enumerate()[1:]:
        terminate_thread(thread, False)
    info("finished.")
    # reset cmd/terminal color
    finish()
    return True
def load_honeypot_engine():
    """
    Load and run the OHP (OWASP Honeypot) engine end-to-end.

    Parses argv, applies language/root/timeout checks, validates selected
    and excluded modules, builds and (re)creates the Docker configuration,
    launches the packet-capture process and the bulk database-insert thread,
    runs module processors, waits for interrupt, then tears everything down.

    Returns:
        True (the exit flag from wait_until_interrupt)
    """
    # print logo
    logo()
    # parse argv
    parser, argv_options = argv_parser()
    # check the language
    if argv_options.language:
        update_language(argv_options)
    #########################################
    # argv rules apply
    #########################################
    # check help menu
    if argv_options.show_help_menu:
        parser.print_help()
        exit_success()
    # check for requirements before start
    check_for_requirements(argv_options.start_api_server)
    # create indices before server start
    create_indices()
    # check api server flag; API mode starts the server and exits here
    if argv_options.start_api_server:
        start_api_server()
        exit_success()
    # Check if the script is running with sudo (container/capture work
    # requires root)
    if not os.geteuid() == 0:
        exit_failure(messages['script_must_run_as_root'])
    # Check timeout value if provided
    if argv_options.timeout_value < 1:
        exit_failure(messages["timeout_error"])
    # check selected modules
    # NOTE(review): `selected_modules` is only bound inside this branch but
    # is used unconditionally further down — presumably argv defaults make
    # this branch always taken; confirm against argv_parser.
    if argv_options.selected_modules:
        selected_modules = list(set(argv_options.selected_modules.rsplit(",")))
        if "all" in selected_modules:
            selected_modules = load_all_modules()
        if "" in selected_modules:
            selected_modules.remove("")
        # if selected modules are zero
        if not len(selected_modules):
            exit_failure(messages["no_module_selected_error"])
        # if module not found
        for module in selected_modules:
            if module not in load_all_modules():
                exit_failure("module {0} not found!".format(module))
    # check excluded modules
    if argv_options.excluded_modules:
        excluded_modules = list(set(argv_options.excluded_modules.rsplit(",")))
        if "all" in excluded_modules:
            exit_failure(messages["all_modules_excluded_error"])
        if "" in excluded_modules:
            excluded_modules.remove("")
        # remove excluded modules
        for module in excluded_modules:
            if module not in load_all_modules():
                exit_failure("module {0} not found!".format(module))
            # ignore if module not selected, it will remove anyway
            # (list.remove raises ValueError when absent — swallowed here)
            try:
                selected_modules.remove(module)
            except Exception:
                pass
        # if selected modules are zero
        if not len(selected_modules):
            exit_failure(messages["no_module_selected_error"])
    virtual_machine_container_reset_factory_time_seconds = argv_options. \
        virtual_machine_container_reset_factory_time_seconds
    run_as_test = argv_options.run_as_test
    #########################################
    # argv rules apply
    #########################################
    # build configuration based on selected modules
    configuration = honeypot_configuration_builder(selected_modules)
    # Set network configuration
    network_config = set_network_configuration(argv_options)
    info(messages["start_message"])
    info(messages["loading_modules"].format(", ".join(selected_modules)))
    # check for conflict in real machine ports and pick new ports
    info(messages["check_for_port_conflicts"])
    configuration = conflict_ports(configuration)
    # stop old containers (in case they are not stopped)
    stop_containers(configuration)
    # remove old containers (in case they are not updated)
    remove_old_containers(configuration)
    # remove old images (in case they are not updated)
    remove_old_images(configuration)
    # create new images based on selected modules
    create_new_images(configuration)
    # create OWASP Honeypot networks in case not exist
    create_ohp_networks()
    # start containers based on selected modules
    configuration = start_containers(configuration)
    # network capture process — 'spawn' gives the child a clean interpreter
    mp.set_start_method('spawn')
    # Event queues shared between the capture process and this process
    honeypot_events_queue = mp.Queue()
    network_events_queue = mp.Queue()
    # start a new process for network capture
    network_traffic_capture_process = mp.Process(
        target=network_traffic_capture,
        args=(
            configuration,
            honeypot_events_queue,
            network_events_queue,
            network_config,
        ),
        name="network_traffic_capture_process")
    network_traffic_capture_process.start()
    info(messages["selected_modules_started"].format(
        ", ".join(selected_modules)))
    # start a thread to push events to database regularly
    bulk_events_thread = Thread(
        target=push_events_to_database_from_thread,
        args=(
            honeypot_events_queue,
            network_events_queue,
        ),
        name="insert_events_in_bulk_thread")
    bulk_events_thread.start()
    # run module processors
    run_modules_processors(configuration)
    # wait forever! in case user can send ctrl + c to interrupt
    exit_flag = wait_until_interrupt(
        virtual_machine_container_reset_factory_time_seconds,
        configuration,
        network_traffic_capture_process,
        run_as_test)
    # killed the network traffic capture process by ctrl + c... waiting to end.
    info(messages["killing_capture_process"])
    if run_as_test:
        network_traffic_capture_process.terminate()
    # without ci it will be terminate after a few seconds, it needs to kill
    # the tshark and update pcap file collection
    network_traffic_capture_process.join()
    # if in case any events that were not inserted from thread
    push_events_queues_to_database(honeypot_events_queue, network_events_queue)
    # Kill bulk events thread
    terminate_thread(bulk_events_thread)
    # stop created containers
    stop_containers(configuration)
    # stop module processor
    stop_modules_processors(configuration)
    # remove created containers
    remove_old_containers(configuration)
    # remove created images
    remove_old_images(configuration)
    # remove_tmp_directories() error: access denied!
    # kill all missed threads (skip index 0, the main thread)
    for thread in threading.enumerate()[1:]:
        terminate_thread(thread, False)
    info(messages["finished"])
    # reset cmd/terminal color
    reset_cmd_color()
    return exit_flag
def __call__(self):
    """Worker loop: forever pull (priority, location) items off
    ``self.locations`` and fetch each one in a daemon thread, interrupting
    the fetch with FetchInterruptError once ``self.timeout`` has elapsed.
    """
    while True:
        priority, location = self.locations.get()
        _trace("____ got new location: priority: {0}; location: {1}".
               format(priority, location))
        fetchingthread = Thread(target=self._fetch_and_put,
                                args=(priority, location))
        # Daemonize so a stuck fetch cannot block interpreter shutdown.
        fetchingthread.daemon = True
        fetchingthread.start()
        _trace('__call__: thread started. joining...')
        # Give the fetch up to `timeout` seconds to finish on its own.
        fetchingthread.join(self.timeout)
        _trace('__call__: ...joined.')
        try:
            # Interrupt the fetch if it is still running past the timeout.
            _trace('__call__: raising FetchInterruptError...')
            fetchingthread.raise_exc(FetchInterruptError)
            _trace('__call__: ...raised. joining...')
            fetchingthread.join()
            _trace('__call__: ...joined.')
        except threading.ThreadError:
            # Happens when the thread completes before the timeout.
            _trace('__call__ got ThreadError.')
            pass
        finally:
            # Always mark the queue item done so queue.join() callers
            # are not blocked, whether the fetch completed or was cut short.
            self.locations.task_done()