def is_testing_not_required(pov_test_job):
    """
    Is testing required for the provided Pov Tester Job?
    :param pov_test_job: Pov Tester job which needs to be checked.
    :return: True/False: indicating whether testing is not required.
    """
    SUCCESS_THRESHOLD = 4
    try:
        curr_result = CRSAPIWrapper.get_best_pov_result(pov_test_job.target_cs_fielding,
                                                        pov_test_job.target_ids_fielding)
        return curr_result is not None and curr_result.num_success >= SUCCESS_THRESHOLD
    except Exception as e:
        log_error("Error occurred while trying to get available results for pov tester job:" +
                  str(pov_test_job.id) + ", Error:" + str(e))
    return False
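# In other words: once some earlier PoV result for the same (CS fielding, IDS fielding)
# pair has recorded SUCCESS_THRESHOLD (4) or more successful throws, later jobs for that
# pair short-circuit instead of re-testing the exploit.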
def _get_exploit_register(stdout_txt):
    """
    Get the exploit register from the output of cb-test.
    :param stdout_txt: output from cb-test
    :return: correct register name or None (on error)
    """
    magic_token = 'incorrect reg - should set'
    exploit_reg = None
    try:
        if stdout_txt is not None:
            stdout_txt = str(stdout_txt).strip()
            all_lines = stdout_txt.split('\n')
            for curr_line in all_lines:
                if magic_token in curr_line:
                    exploit_reg = curr_line.split(magic_token)[1].strip()
                    break
    except Exception as e:
        log_error("Error occurred while trying to get correct register from cb-test output:" + str(e))
    return exploit_reg
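# A minimal usage sketch of _get_exploit_register(); the cb-test output below is
# hypothetical and only illustrates the line format the parser looks for:
#
#   sample_stdout = "not ok - incorrect reg - should set ecx"
#   _get_exploit_register(sample_stdout)   # -> 'ecx'
#   _get_exploit_register("polls passed")  # -> None (no magic token present)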
def _get_job_args(curr_pov_test_job):
    """
    Get arguments for a provided test job.
    :param curr_pov_test_job: pov test job for which arguments need to be fetched.
    :return: (bin_dir, work_dir, pov_file_path, ids_file_path) tuple
    """
    cs_fielding_obj = curr_pov_test_job.target_cs_fielding
    pov_test_job_id = curr_pov_test_job.id
    # Get all binaries of the fielded CS
    all_cbns = _get_all_cbns(cs_fielding_obj)
    curr_work_dir = os.path.join(os.path.expanduser("~"), "pov_tester_" + str(pov_test_job_id))
    bin_dir = os.path.join(curr_work_dir, 'bin_dir')
    pov_dir = os.path.join(curr_work_dir, 'pov_dir')
    ids_dir = os.path.join(curr_work_dir, 'ids_rules')
    try:
        # set up binaries
        # Save CBNs into the bin dir
        os.system('mkdir -p ' + str(bin_dir))
        for curr_cb in all_cbns:
            curr_file = str(curr_cb.cs_id) + '_' + str(curr_cb.name)
            curr_file_path = os.path.join(bin_dir, curr_file)
            fp = open(curr_file_path, 'wb')
            fp.write(curr_cb.blob)
            fp.close()
            os.chmod(curr_file_path, 0o777)

        pov_file_path = None
        # set up povs
        # save povs into pov directory
        os.system('mkdir -p ' + str(pov_dir))
        target_exploit_obj = curr_pov_test_job.target_exploit
        pov_file_path = os.path.join(pov_dir, str(curr_pov_test_job.id) + '.pov')
        fp = open(pov_file_path, 'w')
        fp.write(str(target_exploit_obj.blob))
        fp.close()
        os.chmod(pov_file_path, 0o777)

        ids_file_path = None
        # set up ids rules
        # save ids rules into directory
        os.system('mkdir -p ' + str(ids_dir))
        ids_rules_obj = _get_ids_rules_obj(curr_pov_test_job.target_ids_fielding)
        # do we have non-empty ids rules?
        if ids_rules_obj is not None and ids_rules_obj.rules is not None and \
                len(str(ids_rules_obj.rules).strip()) > 0:
            ids_file_path = os.path.join(ids_dir, str(curr_pov_test_job.id) + '_ids.rules')
            fp = open(ids_file_path, 'w')
            fp.write(str(ids_rules_obj.rules))
            fp.close()
            os.chmod(ids_file_path, 0o777)
    except Exception as e:
        # clean up
        if curr_work_dir is not None:
            os.system('rm -rf ' + curr_work_dir)
        log_error("Error occurred while trying to set up working directory for PovTesterJob:" +
                  str(pov_test_job_id) + ", Error:" + str(e))
        raise e

    return bin_dir, curr_work_dir, pov_file_path, ids_file_path
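# Resulting on-disk layout (sketch of the directories created above):
#
#   ~/pov_tester_<job_id>/
#       bin_dir/     <cs_id>_<cb_name>      (challenge binaries, one file per CBN)
#       pov_dir/     <job_id>.pov           (the exploit to throw)
#       ids_rules/   <job_id>_ids.rules     (IDS rules, only written if non-empty)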
def process_povtester_job(curr_job_args):
    """
    Process the provided PoV Tester Job with the given number of threads.
    :param curr_job_args: (pov tester job to process, number of threads that could be used)
    :return: None
    """
    CRSAPIWrapper.open_connection()
    job_id = curr_job_args[0]
    curr_job = CRSAPIWrapper.get_pov_tester_job(job_id)
    num_threads = curr_job_args[1]
    target_job = curr_job
    job_id_str = str(curr_job.id)

    if target_job.try_start():
        if is_testing_not_required(curr_job):
            log_success("Testing not required for PovTesterJob:" + str(job_id) +
                        ", as a previous job obviated this.")
        else:
            curr_work_dir = None
            try:
                job_bin_dir, curr_work_dir, pov_file_path, ids_rules_path = _get_job_args(curr_job)
                job_id_str = str(curr_job.id)
                log_info("Trying to run PovTesterJob:" + job_id_str)
                all_child_process_args = []
                for i in range(NUM_THROWS):
                    all_child_process_args.append((job_bin_dir, pov_file_path, ids_rules_path))
                log_info("Got:" + str(len(all_child_process_args)) +
                         " Throws to test for PovTesterJob:" + job_id_str)
                all_results = []
                # If we can multiprocess, run in multi-threaded mode
                if num_threads > 1:
                    log_info("Running in multi-threaded mode with:" + str(num_threads) +
                             " threads. For PovTesterJob:" + job_id_str)
                    thread_pool = ThreadPool(processes=num_threads)
                    all_results = thread_pool.map(_test_pov, all_child_process_args)
                    thread_pool.close()
                    thread_pool.join()
                else:
                    log_info("Running in single threaded mode. For PovTesterJob:" + job_id_str)
                    for curr_child_arg in all_child_process_args:
                        all_results.append(_test_pov(curr_child_arg))

                throws_passed = len(filter(lambda x: x[0], all_results))
                # if none of the throws passed, see if we can create a new exploit
                if throws_passed == 0:
                    log_info("Exploit is bad. Trying to create new exploit by replacing most common register.")
                    all_regs = collections.defaultdict(int)
                    for _, curr_output, _ in all_results:
                        curr_exploit_reg = _get_exploit_register(curr_output)
                        if curr_exploit_reg is not None:
                            all_regs[curr_exploit_reg] += 1
                    if len(all_regs) > 0:
                        log_info("Got:" + str(len(all_regs)) + " possible registers")
                        target_reg = sorted(all_regs.items(), key=lambda x: x[1], reverse=True)[0][0]
                        log_success("Using:" + target_reg + " to create a new exploit")
                        _fixup_exploit(curr_job.target_exploit, target_reg)
                    else:
                        log_failure("Could not get any register from cb-test output")

                CRSAPIWrapper.create_pov_test_result(curr_job.target_exploit,
                                                     curr_job.target_cs_fielding,
                                                     curr_job.target_ids_fielding,
                                                     throws_passed)
                log_success("Done Processing PovTesterJob:" + job_id_str)
            except Exception as e:
                log_error("Error Occurred while processing PovTesterJob:" + job_id_str + ". Error:" + str(e))
            # clean up
            if curr_work_dir is not None:
                os.system('rm -rf ' + curr_work_dir)
        target_job.completed()
    else:
        log_failure("Ignoring PovTesterJob:" + job_id_str + " as we failed to mark it busy.")
    CRSAPIWrapper.close_connection()
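# Note: the register tally above is equivalent to this Counter-based sketch
# (shown only to illustrate the "most common register" selection, not used by the code):
#
#   regs = [_get_exploit_register(out) for _, out, _ in all_results]
#   reg_counts = collections.Counter(r for r in regs if r is not None)
#   target_reg = reg_counts.most_common(1)[0][0] if reg_counts else None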
output_pcap_file = sys.argv[3]
dummy_cs_id = int(sys.argv[4])
all_tests = os.listdir(xml_pov_dir)
log_info("Trying to create PCAP for:" + str(len(all_tests)) + " tests.")
pcap_file_handle = open_pcap(output_pcap_file)
connection_id = 1L
for curr_file in all_tests:
    curr_file_path = os.path.join(xml_pov_dir, curr_file)
    pcap_output_file = curr_file + '.cb.pcap'
    bin_tester = BinaryTester(bin_file, curr_file_path, is_cfe=True, pcap_output_file=pcap_output_file,
                              is_pov=not curr_file.endswith('xml'), standalone=True)
    ret_code, _, _ = bin_tester.test_cb_binary()
    if ret_code == 0:
        log_success("Provided Test:" + curr_file_path + " is OK.")
    else:
        log_error("Provided Test:" + curr_file_path + " failed.")
    assert os.path.exists(pcap_output_file), "PCAP for Test:" + curr_file_path + " does not exist."
    pcap_parser = PcapParser(pcap_output_file)
    tcp_stream = pcap_parser.get_data_stream()
    # ignore the first 2 packets, as they are for negotiating the seed
    all_data_pkts = tcp_stream.data_pkts[2:]
    msg_id = 1L
    for curr_data_pkt in all_data_pkts:
        side = 1 if curr_data_pkt.is_input else 0
        msg_hdr = struct.pack("<LLLHB", dummy_cs_id, connection_id, msg_id, len(curr_data_pkt.data), side)
        msg_id += 1
        write_packet(pcap_file_handle, msg_hdr + curr_data_pkt.data, side)
    connection_id += 1
pcap_file_handle.close()
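# The per-message header packed above uses struct format "<LLLHB" (little-endian, 15 bytes):
#   uint32 cs_id | uint32 connection_id | uint32 msg_id | uint16 data length | uint8 side
# A reader could recover the fields with (illustrative sketch only):
#
#   cs_id, conn_id, msg_id, data_len, side = struct.unpack("<LLLHB", msg_bytes[:15])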
def process_sanitizer_job(curr_job_args):
    """
    Process the provided sanitizer job and update the DB with the corresponding valid poll;
    else update the raw poll with the reason for failure.
    :param curr_job_args: (job that needs to run, num threads)
                          (As of now, we ignore num threads as it is not needed)
    :return: None
    """
    CRSAPIWrapper.open_connection()
    job_id = curr_job_args[0]
    curr_job = CRSAPIWrapper.get_poll_sanitizer_job(job_id)
    target_job = curr_job

    if target_job.try_start():
        target_cbs_path = os.path.join(os.path.expanduser('~'), 'pollsan_' + str(curr_job.id))
        try:
            log_info("Trying to process PollSanitizerJob:" + str(target_job.id))
            # Create folder to save all binaries that belong to the current CS.
            target_raw_poll = curr_job.raw_poll
            os.system('mkdir -p ' + str(target_cbs_path))
            # Save all binaries
            for curr_cb in target_raw_poll.cs.cbns_original:
                curr_file = str(curr_cb.cs_id) + '_' + str(curr_cb.name)
                curr_file_path = os.path.join(target_cbs_path, curr_file)
                fp = open(curr_file_path, 'wb')
                fp.write(curr_cb.blob)
                fp.close()
                os.chmod(curr_file_path, 0o777)

            sanitized_xml, target_result, ret_code = sanitize_pcap_poll(
                target_raw_poll.blob, target_cbs_path,
                optional_prefix='pollsan_' + str(curr_job.id),
                log_suffix=' for PollSanitizerJob:' + str(curr_job.id))
            target_raw_poll.sanitized = True
            target_raw_poll.save()

            if target_result == BinaryTester.CRASH_RESULT:
                # set crash to true
                target_raw_poll.is_crash = True
                log_error("PollSanitizerJob:" + str(curr_job.id) + ", Led to Crash. Someone attacked us, "
                          "it will be synced in network poll creator")
                target_raw_poll.save()
            elif target_result == BinaryTester.FAIL_RESULT:
                # set failed to true
                target_raw_poll.is_failed = True
                target_raw_poll.save()
                log_error("PollSanitizerJob:" + str(curr_job.id) + ", Failed on binary. Mostly timeout or Crash.")
            elif target_result == BinaryTester.PASS_RESULT:
                # Create Valid Poll
                CRSAPIWrapper.create_valid_poll(curr_job.raw_poll.cs, sanitized_xml,
                                                target_round=curr_job.raw_poll.round,
                                                is_perf_ready=(ret_code == 0))
                log_success("Created a ValidPoll for PollSanitizerJob:" + str(curr_job.id))
            else:
                log_error("Error occurred while sanitizing provided poll of Job:" + str(curr_job.id) +
                          ", Sanitize PCAP POLL Returned:" + str(target_result))
        except Exception as e:
            log_error("Error Occurred while processing PollSanitizerJob:" + str(target_job.id) +
                      ". Error:" + str(e))
        # clean up
        os.system('rm -rf ' + str(target_cbs_path))
        target_job.completed()
    else:
        log_failure("Ignoring PollSanitizerJob:" + str(target_job.id) + " as we failed to mark it busy.")
    CRSAPIWrapper.close_connection()
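# Outcome handling summary (grounded in the branches above):
#   BinaryTester.CRASH_RESULT -> raw poll flagged is_crash (we were attacked; synced by the network poll creator)
#   BinaryTester.FAIL_RESULT  -> raw poll flagged is_failed (mostly a timeout or crash on the binary)
#   BinaryTester.PASS_RESULT  -> a ValidPoll is created (perf-ready only when ret_code == 0)
#   anything else             -> logged as a sanitizer error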
def run_daemon(arg_list):
    global NO_OF_PROCESSES
    global POLL_TIME
    target_cs_id = None
    target_job_type = None
    max_num_jobs = 1000
    if len(arg_list) > 1:
        try:
            target_cs_id = int(arg_list[1])
            log_info("Handling Jobs for CS ID:" + str(target_cs_id))
        except ValueError:
            log_failure("Unable to parse the provided argument into CS ID:" + str(arg_list[1]))
            # Ignore the error
            pass
    if len(arg_list) > 2:
        target_job_type = arg_list[2]
        log_info("Provided Job Type:" + str(target_job_type))
    else:
        log_failure("No job type provided")
    if len(arg_list) > 3:
        try:
            max_num_jobs = int(arg_list[3])
        except ValueError:
            log_failure("Unable to parse the provided argument into number of jobs:" + str(arg_list[3]))
            # default number of jobs
            max_num_jobs = 1000
            pass
    else:
        log_failure("No max number of jobs provided, defaulting to:" + str(max_num_jobs))

    # Ignore this
    """if len(arg_list) > 2:
        try:
            no_of_process = int(arg_list[2])
        except ValueError:
            no_of_process = cpu_count()
        NO_OF_PROCESSES = no_of_process
    if len(arg_list) > 3:
        try:
            poll_time = int(arg_list[3])
        except ValueError:
            poll_time = POLL_TIME
        POLL_TIME = poll_time"""

    if EXIT_ON_WRONG_CS_ID and target_cs_id is None:
        log_error("Exiting, without scheduling any jobs as no valid CS ID is provided.")
        return
    elif target_cs_id is None:
        log_info("Will be running infinitely fetching Jobs for all CS.")

    processed_jobs = 0
    while True:
        CRSAPIWrapper.open_connection()
        no_jobs = True
        for curr_worker_config in worker_config:
            worker_name = curr_worker_config[0]
            if target_job_type is None or worker_name == target_job_type:
                job_getter = curr_worker_config[1]
                job_processor = curr_worker_config[2]
                log_info("Trying to get " + worker_name + " Jobs.")
                available_jobs = job_getter(target_cs_id=target_cs_id)
                if len(available_jobs) > 0:
                    num_jobs_to_get = max_num_jobs - processed_jobs
                    if num_jobs_to_get <= 0:
                        break
                    available_jobs = available_jobs[0:num_jobs_to_get]
                    processed_jobs += len(available_jobs)
                    log_info("Got " + str(len(available_jobs)) + " " + worker_name + " Jobs.")
                    if str(worker_name) == 'pov_tester':
                        NO_OF_PROCESSES = int(1.25 * NO_OF_PROCESSES)
                    child_threads = NO_OF_PROCESSES / len(available_jobs)
                    child_job_args = map(lambda curr_job: (curr_job.id, child_threads), available_jobs)
                    with ProcessPoolExecutor(max_workers=NO_OF_PROCESSES) as process_pool:
                        process_pool.map(job_processor, child_job_args)
                    log_success("Processed " + str(len(available_jobs)) + " " + worker_name + " Jobs.")
                    no_jobs = False
                    # start again from the beginning.
                    # This ensures that if there are any higher-priority jobs, we always execute them
                    # before going to lower-priority ones.
                    break
                else:
                    log_info("No " + worker_name + " Jobs available to run.")
        # if there are no VM jobs
        if no_jobs:
            # If this is supposed to take care of only one CS,
            # exit the while True loop
            # so that the worker knows this VM is done.
            if target_cs_id is not None:
                break
            time.sleep(POLL_TIME)
        # if we processed a sufficient number of jobs, exit
        if processed_jobs >= max_num_jobs:
            log_info("Processed:" + str(processed_jobs) + ", limit:" + str(max_num_jobs) + ". Exiting.")
            break
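# Hypothetical invocation sketch (the actual entry point is not shown in this file);
# run_daemon() expects sys.argv-style arguments [script, cs_id, job_type, max_num_jobs], e.g.:
#
#   python daemon.py 7 pov_tester 500
#
# if __name__ == '__main__':
#     run_daemon(sys.argv)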