Example #1
    def get_perf_measures(self):
        """
            Get performance json of the poll run of the provided cb.
        :return: Json with median and variance of the perf numbers.
        """
        # Test, just in case
        self.test()
        log_info("Trying to compute performance measures.")
        # initialize to defaults
        perf_keys = [
            PatchTester.RSS_PERF_NAME, PatchTester.FLT_PERF_NAME,
            PatchTester.CPU_CLOCK_PERF_NAME, PatchTester.TSK_CLOCK_PERF_NAME,
            PatchTester.FILE_SIZE_PERF_NAME
        ]
        perfs_median = {curr_key: 0.0 for curr_key in perf_keys}
        perfs_variance = {curr_key: 0.0 for curr_key in perf_keys}
        all_ok = True
        # get all perf dicts as a list, since we iterate over it once per key
        # (a bare map object would be exhausted after the first key on Python 3)
        all_perfs = [curr_res[4]["perf"] for curr_res in self.test_results]
        # iterate thru each and compute the corresponding measures
        for curr_perf_key in perf_keys:
            perf_vals = []
            for curr_perf in all_perfs:
                perf_vals.append(curr_perf[curr_perf_key])
            # compute median and variance for each performance measure.
            curr_median = PatchTester.__get_median(perf_vals)
            if curr_median is not None:
                perfs_median[curr_perf_key] = curr_median
            else:
                log_failure("Unable to compute Median for values:" +
                            str(perf_vals))
                all_ok = False
            curr_variance = PatchTester.__get_variance(perf_vals)
            if curr_variance is not None:
                perfs_variance[curr_perf_key] = curr_variance
            else:
                log_failure("Unable to compute Variance for values:" +
                            str(perf_vals))
                all_ok = False
        if all_ok:
            log_success("Successfully computed performance measures.")
        else:
            log_failure(
                "Error occurred while computing performance values for one or more perf keys."
            )

        return {"perf": {"median": perfs_median, "variance": perfs_variance}}
Example #2
    def test(self):
        """
            Test the provided patch.
        :return: None
        """
        if self.test_results is None:
            self.test_results = []
            if os.path.exists(self.poll_xml_path):
                # run multi-threaded if more than one thread is requested
                if self.num_threads > 1:
                    log_info("Trying to test:" + self.bin_directory +
                             " with poll xml:" + self.poll_xml_path +
                             " with " + str(self.num_threads) +
                             " threads for " + str(PatchTester.NUM_TEST_TIME) +
                             " times")
                    # prepare all args
                    thread_args = []
                    for i in range(PatchTester.NUM_TEST_TIME):
                        thread_args.append(
                            (self.bin_directory, self.poll_xml_path,
                             self.ids_rules_fp, self.isbitflip))
                    # map to process poll
                    thread_pool = ThreadPool(processes=self.num_threads)
                    self.test_results = thread_pool.map(
                        bin_tester_wrapper, thread_args)
                    log_success("Tested:" + self.bin_directory +
                                " with poll xml:" + self.poll_xml_path +
                                " with " + str(self.num_threads) +
                                " threads for " +
                                str(PatchTester.NUM_TEST_TIME) + " times")
                    thread_pool.close()
                    thread_pool.join()

                else:
                    log_info("Trying to test:" + self.bin_directory +
                             " with poll xml:" + self.poll_xml_path +
                             " in single threaded mode for " +
                             str(PatchTester.NUM_TEST_TIME) + " times")
                    # single threaded
                    # test each poll file individually.
                    for i in range(PatchTester.NUM_TEST_TIME):
                        self.test_results.append(
                            bin_tester_wrapper(
                                (self.bin_directory, self.poll_xml_path,
                                 self.ids_rules_fp, self.isbitflip)))

                    log_success("Tested:" + self.bin_directory +
                                " with poll xml dir:" + self.poll_xml_path +
                                " in single threaded mode for " +
                                str(PatchTester.NUM_TEST_TIME) + " times")
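
For context, a usage sketch of this class, mirroring the way Example #6 constructs and drives the tester (the paths and thread count below are placeholders):

patch_tester = PatchTester("/tmp/bin_dir", "/tmp/poll_xml/1.xml",
                           None,            # no IDS rules file
                           num_threads=4,
                           isbitflip=False)
patch_tester.test()
if patch_tester.are_polls_ok():
    print(patch_tester.get_perf_measures())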
Example #3
def process_povtester_job(curr_job_args):
    """
        Process the provided PoV tester job with the given number of threads.
    :param curr_job_args: (id of the PoV tester job to process, number of threads that can be used)
    :return: None
    """
    CRSAPIWrapper.open_connection()
    job_id = curr_job_args[0]
    curr_job = CRSAPIWrapper.get_pov_tester_job(job_id)
    num_threads = curr_job_args[1]
    target_job = curr_job
    job_id_str = str(curr_job.id)

    if target_job.try_start():
        if is_testing_not_required(curr_job):
            log_success("Testing not required for PovTesterJob:" +
                        str(job_id) + ", as a previous job obviated this.")
        else:
            curr_work_dir = None
            try:
                job_bin_dir, curr_work_dir, pov_file_path, ids_rules_path = _get_job_args(
                    curr_job)
                log_info("Trying to run PovTesterJob:" + job_id_str)
                all_child_process_args = []

                for i in range(NUM_THROWS):
                    all_child_process_args.append(
                        (job_bin_dir, pov_file_path, ids_rules_path))

                log_info("Got:" + str(len(all_child_process_args)) +
                         " Throws to test for PovTesterJob:" + job_id_str)

                all_results = []
                # if we can use multiple threads, run in multi-threaded mode
                if num_threads > 1:
                    log_info("Running in multi-threaded mode with:" +
                             str(num_threads) + " threads. For PovTesterJob:" +
                             job_id_str)
                    thread_pool = ThreadPool(processes=num_threads)
                    all_results = thread_pool.map(_test_pov,
                                                  all_child_process_args)
                    thread_pool.close()
                    thread_pool.join()
                else:
                    log_info(
                        "Running in single threaded mode. For PovTesterJob:" +
                        job_id_str)
                    for curr_child_arg in all_child_process_args:
                        all_results.append(_test_pov(curr_child_arg))

                throws_passed = len([x for x in all_results if x[0]])
                # if none of the throws passed, let's see if we can create a new exploit
                if throws_passed == 0:
                    log_info(
                        "Exploit is bad. Trying to create new exploit by replacing most common register."
                    )
                    all_regs = collections.defaultdict(int)
                    for _, curr_output, _ in all_results:
                        curr_exploit_reg = _get_exploit_register(curr_output)
                        if curr_exploit_reg is not None:
                            all_regs[curr_exploit_reg] += 1
                    if len(all_regs) > 0:
                        log_info("Got:" + str(len(all_regs)) +
                                 " possible registers")
                        target_reg = sorted(all_regs.items(),
                                            key=lambda x: x[1],
                                            reverse=True)[0][0]
                        log_success("Using:" + target_reg +
                                    " to create a new exploit")
                        _fixup_exploit(curr_job.target_exploit, target_reg)
                    else:
                        log_failure(
                            "Could not get any register from cb-test output")

                CRSAPIWrapper.create_pov_test_result(
                    curr_job.target_exploit, curr_job.target_cs_fielding,
                    curr_job.target_ids_fielding, throws_passed)
                log_success("Done Processing PovTesterJob:" + job_id_str)
            except Exception as e:
                log_error("Error Occured while processing PovTesterJob:" +
                          job_id_str + ". Error:" + str(e))
            # clean up
            if curr_work_dir is not None:
                os.system('rm -rf ' + curr_work_dir)
        target_job.completed()
    else:
        log_failure("Ignoring PovTesterJob:" + job_id_str +
                    " as we failed to mark it busy.")
    CRSAPIWrapper.close_connection()
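
The register-counting logic above (a defaultdict plus a reverse sort) can be expressed more directly with collections.Counter; a sketch of the equivalent selection, with the register-extraction function passed in so the snippet stays self-contained:

import collections

def most_common_register(all_outputs, get_exploit_register):
    # Count the faulting register reported in each cb-test output.
    reg_counts = collections.Counter()
    for curr_output in all_outputs:
        curr_reg = get_exploit_register(curr_output)
        if curr_reg is not None:
            reg_counts[curr_reg] += 1
    if not reg_counts:
        return None
    # most_common(1) returns [(register, count)] for the top entry.
    return reg_counts.most_common(1)[0][0]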
Example #4
import os
import struct
import sys

bin_file = sys.argv[1]
xml_pov_dir = sys.argv[2]
output_pcap_file = sys.argv[3]
dummy_cs_id = int(sys.argv[4])
all_tests = os.listdir(xml_pov_dir)
log_info("Trying to create PCAP for:" + str(len(all_tests)) + " tests.")
pcap_file_handle = open_pcap(output_pcap_file)
connection_id = 1
for curr_file in all_tests:
    curr_file_path = os.path.join(xml_pov_dir, curr_file)
    pcap_output_file = curr_file + '.cb.pcap'
    bin_tester = BinaryTester(bin_file, curr_file_path, is_cfe=True, pcap_output_file=pcap_output_file,
                              is_pov=not curr_file.endswith('xml'), standalone=True)
    ret_code, _, _ = bin_tester.test_cb_binary()
    if ret_code == 0:
        log_success("Provided Test:" + curr_file_path + " is OK.")
    else:
        log_error("Provided Test:" + curr_file_path + " failed.")
    assert os.path.exists(pcap_output_file), "PCAP for Test:" + curr_file_path + " does not exist."

    pcap_parser = PcapParser(pcap_output_file)
    tcp_stream = pcap_parser.get_data_stream()
    # ignore the first 2 packets, as they are for negotiating the seed
    all_data_pkts = tcp_stream.data_pkts[2:]
    msg_id = 1
    for curr_data_pkt in all_data_pkts:
        side = 1 if curr_data_pkt.is_input else 0
        msg_hdr = struct.pack("<LLLHB", dummy_cs_id, connection_id, msg_id, len(curr_data_pkt.data), side)
        msg_id += 1
        write_packet(pcap_file_handle, msg_hdr + curr_data_pkt.data, side)
    connection_id += 1
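
The message header packed above uses the little-endian format "<LLLHB": three 4-byte unsigned ints (CS id, connection id, message id), a 2-byte payload length, and a 1-byte side flag (1 = input, 0 = output), 15 bytes in total. A sketch of decoding such a header back out of a captured message:

import struct

MSG_HDR_FMT = "<LLLHB"
MSG_HDR_LEN = struct.calcsize(MSG_HDR_FMT)  # 15 bytes

def parse_msg(message):
    # Split a packed message back into its header fields and payload.
    cs_id, conn_id, msg_id, length, side = struct.unpack(MSG_HDR_FMT,
                                                         message[:MSG_HDR_LEN])
    return cs_id, conn_id, msg_id, side, message[MSG_HDR_LEN:MSG_HDR_LEN + length]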
Example #5
def process_sanitizer_job(curr_job_args):
    """
        Process the provided sanitizer job.
        and update DB with corresponding valid poll, else update the
        raw poll with reason of failure.
    :param curr_job_args:  (job that needs to run, num threads)
                            (As of now, we ignore num threads as it is not needed)
    :return: None
    """
    CRSAPIWrapper.open_connection()
    job_id = curr_job_args[0]
    curr_job = CRSAPIWrapper.get_poll_sanitizer_job(job_id)
    target_job = curr_job

    if target_job.try_start():
        target_cbs_path = os.path.join(os.path.expanduser('~'),
                                       'pollsan_' + str(curr_job.id))
        try:
            log_info("Trying to process PollSanitizerJob:" +
                     str(target_job.id))
            target_raw_poll = curr_job.raw_poll

            # Create a folder to save all binaries that belong to the current CS.
            os.system('mkdir -p ' + str(target_cbs_path))
            # Save all binaries
            for curr_cb in target_raw_poll.cs.cbns_original:
                curr_file = str(curr_cb.cs_id) + '_' + str(curr_cb.name)
                curr_file_path = os.path.join(target_cbs_path, curr_file)
                with open(curr_file_path, 'wb') as fp:
                    fp.write(curr_cb.blob)
                os.chmod(curr_file_path, 0o777)

            sanitized_xml, target_result, ret_code = sanitize_pcap_poll(
                target_raw_poll.blob,
                target_cbs_path,
                optional_prefix='pollsan_' + str(curr_job.id),
                log_suffix=' for PollSanitizerJob:' + str(curr_job.id))
            target_raw_poll.sanitized = True
            target_raw_poll.save()

            if target_result == BinaryTester.CRASH_RESULT:
                # set crash to true
                target_raw_poll.is_crash = True
                log_error("PollSanitizerJob:" + str(curr_job.id) +
                          ", Lead to Crash. Someone attacked us, "
                          "it will be synced in network poll creator")
                target_raw_poll.save()
            elif target_result == BinaryTester.FAIL_RESULT:
                # set failed to true
                target_raw_poll.is_failed = True
                target_raw_poll.save()
                log_error("PollSanitizerJob:" + str(curr_job.id) +
                          ", Failed on binary. Mostly timeout or Crash.")
            elif target_result == BinaryTester.PASS_RESULT:
                # Create Valid Poll
                CRSAPIWrapper.create_valid_poll(
                    curr_job.raw_poll.cs,
                    sanitized_xml,
                    target_round=curr_job.raw_poll.round,
                    is_perf_ready=(ret_code == 0))
                log_success("Created a ValidPoll for PollSanitizerJob:" +
                            str(curr_job.id))
            else:
                log_error(
                    "Error occurred while sanitizing provided poll of Job:" +
                    str(curr_job.id) + ", Sanitize PCAP POLL Returned:" +
                    str(target_result))

        except Exception as e:
            log_error("Error Occured while processing PollerSanitizerJob:" +
                      str(target_job.id) + ". Error:" + str(e))
        # clean up
        os.system('rm -rf ' + str(target_cbs_path))
        target_job.completed()
    else:
        log_failure("Ignoring PollerSanitizerJob:" + str(target_job.id) +
                    " as we failed to mark it busy.")
    CRSAPIWrapper.close_connection()
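
The shell-outs used above ('mkdir -p', 'rm -rf') can be replaced with stdlib calls, which avoid spawning a shell and cope with unusual characters in paths; a sketch of the equivalent setup and teardown:

import os
import shutil

def make_work_dir(path):
    # equivalent of os.system('mkdir -p ' + path)
    if not os.path.isdir(path):
        os.makedirs(path)

def remove_work_dir(path):
    # equivalent of os.system('rm -rf ' + path)
    shutil.rmtree(path, ignore_errors=True)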
Example #6
def process_cb_tester_job(job_args):
    """
        Process the cb tester job
    :param job_args: Tuple (id of the cb tester job to be tested, number of processes)
    :return: None
    """
    CRSAPIWrapper.open_connection()
    job_id = job_args[0]
    curr_cb_test_job = CRSAPIWrapper.get_cb_tester_job(job_id)
    no_process = job_args[1]
    curr_job_id = str(curr_cb_test_job.id)
    if curr_cb_test_job.try_start():
        log_info("Trying to process cb-tester Job:" + str(curr_job_id))
        target_dir = get_unique_dir(os.path.expanduser("~"),
                                    "cb_tester_" + str(curr_job_id))
        try:
            # save binaries and xml
            bin_dir = os.path.join(target_dir, "bin")
            xml_dir = os.path.join(target_dir, "poll_xml")
            ids_rule_fp = None
            ids_rule = None
            isbitflip = False

            os.system('mkdir -p ' + str(bin_dir))
            os.system('mkdir -p ' + str(xml_dir))

            # save all the binaries in bin folder
            for curr_cb in CRSAPIWrapper.get_cbs_from_patch_type(
                    curr_cb_test_job.target_cs, curr_cb_test_job.patch_type):
                bin_path = os.path.join(bin_dir, curr_cb.name)
                ids_rule = curr_cb.ids_rule
                with open(bin_path, 'wb') as fp:
                    fp.write(curr_cb.blob)
                os.chmod(bin_path, 0o777)

            # Save IDS rules
            if ids_rule is not None and ids_rule.rules is not None and len(
                    str(ids_rule.rules).strip()) > 0:
                ids_dir = os.path.join(target_dir, "ids_dir")
                os.system('mkdir -p ' + ids_dir)
                ids_rule_fp = os.path.join(ids_dir, "ids.rules")
                fp = open(ids_rule_fp, "w")
                fp.write(str(ids_rule.rules))
                isbitflip = 'bitflip' in str(ids_rule.rules)
                fp.close()

            # save the xml
            xml_file_path = os.path.join(
                xml_dir,
                str(curr_cb_test_job.poll.id) + '.xml')
            with open(xml_file_path, 'w') as fp:
                fp.write(str(curr_cb_test_job.poll.blob))

            # Test the poll
            curr_patch_tester = PatchTester(bin_dir,
                                            xml_file_path,
                                            ids_rule_fp,
                                            num_threads=no_process,
                                            isbitflip=isbitflip)
            curr_patch_tester.test()

            # get all perfs if poll is ok.
            perf_measurements = {}
            is_poll_ok = False
            if curr_patch_tester.are_polls_ok():
                is_poll_ok = True
                perf_measurements = curr_patch_tester.get_perf_measures()
            else:
                log_failure("CS:" + str(curr_cb_test_job.target_cs.id) +
                            ", Patch Type:" +
                            str(curr_cb_test_job.patch_type) +
                            " failed for Poll:" +
                            str(curr_cb_test_job.poll.id))
            # update performance measurements.
            CRSAPIWrapper.create_poll_performance(curr_cb_test_job.poll,
                                                  curr_cb_test_job.target_cs,
                                                  curr_cb_test_job.patch_type,
                                                  is_poll_ok=is_poll_ok,
                                                  perf_json=perf_measurements)
            log_success("Processed cb-tester Job:" + str(curr_job_id))
        except Exception as e:
            log_failure(
                "Exception occurred while trying to process cb_tester job:" +
                str(curr_job_id) + ", Exception:" + str(e))
        # mark the job as completed.
        curr_cb_test_job.completed()
        # clean up
        os.system('rm -rf ' + target_dir)
    else:
        log_info("Unable to start job:" + str(curr_job_id) + ". Ignoring")
    CRSAPIWrapper.close_connection()
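
get_unique_dir is not shown in these examples. A minimal sketch of what it might do, assuming it only needs to return a fresh directory under the given base path (the implementation below, built on tempfile.mkdtemp, is a guess for illustration):

import tempfile

def get_unique_dir(base_dir, prefix):
    # mkdtemp creates a directory with a unique suffix and returns its path.
    return tempfile.mkdtemp(prefix=prefix + "_", dir=base_dir)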
Example #7
def run_daemon(arg_list):
    """
        Run the daemon that fetches and processes jobs.
    :param arg_list: [script name, target CS ID, job type, max number of jobs]
    :return: None
    """
    global NO_OF_PROCESSES
    global POLL_TIME
    target_cs_id = None
    target_job_type = None
    max_num_jobs = 1000

    if len(arg_list) > 1:
        try:
            target_cs_id = int(arg_list[1])
            log_info("Handling Jobs for CS ID:" + str(target_cs_id))
        except ValueError:
            log_failure("Unable to parse the provided argument into CS ID:" +
                        str(arg_list[1]))
            # Ignore the error; target_cs_id stays None
    if len(arg_list) > 2:
        target_job_type = arg_list[2]
        log_info("Provided Job Type:" + str(target_job_type))
    else:
        log_failure("No job type provided")

    if len(arg_list) > 3:
        try:
            max_num_jobs = int(arg_list[3])
        except ValueError:
            log_failure(
                "Unable to parse the provided argument into number of jobs:" +
                str(arg_list[3]))
            # fall back to the default number of jobs
            max_num_jobs = 1000
    else:
        log_failure("No max number of jobs provided, defaulting to:" +
                    str(max_num_jobs))

    # Ignore this
    """if len(arg_list) > 2:
        try:
            no_of_process = int(arg_list[2])
        except ValueError:
            no_of_process = cpu_count()
        NO_OF_PROCESSES = no_of_process
    if len(arg_list) > 3:
        try:
            poll_time = int(arg_list[3])
        except ValueError:
            poll_time = POLL_TIME
        POLL_TIME = poll_time"""

    if EXIT_ON_WRONG_CS_ID and target_cs_id is None:
        log_error(
            "Exiting, without scheduling any jobs as no valid CS ID is provided."
        )
        return
    elif target_cs_id is None:
        log_info("Will be running infinitely fetching Jobs for all CS.")
    processed_jobs = 0
    while True:
        CRSAPIWrapper.open_connection()
        no_jobs = True
        for curr_worker_config in worker_config:
            worker_name = curr_worker_config[0]
            if target_job_type is None or worker_name == target_job_type:
                job_getter = curr_worker_config[1]
                job_processor = curr_worker_config[2]
                log_info("Trying to get " + worker_name + " Jobs.")
                available_jobs = job_getter(target_cs_id=target_cs_id)
                if len(available_jobs) > 0:
                    num_jobs_to_get = max_num_jobs - processed_jobs
                    if num_jobs_to_get <= 0:
                        break
                    available_jobs = available_jobs[0:num_jobs_to_get]
                    processed_jobs += len(available_jobs)
                    log_info("Got " + str(len(available_jobs)) + " " +
                             worker_name + " Jobs.")
                    if str(worker_name) == 'pov_tester':
                        # give pov_tester jobs 25% more workers (note: this
                        # compounds across loop iterations, as NO_OF_PROCESSES
                        # is a module-level global)
                        NO_OF_PROCESSES = int(1.25 * NO_OF_PROCESSES)
                    # floor division: threads each concurrent job may use
                    child_threads = NO_OF_PROCESSES // len(available_jobs)
                    child_job_args = [(curr_job.id, child_threads)
                                      for curr_job in available_jobs]
                    with ProcessPoolExecutor(
                            max_workers=NO_OF_PROCESSES) as process_pool:
                        process_pool.map(job_processor, child_job_args)
                    log_success("Processed " + str(len(available_jobs)) + " " +
                                worker_name + " Jobs.")
                    no_jobs = False
                    # start again from the beginning.
                    # This ensures that if there are any higher priority jobs,
                    # we always execute them before going to lower priority ones.
                    break
                else:
                    log_info("No " + worker_name + " Jobs available to run.")

        # if there were no jobs for this VM
        if no_jobs:
            # If this daemon is supposed to take care of only one CS,
            # exit the loop so that the worker knows this VM is done.
            if target_cs_id is not None:
                break
            time.sleep(POLL_TIME)
        # if we processed a sufficient number of jobs, exit
        if processed_jobs >= max_num_jobs:
            log_info("Processed:" + str(processed_jobs) + ", limit:" +
                     str(max_num_jobs) + ". Exiting.")
            break
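
worker_config is referenced but not defined in this example. From the way the loop indexes it, each entry is a (worker name, job getter, job processor) tuple, checked in priority order; a hypothetical sketch (the getter names below are assumptions, only the processor functions appear in the earlier examples):

worker_config = [
    # (worker name, job getter, job processor), highest priority first
    ('pov_tester', CRSAPIWrapper.get_pov_tester_jobs, process_povtester_job),
    ('cb_tester', CRSAPIWrapper.get_cb_tester_jobs, process_cb_tester_job),
    ('poll_sanitizer', CRSAPIWrapper.get_poll_sanitizer_jobs, process_sanitizer_job),
]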