Example #1
    def test_get_or_create_latest(self):
        # First game
        r0, status = Round.get_or_create_latest(num=0)
        assert_equals(status, Round.FIRST_GAME)
        assert_equals(r0.num, 0)
        assert_equals(r0, Round.current_round())

        # New round
        r1, status = Round.get_or_create_latest(num=1)
        assert_equals(status, Round.NEW_ROUND)
        assert_equals(r1.num, 1)
        assert_equals(r1, Round.current_round())
        assert_equals(r0, Round.prev_round())

        # Same round
        r1b, status = Round.get_or_create_latest(num=1)
        assert_equals(status, Round.SAME_ROUND)
        assert_equals(r1b.num, 1)
        assert_equals(r1b, Round.current_round())
        assert_equals(r0, Round.prev_round())
        assert_equals(r1, r1b)

        # New game
        r0b, status = Round.get_or_create_latest(num=0)
        assert_equals(status, Round.NEW_GAME)
        assert_equals(r0b.num, 0)
        assert_equals(r0b, Round.current_round())
        assert_is_none(Round.prev_round())
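Example #1 pins down the expected behaviour of Round.get_or_create_latest, current_round, and prev_round. A minimal peewee sketch consistent with those assertions might look like the following; the status constants, the num field, and the idea that starting a new game discards earlier rounds are inferred from the test, not taken from the actual codebase.

import peewee

db = peewee.SqliteDatabase(":memory:")  # stand-in database for this sketch
# db.create_tables([Round]) would be required before use


class Round(peewee.Model):
    """Hypothetical reconstruction; field names and constants are inferred from the test."""
    FIRST_GAME, NEW_GAME, NEW_ROUND, SAME_ROUND = range(4)

    num = peewee.IntegerField()

    class Meta:
        database = db

    @classmethod
    def current_round(cls):
        # latest round created, or None when no round exists yet
        return cls.select().order_by(cls.id.desc()).first()

    @classmethod
    def prev_round(cls):
        rounds = list(cls.select().order_by(cls.id.desc()).limit(2))
        return rounds[1] if len(rounds) > 1 else None

    @classmethod
    def get_or_create_latest(cls, num):
        latest = cls.current_round()
        if latest is None:
            return cls.create(num=num), cls.FIRST_GAME
        if num == latest.num:
            return latest, cls.SAME_ROUND
        if num > latest.num:
            return cls.create(num=num), cls.NEW_ROUND
        # a lower number than the latest round means a new game started;
        # assumption: earlier rounds are discarded so prev_round() returns None
        cls.delete().execute()
        return cls.create(num=num), cls.NEW_GAME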
Example #2
    def should_submit(target_cs):
        # FIXME: this should be generalized per challenge set introduction
        # don't submit on the first round
        if Round.current_round().num == 0:
            LOG.info("Not submitting on round 0.")
            return False

        # don't submit unless a (non-backdoor) exploit was submitted before this round started
        if ExploitSubmissionCable.select().join(
                Exploit).where((ExploitSubmissionCable.cs == target_cs)
                               & (ExploitSubmissionCable.processed_at != None)
                               & (ExploitSubmissionCable.processed_at <=
                                  Round.current_round().created_at)
                               & (Exploit.method != "backdoor")).exists():
            LOG.info("There's an exploit that's over a round old!.")
            return True

        # don't submit unless a crash was found before the previous round
        prev_round = Round.prev_round()
        if prev_round is not None and Crash.select().where(
            (Crash.cs == target_cs)
                & (Crash.created_at <= prev_round.created_at)).exists():
            LOG.info("There's a crash that's over two rounds old!")
            return True

        LOG.info("Patch conditions not met!")
        return False
Example #3
def upload_cbns(args):
    cs = CS.select().where(CS.name == args.cs).get()

    patch_type, _ = PT.get_or_create(name="manual",
                                     functionality_risk=1.0,
                                     exploitability=0.0)

    cbns = []
    for patched_file in args.patched_files:
        with open(patched_file) as f:
            content = f.read()
        try:
            cbn = CBN.create(cs=cs,
                             blob=content,
                             name=patched_file,
                             patch_type=patch_type)
        except peewee.IntegrityError:
            print "CBN already exists. Fetching."
            cbn = CBN.select().where(CBN.name == patched_file,
                                     CBN.cs == cs, CBN.blob == content).get()
        cbns.append(cbn)

    if args.field:
        ids, _ = IDSRule.get_or_create(cs=cs, rules='')
        CSSubmissionCable.get_or_create(cs=cs,
                                        cbns=cbns,
                                        ids=ids,
                                        round=Round.current_round())

    if args.eF is not None and args.eT is not None and args.eM is not None and cbn.patch_type is not None:
        perf_score = {
            'score': {
                'ref': {
                    'task_clock': 1.0,
                    'rss': 1.0,
                    'flt': 1.0,
                    'file_size': 1.0
                },
                'rep': {
                    'task_clock': args.eT,
                    'file_size': args.size_overhead,
                    'rss': args.eM,
                    'flt': args.eM,
                }
            }
        }
        PS.create(cs=cs,
                  perf_score=perf_score,
                  patch_type=cbn.patch_type,
                  has_failed_polls=args.eF != 0,
                  failed_polls=args.eF)
    if args.pT is not None and args.pM is not None and args.pS is not None:
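        # NOTE: csf (used below) is not defined in this excerpt; in the original
        # module it presumably refers to the ChallengeSetFielding for this CS.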
        csf.poll_feedback = PF.create(cs=cs,
                                      round_id=Round.current_round().id,
                                      success=args.pS,
                                      time_overhead=args.pT,
                                      memory_overhead=args.pM,
                                      size_overhead=args.size_overhead)
        csf.save()
Example #4
def main(args=None):
    submitters = [POVSubmitter(), CBSubmitter()]

    while True:
        wait_for_ambassador()

        LOG.info("Round #%d", Round.current_round().num)

        for submitter in submitters:
            submitter.run(Round.current_round().num)

    return 0
Example #5
def main(args=None):
    """Run the meister."""
    if args is None:
        args = []

    brain = ElephantBrain()
    creators = [DrillerCreator(),
                RexCreator(),
                PovFuzzer1Creator(),
                PovFuzzer2Creator(),
                ColorGuardCreator(),
                AFLCreator(),
                BackdoorSubmitterCreator(),
                CacheCreator(),
                RopCacheCreator(),
                PatcherexCreator(),
                FunctionIdentifierCreator(),
                NetworkPollCreatorCreator(),
                ShowmapSyncCreator(),
                #PatchPerformanceCreator(),
                # VM jobs
                #PollCreatorCreator(),
                #NetworkPollSanitizerCreator(),
                #CBTesterCreator(),
                PovTesterCreator()]
    scheduler = PriorityScheduler(brain, creators)

    while True:
        wait_for_ambassador()
        LOG.info("Round #%d", Round.current_round().num)
        scheduler.run()

    return 0
Example #6
    def rotator_submission(target_cs):
        global NEXT_PATCH_ORDER

        round_ = Round.current_round()

        if target_cs.name not in ORDERS or len(ORDERS[target_cs.name]) == 0:
            ORDERS[target_cs.name] = list(NEXT_PATCH_ORDER)
            #print target_cs.name, NEXT_PATCH_ORDER
            NEXT_PATCH_ORDER = NEXT_PATCH_ORDER[1:] + NEXT_PATCH_ORDER[:1]

        all_patches = target_cs.cbns_by_patch_type()
        for n in ORDERS[target_cs.name]:
            pt = PatchType.get(name=n)
            if pt not in all_patches:
                continue
            ORDERS[target_cs.name].remove(n)
            cbns = all_patches[pt]
            print "SUBMITTING", target_cs.name, cbns[0].name, cbns[
                0].patch_type.name
            c, _ = CSSubmissionCable.get_or_create(cs=target_cs,
                                                   cbns=cbns,
                                                   ids=cbns[0].ids_rule,
                                                   round=round_)
            print "...", c.id
            break
Example #7
    def test_most_recent(self):
        cs = ChallengeSet.create(name="foo")
        cs2 = ChallengeSet.create(name="bar")
        team = Team.create(name="opponent")
        exploit = Exploit.create(cs=cs,
                                 job=RexJob.create(),
                                 pov_type="type1",
                                 blob="abc",
                                 c_code="exploit it")
        exploit2 = Exploit.create(cs=cs2,
                                  job=RexJob.create(),
                                  pov_type="type1",
                                  blob="def",
                                  c_code="delfino")
        Round.create(num=0)

        cable = ExploitSubmissionCable.create(team=team,
                                              cs=cs,
                                              exploit=exploit,
                                              throws=10,
                                              round=Round.current_round())
        cable2 = ExploitSubmissionCable.create(team=team,
                                               cs=cs2,
                                               exploit=exploit2,
                                               throws=10,
                                               round=Round.current_round())
        assert_equals(len(ExploitSubmissionCable.most_recent()), 2)
        assert_items_equal(ExploitSubmissionCable.most_recent(),
                           [cable, cable2])

        # assert we get back only the most recent exploit
        r1 = Round.create(num=1)
        new_exploit = Exploit.create(cs=cs,
                                     job=RexJob.create(),
                                     pov_type="type2",
                                     blob="def",
                                     c_code="don't exploit it")
        new_cable = ExploitSubmissionCable.create(team=team,
                                                  cs=cs,
                                                  exploit=new_exploit,
                                                  throws=10,
                                                  round=r1)
        assert_equals(len(ExploitSubmissionCable.most_recent()), 2)
        assert_items_equal(ExploitSubmissionCable.most_recent(),
                           [new_cable, cable2])
Example #8
def field_cbns(args):
    cs = CS.select().where(CS.name == args.cs).get()
    # I know I could use one query for this, but then we might get duplicate CBNs and more than we want.
    cbns = [
        CBN.get(CBN.cs == cs, CBN.sha256 == sha) for sha in args.patched_shas
    ]
    ids, _ = IDSRule.get_or_create(cs=cs, rules='')
    CSSubmissionCable.get_or_create(cs=cs,
                                    cbns=cbns,
                                    ids=ids,
                                    round=Round.current_round())
Example #9
def create_round(args):
    if not len(args.challenge_sets):
        fielded = [cs.name for cs in CS.fielded_in_round()]
    else:
        fielded = args.challenge_sets
    new_round = Round.create(
        num=args.number if args.number is not None else Round.current_round().num + 1)

    for f in fielded:
        cs = CS.select().where(CS.name == f).get()
        CSF.create(cs=cs, team=Team.get_our(), available_round=new_round)
Example #10
 def _jobs(self):
     # Get all currently valid ChallengeSets and schedule a job for each.
     # Passing the round id ensures we don't create duplicate jobs for a CS within a round.
     for curr_cs in self.challenge_sets():
         curr_round = Round.current_round()
         job = PatchPerformanceJob(cs=curr_cs,
                                   request_cpu=1,
                                   request_memory=2048,
                                   payload={'round_id': curr_round.id})
         # we want the patch performance to be computed soon for every round.
         priority = 100
         LOG.debug(
             "Creating PatchPerformanceCreator for CS %s and Round %s ",
             curr_cs.name, curr_round.num)
         yield (job, priority)
Example #11
 def process_patch_submission(target_cs):
     """
     Process a patch submission request for the provided ChallengeSet
     :param target_cs: ChallengeSet for which the request needs to be processed.
     """
     round_ = Round.current_round()
     cbns_to_submit = CBSubmitter.patch_decision_simple(target_cs, round_)
     if cbns_to_submit is not None:
         if cbns_to_submit[0].ids_rule is None:
             ids = IDSRule.create(cs=target_cs, rules='')
         else:
             ids = cbns_to_submit[0].ids_rule
         CSSubmissionCable.get_or_create(cs=target_cs,
                                         cbns=cbns_to_submit,
                                         ids=ids,
                                         round=round_)
     else:
         LOG.info("%s - leaving old CBNs in place for", target_cs.name)
Example #12
    def test_submit_to(self):
        r0 = Round.create(num=0)
        r1 = Round.create(num=1)
        cs = ChallengeSet.create(name="foo")
        cs.rounds = [r1.id]
        job = AFLJob.create(cs=cs)
        exploit = Exploit.create(job=job, cs=cs, pov_type="type1", blob=BLOB, c_code="exploit it")
        team = Team.create(name="opponent")

        assert_equals(len(exploit.fieldings), 0)

        ef = exploit.submit_to(team=team, throws=10)
        assert_equals(len(exploit.fieldings), 1)
        assert_equals(ef.submission_round, Round.current_round())

        ef = exploit.submit_to(team=team, throws=5, round=r0)
        assert_equals(len(exploit.fieldings), 2)
        assert_equals(ef.submission_round, r0)
Example #13
    def test_process_and_unprocessed(self):
        cs = ChallengeSet.create(name="foo")
        team = Team.create(name="opponent")
        exploit = Exploit.create(cs=cs,
                                 job=RexJob.create(),
                                 pov_type="type1",
                                 blob="abc",
                                 c_code="exploit it")
        Round.create(num=0)

        cable = ExploitSubmissionCable.create(team=team,
                                              cs=cs,
                                              exploit=exploit,
                                              throws=10,
                                              round=Round.current_round())

        assert_equals(len(ExploitSubmissionCable.unprocessed()), 1)

        cable.process()
        assert_equals(len(ExploitSubmissionCable.unprocessed()), 0)
Example #14
def wait_for_ambassador():
    POLL_INTERVAL = 3
    while not (Round.current_round() and Round.current_round().is_ready()):
        LOG.info("Round data not available, waiting %d seconds", POLL_INTERVAL)
        time.sleep(POLL_INTERVAL)
Example #15
def compute_patch_performance(target_cs):
    """
    Compute patch performance for all patched binaries of the given CS.
    This will update the DB with the results.
    :param target_cs: CS for which patch performance needs to be computed.
    :return: None.
    """
    l.info("Trying to compute patch performance for CS:" + str(target_cs.id))
    patched_bins_perf = get_all_cb_sets_perf(target_cs)
    l.info("Got Raw Perf Scores.")

    l.info("Trying to Group Raw Scores.")
    grouped_perf_results = group_poll_results(patched_bins_perf)
    l.info("Grouped Raw Scores.")

    original_cbs_perf = []
    if 'original' in grouped_perf_results:
        original_cbs_perf = grouped_perf_results['original']
        del grouped_perf_results['original']
    if len(original_cbs_perf) <= 0:
        l.warning("No polls have been evaluated against original binary. "
                  "Ignoring this round of patch performance computation.")
        return

    if len(original_cbs_perf['fail']) > 0:
        l.warning(
            "Weired. There are some failed polls for original binary, ignoring failed polls."
        )

    # consider only passed polls
    original_cbs_perf = original_cbs_perf['pass']

    for curr_patch_type in grouped_perf_results:
        l.info("Computing Scores for Patch Type:" + str(curr_patch_type))
        pass_perf_objects = grouped_perf_results[curr_patch_type]['pass']
        patched_cbs_pass_poll_ids = []
        if len(pass_perf_objects) > 0:
            patched_cbs_pass_poll_ids = map(lambda perf_obj: perf_obj.poll.id,
                                            pass_perf_objects)
        else:
            l.warning("No passed polls found for Patch Type:" +
                      str(curr_patch_type))
            # skip to next patch type
            continue
        failed_perf_objects = grouped_perf_results[curr_patch_type]['fail']
        has_fails = len(failed_perf_objects) > 0

        failed_polls = []
        if has_fails:
            failed_polls = map(lambda perf_obj: perf_obj.poll.id,
                               failed_perf_objects)
        failed_polls_json = {'poll_ids': list(failed_polls)}

        original_cbs_pass_poll_ids = map(lambda perf_obj: perf_obj.poll.id,
                                         original_cbs_perf)

        common_pass_poll_ids = set(original_cbs_pass_poll_ids)
        common_pass_poll_ids.intersection_update(patched_cbs_pass_poll_ids)

        if not common_pass_poll_ids:
            l.warning(
                "No polls are common between the original and patched CBs. Ignoring patch type:"
                + str(curr_patch_type))
            # skip to next patch type
            continue

        polls_included = {'poll_ids': list(common_pass_poll_ids)}

        base_perf_objects = filter(
            lambda perf_obj: perf_obj.poll.id in common_pass_poll_ids,
            original_cbs_perf)
        patched_perf_objects = filter(
            lambda perf_obj: perf_obj.poll.id in common_pass_poll_ids,
            pass_perf_objects)

        base_perf_jsons = map(
            lambda perf_obj: perf_obj.performances['perf']['median'],
            base_perf_objects)
        patched_perf_jsons = map(
            lambda perf_obj: perf_obj.performances['perf']['median'],
            patched_perf_objects)

        base_perf_total = get_perf_totals(base_perf_jsons)
        # get the size of the binaries; it will be the same across all runs
        base_perf_total[SIZE_PERF_NAME] = base_perf_jsons[0][SIZE_PERF_NAME]
        patched_perf_total = get_perf_totals(patched_perf_jsons)
        # again, the size of the binaries will be the same across all tests
        patched_perf_total[SIZE_PERF_NAME] = patched_perf_jsons[0][
            SIZE_PERF_NAME]

        target_score = compute_overhead(base_perf_total, patched_perf_total)

        l.info("Trying to create PatchScore into DB for patch type:" +
               str(curr_patch_type) + " for cs:" + str(target_cs.id))
        # convert patch type name to PatchType
        curr_patch_type = PatchType.get(PatchType.name == curr_patch_type)
        # create patch score
        PatchScore.create(cs=target_cs,
                          patch_type=curr_patch_type,
                          num_polls=len(common_pass_poll_ids),
                          polls_included=polls_included,
                          has_failed_polls=has_fails,
                          failed_polls=failed_polls_json,
                          round=Round.current_round(),
                          perf_score=target_score)
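The helpers get_perf_totals and compute_overhead are not shown in this example. A plausible sketch, assuming get_perf_totals sums each perf metric across the per-poll median dicts and compute_overhead reports the patched/original ratio in the 'ref'/'rep' shape seen in Example #3, could be:

SIZE_PERF_NAME = 'file_size'  # assumed key; matches the perf_score fields in Example #3


def get_perf_totals(perf_jsons):
    # Sum every metric across the per-poll median perf dicts (assumed behaviour).
    totals = {}
    for perf_json in perf_jsons:
        for metric, value in perf_json.items():
            totals[metric] = totals.get(metric, 0.0) + value
    return totals


def compute_overhead(base_totals, patched_totals):
    # Relative overhead of the patched binaries: 'ref' normalised to 1.0,
    # 'rep' = patched / original for each metric (assumed behaviour).
    rep = {}
    for metric, base_value in base_totals.items():
        if base_value:
            rep[metric] = patched_totals.get(metric, 0.0) / float(base_value)
    ref = dict((metric, 1.0) for metric in rep)
    return {'score': {'ref': ref, 'rep': rep}}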
Example #16
 def test_current_round(self):
     assert_equals(Round.current_round(), None)
     Round.create(num=0)
     Round.create(num=1)
     Round.create(num=2)
     assert_equals(Round.current_round().num, 2)
Example #17
def wait_for_ambassador():
    poll_interval = 3
    while not (Round.current_round() and Round.current_round().is_ready()):
        LOG.info("Round data not available, waiting %d seconds", poll_interval)
        time.sleep(poll_interval)