def create_tables():
    LOG.debug("Creating tables...")
    master_db.create_tables(tables(), safe=True)

    from farnsworth.models import (ChallengeBinaryNode, ChallengeSetFielding,
                                   Crash, ExploitSubmissionCable, IDSRule,
                                   IDSRuleFielding, Test)
    master_db.create_index(ChallengeBinaryNode, ['cs', 'name', 'sha256'], unique=True)
    master_db.create_index(ChallengeSetFielding, ['cs', 'team', 'submission_round'], unique=True)
    master_db.create_index(ChallengeSetFielding, ['cs', 'team', 'available_round'], unique=True)
    master_db.create_index(Crash, ['cs', 'sha256'], unique=True)
    master_db.create_index(Test, ['cs', 'sha256'], unique=True)
    master_db.create_index(ChallengeSetFielding, ['sha256'])
    master_db.create_index(ChallengeBinaryNode, ['sha256'])
    master_db.create_index(Crash, ['sha256'])
    master_db.create_index(IDSRule, ['sha256'])
    master_db.create_index(IDSRuleFielding, ['sha256'])
    master_db.create_index(Test, ['sha256'])
    master_db.create_index(ExploitSubmissionCable, ['round', 'cs', 'team'], unique=True)

    LOG.debug("Creating patch types...")
    from farnsworth.models import PatcherexJob, PatchType
    for name, (func_risk, exploitability) in PatcherexJob.PATCH_TYPES.items():
        PatchType.create(name=name, functionality_risk=func_risk,
                         exploitability=exploitability)
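# Seeding sketch (hedged): PatcherexJob.PATCH_TYPES is assumed to map a
# patch-type name to a (functionality_risk, exploitability) pair, so the loop
# above creates one PatchType row per entry. The values below are illustrative
# only:
#
#   PATCH_TYPES = {
#       "stackretencryption": (0.0, 0.2),  # hypothetical risk/exploitability
#       "medium_reassembler": (0.1, 0.1),  # hypothetical risk/exploitability
#   }
#
# A fresh deployment would typically run create_tables() once before any
# workers start touching the database.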
def test_cbns_by_patch_type(self):
    cs = ChallengeSet.create(name="foo")
    cbn = ChallengeBinaryNode.create(name="foo", cs=cs, blob="aaa")
    patch0 = PatchType.create(name="patch0", functionality_risk=0, exploitability=1)
    patch1 = PatchType.create(name="patch1", functionality_risk=0, exploitability=1)
    cbn1 = ChallengeBinaryNode.create(name="foo1", cs=cs, patch_type=patch0, blob="aaa1")
    cbn2 = ChallengeBinaryNode.create(name="foo2", cs=cs, patch_type=patch0, blob="aaa2")
    cbn3 = ChallengeBinaryNode.create(name="foo3", cs=cs, patch_type=patch1, blob="aaa3")

    assert_in(patch0, cs.cbns_by_patch_type().keys())
    assert_in(patch1, cs.cbns_by_patch_type().keys())
    assert_in(cbn1, cs.cbns_by_patch_type()[patch0])
    assert_in(cbn2, cs.cbns_by_patch_type()[patch0])
    assert_in(cbn3, cs.cbns_by_patch_type()[patch1])
def test_submitted_and_unsubmitted_patches(self):
    r0 = Round.create(num=0)
    team = Team.create(name=Team.OUR_NAME)
    cs = ChallengeSet.create(name="foo")
    cs.rounds = [r0]
    cbn = ChallengeBinaryNode.create(name="cbn", cs=cs, blob="aaa1")
    patchtype1 = PatchType.create(name="PatchType1", functionality_risk=0,
                                  exploitability=0)
    patchtype2 = PatchType.create(name="PatchType2", functionality_risk=0,
                                  exploitability=0)
    patch1 = ChallengeBinaryNode.create(name="patch1", patch_type=patchtype1,
                                        cs=cs, root=cbn, blob="aaa2")
    patch2 = ChallengeBinaryNode.create(name="patch2", patch_type=patchtype2,
                                        cs=cs, root=cbn, blob="aaa3")

    assert_equals(len(cbn.unsubmitted_patches), 2)
    assert_in(patch1, cbn.unsubmitted_patches)
    assert_in(patch2, cbn.unsubmitted_patches)
    assert_equals(len(cbn.submitted_patches), 0)

    ChallengeSetFielding.create_or_update_submission(team=team,
                                                     cbns=[patch1, patch2],
                                                     round=r0)
    assert_equals(len(cbn.submitted_patches), 2)
    assert_equals(len(cbn.unsubmitted_patches), 0)
def rotator_submission(target_cs):
    global NEXT_PATCH_ORDER

    round_ = Round.current_round()
    if target_cs.name not in ORDERS or len(ORDERS[target_cs.name]) == 0:
        ORDERS[target_cs.name] = list(NEXT_PATCH_ORDER)
        # rotate the global order so the next CS starts from a different patch type
        NEXT_PATCH_ORDER = NEXT_PATCH_ORDER[1:] + NEXT_PATCH_ORDER[:1]

    all_patches = target_cs.cbns_by_patch_type()
    for n in ORDERS[target_cs.name]:
        pt = PatchType.get(name=n)
        if pt not in all_patches:
            continue
        # submit this patch type and drop it from this CS's queue
        ORDERS[target_cs.name].remove(n)
        cbns = all_patches[pt]
        print "SUBMITTING", target_cs.name, cbns[0].name, cbns[0].patch_type.name
        c, _ = CSSubmissionCable.get_or_create(cs=target_cs, cbns=cbns,
                                               ids=cbns[0].ids_rule,
                                               round=round_)
        print "...", c.id
        break
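# Driver sketch (hedged): ORDERS ({} mapping CS name -> remaining patch-type
# names) and NEXT_PATCH_ORDER (a rotating copy of ORIG_PATCH_ORDER) are assumed
# module-level state. A scheduler tick then cables at most one patch type per
# challenge set:
for cs in cs_list:  # cs_list: whatever ChallengeSet rows are in play (assumed)
    rotator_submission(cs)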
def upload_cbns(args):
    cs = CS.get(CS.name == args.cs)
    patch_type, _ = PT.get_or_create(name="manual", functionality_risk=1.0,
                                     exploitability=0.0)

    cbns = []
    for patched_file in args.patched_files:
        with open(patched_file) as f:
            content = f.read()
        try:
            cbn = CBN.create(cs=cs, blob=content, name=patched_file,
                             patch_type=patch_type)
        except peewee.IntegrityError:
            print "CBN already exists. Fetching."
            cbn = CBN.get(CBN.name == patched_file, CBN.cs == cs,
                          CBN.blob == content)
        cbns.append(cbn)

    if args.field:
        ids, _ = IDSRule.get_or_create(cs=cs, rules='')
        CSSubmissionCable.get_or_create(cs=cs, cbns=cbns, ids=ids,
                                        round=Round.current_round())

    if args.eF is not None and args.eT is not None and args.eM is not None \
            and cbn.patch_type is not None:
        perf_score = {
            'score': {
                'ref': {'task_clock': 1.0, 'rss': 1.0, 'flt': 1.0,
                        'file_size': 1.0},
                'rep': {'task_clock': args.eT, 'file_size': args.size_overhead,
                        'rss': args.eM, 'flt': args.eM},
            }
        }
        PS.create(cs=cs, perf_score=perf_score, patch_type=cbn.patch_type,
                  has_failed_polls=args.eF != 0, failed_polls=args.eF)

    if args.pT is not None and args.pM is not None and args.pS is not None:
        # assume the fielding to annotate is the latest one for this CS (hedged guess)
        csf = CSF.select().where(CSF.cs == cs).order_by(CSF.id.desc()).get()
        csf.poll_feedback = PF.create(cs=cs, round_id=Round.current_round().id,
                                      success=args.pS, time_overhead=args.pT,
                                      memory_overhead=args.pM,
                                      size_overhead=args.size_overhead)
        csf.save()
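# CLI sketch (hedged): upload_cbns() expects an argparse namespace with the
# attributes used above; the flag spellings below are assumptions inferred
# from those attribute names.
import argparse

parser = argparse.ArgumentParser(description="upload manually patched CBs")
parser.add_argument("cs", help="ChallengeSet name")
parser.add_argument("patched_files", nargs="+", help="patched binaries to upload")
parser.add_argument("--field", action="store_true", help="also cable the CBNs for submission")
parser.add_argument("--eF", type=float, help="expected failed-poll count")
parser.add_argument("--eT", type=float, help="expected time overhead")
parser.add_argument("--eM", type=float, help="expected memory overhead")
parser.add_argument("--pT", type=float, help="observed time overhead")
parser.add_argument("--pM", type=float, help="observed memory overhead")
parser.add_argument("--pS", type=float, help="observed poll success rate")
parser.add_argument("--size-overhead", dest="size_overhead", type=float, default=1.0)
upload_cbns(parser.parse_args())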
def create_poll_performance(target_poll, target_cs, patch_type,
                            is_poll_ok=True, perf_json=None):
    """
    Create a poll performance object.

    :param target_poll: cbtest poll for which the results should be updated.
    :param target_cs: CS for which performance needs to be updated.
    :param patch_type: patch type for which performance needs to be updated.
    :param is_poll_ok: flag indicating whether the poll was successful.
    :param perf_json: performance JSON.
    :return: None
    """
    patch_type_obj = None
    if patch_type is not None:
        patch_type_obj = PatchType.get(PatchType.name == patch_type)
    CBPollPerformance.create(poll=target_poll, cs=target_cs,
                             patch_type=patch_type_obj,
                             is_poll_ok=is_poll_ok, performances=perf_json)
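# Usage sketch (hedged): record one successful poll run. `some_poll` and
# `some_cs` stand in for existing rows, and the perf JSON mirrors the
# 'perf'/'median' layout consumed by compute_patch_performance below.
create_poll_performance(some_poll, some_cs, "manual",
                        is_poll_ok=True,
                        perf_json={'perf': {'median': {
                            'task_clock': 1.0, 'rss': 1.0,
                            'flt': 1.0, 'file_size': 1.0}}})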
def _run(self, job):
    input_file = job.cbn.path
    patch_type = job.payload["patch_type"]

    pm = PatchMaster(input_file)
    patched_bin, ids_rule = pm.create_one_patch(patch_type)
    if patched_bin is None:
        LOG.warning("unable to generate patch")
        return

    name = "{}_patched_{}".format(job.cbn.name, patch_type)
    ids = IDSRule.get_by_sha256_or_create(rules=ids_rule, cs=job.cbn.cs)
    pt = PatchType.get(name=patch_type)
    ChallengeBinaryNode.create(
        root=job.cbn,
        cs=job.cbn.cs,
        name=name,
        patch_type=pt,
        blob=patched_bin,
        sha256=hashlib.sha256(patched_bin).hexdigest(),
        ids_rule=ids,
    )
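# Payload sketch (hedged): the job handed to _run() is assumed to carry the
# patch type in its payload and to reference the unpatched binary, roughly:
#
#   job.cbn      -> the original ChallengeBinaryNode (job.cbn.path must be readable)
#   job.payload  -> {"patch_type": "stackretencryption"}  # name is an assumption
#
# On success, _run() stores the patched blob as a child CBN of job.cbn, keyed
# by its sha256 and linked to the IDS rule PatchMaster emitted.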
def test_variable_submitter(self):
    t = Team.create(name=Team.OUR_NAME)
    r0 = Round.create(num=0)

    # set up several CSes
    cses = [CS.create(name='CS_%s' % i) for i in range(10)]

    # set up the patches
    for cs in cses:
        for pt in PT.select():
            ids = IDSRule.create(cs=cs, rules="HAHAHA")
            cbn = CBN.create(cs=cs, name=cs.name + "_" + pt.name, blob="XXXX",
                             patch_type=pt, ids_rule=ids)

    patch_names = scriba.submitters.cb.ORIG_PATCH_ORDER
    try:
        cur_cssc_id = CSSC.select().order_by(CSSC.id.desc()).get().id
    except CSSC.DoesNotExist:
        cur_cssc_id = 0

    # run the scheduler
    for _ in scriba.submitters.cb.ORIG_PATCH_ORDER:
        for c in cses:
            scriba.submitters.cb.CBSubmitter.rotator_submission(c)

    # make sure they got rotated correctly
    for n, cs in enumerate(cses):
        cables = list(CSSC.select().where(
            (CSSC.cs == cs) & (CSSC.id > cur_cssc_id)
        ).order_by(CSSC.id.asc()))
        assert len(cables) > 0
        assert all(c.cbns[0].patch_type.name == pn
                   for c, pn in zip(cables, (patch_names * 10)[n:]))
def test_cbns_original(self):
    r0 = Round.create(num=0)
    r1 = Round.create(num=1)
    our_team = Team.create(name=Team.OUR_NAME)
    other_team = Team.create(name="opponent")
    cs = ChallengeSet.create(name="foo")
    cs.rounds = [r0, r1]
    cbn = ChallengeBinaryNode.create(name="foo", cs=cs, blob="aaa1")
    cbn_patched = ChallengeBinaryNode.create(
        name="foo", cs=cs, blob="aaa2",
        patch_type=PatchType.create(name="patch1", functionality_risk=0,
                                    exploitability=1))
    cbn_other_team = ChallengeBinaryNode.create(name="foo", cs=cs, blob="aaa3")

    ChallengeSetFielding.create(cs=cs, cbns=[cbn], team=our_team,
                                available_round=r0)
    ChallengeSetFielding.create(cs=cs, cbns=[cbn_patched], team=our_team,
                                submission_round=r0).save()
    ChallengeSetFielding.create(cs=cs, cbns=[cbn_other_team], team=other_team,
                                available_round=r0).save()

    assert_equals(len(cs.cbns_original), 1)
    assert_in(cbn, cs.cbns_original)
    assert_not_in(cbn_patched, cs.cbns_original)
    assert_not_in(cbn_other_team, cs.cbns_original)
def compute_patch_performance(target_cs):
    """
    Compute patch performance for all patched binaries of the given CS.
    This will update the DB with the results.

    :param target_cs: CS for which patch performance needs to be computed.
    :return: None.
    """
    l.info("Trying to compute patch performance for CS:" + str(target_cs.id))
    patched_bins_perf = get_all_cb_sets_perf(target_cs)
    l.info("Got raw perf scores.")
    l.info("Trying to group raw scores.")
    grouped_perf_results = group_poll_results(patched_bins_perf)
    l.info("Grouped raw scores.")

    original_cbs_perf = []
    if 'original' in grouped_perf_results:
        original_cbs_perf = grouped_perf_results['original']
        del grouped_perf_results['original']

    if len(original_cbs_perf) <= 0:
        l.warning("No polls have been evaluated against the original binary. "
                  "Ignoring this round of patch performance computation.")
        return

    if len(original_cbs_perf['fail']) > 0:
        l.warning("Weird. There are some failed polls for the original binary; "
                  "ignoring failed polls.")

    # consider only passed polls
    original_cbs_perf = original_cbs_perf['pass']

    for curr_patch_type in grouped_perf_results:
        l.info("Computing scores for patch type:" + str(curr_patch_type))
        pass_perf_objects = grouped_perf_results[curr_patch_type]['pass']
        if len(pass_perf_objects) > 0:
            patched_cbs_pass_poll_ids = map(lambda perf_obj: perf_obj.poll.id,
                                            pass_perf_objects)
        else:
            l.warning("No passed polls found for patch type:" + str(curr_patch_type))
            # skip to the next patch type
            continue

        failed_perf_objects = grouped_perf_results[curr_patch_type]['fail']
        has_fails = len(failed_perf_objects) > 0
        failed_polls = []
        if has_fails:
            failed_polls = map(lambda perf_obj: perf_obj.poll.id,
                               failed_perf_objects)
        failed_polls_json = {'poll_ids': list(failed_polls)}

        original_cbs_pass_poll_ids = map(lambda perf_obj: perf_obj.poll.id,
                                         original_cbs_perf)
        common_pass_poll_ids = set(original_cbs_pass_poll_ids)
        common_pass_poll_ids.intersection_update(patched_cbs_pass_poll_ids)
        if not common_pass_poll_ids:
            l.warning("No polls are common between the original and patched CBs. "
                      "Ignoring patch type:" + str(curr_patch_type))
            # skip to the next patch type
            continue

        polls_included = {'poll_ids': list(common_pass_poll_ids)}
        base_perf_objects = filter(
            lambda perf_obj: perf_obj.poll.id in common_pass_poll_ids,
            original_cbs_perf)
        patched_perf_objects = filter(
            lambda perf_obj: perf_obj.poll.id in common_pass_poll_ids,
            pass_perf_objects)
        base_perf_jsons = map(
            lambda perf_obj: perf_obj.performances['perf']['median'],
            base_perf_objects)
        patched_perf_jsons = map(
            lambda perf_obj: perf_obj.performances['perf']['median'],
            patched_perf_objects)

        base_perf_total = get_perf_totals(base_perf_jsons)
        # binary size is the same on all runs, so take it from the first poll
        base_perf_total[SIZE_PERF_NAME] = base_perf_jsons[0][SIZE_PERF_NAME]
        patched_perf_total = get_perf_totals(patched_perf_jsons)
        # again, binary size is the same across all tests
        patched_perf_total[SIZE_PERF_NAME] = patched_perf_jsons[0][SIZE_PERF_NAME]

        target_score = compute_overhead(base_perf_total, patched_perf_total)

        l.info("Trying to create PatchScore in DB for patch type:" +
               str(curr_patch_type) + " for cs:" + str(target_cs.id))
        # convert the patch-type name into a PatchType row
        curr_patch_type = PatchType.get(PatchType.name == curr_patch_type)
        # create the patch score
        PatchScore.create(cs=target_cs, patch_type=curr_patch_type,
                          num_polls=len(common_pass_poll_ids),
                          polls_included=polls_included,
                          has_failed_polls=has_fails,
                          failed_polls=failed_polls_json,
                          round=Round.current_round(),
                          perf_score=target_score)
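# Shape sketch (hedged): compute_overhead() is assumed to emit the same
# 'score'/'ref'/'rep' layout that PatchScore.perf_score carries elsewhere in
# this codebase, i.e. reference (original) vs. reported (patched) totals:
#
#   {'score': {'ref': {'task_clock': 1.0, 'rss': 1.0, 'flt': 1.0, 'file_size': 1.0},
#              'rep': {'task_clock': 1.1, 'rss': 1.1, 'flt': 1.1, 'file_size': 1.1}}}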
def create_patchtype(args):
    PT.create(name=args.name, exploitability=args.exploitability,
              functionality_risk=args.functionality_risk)
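# Usage sketch (hedged): args can be any namespace carrying the three
# attributes read above; the concrete values here are illustrative only.
from argparse import Namespace

create_patchtype(Namespace(name="manual_v2",  # hypothetical patch-type name
                           exploitability=0.0,
                           functionality_risk=1.0))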
def test_patch_selection(self):
    t = Team.create(name=Team.OUR_NAME)
    r0 = Round.create(num=0)
    cs = CS.create(name='x')

    # set up a CBN for it, with some feedback
    cbn_orig = CBN.create(cs=cs, name="unpatched", blob="XXXX")
    pf_orig = PF.create(cs=cs, round=r0, success=1.0, timeout=0, connect=0,
                        function=0, time_overhead=0.0, memory_overhead=0.0)

    # field the default CBN
    CSF.create(cs=cs, cbns=[cbn_orig], team=t, available_round=r0,
               poll_feedback=pf_orig)

    # make sure we properly handle the case when there are no patches
    assert_is_none(scriba.submitters.cb.CBSubmitter.patch_decision(cs))

    # and patch it
    pt = PT.create(name="a_patch", functionality_risk=0., exploitability=0.)
    cbn_p1 = CBN.create(cs=cs, name="patch1", blob="XXXYZ", patch_type=pt)
    PS.create(cs=cs, patch_type=pt, num_polls=10, has_failed_polls=False,
              failed_polls=0, round=r0,
              perf_score={'score': {'ref': {'task_clock': 1.0, 'rss': 1.0,
                                            'flt': 1.0, 'file_size': 1.0},
                                    'rep': {'task_clock': 1.1, 'file_size': 1.1,
                                            'rss': 1.1, 'flt': 1.1}}})

    # make sure we choose this patch
    assert_equals(scriba.submitters.cb.CBSubmitter.patch_decision(cs), [cbn_p1])

    # field the patch - we're down the first round
    r1 = Round.create(num=1)
    pf1 = PF.create(cs=cs, round=r1, success=0.0, timeout=0, connect=0,
                    function=0, time_overhead=0.0, memory_overhead=0.0)
    CSF.create(cs=cs, cbns=[cbn_p1], team=t, available_round=r1,
               poll_feedback=pf1)

    r2 = Round.create(num=2)
    pf2 = PF.create(cs=cs, round=r2, success=1.0, timeout=0, connect=0,
                    function=0, time_overhead=1.3, memory_overhead=1.3)
    CSF.create(cs=cs, cbns=[cbn_p1], team=t, available_round=r2,
               poll_feedback=pf2)

    # make sure we revert
    assert_equals(scriba.submitters.cb.CBSubmitter.patch_decision(cs),
                  [cbn_orig])
def test_simple_selector(self):
    try:
        t = Team.get_our()
    except Team.DoesNotExist:
        t = Team.create(name=Team.OUR_NAME)
    cs = CS.create(name='x')

    # set up a CBN for it, with some feedback
    cbn_orig = CBN.create(cs=cs, name="unpatched", blob="XXXX")

    # field the default CBN
    r0 = Round.create(num=0)
    pf_r0 = PF.create(cs=cs, round=r0, success=1.0, timeout=0, connect=0,
                      function=0, time_overhead=0.0, memory_overhead=0.0)
    cs.seen_in_round(r0)
    CSF.create(cs=cs, cbns=[cbn_orig], team=t, available_round=r0,
               poll_feedback=pf_r0)

    # make sure we don't submit on round 0
    assert_is_none(scriba.submitters.cb.CBSubmitter.patch_decision_simple(cs, r0))

    # tick the round
    r1 = Round.create(num=1)
    pf_r1 = PF.create(cs=cs, round=r1, success=1.0, timeout=0, connect=0,
                      function=0, time_overhead=0.0, memory_overhead=0.0)
    cs.seen_in_round(r1)
    CSF.create(cs=cs, cbns=[cbn_orig], team=t, available_round=r1,
               poll_feedback=pf_r1)

    # make sure we don't submit on round 1 either
    assert_is_none(scriba.submitters.cb.CBSubmitter.patch_decision_simple(cs, r1))

    # tick the round
    r2 = Round.create(num=2)
    pf_r2 = PF.create(cs=cs, round=r2, success=1.0, timeout=0, connect=0,
                      function=0, time_overhead=0.0, memory_overhead=0.0)
    cs.seen_in_round(r2)
    CSF.create(cs=cs, cbns=[cbn_orig], team=t, available_round=r2,
               poll_feedback=pf_r2)

    # make sure we properly handle the case when there are no patches
    assert_is_none(scriba.submitters.cb.CBSubmitter.patch_decision_simple(cs, r0))

    # and patch it, without feedback
    pt1 = PT.create(name="a_patch", functionality_risk=0., exploitability=0.4)
    cbn_p1 = CBN.create(cs=cs, name="patch1", blob="XXXYZ", patch_type=pt1)
    pt2 = PT.create(name="b_patch", functionality_risk=0., exploitability=0.3)
    cbn_p2 = CBN.create(cs=cs, name="patch2", blob="XXXZZ", patch_type=pt2)

    # make sure we grab the "best" patch
    assert_items_equal(
        scriba.submitters.cb.CBSubmitter.patch_decision_simple(cs, r2),
        [cbn_p2])

    # emulate the ambassador submitting this f****r
    csf0s = CSF.create(cs=cs, cbns=[cbn_p2], team=t, submission_round=r2)

    # add a new, better patch
    pt3 = PT.create(name="c_patch", functionality_risk=0., exploitability=0.2)
    cbn_p3 = CBN.create(cs=cs, name="patch3", blob="XXXXZ", patch_type=pt3)

    # make sure we select the new patch, because it's still the right round
    assert_items_equal(
        scriba.submitters.cb.CBSubmitter.patch_decision_simple(cs, r2),
        [cbn_p3])
    csf0s.cbns = [cbn_p3]
    csf0s.save()

    # make sure we are now happy
    assert_is_none(scriba.submitters.cb.CBSubmitter.patch_decision_simple(cs, r2))

    # down a round
    r3 = Round.create(num=3)
    pf_r3 = PF.create(cs=cs, round=r3, success=0, timeout=0, connect=0,
                      function=0, time_overhead=0.0, memory_overhead=0.0)
    cs.seen_in_round(r3)
    CSF.create(cs=cs, cbns=[cbn_p3], team=t, available_round=r3,
               poll_feedback=pf_r3)

    # tick the round
    r4 = Round.create(num=4)
    pf_r4 = PF.create(cs=cs, round=r4, success=1.0, timeout=0, connect=0,
                      function=0, time_overhead=0.05, memory_overhead=0.05)
    cs.seen_in_round(r4)
    CSF.create(cs=cs, cbns=[cbn_p3], team=t, available_round=r4,
               poll_feedback=pf_r4)

    # make sure we don't choose to change the selection
    assert_is_none(scriba.submitters.cb.CBSubmitter.patch_decision_simple(cs, r4))

    # now find a better patch
    pt4 = PT.create(name="d_patch", functionality_risk=0., exploitability=0.1)
    cbn_p4 = CBN.create(cs=cs, name="patch4", blob="XXXYX", patch_type=pt4)

    # too late, man
    assert_is_none(scriba.submitters.cb.CBSubmitter.patch_decision_simple(cs, r4))

    # now we get the baaaad news
    r5 = Round.create(num=5)
    pf_r5 = PF.create(cs=cs, round=r5, success=0.8, timeout=0, connect=0,
                      function=0.2, time_overhead=0.05, memory_overhead=0.05)
    cs.seen_in_round(r5)
    CSF.create(cs=cs, cbns=[cbn_p3], team=t, available_round=r5,
               poll_feedback=pf_r5)

    # make sure we properly roll back
    assert_items_equal(
        scriba.submitters.cb.CBSubmitter.patch_decision_simple(cs, r5),
        cs.cbns_original)
    CSF.create(cs=cs, cbns=cs.cbns_original, team=t, submission_round=r5)

    # down a round
    r6 = Round.create(num=6)
    pf_r6 = PF.create(cs=cs, round=r6, success=0, timeout=0, connect=0,
                      function=0, time_overhead=0.0, memory_overhead=0.0)
    cs.seen_in_round(r6)
    CSF.create(cs=cs, cbns=[cbn_orig], team=t, available_round=r6,
               poll_feedback=pf_r6)

    # that worked
    r7 = Round.create(num=7)
    pf_r7 = PF.create(cs=cs, round=r7, success=1.0, timeout=0, connect=0,
                      function=0, time_overhead=0.0, memory_overhead=0.0)
    cs.seen_in_round(r7)
    CSF.create(cs=cs, cbns=[cbn_orig], team=t, available_round=r7,
               poll_feedback=pf_r7)

    # make sure we're happy staying unpatched
    assert_is_none(scriba.submitters.cb.CBSubmitter.patch_decision_simple(cs, r7))