def check():
    """Check security issues according to config and pass results to next tools."""
    overall_report = dict()

    # source code analysis
    # ====================
    # currently empty

    # compile
    # =======
    ret_makefile = subprocess.run(
        [config.compiler] + config.compiler_args,  # command
        stdout=subprocess.PIPE,   # capture stdout
        stderr=subprocess.PIPE,   # capture stderr
        universal_newlines=True)  # use text mode for std* file objects
    overall_report['makefile'] = ret_makefile

    # runtime analysis
    # ================
    with open('compile.txt', 'r') as f:
        if 'error' not in f.read().lower():  # if compilation succeeded
            overall_report, test_case_report_list = runtime_analysis(config, overall_report)

    # pass this info to next tools for subsequent processing
    # ======================================================
    pp(overall_report)  # results from runtime analysis

    if 'runtime_analysis_done' in overall_report:
        success_count = 0
        for report in test_case_report_list:
            if 'timeout' in report:
                util.addFinding("Time limit exceeded!", 0, "", "TEST_080006")
            elif report['return_code'] != 0:
                if report['stderr_stream'] != '':
                    # ASan/LeakSan/stack protector probably reported something,
                    # but those findings will be added by analyse.py
                    pass
                else:
                    util.addFinding("It seems your program might have crashed.",
                                    0, "", "TEST_100006")
            # output_match == None means the user might have tried to print to outfile
            elif report['stdout_stream'] != '' or report['output_match'] is None:
                util.addFinding("A test case failed! Make sure you are not trying to print something.",
                                0, "", "TEST_100006")
            elif not all(report['output_match']):  # not all test cases passed
                util.addFinding("A test case failed!", 0, "", "TEST_100006")
            else:
                success_count += 1

            with open('stderr.txt', 'a') as f:
                f.write(report['stderr_stream'])
            with open('stdout.txt', 'a') as f:
                f.write(report['outfile'])

        if success_count == len(test_case_report_list):
            util.addFinding("Program behaves as expected!", 1,
                            "CHALLENGE_PASS", "TEST_900006")

    util.dumpFindings()

    # next tools
    subprocess.run(["./analyse.py"], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    subprocess.run(["./ai.py"], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
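# For reference, the fields that the loop in check() reads from each entry of
# test_case_report_list can be sketched as below. This is a hypothetical shape
# inferred from the key accesses above, not the actual structure built by
# runtime_analysis(); the real reports may carry additional fields.
example_report = {
    'return_code': 0,        # exit status of the tested program
    'stderr_stream': '',     # captured stderr (non-empty when ASan/LeakSan/stack protector report)
    'stdout_stream': '',     # captured stdout (non-empty if the user printed to stdout)
    'output_match': [True],  # per-test-case comparison results; None if outfile was misused
    'outfile': '',           # what the program wrote to its output file
    # 'timeout': True,       # present only when the time limit was exceeded
}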
    # make inject
    ret = subprocess.run("make inject", shell=True,
                         stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    # make main
    ret = subprocess.run("make main", shell=True,
                         stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    stdout = ret.stdout.decode("utf-8")

    okFail = 1
    if runTestAndMatch("result = ", "n", "^ERROR: gets was used!$", "stderr.txt"):
        okFail = 0
    util.addFinding("Usage of dangerous function", okFail,
                    "INCREMENTAL_2_FUNC_1246832686_B_", "TEST_100000")

    okFail = 1
    if runTestAndMatch("result = ", "y", "^ERROR: gets was used!$", "stderr.txt"):
        okFail = 0
    util.addFinding("Usage of dangerous function", okFail,
                    "INCREMENTAL_2_FUNC_1246832686_B_", "TEST_100000")

    okFail = 0
    if runTestAndMatch("result = ", "y", "^1$", "stderr.txt"):
        okFail = 1
    util.addFinding("Program is not behaving as expected", okFail, "", "TEST_900001")

    okFail = 0
    if runTestAndMatch("result = ", "Y", "^1$", "stderr.txt"):
        okFail = 1  # as in the "y" case above
    util.addFinding("Program is not behaving as expected", okFail, "", "TEST_900001")
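# runTestAndMatch() is not defined in this excerpt. Below is a minimal sketch
# of what it might look like, assuming it feeds the given answer to the
# compiled binary on stdin and then checks the named log file for the regex;
# the ./main binary path, the meaning of the first (expected-prompt) argument,
# and the helper's internals are all assumptions.
import re
import subprocess

def runTestAndMatch(expected_prompt, answer, pattern, logfile):
    """Hypothetical helper: run ./main with `answer` on stdin, then report
    whether `pattern` matches a line of `logfile`."""
    proc = subprocess.run(["./main"], input=answer + "\n",
                          stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                          universal_newlines=True)
    if expected_prompt not in proc.stdout:  # assumed: the prompt must appear
        return False
    with open(logfile) as f:
        return re.search(pattern, f.read(), re.MULTILINE) is not None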
def check():
    """Check security issues according to config and pass results to next tools."""
    overall_report = dict()

    # source code analysis
    # ====================
    # currently empty

    # compile
    # =======
    ret_makefile = subprocess.run(
        [config.compiler] + config.compiler_args,  # command
        stdout=subprocess.PIPE,   # capture stdout
        stderr=subprocess.PIPE,   # capture stderr
        universal_newlines=True)  # use text mode for std* file objects
    overall_report['makefile'] = ret_makefile

    # runtime analysis
    # ================
    with open('compile.txt', 'r') as f:
        if 'error' not in f.read().lower():  # if compilation succeeded
            overall_report, test_case_report_list = runtime_analysis(config, overall_report)
            overall_report = source_code_analysis(config, overall_report)
            pp(test_case_report_list)

    # pass this info to next tools for subsequent processing
    # ======================================================
    pp(overall_report)  # results from runtime analysis

    if 'runtime_analysis_done' in overall_report:
        success_count = 0
        for report in test_case_report_list:
            if 'timeout' in report:
                util.addFinding("Time limit exceeded!", 0, "", "TEST_080006")
            elif report['return_code'] != 0:
                if report['stderr_stream'] != '':
                    # ASan/LeakSan/stack protector probably reported something,
                    # but those findings will be added by analyse.py
                    pass
                else:
                    util.addFinding("It seems your program might have crashed.",
                                    0, "", "TEST_100006")
            # output_match == None means the user might have tried to print to outfile
            elif report['stdout_stream'] != '' or report['output_match'] is None:
                util.addFinding("A test case failed! Make sure you are not trying to print something.",
                                0, "", "TEST_100006")
            elif isinstance(report['output_match'], str):
                if report['output_match'] == "all_failed_test_cases_had_single_element_array":
                    util.addFinding("A test case failed!", 0,
                                    "MAX_GREATER_BY_K_IN_ARRAY__SINGLE_ELEMENT",
                                    "TEST_100003")
            elif not all(report['output_match']):  # not all test cases passed
                priority = "TEST_100006"
                if 'source_code__if_semicolon' in overall_report:
                    tag = "INCREMENTAL_2_MAX_GREATER_BY_K_IN_ARRAY__IF_SEMICOLON_"
                elif 'source_code__arr_i_minux_max' in overall_report \
                        or 'source_code__arr_i_minux_second_max' in overall_report:
                    tag = "INCREMENTAL_2_MAX_GREATER_BY_K_IN_ARRAY__UNSIGNED_OVERFLOW_"
                elif 'source_code__second_max_plus_k' in overall_report:
                    tag = "MAX_GREATER_BY_K_IN_ARRAY__UNSIGNED_OVERFLOW_LAST_IF_CONDITION"
                else:
                    tag = ""
                    # give low priority so that other findings which have tags can take over
                    priority = "TEST_100100"
                util.addFinding("A test case failed!", 0, tag, priority)
            else:
                success_count += 1

            with open('stderr.txt', 'a') as f:
                f.write(report['stderr_stream'])
            with open('stdout.txt', 'a') as f:
                f.write(report['outfile'])

        if success_count == len(test_case_report_list):
            util.addFinding("Program behaves as expected!", 1,
                            "CHALLENGE_PASS", "TEST_900006")

    util.dumpFindings()

    # next tools
    subprocess.run(["./analyse.py"], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    subprocess.run(["./ai.py"], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
def check():
    """Check security issues according to config and pass results to next tools."""
    overall_report = dict()

    # source code analysis
    # ====================
    # currently empty

    # compile
    # =======
    ret_makefile = subprocess.run(
        [config.compiler] + config.compiler_args,  # command
        stdout=subprocess.PIPE,   # capture stdout
        stderr=subprocess.PIPE,   # capture stderr
        universal_newlines=True)  # use text mode for std* file objects
    overall_report['makefile'] = ret_makefile

    # runtime analysis
    # ================
    with open('compile.txt', 'r') as f:
        if 'error' not in f.read().lower():  # if compilation succeeded
            overall_report, test_case_report_list = runtime_analysis(config, overall_report)

    # pass this info to next tools for subsequent processing
    # ======================================================
    pp(overall_report)  # results from runtime analysis

    identifier = None
    if 'runtime_analysis_done' in overall_report:
        success_count = 0
        success_test_case_reports = []
        for report in test_case_report_list:
            if 'timeout' in report:
                util.addFinding("Time limit exceeded!", 0, "", "TEST_080006")
            elif report['return_code'] != 0:
                if report['stderr_stream'] != '':
                    # ASan/LeakSan/stack protector probably reported something,
                    # but those findings will be added by analyse.py
                    pass
                else:
                    util.addFinding("It seems your program might have crashed.",
                                    0, "", "TEST_100006")
            # output_match == None means the user might have tried to print to outfile
            elif report['stdout_stream'] != '' or report['output_match'] is None:
                util.addFinding("A test case failed! Make sure you are not trying to print something.",
                                0, "", "TEST_100006")
            elif not all(report['output_match']):  # not all test cases passed
                util.addFinding("A test case failed!", 0, "", "TEST_100006")
            else:
                success_count += 1
                success_test_case_reports.append(report)

            with open('stderr.txt', 'a') as f:
                f.write(report['stderr_stream'])
            with open('stdout.txt', 'a') as f:
                f.write(report['outfile'])

        if success_count == len(test_case_report_list):
            util.addFinding("Program behaves as expected!", 1,
                            "CHALLENGE_PASS", "TEST_900006")
        else:
            # If none of the test suites in which the program succeeded contains
            # an n=0 test case, set identifier to "all failed test suites contain
            # n=0 test case"; otherwise leave it as None. The point of the
            # identifier is to let feedback.py know that an n=0 test case is the
            # one likely causing failures.
            success_test_cases = [tc for report in success_test_case_reports
                                  for tc in report['test_suite']]
            identifier = "all failed test suites contain n=0 test case"
            for tc in success_test_cases:
                if tc['input'][0] == '0':
                    identifier = None
                    break

    util.dumpFindings()

    # next tools
    import analyse
    analyse.call_analyse(identifier)
    subprocess.run(["./ai.py"], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
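# To illustrate the n=0 heuristic above with standalone data: the
# 'test_suite'/'input' layout mirrors the key accesses in check(), but the
# concrete test cases below are invented for this example.
success_test_case_reports_demo = [
    {'test_suite': [{'input': '3 1 2 3'}, {'input': '1 7'}]},  # no n=0 case
    {'test_suite': [{'input': '2 5 9'}]},                      # no n=0 case
]
success_test_cases = [tc for report in success_test_case_reports_demo
                      for tc in report['test_suite']]
identifier = "all failed test suites contain n=0 test case"
for tc in success_test_cases:
    if tc['input'][0] == '0':  # an n=0 case already succeeded,
        identifier = None      # so n=0 is not the likely culprit
        break
print(identifier)  # -> all failed test suites contain n=0 test case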
    # build the challenge (assumed "make" invocation; the exact target is not
    # shown in this excerpt)
    ret = subprocess.run("make", shell=True,
                         stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    stdout = ret.stdout.decode("utf-8")

    ##########################################################################################################################################
    # EXP15-C. Do not place a semicolon on the same line as an if, for, or while statement
    # https://wiki.sei.cmu.edu/confluence/display/c/EXP15-C.+Do+not+place+a+semicolon+on+the+same+line+as+an+if%2C+for%2C+or+while+statement
    # Fix the bug in line "for (; ii<strlen(str1)-1; ii++); {" of utilities.c
    ##########################################################################################################################################
    #
    # Line to search for:
    # for (; ii<strlen(str1)-1; ii++); {
    nHits = util.searchSource("utilities.c.pp", r"^\s*for.*;\s*{\s*$")
    if nHits > 0:
        util.addFinding("Program does not behave as expected", 0,
                        "INCREMENTAL_2_FUNC_1362465447_", "TEST_100001")

    util.dumpFindings()

    # run analysis
    ret = subprocess.run("./analyse.py", shell=True,
                         stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    # run AI
    ret = subprocess.run("./ai.py", shell=True,
                         stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    sys.exit(0)


# restore the pristine pizzas.txt fixture from its backup copy
os.remove("pizzas.txt")
shutil.copyfile("pizzas.ok.txt", "pizzas.txt")
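# As a quick sanity check of the EXP15-C pattern passed to util.searchSource()
# above, the same regex can be exercised directly with Python's re module
# against the known-bad line and a corrected one.
import re

SEMICOLON_FOR_PATTERN = r"^\s*for.*;\s*{\s*$"

buggy = "for (; ii<strlen(str1)-1; ii++); {"  # stray semicolon: the loop body is empty
fixed = "for (; ii<strlen(str1)-1; ii++) {"   # corrected form

print(bool(re.search(SEMICOLON_FOR_PATTERN, buggy)))  # True  -> finding raised
print(bool(re.search(SEMICOLON_FOR_PATTERN, fixed)))  # False -> no finding
# The pattern requires the "{" to share the line with a trailing ";", which is
# why the corrected loop header does not trip it.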