Example no. 1
def check():
    """Check security issues according to config and pass results to next tools."""
    
    overall_report = dict()

    # source code analysis
    # ====================
    # currently empty
    
    # compile
    # =======
    ret_makefile = subprocess.run([config.compiler] + config.compiler_args, # command
                                  stdout=subprocess.PIPE, # capture stdout
                                  stderr=subprocess.PIPE, # capture stderr
                                  universal_newlines=True) # use text mode for std* file objects
    overall_report['makefile'] = ret_makefile
    
    # runtime analysis
    # ================
    with open('compile.txt', 'r') as f:
        if 'error' not in f.read().lower(): # if compilation succeeded
            overall_report, test_case_report_list = runtime_analysis(config, overall_report)
            
    # pass this info to next tools for subsequent processing
    # ======================================================
    
    pp(overall_report)
    # results from runtime analysis
    if 'runtime_analysis_done' in overall_report:
        success_count = 0
        for report in test_case_report_list:
            if 'timeout' in report:
                util.addFinding("Time limit exceeded!", 0, "", "TEST_080006")
            elif report['return_code'] != 0:
                if report['stderr_stream'] != '': # ASan/LeakSan/Stack protector probably reported something
                    pass # but these findings will be added by analyse.py
                else:
                    util.addFinding("It seems your program might have crashed.", 0, "", "TEST_100006")
            # output_match == None means the user might have tried to print to outfile
            elif report['stdout_stream'] != '' or report['output_match'] is None:
                util.addFinding("A test case failed! Make sure you are not trying to print something.",
                                0,"","TEST_100006")
            elif not all(report['output_match']): # not all test cases passed
                util.addFinding("A test case failed!", 0, "", "TEST_100006")
            else:
                success_count += 1

            with open('stderr.txt', 'a') as f:
                f.write(report['stderr_stream'])
            with open('stdout.txt', 'a') as f:
                f.write(report['outfile'])

        if success_count == len(test_case_report_list):
            util.addFinding("Program behaves as expected!", 1, "CHALLENGE_PASS", "TEST_900006")
            
    util.dumpFindings()
        
    # next tools
    subprocess.run(["./analyse.py"], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    subprocess.run(["./ai.py"], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
Example no. 2
                "TEST_900004")

okFail = 0
if runTestAndMatch("result = ", "X", "^0$", "stderr.txt"):
    okFail = 1
util.addFinding("Program is not behaving as expected", okFail, "",
                "TEST_900005")

okFail = 0
if runTestAndMatch("result = ", "X" * 90000, "^0$", "stderr.txt"):
    okFail = 1
util.addFinding("Reading STDIN not in synch with other reads", okFail,
                "INCREMENTAL_4_FUNC_1246832686_A_", "TEST_900006")

############################################
util.dumpFindings()
# run analysis
ret = subprocess.run(["./analyse.py"],
                     stdout=subprocess.PIPE,
                     stderr=subprocess.PIPE)
# run AI
ret = subprocess.run(["./ai.py"],
                     stdout=subprocess.PIPE,
                     stderr=subprocess.PIPE)
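
runTestAndMatch is not defined in this snippet. Judging from the calls above, it feeds an input string to the compiled program, records stderr, and checks stdout against a regular expression. A sketch under those assumptions (the binary path, the timeout, and the prompt handling are guesses):

# Hypothetical sketch of runTestAndMatch; only the argument order
# (prompt, input, pattern, stderr file) is inferred from the call sites.
import re
import subprocess

def runTestAndMatch(prompt, stdin_data, pattern, stderr_path, binary='./a.out'):
    proc = subprocess.run([binary], input=stdin_data,
                          stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                          universal_newlines=True, timeout=10)
    with open(stderr_path, 'a') as f:
        f.write(proc.stderr)  # keep stderr for analyse.py
    # Strip the prompt, then try the pattern against each remaining line.
    out = proc.stdout.replace(prompt, '')
    return any(re.match(pattern, line) for line in out.splitlines())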
Example no. 3
def check():
    """Check security issues according to config and pass results to next tools."""

    overall_report = dict()

    # source code analysis
    # ====================
    # currently empty

    # compile
    # =======
    ret_makefile = subprocess.run(
        [config.compiler] + config.compiler_args,  # command
        stdout=subprocess.PIPE,  # capture stdout
        stderr=subprocess.PIPE,  # capture stderr
        universal_newlines=True)  # use text mode for std* file objects
    overall_report['makefile'] = ret_makefile

    # runtime analysis
    # ================
    with open('compile.txt', 'r') as f:
        if 'error' not in f.read().lower():  # if compilation succeeded
            overall_report, test_case_report_list = runtime_analysis(
                config, overall_report)
            overall_report = source_code_analysis(config, overall_report)
            pp(test_case_report_list)

    # pass this info to next tools for subsequent processing
    # ======================================================

    pp(overall_report)
    # results from runtime analysis
    if 'runtime_analysis_done' in overall_report:
        success_count = 0
        for report in test_case_report_list:
            if 'timeout' in report:
                util.addFinding("Time limit exceeded!", 0, "", "TEST_080006")
            elif report['return_code'] != 0:
                # non-empty stderr: ASan/LeakSan/Stack protector probably reported something
                if report['stderr_stream'] != '':
                    pass  # but these findings will be added by analyse.py
                else:
                    util.addFinding(
                        "It seems your program might have crashed.", 0, "",
                        "TEST_100006")
            # output_match == None means the user might have tried to print to outfile
            elif report['stdout_stream'] != '' or report['output_match'] is None:
                util.addFinding(
                    "A test case failed! Make sure you are not trying to print something.",
                    0, "", "TEST_100006")
            elif isinstance(report['output_match'], str):
                if report['output_match'] == "all_failed_test_cases_had_single_element_array":
                    util.addFinding(
                        "A test case failed!", 0,
                        "MAX_GREATER_BY_K_IN_ARRAY__SINGLE_ELEMENT",
                        "TEST_100003")
            elif not all(report['output_match']):  # not all test cases passed
                priority = "TEST_100006"
                if 'source_code__if_semicolon' in overall_report:
                    tag = "INCREMENTAL_2_MAX_GREATER_BY_K_IN_ARRAY__IF_SEMICOLON_"
                elif ('source_code__arr_i_minux_max' in overall_report
                      or 'source_code__arr_i_minux_second_max' in overall_report):
                    tag = "INCREMENTAL_2_MAX_GREATER_BY_K_IN_ARRAY__UNSIGNED_OVERFLOW_"
                elif 'source_code__second_max_plus_k' in overall_report:
                    tag = "MAX_GREATER_BY_K_IN_ARRAY__UNSIGNED_OVERFLOW_LAST_IF_CONDITION"
                else:
                    tag = ""
                    priority = "TEST_100100"  # give low priority so that other findings which have tags can take over
                util.addFinding("A test case failed!", 0, tag, priority)
            else:
                success_count += 1

            with open('stderr.txt', 'a') as f:
                f.write(report['stderr_stream'])
            with open('stdout.txt', 'a') as f:
                f.write(report['outfile'])

        if success_count == len(test_case_report_list):
            util.addFinding("Program behaves as expected!", 1,
                            "CHALLENGE_PASS", "TEST_900006")

    util.dumpFindings()

    # next tools
    subprocess.run(["./analyse.py"],
                   stdout=subprocess.PIPE,
                   stderr=subprocess.PIPE)
    subprocess.run(["./ai.py"], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
Example no. 4
            with open(outfile, "r") as f:
                current_outfile = f.read()
                report['outfile'] += current_outfile
                
            # check if test cases passed
            ret_output_match = config.check_for_output_match(current_outfile, test_suite)
            report['test_suite'] = test_suite
            report['output_match'] = ret_output_match
            
        except subprocess.TimeoutExpired:
            # Kill the whole process group so that any child processes spawned by the program die too.
            # The children need to be killed because, besides wasting CPU cycles,
            # they can keep stdout open, making Python wait indefinitely even after the timeout expires.
            os.killpg(os.getpgid(p.pid), signal.SIGKILL) 
            report['timeout'] = True
        finally:
            test_case_report_list.append(report)
        
    overall_report['runtime_analysis_done'] = True

    return overall_report, test_case_report_list


if __name__ == '__main__':
    try:
        check() # run checker
    except Exception as e:
        print("EXCEPTION IN CHECKER: " + str(e))
        util.dumpFindings()
    
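The os.killpg call above only works if the child was started in its own process group; otherwise the checker would kill itself along with the group. A minimal sketch of the launch side implied by that call (the binary name, timeout, and redirections are assumptions):

# Hypothetical sketch of how runtime_analysis presumably starts the program.
import os
import signal
import subprocess

p = subprocess.Popen(['./a.out'],
                     stdout=subprocess.PIPE,
                     stderr=subprocess.PIPE,
                     universal_newlines=True,
                     start_new_session=True)  # own session => own process group
try:
    stdout, stderr = p.communicate(timeout=10)
except subprocess.TimeoutExpired:
    # kill the whole group, including grandchildren that may hold stdout open
    os.killpg(os.getpgid(p.pid), signal.SIGKILL)
    stdout, stderr = p.communicate()  # reap the dead child
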
Example no. 5
def check():
    """Check security issues according to config and pass results to next tools."""

    overall_report = dict()

    # source code analysis
    # ====================
    # currently empty

    # compile
    # =======
    ret_makefile = subprocess.run(
        [config.compiler] + config.compiler_args,  # command
        stdout=subprocess.PIPE,  # capture stdout
        stderr=subprocess.PIPE,  # capture stderr
        universal_newlines=True)  # use text mode for std* file objects
    overall_report['makefile'] = ret_makefile

    # runtime analysis
    # ================
    with open('compile.txt', 'r') as f:
        if 'error' not in f.read().lower():  # if compilation succeeded
            overall_report, test_case_report_list = runtime_analysis(
                config, overall_report)

    # pass this info to next tools for subsequent processing
    # ======================================================
    pp(overall_report)
    # results from runtime analysis
    identifier = None
    if 'runtime_analysis_done' in overall_report:
        success_count = 0
        success_test_case_reports = []
        for report in test_case_report_list:
            if 'timeout' in report:
                util.addFinding("Time limit exceeded!", 0, "", "TEST_080006")
            elif report['return_code'] != 0:
                # non-empty stderr: ASan/LeakSan/Stack protector probably reported something
                if report['stderr_stream'] != '':
                    pass  # but these findings will be added by analyse.py
                else:
                    util.addFinding(
                        "It seems your program might have crashed.", 0, "",
                        "TEST_100006")
            # output_match == None means the user might have tried to print to outfile
            elif report['stdout_stream'] != '' or report['output_match'] is None:
                util.addFinding(
                    "A test case failed! Make sure you are not trying to print something.",
                    0, "", "TEST_100006")
            elif not all(report['output_match']):  # not all test cases passed
                util.addFinding("A test case failed!", 0, "", "TEST_100006")
            else:
                success_count += 1
                success_test_case_reports.append(report)

            with open('stderr.txt', 'a') as f:
                f.write(report['stderr_stream'])
            with open('stdout.txt', 'a') as f:
                f.write(report['outfile'])

        if success_count == len(test_case_report_list):
            util.addFinding("Program behaves as expected!", 1,
                            "CHALLENGE_PASS", "TEST_900006")
        else:
            # If none of the test suites in which the program succeeded contains an n=0
            # test case, set identifier to "all failed test suites contain n=0 test case";
            # otherwise leave identifier as None.
            # The point of the identifier is to let feedback.py know that an
            # n=0 test case is likely the one causing the failures.

            success_test_cases = [tc for report in success_test_case_reports
                                  for tc in report['test_suite']]
            identifier = "all failed test suites contain n=0 test case"
            for tc in success_test_cases:
                if tc['input'][0] == '0':
                    identifier = None
                    break

    util.dumpFindings()

    # next tools
    import analyse
    analyse.call_analyse(identifier)
    subprocess.run(["./ai.py"], stdout=subprocess.PIPE, stderr=subprocess.PIPE)