Example #1
def do_test(errors, expected_errors_filename):
    if should_record_tests():
        save_report(errors, expected_errors_filename)
        return
    else:
        patterns = utils.load_json_from_path(expected_errors_filename)
        check_results(errors, patterns)
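The record/check split above depends on helpers that the snippet does not show (should_record_tests, save_report, check_results, utils.load_json_from_path). A minimal, self-contained sketch of the same pattern with stand-in implementations (the environment variable name and file handling are assumptions, not the project's actual code):

import json
import os


def should_record_tests():
    # Stand-in: treat an environment variable as the record-mode switch.
    return os.environ.get('RECORD_EXPECTED_ERRORS') == '1'


def save_report(errors, expected_errors_filename):
    # Stand-in: overwrite the expected-errors baseline with the current run.
    with open(expected_errors_filename, 'w') as out:
        json.dump(errors, out, indent=2)


def check_results(errors, patterns):
    # Stand-in: every recorded pattern must still be present among the errors.
    missing = [p for p in patterns if p not in errors]
    assert not missing, 'missing expected errors: %r' % missing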
Example #2
def get_buck_stats():
    trace_filename = os.path.join(
        DEFAULT_BUCK_OUT,
        'log',
        'traces',
        'build.trace'
    )
    ARGS = 'args'
    SUCCESS_STATUS = 'success_type'
    buck_stats = {}
    try:
        trace = utils.load_json_from_path(trace_filename)
        for t in trace:
            if SUCCESS_STATUS in t[ARGS]:
                status = t[ARGS][SUCCESS_STATUS]
                count = buck_stats.get(status, 0)
                buck_stats[status] = count + 1

        buck_stats_message = 'Buck build statistics:\n\n'
        for key, value in sorted(buck_stats.items()):
            buck_stats_message += '  {0:>8}  {1}\n'.format(value, key)

        return buck_stats_message
    except IOError as e:
        logging.error('Caught %s: %s' % (e.__class__.__name__, str(e)))
        logging.error(traceback.format_exc())
        return ''
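get_buck_stats() assumes the Buck trace file is a JSON list of event dicts whose 'args' may carry a 'success_type' entry. A small self-contained check of the counting logic, using made-up events rather than a real build.trace:

# Hypothetical trace events in the shape the function iterates over.
fake_trace = [
    {'args': {'success_type': 'BUILT_LOCALLY'}},
    {'args': {'success_type': 'FETCHED_FROM_CACHE'}},
    {'args': {'success_type': 'BUILT_LOCALLY'}},
    {'args': {}},  # events without 'success_type' are skipped
]

counts = {}
for event in fake_trace:
    if 'success_type' in event['args']:
        status = event['args']['success_type']
        counts[status] = counts.get(status, 0) + 1

assert counts == {'BUILT_LOCALLY': 2, 'FETCHED_FROM_CACHE': 1}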
def run_analysis(root, clean_cmds, build_cmds, analyzer, env=None):
    if not os.path.exists(root):
        os.makedirs(root)
    os.chdir(root)

    for clean_cmd in clean_cmds:
        subprocess.check_call(clean_cmd, env=env)

    for build_cmd in build_cmds:
        temp_out_dir = tempfile.mkdtemp(suffix='_out', prefix='infer_')
        infer_cmd = (['infer', '-a', analyzer, '-o', temp_out_dir, '--'] +
                     build_cmd)
        # Only record the output of the last build command. We record
        # all of them but each command overwrites the output of the
        # previous one.
        with tempfile.TemporaryFile(mode='w',
                                    suffix='.out',
                                    prefix='analysis_') as analysis_output:
            subprocess.check_call(infer_cmd, stdout=analysis_output, env=env)

    json_path = os.path.join(temp_out_dir, REPORT_JSON)
    found_errors = utils.load_json_from_path(json_path)
    shutil.rmtree(temp_out_dir)
    os.chdir(SCRIPT_DIR)

    return found_errors
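A hedged usage sketch for the run_analysis function above; the project root and the clean/build commands are placeholders, and it assumes 'infer' is on PATH and that REPORT_JSON, SCRIPT_DIR and utils are module-level names defined elsewhere in the harness:

# Hypothetical invocation; adjust the root and commands to a real project.
found = run_analysis(
    '/path/to/project',            # root
    [['make', 'clean']],           # clean_cmds
    [['make']],                    # build_cmds
    'infer',                       # analyzer (placeholder name)
    env=None,
)
print('infer reported %d issues' % len(found))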
def run_analysis(clean_cmds, build_cmds, env=None):
    for clean_cmd in clean_cmds:
        subprocess.check_call(clean_cmd, env=env)

    temp_out_dir = tempfile.mkdtemp(suffix='_out', prefix='infer_')
    for build_cmd in build_cmds:
        extra_args = (build_cmd['infer_args']
                      if 'infer_args' in build_cmd
                      else [])
        infer_cmd = (['infer', '-o', temp_out_dir] +
                     extra_args +
                     ['--'] +
                     build_cmd['compile'])
        print(str(infer_cmd))
        # Only record the output of the last build command. We record
        # all of them but each command overwrites the output of the
        # previous one.
        with tempfile.TemporaryFile(
                mode='w',
                suffix='.out',
                prefix='analysis_') as analysis_output:
            subprocess.check_call(infer_cmd, stdout=analysis_output, env=env)

    json_path = os.path.join(temp_out_dir, REPORT_JSON)
    found_errors = utils.load_json_from_path(json_path)
    shutil.rmtree(temp_out_dir)
    os.chdir(SCRIPT_DIR)

    return found_errors
def run_analysis(clean_cmds, build_cmds, extra_check, env=None):
    for clean_cmd in clean_cmds:
        subprocess.check_call(clean_cmd, env=env)

    temp_out_dir = tempfile.mkdtemp(suffix='_out', prefix='infer_')
    for build_cmd in build_cmds:
        extra_args = (build_cmd['infer_args']
                      if 'infer_args' in build_cmd else [])
        infer_cmd = (['infer', '-o', temp_out_dir] + extra_args + ['--'] +
                     build_cmd['compile'])
        # Only record the output of the last build command. We record
        # all of them but each command overwrites the output of the
        # previous one.
        with tempfile.TemporaryFile(mode='w',
                                    suffix='.out',
                                    prefix='analysis_') as analysis_output:
            subprocess.check_call(infer_cmd, stdout=analysis_output, env=env)

    json_path = os.path.join(temp_out_dir, REPORT_JSON)
    found_errors = utils.load_json_from_path(json_path)
    extra_check(temp_out_dir)
    shutil.rmtree(temp_out_dir)
    os.chdir(SCRIPT_DIR)

    return found_errors
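In the two run_analysis variants above, each build command is a dict rather than a bare argument list. A hedged sketch of the assumed shape, plus a trivial extra_check callback for the last variant (the values and the check are illustrative only):

import os

# Assumed entry shape: 'compile' is required, 'infer_args' is optional.
build_cmds = [
    {'compile': ['make'], 'infer_args': []},
    {'compile': ['make', 'tests']},  # no extra Infer arguments
]


def extra_check(out_dir):
    # Placeholder post-analysis hook: just confirm the report was written.
    assert os.path.exists(os.path.join(out_dir, 'report.json'))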
Example #9
def run_analysis(root, clean_cmd, build_cmd, analyzer):
    os.chdir(root)

    subprocess.check_call(clean_cmd)

    temp_out_dir = tempfile.mkdtemp(suffix='_out', prefix='infer_')
    infer_cmd = ['infer', '-a', analyzer, '-o', temp_out_dir, '--'] + build_cmd

    with tempfile.TemporaryFile(mode='w', suffix='.out',
                                prefix='analysis_') as analysis_output:
        subprocess.check_call(infer_cmd, stdout=analysis_output)

    json_path = os.path.join(temp_out_dir, REPORT_JSON)
    found_errors = utils.load_json_from_path(json_path)
    shutil.rmtree(temp_out_dir)
    os.chdir(CURRENT_DIR)

    return found_errors
        # Set this to True to create an issues.exp file using the
        # results of the test. This is a temporary hack to aid
        # migrating the tests from this file to Makefiles. It can be
        # useful to compare the result of your migrated test with the
        # issues.exp that this gives you.
        if False:
            inferprint_cmd = ([
                INFERPRINT_BIN, '-q', '--issues-tests', 'issues.exp',
                '--from-json-report',
                os.path.join(temp_out_dir, 'report.json')
            ] + extra_args)
            subprocess.check_call(inferprint_cmd, env=env)

    json_path = os.path.join(temp_out_dir, REPORT_JSON)
    found_errors = utils.load_json_from_path(json_path)
    extra_check(temp_out_dir)
    shutil.rmtree(temp_out_dir)
    os.chdir(SCRIPT_DIR)

    return found_errors


def match_pattern(f, p):
    for key in p.keys():
        if f[key] != p[key]:
            return False
    return True


def is_expected(e, patterns):
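The snippet is cut off at is_expected. For illustration only (this is not taken from the source file), a plausible completion would treat an error as expected when it matches at least one recorded pattern via match_pattern:

def is_expected(e, patterns):
    # Illustrative sketch: any matching pattern makes the error expected.
    return any(match_pattern(e, p) for p in patterns)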
Example #12
                mode='w',
                suffix='.out',
                prefix='analysis_') as analysis_output:
            try:
                subprocess.check_call(infer_cmd,
                                      stdout=analysis_output, env=env)
                if should_fail is not None:
                    # hacky since we should clean up infer-out, etc. as below
                    # if you made the test fail, this is your punishment
                    assert False
            except subprocess.CalledProcessError as exn:
                if exn.returncode != should_fail:
                    raise

    json_path = os.path.join(temp_out_dir, REPORT_JSON)
    found_errors = utils.load_json_from_path(json_path)
    extra_check(temp_out_dir)
    shutil.rmtree(temp_out_dir)
    os.chdir(SCRIPT_DIR)

    return found_errors


def match_pattern(f, p):
    for key in p.keys():
        if f[key] != p[key]:
            return False
    return True


def is_expected(e, patterns):
Example #13
def main():
    toplevel_envvar_value = os.environ.get(TOP_LEVEL_ENVVAR, None)
    is_toplevel_instance = False
    if toplevel_envvar_value is None:
        os.environ[TOP_LEVEL_ENVVAR] = '1'
        is_toplevel_instance = True

    to_parse, cmd = split_args_to_parse()
    # get the module name (if any), then load it
    capture_module_name = os.path.basename(cmd[0]) if len(cmd) > 0 else None
    mod_name = get_module_name(capture_module_name)
    imported_module = None
    if mod_name:
        # There is a module that supports the command
        imported_module = load_module(mod_name)

    # get the module's argparser and merge it with the global argparser
    module_argparser = []
    if imported_module:
        module_argparser.append(
            imported_module.create_argparser(capture_module_name)
        )
    global_argparser = create_argparser(module_argparser)

    args = global_argparser.parse_args(to_parse)

    validate_args(imported_module, args)

    remove_infer_out = (imported_module is not None
                        and not args.reactive
                        and capture_module_name != 'analyze'
                        and not args.buck)
    if remove_infer_out:
        analyze.remove_infer_out(args.infer_out)

    if imported_module is not None:
        analyze.create_results_dir(args.infer_out)
        analyze.reset_start_file(args.infer_out,
                                 touch_if_present=not args.continue_capture)

        utils.configure_logging(args)
        logging.info('output of locale.getdefaultlocale(): %s',
                     str(locale.getdefaultlocale()))
        logging.info('encoding we chose in the end: %s',
                     config.CODESET)
        logging.info('Running command %s',
                     ' '.join(map(utils.decode, sys.argv)))
        logging.info('Path to infer script %s (%s)', utils.decode(__file__),
                     os.path.realpath(utils.decode(__file__)))
        logging.info(analyze.get_infer_version())
        logging.info('Platform: %s', utils.decode(platform.platform()))

        def log_getenv(k):
            v = os.getenv(k)
            if v is not None:
                v = utils.decode(v)
            else:
                v = '<NOT SET>'
            logging.info('%s=%s', k, v)

        log_getenv('PATH')
        log_getenv('SHELL')
        log_getenv('PWD')

        capture_exitcode = imported_module.gen_instance(args, cmd).capture()
        if capture_exitcode != os.EX_OK:
            logging.error('Error during capture phase, exiting')
            exit(capture_exitcode)
        logging.info('Capture phase was successful')
    elif capture_module_name is not None:
        # There was a command, but it's not supported
        utils.stdout('Command "{cmd}" not recognised'
                     .format(cmd='' if capture_module_name is None
                             else capture_module_name))
        global_argparser.print_help()
        sys.exit(1)
    else:
        global_argparser.print_help()
        sys.exit(os.EX_OK)

    if not (mod_name == 'buck' or mod_name == 'javac'):
        # Something should already be captured, otherwise the analysis would fail
        if not os.path.exists(os.path.join(args.infer_out, 'captured')):
            print('There was nothing to analyze, exiting')
            exit(os.EX_USAGE)
        analysis = analyze.AnalyzerWrapper(args)
        analysis.analyze_and_report()
        analysis.save_stats()

    if is_toplevel_instance is True:
        buck_out_for_stats_aggregator = None
        if (mod_name == 'buck' and
            os.path.isfile(
                os.path.join(args.infer_out,
                             config.INFER_BUCK_DEPS_FILENAME))):
            buck_out_for_stats_aggregator = 'buck-out'
        logging.info('Aggregating stats')
        output = utils.run_infer_stats_aggregator(
            args.infer_out, buck_out_for_stats_aggregator)
        logging.info(output)

    if args.fail_on_bug:
        bugs_filename = os.path.join(args.infer_out,
                                     config.JSON_REPORT_FILENAME)
        try:
            bugs = utils.load_json_from_path(bugs_filename)
            if len(bugs) > 0:
                sys.exit(config.BUG_FOUND_ERROR_CODE)
        except OSError:
            pass
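main() detects whether it is the top-level infer invocation by checking an environment variable that nested invocations inherit; only the top-level instance aggregates stats. A minimal standalone sketch of that pattern (the variable name here is hypothetical; the real constant TOP_LEVEL_ENVVAR is defined elsewhere):

import os

DEMO_TOP_LEVEL_ENVVAR = 'DEMO_TOP_LEVEL'  # hypothetical name


def run():
    is_toplevel_instance = os.environ.get(DEMO_TOP_LEVEL_ENVVAR) is None
    if is_toplevel_instance:
        os.environ[DEMO_TOP_LEVEL_ENVVAR] = '1'
    # Child processes started from here inherit os.environ, so a nested
    # run sees the variable already set and skips top-level-only work.
    if is_toplevel_instance:
        print('top-level instance: aggregate stats here')
    else:
        print('nested instance: skip aggregation')


if __name__ == '__main__':
    run()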