Example #1
def main(args):
    """
    Get and print the version information from the version config
    file and Thrift API definition.
    """
    logger.setup_logger(args.verbose if 'verbose' in args else None)

    output_format = args.output_format

    has_analyzer_version = False
    try:
        from libcodechecker.libhandlers import analyzer_version
        has_analyzer_version = True

        # Print analyzer version information.
        print("CodeChecker analyzer version:")
        analyzer_version.print_version(output_format)
    except Exception:
        # The analyzer module may be missing from this installation; skip it.
        pass

    try:
        from libcodechecker.libhandlers import webserver_version

        if has_analyzer_version:
            print()  # Print a new line to separate version information.

        # Print web server version information.
        print("CodeChecker web server version:")
        webserver_version.print_version(output_format)
    except Exception:
        # The web server module may be missing from this installation.
        pass
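# The two try/except blocks above let the command degrade gracefully when
# only part of the package is installed. A minimal, self-contained sketch of
# the same pattern, with a hypothetical 'extras' module standing in for the
# optional dependency:
has_extras = False
try:
    import extras
    has_extras = True
    print("extras version: " + extras.__version__)
except ImportError:
    # The optional module is simply not installed; carry on without it.
    pass

if not has_extras:
    print("extras is not available, continuing without it.")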
Example #2
def main(args):
    """
    List basic information about the analyzers supported by CodeChecker.
    """
    logger.setup_logger(args.verbose if 'verbose' in args else None)

    context = generic_package_context.get_context()
    working, errored = \
        analyzer_types.check_supported_analyzers(
            analyzer_types.supported_analyzers,
            context)

    if args.dump_config:
        binary = context.analyzer_binaries.get(args.dump_config)

        if args.dump_config == 'clang-tidy':
            subprocess.call([binary, '-dump-config', '-checks=*'])
        elif args.dump_config == 'clangsa':
            # TODO: Not supported by ClangSA yet!
            LOG.warning("'--dump-config clangsa' is not supported yet.")

        return

    if args.output_format not in ['csv', 'json']:
        if 'details' not in args:
            header = ['Name']
        else:
            header = ['Name', 'Path', 'Version']
    else:
        if 'details' not in args:
            header = ['name']
        else:
            header = ['name', 'path', 'version_string']

    rows = []
    for analyzer in working:
        if 'details' not in args:
            rows.append([analyzer])
        else:
            binary = context.analyzer_binaries.get(analyzer)
            try:
                version = subprocess.check_output([binary, '--version'])
            except (subprocess.CalledProcessError, OSError):
                version = 'ERROR'

            rows.append([analyzer, binary, version])

    if 'all' in args:
        for analyzer, err_reason in errored:
            if 'details' not in args:
                rows.append([analyzer])
            else:
                rows.append([
                    analyzer,
                    context.analyzer_binaries.get(analyzer), err_reason
                ])

    if rows:
        print(output_formatters.twodim_to_str(args.output_format, header,
                                              rows))
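# Presence checks like "'details' not in args" above rely on the flags being
# registered with default=argparse.SUPPRESS (an assumption about the CLI
# setup): a suppressed flag never appears in the Namespace unless the user
# actually passed it. A minimal sketch:
import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--details', action='store_true',
                    default=argparse.SUPPRESS)

print('details' in parser.parse_args([]))             # False
print('details' in parser.parse_args(['--details']))  # True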
Example #3
def main(args):
    """
    Get and print the version information from the version config
    file and Thrift API definition.
    """
    logger.setup_logger(args.verbose if 'verbose' in args else None)

    print_version(args.output_format)
Example #4
def main(args):
    """
    Generates a build log by running the original build command.
    No analysis is done.
    """
    logger.setup_logger(args.verbose if 'verbose' in args else None)

    args.logfile = os.path.realpath(args.logfile)
    if os.path.exists(args.logfile):
        os.remove(args.logfile)

    context = generic_package_context.get_context()
    build_manager.perform_build_command(args.logfile,
                                        args.command,
                                        context,
                                        silent='quiet' in args)
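# A hedged sketch of driving the handler above programmatically; the field
# names mirror the arguments it reads (logfile, command, quiet), but the
# values are illustrative:
import argparse

log_args = argparse.Namespace(
    logfile='compile_cmd.json',  # where the build log will be written
    command='make -j4',          # the original build command to wrap
    quiet=True)                  # suppress the build's own output
main(log_args)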
Example #5
def main(args):
    """
    Get and print the version information from the version config
    file and Thrift API definition.
    """
    logger.setup_logger(args.verbose if 'verbose' in args else None)

    context = generic_package_context.get_context()

    server_versions = [
        '{0}.{1}'.format(major, minor)
        for major, minor in version.SUPPORTED_VERSIONS.items()
    ]

    if args.output_format != 'json':
        server_versions = ', '.join(server_versions)

    rows = [("Base package version", context.version),
            ("Package build date", context.package_build_date),
            ("Git commit ID (hash)", context.package_git_hash),
            ("Git tag information", context.package_git_tag),
            ("Configuration schema", str(context.product_db_version_info)),
            ("Database schema", str(context.run_db_version_info)),
            ("Server supported API (Thrift)", server_versions),
            ("Client API (Thrift)", version.CLIENT_API)]

    if args.output_format != "json":
        print(
            output_formatters.twodim_to_str(args.output_format,
                                            ["Kind", "Version"], rows))
    else:
        # Use a special JSON format here: instead of
        # [ {"kind": "something", "version": "0.0.0"}, {"kind": "foo", ...} ]
        # emit
        # { "something": "0.0.0", "foo": ... }
        print(json.dumps(dict(rows)))
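# The dict(rows) trick above turns the list of (kind, version) pairs straight
# into the flat JSON object the comment describes. A standalone illustration
# with made-up version strings (key order may vary):
import json

rows = [("Base package version", "6.9.0"),
        ("Client API (Thrift)", "6.19")]

# A list of 2-tuples is a valid dict() initializer.
print(json.dumps(dict(rows)))
# {"Base package version": "6.9.0", "Client API (Thrift)": "6.19"}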
Example #6
def main(args):
    """
    Execute a wrapper over log-analyze-parse, aka 'check'.
    """

    logger.setup_logger(args.verbose if 'verbose' in args else None)

    def __load_module(name):
        """Loads the given subcommand's definition from the libs."""
        try:
            module = libhandlers.load_module(name)
        except ImportError:
            LOG.error("Couldn't import subcommand '" + name + "'.")
            raise

        return module

    def __update_if_key_exists(source, target, key):
        """Append the source Namespace's element with 'key' to target with
        the same key, but only if it exists."""
        if key in source:
            setattr(target, key, getattr(source, key))

    # If no output directory is given, the checker results go to a temporary
    # directory so that subsequent "quick" checks don't pollute the result
    # list of a previous check. To use the detection status feature (which
    # requires keeping the .plist files), the output directory has to be
    # provided explicitly.
    is_temp_output = False
    if 'output_dir' in args:
        output_dir = args.output_dir
    else:
        output_dir = tempfile.mkdtemp()
        is_temp_output = True

    output_dir = os.path.abspath(output_dir)
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)

    logfile = None
    is_command = False
    try:
        # --- Step 1.: Perform logging if build command was specified.
        if 'command' in args:
            # mkstemp() keeps the log file on disk; NamedTemporaryFile's name
            # would refer to a file already deleted at garbage collection.
            fd, logfile = tempfile.mkstemp()
            os.close(fd)
            is_command = True

            # Translate the argument list between check and log.
            log_args = argparse.Namespace(command=args.command,
                                          logfile=logfile)
            __update_if_key_exists(args, log_args, 'quiet')
            __update_if_key_exists(args, log_args, 'verbose')

            log_module = __load_module('log')
            LOG.debug("Calling LOG with args:")
            LOG.debug(log_args)

            log_module.main(log_args)
        elif 'logfile' in args:
            logfile = args.logfile

        # --- Step 2.: Perform the analysis.
        if not logfile or not os.path.exists(logfile):
            raise OSError("The specified logfile '" + str(logfile) + "' does "
                          "not exist.")

        analyze_args = argparse.Namespace(logfile=[logfile],
                                          output_path=output_dir,
                                          output_format='plist',
                                          jobs=args.jobs)
        # Some arguments have no default value, so copy them only when they
        # are present; setting the missing ones to None would cause an error
        # after the call.
        args_to_update = [
            'quiet',
            'skipfile',
            'analyzers',
            'add_compiler_defaults',
            'clangsa_args_cfg_file',
            'tidy_args_cfg_file',
            'tidy_config',
            'capture_analysis_output',
            'ctu_phases',
            'stats_output',
            'stats_dir',
            'stats_enabled',
            'stats_relevance_threshold',
            'stats_min_sample_count',
            'enable_all',
            'ordered_checkers',  # --enable and --disable.
            'timeout',
            'report_hash'
        ]
        for key in args_to_update:
            __update_if_key_exists(args, analyze_args, key)
        if 'force' in args or 'clean' in args:
            setattr(analyze_args, 'clean', True)
        __update_if_key_exists(args, analyze_args, 'verbose')

        analyze_module = __load_module('analyze')
        LOG.debug("Calling ANALYZE with args:")
        LOG.debug(analyze_args)

        analyze_module.main(analyze_args)

        # --- Step 3.: Print to stdout.
        parse_args = argparse.Namespace(input=[output_dir],
                                        input_format='plist')
        __update_if_key_exists(args, parse_args, 'print_steps')
        __update_if_key_exists(args, parse_args, 'verbose')
        __update_if_key_exists(args, parse_args, 'skipfile')

        parse_module = __load_module('parse')
        LOG.debug("Calling PARSE with args:")
        LOG.debug(parse_args)

        parse_module.main(parse_args)
    except ImportError:
        LOG.error("Check failed: couldn't import a library.")
    except Exception as ex:
        LOG.error("Running check failed. " + ex.message)
        import traceback
        traceback.print_exc()
    finally:
        if is_temp_output:
            shutil.rmtree(output_dir)
        if is_command:
            os.remove(logfile)

    LOG.debug("Check finished.")
Example #7
def main(args):
    """
    Perform analysis on the given logfiles and store the results in a machine-
    readable format.
    """
    logger.setup_logger(args.verbose if 'verbose' in args else None)

    if len(args.logfile) != 1:
        LOG.warning("Only one log file can be processed right now!")
        sys.exit(1)

    args.output_path = os.path.abspath(args.output_path)
    if os.path.exists(args.output_path) and \
            not os.path.isdir(args.output_path):
        LOG.error("The given output path is not a directory: " +
                  args.output_path)
        sys.exit(1)

    if 'enable_all' in args:
        LOG.info("'--enable-all' was supplied for this analysis.")

    # We clear the output directory in the following cases.
    ctu_dir = os.path.join(args.output_path, 'ctu-dir')
    if 'ctu_phases' in args and args.ctu_phases[0] and \
            os.path.isdir(ctu_dir):
        # Clear the CTU-dir if the user turned on the collection phase.
        LOG.debug("Previous CTU contents have been deleted.")
        shutil.rmtree(ctu_dir)

    if 'clean' in args and os.path.isdir(args.output_path):
        LOG.info("Previous analysis results in '{0}' have been removed, "
                 "overwriting with current result".format(args.output_path))
        shutil.rmtree(args.output_path)

    if not os.path.exists(args.output_path):
        os.makedirs(args.output_path)

    LOG.debug("args: " + str(args))
    LOG.debug("Output will be stored to: '" + args.output_path + "'")

    # Parse the JSON CCDBs and retrieve the compile commands.
    actions = []
    for log_file in args.logfile:
        if not os.path.exists(log_file):
            LOG.error("The specified logfile '" + log_file + "' does not "
                      "exist!")
            continue

        parse_log_options = ParseLogOptions(args)
        actions += log_parser.parse_log(log_file, parse_log_options)

    if not actions:
        LOG.info("None of the specified build log files contained "
                 "valid compilation commands. No analysis needed...")
        sys.exit(1)

    context = generic_package_context.get_context()
    metadata = {
        'action_num': len(actions),
        'command': sys.argv,
        'versions': {
            'codechecker':
            "{0} ({1})".format(context.package_git_tag,
                               context.package_git_hash)
        },
        'working_directory': os.getcwd(),
        'output_path': args.output_path,
        'result_source_files': {}
    }

    if 'name' in args:
        metadata['name'] = args.name

    # Update metadata dictionary with old values.
    metadata_file = os.path.join(args.output_path, 'metadata.json')
    if os.path.exists(metadata_file):
        with open(metadata_file, 'r') as data:
            metadata_prev = json.load(data)
            metadata['result_source_files'] =\
                metadata_prev['result_source_files']

    analyzer.perform_analysis(args, context, actions, metadata)

    LOG.debug("Analysis metadata write to '" + metadata_file + "'")
    with open(metadata_file, 'w') as metafile:
        json.dump(metadata, metafile)

    # WARNING: the 'store' subcommand will look for this file!
    compile_cmd_json = os.path.join(args.output_path, 'compile_cmd.json')
    try:
        source = os.path.abspath(args.logfile[0])
        target = os.path.abspath(compile_cmd_json)

        if source != target:
            shutil.copyfile(source, target)
    except shutil.Error:
        LOG.debug("Compilation database JSON file is the same.")
    except Exception:
        LOG.debug("Copying compilation database JSON file failed.")
Example #8
def main(args):
    """
    Entry point for parsing some analysis results and printing them to the
    stdout in a human-readable format.
    """

    logger.setup_logger(args.verbose if 'verbose' in args else None)

    context = package_context.get_context()

    # To ensure the help message prints the default folder properly,
    # the 'default' for 'args.input' is a string, not a list.
    # But the loop below needs a list to iterate over.
    if isinstance(args.input, str):
        args.input = [args.input]

    original_cwd = os.getcwd()

    suppr_handler = None
    if 'suppress' in args:
        __make_handler = False
        if not os.path.isfile(args.suppress):
            if 'create_suppress' in args:
                with open(args.suppress, 'w') as _:
                    # Just create the file.
                    __make_handler = True
                    LOG.info("Will write source-code suppressions to "
                             "suppress file.")
            else:
                LOG.warning("Suppress file '" + args.suppress + "' given, but "
                            "it does not exist -- will not suppress anything.")
        else:
            __make_handler = True

        if __make_handler:
            suppr_handler = suppress_handler.\
                GenericSuppressHandler(args.suppress,
                                       'create_suppress' in args)
    elif 'create_suppress' in args:
        LOG.error("Can't use '--export-source-suppress' unless '--suppress "
                  "SUPPRESS_FILE' is also given.")
        sys.exit(2)

    processed_path_hashes = set()

    def skip_html_report_data_handler(report_hash, source_file, report_line,
                                      checker_name, diag, files):
        """
        Report handler which skips bugs which were suppressed by source code
        comments.
        """
        report = Report(None, diag['path'], files)
        path_hash = get_report_path_hash(report, files)
        if path_hash in processed_path_hashes:
            LOG.debug("Skip report because it is a deduplication of an "
                      "already processed report!")
            LOG.debug("Path hash: %s", path_hash)
            LOG.debug(diag)
            return True

        skip = plist_parser.skip_report(report_hash, source_file, report_line,
                                        checker_name, suppr_handler)
        if not skip:
            processed_path_hashes.add(path_hash)

        return skip

    skip_handler = None
    if 'skipfile' in args:
        with open(args.skipfile, 'r') as skip_file:
            skip_handler = SkipListHandler(skip_file.read())

    html_builder = None

    for input_path in args.input:

        input_path = os.path.abspath(input_path)
        os.chdir(original_cwd)
        LOG.debug("Parsing input argument: '" + input_path + "'")

        export = args.export if 'export' in args else None
        if export == 'html':
            output_path = os.path.abspath(args.output_path)

            if not html_builder:
                html_builder = \
                    PlistToHtml.HtmlBuilder(context.path_plist_to_html_dist,
                                            context.severity_map)

            LOG.info("Generating html output files:")
            PlistToHtml.parse(input_path, output_path,
                              context.path_plist_to_html_dist,
                              skip_html_report_data_handler, html_builder)
            continue

        files = []
        metadata_dict = {}
        if os.path.isfile(input_path):
            files.append(input_path)

        elif os.path.isdir(input_path):
            metadata_file = os.path.join(input_path, "metadata.json")
            if os.path.exists(metadata_file):
                metadata_dict = util.load_json_or_empty(metadata_file)
                LOG.debug(metadata_dict)

                if 'working_directory' in metadata_dict:
                    working_dir = metadata_dict['working_directory']
                    try:
                        os.chdir(working_dir)
                    except OSError as oerr:
                        LOG.debug(oerr)
                        LOG.error(
                            "Working directory %s is missing.\n"
                            "Can not parse reports safely.", working_dir)
                        sys.exit(1)

            _, _, file_names = next(os.walk(input_path), ([], [], []))
            files = [
                os.path.join(input_path, file_name) for file_name in file_names
            ]

        file_change = set()
        file_report_map = defaultdict(list)

        rh = plist_parser.PlistToPlaintextFormatter(suppr_handler,
                                                    skip_handler,
                                                    context.severity_map,
                                                    processed_path_hashes)
        rh.print_steps = 'print_steps' in args

        for file_path in files:
            f_change = parse(file_path, metadata_dict, rh, file_report_map)
            file_change = file_change.union(f_change)

        report_stats = rh.write(file_report_map)
        severity_stats = report_stats.get('severity')
        file_stats = report_stats.get('files')
        reports_stats = report_stats.get('reports')

        print("\n----==== Summary ====----")
        if file_stats:
            vals = [[os.path.basename(k), v]
                    for k, v in dict(file_stats).items()]
            keys = ['Filename', 'Report count']
            table = twodim_to_str('table', keys, vals, 1, True)
            print(table)

        if severity_stats:
            vals = [[k, v] for k, v in dict(severity_stats).items()]
            keys = ['Severity', 'Report count']
            table = twodim_to_str('table', keys, vals, 1, True)
            print(table)

        report_count = reports_stats.get("report_count", 0)
        print("----=================----")
        print("Total number of reports: {}".format(report_count))
        print("----=================----")

        if file_change:
            changed_files = '\n'.join([' - ' + f for f in file_change])
            LOG.warning("The following source file contents changed since the "
                        "latest analysis:\n{0}\nMultiple reports were not "
                        "shown and skipped from the statistics. Please "
                        "analyze your project again to update the "
                        "reports!".format(changed_files))

    os.chdir(original_cwd)

    # Create index.html for the generated html files.
    if html_builder:
        html_builder.create_index_html(args.output_path)
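# The processed_path_hashes set above deduplicates reports whose diagnostic
# paths hash identically. The core of that bookkeeping in isolation (the hash
# values stand in for get_report_path_hash output):
processed_path_hashes = set()

def seen_before(path_hash):
    # The first sighting records the hash; any repeat is a duplicate.
    if path_hash in processed_path_hashes:
        return True
    processed_path_hashes.add(path_hash)
    return False

for h in ['a1b2', 'c3d4', 'a1b2']:
    print(h + ': ' + ('duplicate' if seen_before(h) else 'new'))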
Example #9
def main(args):
    """
    Store the defect results in the specified input list as bug reports in the
    database.
    """
    logger.setup_logger(args.verbose if 'verbose' in args else None)

    if not host_check.check_zlib():
        raise Exception("zlib is not available on the system!")

    # To ensure the help message prints the default folder properly,
    # the 'default' for 'args.input' is a string, not a list.
    # But the loop below needs a list to iterate over.
    if isinstance(args.input, str):
        args.input = [args.input]

    if 'name' not in args:
        LOG.debug("Generating name for analysis...")
        generated = __get_run_name(args.input)
        if generated:
            setattr(args, 'name', generated)
        else:
            LOG.error("No suitable name was found in the inputs for the "
                      "analysis run. Please specify one by passing argument "
                      "--name run_name in the invocation.")
            sys.exit(2)  # argparse returns error code 2 for bad invocations.

    LOG.info("Storing analysis results for run '" + args.name + "'")

    if 'force' in args:
        LOG.info("argument --force was specified: the run with name '" +
                 args.name + "' will be deleted.")

    protocol, host, port, product_name = split_product_url(args.product_url)

    # Before any transmission happens, check if we have the PRODUCT_STORE
    # permission to prevent a possibly long ZIP operation only to get an
    # error later on.
    product_client = libclient.setup_product_client(protocol, host, port,
                                                    product_name)
    product_id = product_client.getCurrentProduct().id

    auth_client, _ = libclient.setup_auth_client(protocol, host, port)
    has_perm = libclient.check_permission(auth_client,
                                          Permission.PRODUCT_STORE,
                                          {'productID': product_id})
    if not has_perm:
        LOG.error("You are not authorised to store analysis results in "
                  "product '{0}'".format(product_name))
        sys.exit(1)

    # Setup connection to the remote server.
    client = libclient.setup_client(args.product_url, product_client=False)

    LOG.debug("Initializing client connecting to {0}:{1}/{2} done.".format(
        host, port, product_name))

    _, zip_file = tempfile.mkstemp('.zip')
    LOG.debug("Will write mass store ZIP to '{0}'...".format(zip_file))

    try:
        assemble_zip(args.input, zip_file, client)

        if os.stat(zip_file).st_size > MAX_UPLOAD_SIZE:
            LOG.error("The result list to upload is too big (max: {}).".format(
                sizeof_fmt(MAX_UPLOAD_SIZE)))
            sys.exit(1)

        with open(zip_file, 'rb') as zf:
            b64zip = base64.b64encode(zf.read())

        context = generic_package_context.get_context()

        trim_path_prefixes = args.trim_path_prefix if \
            'trim_path_prefix' in args else None

        client.massStoreRun(args.name, args.tag if 'tag' in args else None,
                            str(context.version), b64zip, 'force' in args,
                            trim_path_prefixes)

        LOG.info("Storage finished successfully.")
    except RequestFailed as reqfail:
        if reqfail.errorCode == ErrorCode.SOURCE_FILE:
            header = ['File', 'Line', 'Checker name']
            table = twodim_to_str('table', header,
                                  [c.split('|') for c in reqfail.extraInfo])
            LOG.warning("Setting the review statuses for some reports failed "
                        "because of non valid source code comments: "
                        "{0}\n {1}".format(reqfail.message, table))
        sys.exit(1)
    except Exception as ex:
        LOG.info("Storage failed: " + str(ex))
        sys.exit(1)
    finally:
        os.remove(zip_file)
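# The upload path above boils down to: pack a ZIP, enforce a size limit,
# base64-encode. A self-contained sketch of that pipeline (the limit value is
# a made-up stand-in for MAX_UPLOAD_SIZE):
import base64
import os
import tempfile
import zipfile

MAX_UPLOAD_SIZE = 500 * 1024 * 1024  # assumed limit, for illustration only

fd, zip_file = tempfile.mkstemp('.zip')
os.close(fd)
try:
    with zipfile.ZipFile(zip_file, 'w') as archive:
        archive.writestr('reports/example.plist', '<plist/>')

    if os.stat(zip_file).st_size > MAX_UPLOAD_SIZE:
        raise RuntimeError("The result list to upload is too big.")

    with open(zip_file, 'rb') as zf:
        b64zip = base64.b64encode(zf.read())  # payload for massStoreRun
finally:
    os.remove(zip_file)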
Example #10
def main(args):
    """
    List the checkers available in the specified (or all supported) analyzers,
    along with their description or enabled status, in various formats.
    """

    logger.setup_logger(args.verbose if 'verbose' in args else None)

    # If nothing is set, list checkers for all supported analyzers.
    analyzers = args.analyzers \
        if 'analyzers' in args \
        else analyzer_types.supported_analyzers

    context = package_context.get_context()
    working, errored = analyzer_types.check_supported_analyzers(
        analyzers, context)

    analyzer_environment = analyzer_env.get_check_env(
        context.path_env_extra, context.ld_lib_path_extra)

    analyzer_config_map = analyzer_types.build_config_handlers(
        args, context, working)
    # List available checker profiles.
    if 'profile' in args and args.profile == 'list':
        if 'details' not in args:
            if args.output_format not in ['csv', 'json']:
                header = ['Profile name']
            else:
                header = ['profile_name']
        else:
            if args.output_format not in ['csv', 'json']:
                header = ['Profile name', 'Description']
            else:
                header = ['profile_name', 'description']

        rows = []
        for (profile, description) in context.available_profiles.items():
            if 'details' not in args:
                rows.append([profile])
            else:
                rows.append([profile, description])

        print(output_formatters.twodim_to_str(args.output_format, header,
                                              rows))
        return

    # Use different, human-readable headers depending on the output format.
    if 'details' not in args:
        if args.output_format not in ['csv', 'json']:
            header = ['Name']
        else:
            header = ['name']
    else:
        if args.output_format not in ['csv', 'json']:
            header = ['', 'Name', 'Analyzer', 'Severity', 'Description']
        else:
            header = ['enabled', 'name', 'analyzer', 'severity', 'description']

    rows = []
    for analyzer in working:
        config_handler = analyzer_config_map.get(analyzer)
        analyzer_class = \
            analyzer_types.supported_analyzers[analyzer]

        checkers = analyzer_class.get_analyzer_checkers(
            config_handler, analyzer_environment)
        default_checker_cfg = context.checker_config.get(analyzer +
                                                         '_checkers')

        profile_checkers = None
        if 'profile' in args:
            if args.profile not in context.available_profiles:
                LOG.error("Checker profile '" + args.profile +
                          "' does not exist!")
                LOG.error("To list available profiles, use '--profile list'.")
                return

            profile_checkers = [(args.profile, True)]

        config_handler.initialize_checkers(context.available_profiles,
                                           context.package_root, checkers,
                                           default_checker_cfg,
                                           profile_checkers)

        for checker_name, value in config_handler.checks().items():
            enabled, description = value

            if not enabled and 'profile' in args:
                continue

            if enabled and 'only_disabled' in args:
                continue
            elif not enabled and 'only_enabled' in args:
                continue

            if args.output_format != 'json':
                enabled = '+' if enabled else '-'

            if 'details' not in args:
                rows.append([checker_name])
            else:
                severity = context.severity_map.get(checker_name)
                rows.append(
                    [enabled, checker_name, analyzer, severity, description])

    if rows:
        print(output_formatters.twodim_to_str(args.output_format, header,
                                              rows))

    for analyzer_binary, reason in errored:
        LOG.error("Failed to get checkers for '" + analyzer_binary +
                  "'! The error reason was: '" + reason + "'")
        LOG.error("Please check your installation and the "
                  "'config/package_layout.json' file!")
Example #11
def main(args):
    """
    Entry point for parsing some analysis results and printing them to the
    stdout in a human-readable format.
    """

    logger.setup_logger(args.verbose if 'verbose' in args else None)

    context = generic_package_context.get_context()

    # To ensure the help message prints the default folder properly,
    # the 'default' for 'args.input' is a string, not a list.
    # But the loop below needs a list to iterate over.
    if isinstance(args.input, str):
        args.input = [args.input]

    original_cwd = os.getcwd()

    suppress_handler = None
    if 'suppress' in args:
        __make_handler = False
        if not os.path.isfile(args.suppress):
            if 'create_suppress' in args:
                with open(args.suppress, 'w') as _:
                    # Just create the file.
                    __make_handler = True
                    LOG.info("Will write source-code suppressions to "
                             "suppress file.")
            else:
                LOG.warning("Suppress file '" + args.suppress + "' given, but "
                            "it does not exist -- will not suppress anything.")
        else:
            __make_handler = True

        if __make_handler:
            suppress_handler = generic_package_suppress_handler.\
                GenericSuppressHandler(args.suppress,
                                       'create_suppress' in args)
    elif 'create_suppress' in args:
        LOG.error("Can't use '--export-source-suppress' unless '--suppress "
                  "SUPPRESS_FILE' is also given.")
        sys.exit(2)

    skip_handler = None
    if 'skipfile' in args:
        skip_handler = SkipListHandler(args.skipfile)

    for input_path in args.input:

        input_path = os.path.abspath(input_path)
        os.chdir(original_cwd)
        LOG.debug("Parsing input argument: '" + input_path + "'")

        export = args.export if 'export' in args else None
        if export == 'html':
            output_path = os.path.abspath(args.output_path)

            LOG.info("Generating html output files:")
            PlistToHtml.parse(input_path, output_path,
                              context.path_plist_to_html_dist, 'clean' in args)
            continue

        severity_stats = Counter({})
        file_stats = Counter({})
        report_count = Counter({})

        files = []
        metadata_dict = {}
        if os.path.isfile(input_path):
            files.append(input_path)

        elif os.path.isdir(input_path):
            metadata_file = os.path.join(input_path, "metadata.json")
            if os.path.exists(metadata_file):
                with open(metadata_file, 'r') as metadata:
                    metadata_dict = json.load(metadata)
                    LOG.debug(metadata_dict)

                if 'working_directory' in metadata_dict:
                    os.chdir(metadata_dict['working_directory'])

            _, _, file_names = next(os.walk(input_path), ([], [], []))
            files = [
                os.path.join(input_path, file_name) for file_name in file_names
            ]

        for file_path in files:
            report_stats = parse(file_path, context, metadata_dict,
                                 suppress_handler, skip_handler, 'print_steps'
                                 in args)

            severity_stats.update(Counter(report_stats.get('severity', {})))
            file_stats.update(Counter(report_stats.get('files', {})))
            report_count.update(Counter(report_stats.get('reports', {})))

        print("\n----==== Summary ====----")

        if file_stats:
            vals = [[os.path.basename(k), v]
                    for k, v in dict(file_stats).items()]
            keys = ['Filename', 'Report count']
            table = twodim_to_str('table', keys, vals, 1, True)
            print(table)

        if severity_stats:
            vals = [[k, v] for k, v in dict(severity_stats).items()]
            keys = ['Severity', 'Report count']
            table = twodim_to_str('table', keys, vals, 1, True)
            print(table)

        report_count = dict(report_count).get("report_count", 0)
        print("----=================----")
        print("Total number of reports: {}".format(report_count))
        print("----=================----")

    os.chdir(original_cwd)
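# The per-file loop above aggregates statistics with collections.Counter,
# whose update() adds counts key-wise instead of replacing them. In isolation:
from collections import Counter

severity_stats = Counter({})

# Each parsed plist file contributes its own severity histogram.
for report_stats in [{'HIGH': 2, 'LOW': 1}, {'HIGH': 1, 'STYLE': 4}]:
    severity_stats.update(Counter(report_stats))

print(dict(severity_stats))  # {'HIGH': 3, 'LOW': 1, 'STYLE': 4}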
Example #12
def init_logger(level, logger_name='system'):
    """Set up the logging subsystem and bind the module-level LOG to the
    given logger."""
    logger.setup_logger(level)
    global LOG
    LOG = logger.get_logger(logger_name)
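# Typical use is a single call at subcommand startup, after which the
# module-level LOG is ready ('debug' is an assumed verbosity level name):
init_logger('debug')
LOG.debug("Logger initialized.")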