def test_new_intercept_build(self):
        """
        Test log file parsing escape behaviour with upstream (GitHub)
        intercept-build.
        """
        logfile = os.path.join(self.__test_files, "intercept-new.json")

        # Upstream scan-build-py creates an argument vector, as opposed to a
        # command string. This argument vector contains the define as its
        # own element, in the following format:
        # -DVARIABLE=\"some value\"
        # and the target triplet, e.g.:
        # --target=x86_64-linux-gnu
        #
        # The define is passed to the analyzer properly.

        build_action = log_parser.\
            parse_unique_log(load_json_or_empty(logfile), self.__this_dir)[0]

        self.assertEqual(build_action.source, r'/tmp/a.cpp')
        self.assertEqual(len(build_action.analyzer_options), 1)
        self.assertTrue(len(build_action.target) > 0)
        self.assertEqual(build_action.analyzer_options[0],
                         r'-DVARIABLE="some value"')

        # Test source file with spaces.
        logfile = os.path.join(self.__test_files, "intercept-new-space.json")

        build_action = log_parser.\
            parse_unique_log(load_json_or_empty(logfile), self.__this_dir)[0]

        self.assertEqual(build_action.source, '/tmp/a b.cpp')
        self.assertEqual(build_action.lang, 'c++')
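For reference, a minimal sketch of the compilation database entry such a fixture could contain is shown below. The actual contents of "intercept-new.json" are an assumption here; the sketch only illustrates the argument-vector form described in the comment above, where the define survives as a single element:

    # Hypothetical shape of an upstream intercept-build logfile entry,
    # written as a Python literal. The "arguments" vector keeps
    # -DVARIABLE="some value" together as one element, so no shell
    # re-parsing can split it apart.
    sample_entry = [{
        "directory": "/tmp",
        "arguments": ["g++", '-DVARIABLE="some value"',
                      "--target=x86_64-linux-gnu", "-c", "a.cpp"],
        "file": "a.cpp",
    }]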
Example #2
    def test_new_intercept_build(self):
        """
        Test log file parsing escape behaviour with upstream (GitHub)
        intercept-build.
        """
        logfile = os.path.join(self.__test_files, "intercept-new.json")

        # Upstream scan-build-py creates an argument vector, as opposed to a
        # command string. This argument vector contains the define as its
        # own element, in the following format:
        # -DVARIABLE=\"some value\"
        # and the target triplet, e.g.:
        # --target=x86_64-linux-gnu
        #
        # The define is passed to the analyzer properly.

        build_action = log_parser.parse_log(load_json_or_empty(logfile))[0]

        self.assertEqual(build_action.source, r'/tmp/a.cpp')
        self.assertEqual(len(build_action.analyzer_options), 1)
        self.assertTrue(len(build_action.target) > 0)
        self.assertEqual(build_action.analyzer_options[0],
                         r'-DVARIABLE="\"some value"\"')

        # Test source file with spaces.
        logfile = os.path.join(self.__test_files, "intercept-new-space.json")

        build_action = log_parser.parse_log(load_json_or_empty(logfile))[0]

        self.assertEqual(build_action.source, r'/tmp/a\ b.cpp')
        self.assertEqual(build_action.lang, 'c++')
    def test_new_ldlogger(self):
        """
        Test log file parsing escape behaviour with after-#631 LD-LOGGER.
        """
        logfile = os.path.join(self.__test_files, "ldlogger-new.json")

        # LD-LOGGERS after http://github.com/Ericsson/codechecker/pull/631
        # now properly log the multiword arguments. When these are parsed by
        # the log_parser, the define's value will be passed to the analyzer.
        #
        # Logfile contains -DVARIABLE="some value"
        # and --target=x86_64-linux-gnu.

        build_action = log_parser.\
            parse_unique_log(load_json_or_empty(logfile), self.__this_dir)[0]

        self.assertEqual(build_action.source, r'/tmp/a.cpp')
        self.assertEqual(len(build_action.analyzer_options), 1)
        self.assertTrue(len(build_action.target) > 0)
        self.assertEqual(build_action.analyzer_options[0],
                         r'-DVARIABLE=some value')

        # Test source file with spaces.
        logfile = os.path.join(self.__test_files, "ldlogger-new-space.json")

        build_action = log_parser.\
            parse_unique_log(load_json_or_empty(logfile), self.__this_dir)[0]

        self.assertEqual(build_action.source, r'/tmp/a b.cpp')
        self.assertEqual(build_action.lang, 'c++')
    def test_old_intercept_build(self):
        """
        Test log file parsing escape behaviour with clang-5.0 intercept-build.
        """
        logfile = os.path.join(self.__test_files, "intercept-old.json")

        # Scan-build-py shipping with clang-5.0 makes a logfile that contains:
        # -DVARIABLE=\"some value\" and --target=x86_64-linux-gnu
        #
        # The define is passed to the analyzer properly.

        build_action = log_parser.\
            parse_unique_log(load_json_or_empty(logfile), self.__this_dir)[0]

        self.assertEqual(build_action.source, r'/tmp/a.cpp')
        self.assertEqual(len(build_action.analyzer_options), 1)
        self.assertTrue(len(build_action.target) > 0)
        self.assertEqual(build_action.analyzer_options[0],
                         r'-DVARIABLE="some')

        # Test source file with spaces.
        logfile = os.path.join(self.__test_files, "intercept-old-space.json")

        build_action = log_parser.\
            parse_unique_log(load_json_or_empty(logfile), self.__this_dir)[0]

        self.assertEqual(build_action.source, '/tmp/a b.cpp')
        self.assertEqual(build_action.lang, 'c++')
def get_context():
    LOG.debug('Loading package config.')

    package_root = os.environ['CC_PACKAGE_ROOT']

    pckg_config_file = os.path.join(package_root, "config", "config.json")
    LOG.debug('Reading config: %s', pckg_config_file)
    cfg_dict = load_json_or_empty(pckg_config_file)

    if not cfg_dict:
        sys.exit(1)

    LOG.debug(cfg_dict)

    LOG.debug('Loading layout config.')

    layout_cfg_file = os.path.join(package_root, "config",
                                   "package_layout.json")
    LOG.debug(layout_cfg_file)
    lcfg_dict = load_json_or_empty(layout_cfg_file)

    if not lcfg_dict:
        sys.exit(1)

    try:
        return Context(package_root, lcfg_dict['runtime'], cfg_dict)
    except KeyError:
        import traceback
        traceback.print_exc()
        sys.exit(1)
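Every snippet on this page reads files through load_json_or_empty. Its implementation is not shown here, so the following is only a minimal sketch inferred from the call sites above (a path, an optional default, an optional description used in log messages, and an optional lock flag); the real helper may differ, e.g. in how it locks the file:

    import json
    import logging

    LOG = logging.getLogger(__name__)


    def load_json_or_empty(path, default=None, kind=None, lock=False):
        """Sketch: parse a JSON file, returning `default` on failure.
        The `lock` flag is accepted but not honoured in this sketch."""
        try:
            with open(path, encoding="utf-8", errors="ignore") as handle:
                return json.load(handle)
        except (OSError, ValueError) as ex:
            LOG.warning("Failed to load %s file: %s. %s",
                        kind or "json", path, ex)
            return default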
Example #6
    def test_old_intercept_build(self):
        """
        Test log file parsing escape behaviour with clang-5.0 intercept-build.
        """
        logfile = os.path.join(self.__test_files, "intercept-old.json")

        # Scan-build-py shipping with clang-5.0 makes a logfile that contains:
        # -DVARIABLE=\"some value\" and --target=x86_64-linux-gnu
        #
        # The define is passed to the analyzer properly.

        build_action = log_parser.parse_log(load_json_or_empty(logfile))[0]

        self.assertEqual(build_action.source, r'/tmp/a.cpp')
        self.assertEqual(len(build_action.analyzer_options), 1)
        self.assertTrue(len(build_action.target) > 0)
        self.assertEqual(build_action.analyzer_options[0],
                         r'-DVARIABLE="\"some value"\"')

        # Test source file with spaces.
        logfile = os.path.join(self.__test_files, "intercept-old-space.json")

        build_action = log_parser.parse_log(load_json_or_empty(logfile))[0]

        self.assertEqual(build_action.source, r'/tmp/a\ b.cpp')
        self.assertEqual(build_action.lang, 'c++')
Example #7
    def test_new_ldlogger(self):
        """
        Test log file parsing escape behaviour with after-#631 LD-LOGGER.
        """
        logfile = os.path.join(self.__test_files, "ldlogger-new.json")

        # LD-LOGGERS after http://github.com/Ericsson/codechecker/pull/631
        # now properly log the multiword arguments. When these are parsed by
        # the log_parser, the define's value will be passed to the analyzer.
        #
        # Logfile contains -DVARIABLE="some value"
        # and --target=x86_64-linux-gnu.

        build_action = log_parser.parse_log(load_json_or_empty(logfile))[0]

        self.assertEqual(build_action.source, r'/tmp/a.cpp')
        self.assertEqual(len(build_action.analyzer_options), 1)
        self.assertTrue(len(build_action.target) > 0)
        self.assertEqual(build_action.analyzer_options[0],
                         r'-DVARIABLE="\"some value"\"')

        # Test source file with spaces.
        logfile = os.path.join(self.__test_files, "ldlogger-new-space.json")

        build_action = log_parser.parse_log(load_json_or_empty(logfile))[0]

        self.assertEqual(build_action.source, r'/tmp/a\ b.cpp')
        self.assertEqual(build_action.lang, 'c++')
    def __init__(self):
        LOG.debug("Loading clientside session config.")

        # Check whether user's configuration exists.
        user_home = os.path.expanduser("~")
        session_cfg_file = os.path.join(user_home,
                                        ".codechecker.passwords.json")
        LOG.info("Checking local passwords or tokens in %s", session_cfg_file)

        scfg_dict = {}

        mistyped_cfg_file = os.path.join(user_home,
                                         ".codechecker.password.json")

        if os.path.exists(session_cfg_file):
            check_file_owner_rw(session_cfg_file)
            scfg_dict = load_json_or_empty(session_cfg_file, {},
                                           "user authentication")
            scfg_dict['credentials'] = \
                simplify_credentials(scfg_dict['credentials'])
            if not scfg_dict['credentials']:
                LOG.info("No saved tokens.")
            else:
                LOG.debug("Tokens or passwords were found for these hosts:")
                for k, v in scfg_dict['credentials'].items():
                    user, _ = v.split(":")
                    LOG.debug("  user '%s' host '%s'", user, k)
        elif os.path.exists(mistyped_cfg_file):
            LOG.warning("Typo in file name! Rename '%s' to '%s'.",
                        mistyped_cfg_file, session_cfg_file)
        else:
            LOG.info("Password file not found.")

        if not scfg_dict.get('credentials'):
            scfg_dict['credentials'] = {}

        self.__save = scfg_dict
        self.__autologin = scfg_dict.get('client_autologin', True)
        # Check and load token storage for user.
        self.token_file = os.path.join(user_home, ".codechecker.session.json")
        LOG.info("Checking for local valid sessions.")

        if os.path.exists(self.token_file):
            token_dict = load_json_or_empty(self.token_file, {},
                                            "user authentication")
            check_file_owner_rw(self.token_file)

            self.__tokens = token_dict.get('tokens')
            LOG.debug("Found session information for these hosts:")
            for k, _ in self.__tokens.items():
                LOG.debug("  %s", k)
        else:
            with open(self.token_file, 'w', encoding="utf-8",
                      errors="ignore") as f:
                json.dump({'tokens': {}}, f)
            os.chmod(self.token_file, stat.S_IRUSR | stat.S_IWUSR)

            self.__tokens = {}
Example #9
    def __init__(self, package_root, pckg_layout, cfg_dict):
        # Get the common environment variables.
        self.pckg_layout = pckg_layout

        self._package_root = package_root
        self._severity_map = SeverityMap(
            load_json_or_empty(self.checkers_severity_map_file, {}))
        self.__system_comment_map = \
            load_json_or_empty(self.system_comment_map_file, {})
        self.__package_version = None
        self.__package_build_date = None
        self.__package_git_hash = None

        self.__set_version()
def process_config_file(args, subcommand_name):
    """
    Handler to get config file options.
    """
    if 'config_file' not in args:
        return {}
    if args.config_file and os.path.exists(args.config_file):
        cfg = load_json_or_empty(args.config_file, default={})

        # The subcommand name is analyze but the
        # configuration section name is analyzer.
        if subcommand_name == 'analyze':
            # The config value can be 'analyze' or 'analyzer'
            # for backward compatibility.
            analyze_cfg = cfg.get("analyze", [])
            analyzer_cfg = cfg.get("analyzer", [])
            if analyze_cfg:
                if analyzer_cfg:
                    LOG.warning("There is an 'analyze' and an 'analyzer' "
                                "config configuration option in the config "
                                "file. Please use the 'analyze' value to be "
                                "in sync with the subcommands.\n"
                                "Using the 'analyze' configuration.")
                return analyze_cfg
            if analyzer_cfg:
                return analyzer_cfg

        return cfg.get(subcommand_name, [])
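A client config file consumed by this handler could look like the following Python literal (values hypothetical). A file containing both spellings triggers the warning branch above and the 'analyze' list wins:

    # Hypothetical config file contents for process_config_file():
    # both the 'analyze' and the legacy 'analyzer' sections are present,
    # so the handler warns and returns the 'analyze' options.
    config = {
        "analyze": ["--enable=core", "--jobs=4"],
        "analyzer": ["--enable=core"],
        "parse": ["--trim-path-prefix", "/home/user/project"],
    }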
    def __init__(self, package_root, pckg_layout, cfg_dict):
        env_vars = cfg_dict['environment_variables']
        self.__checker_config = cfg_dict['checker_config']
        self.__available_profiles = cfg_dict['available_profiles']

        # Get the common environment variables.
        self.pckg_layout = pckg_layout
        self.env_vars = env_vars

        self._package_root = package_root
        self._severity_map = SeverityMap(
            load_json_or_empty(self.checkers_severity_map_file, {}))
        self.__package_version = None
        self.__package_build_date = None
        self.__package_git_hash = None
        self.__analyzers = {}

        self.logger_bin = None
        self.logger_file = None
        self.logger_compilers = None

        # Get package specific environment variables.
        self.set_env(env_vars)

        self.__set_version()
        self.__populate_analyzers()
    def __set_version(self):
        """
        Get the package version from the version config file.
        """
        vfile_data = load_json_or_empty(self.version_file)

        if not vfile_data:
            sys.exit(1)

        package_version = vfile_data['version']
        package_build_date = vfile_data['package_build_date']
        package_git_hash = vfile_data.get('git_hash')
        package_git_tag = vfile_data.get('git_describe', {}).get('tag')
        package_git_dirtytag = vfile_data.get('git_describe', {}).get('dirty')

        self.__package_version = package_version['major'] + '.' + \
            package_version['minor'] + '.' + \
            package_version['revision']

        self.__package_build_date = package_build_date
        self.__package_git_hash = package_git_hash

        self.__package_git_tag = package_git_tag
        if (LOG.getEffectiveLevel() == logger.DEBUG
                or LOG.getEffectiveLevel() == logger.DEBUG_ANALYZER):
            self.__package_git_tag = package_git_dirtytag
Example #13
def __get_run_name(input_list):
    """Create a runname for the stored analysis from the input list."""

    # Try to create a name from the metada JSON(s).
    names = set()
    for input_path in input_list:
        metafile = os.path.join(input_path, "metadata.json")
        if os.path.isdir(input_path) and os.path.exists(metafile):
            metajson = util.load_json_or_empty(metafile)

            name = None
            if 'version' in metajson and metajson['version'] >= 2:
                for tool in metajson.get('tools', {}):
                    name = tool.get('run_name')
            else:
                name = metajson.get('name')

            if not name:
                name = "unnamed result folder"

            names.add(name)

    if len(names) == 1:
        name = names.pop()
        if name != "unnamed result folder":
            return name
    elif len(names) > 1:
        return "multiple projects: " + ', '.join(names)
    else:
        return False
Example #15
def process_config_file(args):
    """
    Handler to get config file options.
    """
    if args.config_file and os.path.exists(args.config_file):
        cfg = load_json_or_empty(args.config_file, default={})
        return cfg.get('analyzer', [])
Example #16
def enable_auth(workspace):
    """
    Create a dummy authentication-enabled configuration and
    an auth-enabled server.

    The tests only work if the initial value of authentication.enabled
    (in the packaged server_config.json) is FALSE.
    """

    server_config_filename = "server_config.json"

    cc_package = codechecker_package()
    original_auth_cfg = os.path.join(cc_package,
                                     'config',
                                     server_config_filename)

    shutil.copy(original_auth_cfg, workspace)

    server_cfg_file = os.path.join(workspace,
                                   server_config_filename)

    scfg_dict = util.load_json_or_empty(server_cfg_file, {})
    scfg_dict["authentication"]["enabled"] = True
    scfg_dict["authentication"]["method_dictionary"]["enabled"] = True
    scfg_dict["authentication"]["method_dictionary"]["auths"] = \
        ["cc:test", "john:doe", "admin:admin123", "colon:my:password"]

    with open(server_cfg_file, 'w') as scfg:
        json.dump(scfg_dict, scfg, indent=2, sort_keys=True)

    # Create a root user.
    root_file = os.path.join(workspace, 'root.user')
    with open(root_file, 'w') as rootf:
        rootf.write(sha256("root:root").hexdigest())
    os.chmod(root_file, stat.S_IRUSR | stat.S_IWUSR)
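After enable_auth runs, the authentication part of the workspace's server_config.json should resemble the literal below. The shape is inferred from the keys mutated above; all other fields of the shipped configuration are left untouched:

    # Expected shape of the mutated authentication section
    # (surrounding configuration fields omitted).
    expected_auth_section = {
        "authentication": {
            "enabled": True,
            "method_dictionary": {
                "enabled": True,
                "auths": ["cc:test", "john:doe", "admin:admin123",
                          "colon:my:password"],
            },
        },
    }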
Example #17
def metadata_info(metadata_file):
    check_commands = []
    check_durations = []
    cc_version = None
    analyzer_statistics = {}
    checkers = {}

    if not os.path.isfile(metadata_file):
        return check_commands, check_durations, cc_version, \
               analyzer_statistics, checkers

    metadata_dict = load_json_or_empty(metadata_file, {})

    if 'command' in metadata_dict:
        check_commands.append(metadata_dict['command'])
    if 'timestamps' in metadata_dict:
        check_durations.append(
            float(metadata_dict['timestamps']['end'] -
                  metadata_dict['timestamps']['begin']))

    # Get CodeChecker version.
    cc_version = metadata_dict.get('versions', {}).get('codechecker')

    # Get analyzer statistics.
    analyzer_statistics = metadata_dict.get('analyzer_statistics', {})

    checkers = metadata_dict.get('checkers', {})

    return check_commands, check_durations, cc_version, analyzer_statistics, \
        checkers
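The shape of a metadata.json accepted by this reader follows from the keys it queries. A minimal hypothetical input, with the corresponding return values noted in comments:

    # Hypothetical metadata.json content for metadata_info(); every
    # value below is illustrative, not taken from a real run.
    metadata = {
        "command": ["CodeChecker", "analyze", "compile_cmd.json"],
        "timestamps": {"begin": 1500000000.0, "end": 1500000042.5},
        "versions": {"codechecker": "6.0.0"},
        "analyzer_statistics": {"clangsa": {"successful": 10}},
        "checkers": {"clangsa": ["core.DivideZero"]},
    }
    # metadata_info() would collect the command, a duration of 42.5
    # seconds, version "6.0.0", the statistics and the checkers dict.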
    def test_response_file_simple(self):
        """
        Test simple response file where the source file comes outside the
        response file.
        """
        with open(self.compile_command_file_path,
                  "w",
                  encoding="utf-8",
                  errors="ignore") as build_json:
            build_json.write(
                json.dumps([
                    dict(directory=self.tmp_dir,
                         command="g++ {0} @{1}".format(self.src_file_path,
                                                       self.rsp_file_path),
                         file=self.src_file_path)
                ]))

        with open(self.rsp_file_path, "w", encoding="utf-8",
                  errors="ignore") as rsp_file:
            rsp_file.write("""-DVARIABLE="some value" """)

        logfile = os.path.join(self.compile_command_file_path)

        build_actions, _ = log_parser. \
            parse_unique_log(load_json_or_empty(logfile), self.__this_dir)
        build_action = build_actions[0]
        self.assertEqual(len(build_action.analyzer_options), 1)
        self.assertEqual(build_action.analyzer_options[0], '-DVARIABLE=some')
    def test_old_ldlogger(self):
        """
        Test log file parsing escape behaviour with pre-2017 Q2 LD-LOGGER.
        """
        logfile = os.path.join(self.__test_files, "ldlogger-old.json")

        # LD-LOGGER before http://github.com/Ericsson/codechecker/pull/631
        # used an escape mechanism that, when parsed by the log parser via
        # shlex, caused CodeChecker to treat arguments containing multiword
        # string literals as "file" entries (instead of compile options),
        # which the command builder then ignored, reducing analysis
        # accuracy because the defines were lost.
        #
        # Logfile contains "-DVARIABLE="some value"".
        #
        # There is no good way to back-and-forth convert in log_parser or
        # option_parser, so here we aim for a non-failing stalemate of the
        # define being considered a file and ignored, for now.

        build_actions, _ = log_parser.\
            parse_unique_log(load_json_or_empty(logfile), self.__this_dir)
        build_action = build_actions[0]

        self.assertEqual(build_action.source, r'/tmp/a.cpp')
        self.assertEqual(len(build_action.analyzer_options), 1)
Example #23
def check_config_file(args):
    """
    LOG and check about the config file usage.

    If a config file is set but does not exist, the program will exit.
    LOG is not yet initialized when process_config_file runs, so the
    usage cannot be logged there, and using print instead would always
    emit the config file data, which can mess up tests that depend on
    the output.
    """

    if args.config_file and not os.path.exists(args.config_file):
        LOG.error("Configuration file '%s' does not exist.",
                  args.config_file)
        sys.exit(1)
    elif not args.config_file:
        return

    cfg = load_json_or_empty(args.config_file, default={})
    if cfg.get("enabled"):
        LOG.debug("Using config file: '%s'.", args.config_file)
        return cfg.get('analyzer', [])

    LOG.debug("Config file '%s' is available but disabled.", args.config_file)
    def test_response_file_contains_source_file(self):
        """
        Test response file where the source file comes from the response file.
        """
        with open(self.compile_command_file_path, "w") as build_json:
            build_json.write(
                json.dumps([
                    dict(directory=self.tmp_dir,
                         command="g++ @{0}".format(self.rsp_file_path),
                         file="@{0}".format(self.rsp_file_path))
                ]))

        with open(self.rsp_file_path, "w") as rsp_file:
            rsp_file.write("""-DVARIABLE="some value" {0}""".format(
                self.src_file_path))

        logfile = os.path.join(self.compile_command_file_path)

        build_actions, _ = log_parser. \
            parse_unique_log(load_json_or_empty(logfile), self.__this_dir)
        build_action = build_actions[0]

        self.assertEqual(len(build_action.analyzer_options), 1)
        self.assertEqual(build_action.source, self.src_file_path)
        self.assertEqual(build_action.analyzer_options[0], '-DVARIABLE=some')
def get_blame_file_data(
    blame_file: str
) -> Tuple[Optional[str], Optional[str], Optional[str]]:
    """
    Get blame information from the given file.

    It will return a tuple of 'blame information', 'remote url' and
    'tracking branch'.
    """
    blame_info = None
    remote_url = None
    tracking_branch = None

    if os.path.isfile(blame_file):
        data = util.load_json_or_empty(blame_file)
        if data:
            remote_url = data.get("remote_url")
            tracking_branch = data.get("tracking_branch")

            blame_info = data

            # Remove fields which are not needed anymore from the blame info.
            del blame_info["remote_url"]
            del blame_info["tracking_branch"]

    return blame_info, remote_url, tracking_branch
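The blame file layout is implied by the fields read above. In the hypothetical example below, everything except the two deleted keys is returned verbatim as blame_info; the "version" and "blame" fields are illustrative assumptions:

    # Hypothetical blame file content; "remote_url" and
    # "tracking_branch" are extracted and removed, the rest is
    # returned unchanged as blame_info.
    blame_data = {
        "remote_url": "https://github.com/Ericsson/codechecker.git",
        "tracking_branch": "origin/master",
        "version": "v1",
        "blame": [{"from": 1, "to": 10, "commit": "abc123"}],
    }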
    def __init__(self):
        LOG.debug("Loading clientside session config.")

        # Check whether user's configuration exists.
        user_home = os.path.expanduser("~")
        session_cfg_file = os.path.join(user_home,
                                        ".codechecker.passwords.json")
        LOG.debug(session_cfg_file)

        scfg_dict = {}
        if os.path.exists(session_cfg_file):
            scfg_dict = load_json_or_empty(session_cfg_file, {},
                                           "user authentication")
            scfg_dict['credentials'] = \
                simplify_credentials(scfg_dict['credentials'])

            check_file_owner_rw(session_cfg_file)
        else:
            misstyped_cfg_file = os.path.join(user_home,
                                              ".codechecker.password.json")
            if os.path.exists(misstyped_cfg_file):
                LOG.warning("Typo in file name! Rename '%s' to '%s'.",
                            misstyped_cfg_file, session_cfg_file)

        if not scfg_dict.get('credentials'):
            scfg_dict['credentials'] = {}

        self.__save = scfg_dict
        self.__autologin = scfg_dict.get('client_autologin', True)

        # Check and load token storage for user.
        self.token_file = os.path.join(user_home, ".codechecker.session.json")
        LOG.debug(self.token_file)

        if os.path.exists(self.token_file):
            token_dict = load_json_or_empty(self.token_file, {},
                                            "user authentication")
            check_file_owner_rw(self.token_file)

            self.__tokens = token_dict.get('tokens')
        else:
            with open(self.token_file, 'w') as f:
                json.dump({'tokens': {}}, f)
            os.chmod(self.token_file, stat.S_IRUSR | stat.S_IWUSR)

            self.__tokens = {}
Example #27
    def __init__(self):
        """ Initialize web context. """
        self._lib_dir_path = os.environ.get('CC_LIB_DIR', '')
        self._data_files_dir_path = os.environ.get('CC_DATA_FILES_DIR', '')

        lcfg_dict = self.__get_package_layout()
        self.pckg_layout = lcfg_dict['runtime']

        self._severity_map = SeverityMap(
            load_json_or_empty(self.checkers_severity_map_file, {}))
        self.__system_comment_map = \
            load_json_or_empty(self.system_comment_map_file, {})
        self.__package_version = None
        self.__package_build_date = None
        self.__package_git_hash = None

        self.__set_version()
def get_instances(folder=None):
    """Returns the list of running servers for the current user."""

    # This method does NOT write the descriptor file.

    descriptor = __get_instance_descriptor_path(folder)
    instances = load_json_or_empty(descriptor, {}, lock=True)

    return [i for i in instances if __check_instance(i['hostname'], i['pid'])]
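The instance descriptor consumed here is a JSON list of server records. Only "hostname" and "pid" are required by this function; any further fields (the "port" below is a hypothetical example) pass through untouched:

    # Hypothetical descriptor file content; get_instances() keeps the
    # entries whose (hostname, pid) pair still identifies a live server.
    instances = [
        {"hostname": "localhost", "pid": 12345, "port": 8001},
        {"hostname": "localhost", "pid": 23456, "port": 8002},
    ]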
Example #30
    def load_compiler_info(filename, compiler):
        contents = load_json_or_empty(filename, {})
        compiler_info = contents.get(compiler)
        if compiler_info is None:
            LOG.error("Could not find compiler %s in file %s",
                      compiler, filename)

        ICI = ImplicitCompilerInfo
        ICI.compiler_includes[compiler] = compiler_info.get('includes')
        ICI.compiler_standard[compiler] = compiler_info.get('default_standard')
        ICI.compiler_target[compiler] = compiler_info.get('target')
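The compiler info file read here maps a compiler to its implicit settings. A hypothetical entry carrying the three keys the loader extracts; the concrete values are assumptions:

    # Hypothetical compiler info file content: keyed by compiler,
    # carrying implicit includes, default standard and target triple.
    compiler_info = {
        "/usr/bin/g++": {
            "includes": ["-isystem /usr/include/c++/9"],
            "default_standard": "-std=gnu++14",
            "target": "x86_64-linux-gnu",
        },
    }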
Example #32
    def get_metadata_info(self, metadata_file):
        """ Get metadata information from the given file. """
        if not os.path.isfile(metadata_file):
            return [], [], None, {}, {}

        metadata_dict = load_json_or_empty(metadata_file, {})

        if 'version' in metadata_dict:
            return self.__get_metadata_info_v2(metadata_dict)
        else:
            return self.__get_metadata_info_v1(metadata_dict)
    def __init__(self):
        """ Initialize web context. """
        self._lib_dir_path = os.environ.get('CC_LIB_DIR', '')
        self._data_files_dir_path = os.environ.get('CC_DATA_FILES_DIR', '')

        lcfg_dict = self.__get_package_layout()
        self.pckg_layout = lcfg_dict['runtime']

        self._severity_map = SeverityMap(
            load_json_or_empty(self.checkers_severity_map_file, {}))
        self.__system_comment_map = \
            load_json_or_empty(self.system_comment_map_file, {})
        self.__package_version = None
        self.__package_build_date = None
        self.__package_git_hash = None

        # This should be initialized in command line scripts based on the
        # given CLI options.
        self.codechecker_workspace = None

        self.__set_version()
Example #34
    def __get_package_layout(self):
        """ Get package layout configuration. """
        layout_cfg_file = os.path.join(self._data_files_dir_path, "config",
                                       "package_layout.json")

        LOG.debug('Reading config: %s', layout_cfg_file)
        lcfg_dict = load_json_or_empty(layout_cfg_file)

        if not lcfg_dict:
            raise ValueError(f"No configuration file '{layout_cfg_file}' can "
                             f"be found or it is empty!")

        return lcfg_dict
Example #35
    def test_include_rel_to_abs(self):
        """
        Test working directory prepending to relative include paths.
        """
        logfile = os.path.join(self.__test_files, "include.json")

        build_action = log_parser.parse_log(load_json_or_empty(logfile))[0]

        self.assertEqual(len(build_action.analyzer_options), 4)
        self.assertEqual(build_action.analyzer_options[0], '-I')
        self.assertEqual(build_action.analyzer_options[1], '/include')
        self.assertEqual(build_action.analyzer_options[2], '-I/include')
        self.assertEqual(build_action.analyzer_options[3], '-I/tmp')
    def __init__(self):
        LOG.debug("Loading clientside session config.")

        # Check whether user's configuration exists.
        user_home = os.path.expanduser("~")
        session_cfg_file = os.path.join(user_home,
                                        ".codechecker.passwords.json")
        LOG.debug(session_cfg_file)

        scfg_dict = {}
        if os.path.exists(session_cfg_file):
            scfg_dict = load_json_or_empty(session_cfg_file, {},
                                           "user authentication")
            check_file_owner_rw(session_cfg_file)

        if not scfg_dict.get('credentials'):
            scfg_dict['credentials'] = {}

        self.__save = scfg_dict
        self.__autologin = scfg_dict.get('client_autologin', True)

        # Check and load token storage for user.
        self.token_file = os.path.join(user_home, ".codechecker.session.json")
        LOG.debug(self.token_file)

        if os.path.exists(self.token_file):
            token_dict = load_json_or_empty(self.token_file, {},
                                            "user authentication")
            check_file_owner_rw(self.token_file)

            self.__tokens = token_dict.get('tokens')
        else:
            with open(self.token_file, 'w') as f:
                json.dump({'tokens': {}}, f)
            os.chmod(self.token_file, stat.S_IRUSR | stat.S_IWUSR)

            self.__tokens = {}
    def test_include_rel_to_abs(self):
        """
        Test working directory prepending to relative include paths.
        """
        logfile = os.path.join(self.__test_files, "include.json")

        build_action = log_parser.\
            parse_unique_log(load_json_or_empty(logfile), self.__this_dir)[0]

        self.assertEqual(len(build_action.analyzer_options), 4)
        self.assertEqual(build_action.analyzer_options[0], '-I')
        self.assertEqual(build_action.analyzer_options[1], '/include')
        self.assertEqual(build_action.analyzer_options[2], '-I/include')
        self.assertEqual(build_action.analyzer_options[3], '-I/tmp')
    def __init__(self, package_root, pckg_layout, cfg_dict):
        # Get the common environment variables.
        self.pckg_layout = pckg_layout

        self._package_root = package_root
        self._severity_map = SeverityMap(
            load_json_or_empty(self.checkers_severity_map_file, {}))
        self.__package_version = None
        self.__product_db_version_info = None
        self.__run_db_version_info = None
        self.__package_build_date = None
        self.__package_git_hash = None

        self.__set_version()
    def __get_package_config(self):
        """ Get package configuration. """
        pckg_config_file = os.path.join(self._data_files_dir_path, "config",
                                        "config.json")

        LOG.debug('Reading config: %s', pckg_config_file)
        cfg_dict = load_json_or_empty(pckg_config_file)

        if not cfg_dict:
            raise ValueError(f"No configuration file '{pckg_config_file}' can "
                             f"be found or it is empty!")

        LOG.debug(cfg_dict)
        return cfg_dict
def get_context():
    LOG.debug('Loading package config.')

    package_root = os.environ['CC_PACKAGE_ROOT']

    pckg_config_file = os.path.join(package_root, "config", "config.json")
    LOG.debug('Reading config: %s', pckg_config_file)
    cfg_dict = load_json_or_empty(pckg_config_file)

    if not cfg_dict:
        sys.exit(1)

    LOG.debug(cfg_dict)

    LOG.debug('Loading layout config.')

    layout_cfg_file = os.path.join(package_root, "config",
                                   "package_layout.json")
    LOG.debug(layout_cfg_file)
    lcfg_dict = load_json_or_empty(layout_cfg_file)

    if not lcfg_dict:
        sys.exit(1)

    # Merge static and runtime layout.
    layout_config = lcfg_dict['static'].copy()
    layout_config.update(lcfg_dict['runtime'])

    LOG.debug(layout_config)

    try:
        return Context(package_root, layout_config, cfg_dict)
    except KeyError:
        import traceback
        traceback.print_exc()
        sys.exit(1)
Example #41
    def __get_config_dict(self):
        """
        Get server config information from the configuration file. Raise
        ValueError if the configuration file is invalid.
        """
        LOG.debug(self.__configuration_file)
        cfg_dict = load_json_or_empty(self.__configuration_file, {},
                                      'server configuration')
        if cfg_dict != {}:
            check_file_owner_rw(self.__configuration_file)
        else:
            # If the configuration dict is empty, no JSON could be
            # parsed from it.
            raise ValueError("Server configuration file was invalid, or "
                             "empty.")
        return cfg_dict
    def test_response_file_contains_multiple_source_files(self):
        """
        Test response file where multiple source files come from the response
        file.
        """
        with open(self.compile_command_file_path,
                  "w",
                  encoding="utf-8",
                  errors="ignore") as build_json:
            build_json.write(
                json.dumps([
                    dict(directory=self.tmp_dir,
                         command="g++ @{0}".format(self.rsp_file_path),
                         file="@{0}".format(self.rsp_file_path))
                ]))

        a_file_path = os.path.join(self.tmp_dir, "a.cpp")
        with open(a_file_path, "w", encoding="utf-8",
                  errors="ignore") as src_file:
            src_file.write("int main() { return 0; }")

        b_file_path = os.path.join(self.tmp_dir, "b.cpp")
        with open(b_file_path, "w", encoding="utf-8",
                  errors="ignore") as src_file:
            src_file.write("void foo() {}")

        with open(self.rsp_file_path, "w", encoding="utf-8",
                  errors="ignore") as rsp_file:
            rsp_file.write("""-DVARIABLE="some value" {0} {1}""".format(
                a_file_path, b_file_path))

        logfile = os.path.join(self.compile_command_file_path)

        build_actions, _ = log_parser. \
            parse_unique_log(load_json_or_empty(logfile), self.__this_dir)

        self.assertEqual(len(build_actions), 2)

        a_build_action = [b for b in build_actions
                          if b.source == a_file_path][0]
        self.assertEqual(len(a_build_action.analyzer_options), 1)
        self.assertEqual(a_build_action.analyzer_options[0], '-DVARIABLE=some')

        b_build_action = [b for b in build_actions
                          if b.source == b_file_path][0]
        self.assertEqual(len(b_build_action.analyzer_options), 1)
        self.assertEqual(b_build_action.analyzer_options[0], '-DVARIABLE=some')
Example #44
def merge_metadata_json(metadata_files, num_of_report_dir=1):
    """ Merge content of multiple metadata files and return it as json. """

    if not metadata_files:
        return {}

    ret = {'version': 2, 'num_of_report_dir': num_of_report_dir, 'tools': []}

    for metadata_file in metadata_files:
        try:
            metadata_dict = load_json_or_empty(metadata_file, {})
            metadata = metadata_v1_to_v2(metadata_dict)
            for tool in metadata['tools']:
                ret['tools'].append(tool)
        except Exception as ex:
            LOG.warning('Failed to parse %s file with the following error: %s',
                        metadata_file, str(ex))

    return ret
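A usage sketch, assuming two report directories whose metadata files exist on disk (the paths are hypothetical):

    # Merge the metadata of two report directories into a single
    # version-2 document; invalid files are skipped with a warning.
    merged = merge_metadata_json(
        ["./reports-a/metadata.json", "./reports-b/metadata.json"],
        num_of_report_dir=2)
    # merged == {'version': 2, 'num_of_report_dir': 2, 'tools': [...]}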
Example #45
def __get_run_name(input_list):
    """Create a runname for the stored analysis from the input list."""

    # Try to create a name from the metada JSON(s).
    names = []
    for input_path in input_list:
        metafile = os.path.join(input_path, "metadata.json")
        if os.path.isdir(input_path) and os.path.exists(metafile):
            metajson = util.load_json_or_empty(metafile)

            if 'name' in metajson:
                names.append(metajson['name'])
            else:
                names.append("unnamed result folder")

    if len(names) == 1 and names[0] != "unnamed result folder":
        return names[0]
    elif len(names) > 1:
        return "multiple projects: " + ', '.join(names)
    else:
        return False
Example #46
def enable_storage_of_analysis_statistics(workspace):
    """
    Enables storing analysis statistics information for the server.
    """

    server_config_filename = "server_config.json"

    cc_package = codechecker_package()
    original_auth_cfg = os.path.join(cc_package,
                                     'config',
                                     server_config_filename)

    shutil.copy(original_auth_cfg, workspace)

    server_cfg_file = os.path.join(workspace,
                                   server_config_filename)

    scfg_dict = util.load_json_or_empty(server_cfg_file, {})
    scfg_dict["store"]["analysis_statistics_dir"] = \
        os.path.join(workspace, 'analysis_statistics')

    with open(server_cfg_file, 'w') as scfg:
        json.dump(scfg_dict, scfg, indent=2, sort_keys=True)
    def test_old_ldlogger(self):
        """
        Test log file parsing escape behaviour with pre-2017 Q2 LD-LOGGER.
        """
        logfile = os.path.join(self.__test_files, "ldlogger-old.json")

        # LD-LOGGER before http://github.com/Ericsson/codechecker/pull/631
        # used an escape mechanism that, when parsed by the log parser via
        # shlex, caused CodeChecker to treat arguments containing multiword
        # string literals as "file" entries (instead of compile options),
        # which the command builder then ignored, reducing analysis
        # accuracy because the defines were lost.
        #
        # Logfile contains "-DVARIABLE="some value"".
        #
        # There is no good way to back-and-forth convert in log_parser or
        # option_parser, so here we aim for a non-failing stalemate of the
        # define being considered a file and ignored, for now.

        build_action = log_parser.\
            parse_unique_log(load_json_or_empty(logfile), self.__this_dir)[0]

        self.assertEqual(build_action.source, r'/tmp/a.cpp')
        self.assertEqual(len(build_action.analyzer_options), 1)
Example #48
def server_init_start(args):
    """
    Start or manage a CodeChecker report server.
    """

    if 'list' in args or 'stop' in args or 'stop_all' in args:
        __instance_management(args)
        sys.exit(0)

    if 'reload' in args:
        __reload_config(args)
        sys.exit(0)

    # Actual server starting from this point.
    if not host_check.check_zlib():
        raise Exception("zlib is not available on the system!")

    # WARNING
    # In case of SQLite the default value of args.dbaddress is used,
    # for which is_localhost should return True.
    if util.is_localhost(args.dbaddress) and \
            not os.path.exists(args.config_directory):
        os.makedirs(args.config_directory)

    # Make sure the SQLite file can be created if it does not exist.
    if 'sqlite' in args and \
            not os.path.isdir(os.path.dirname(args.sqlite)):
        os.makedirs(os.path.dirname(args.sqlite))

    if 'reset_root' in args:
        try:
            os.remove(os.path.join(args.config_directory, 'root.user'))
            LOG.info("Master superuser (root) credentials invalidated and "
                     "deleted. New ones will be generated...")
        except OSError:
            # File doesn't exist.
            pass

    if 'force_auth' in args:
        LOG.info("'--force-authentication' was passed as a command-line "
                 "option. The server will ask for users to authenticate!")

    context = webserver_context.get_context()
    context.codechecker_workspace = args.config_directory
    context.db_username = args.dbusername

    check_env = get_check_env(context.path_env_extra,
                              context.ld_lib_path_extra)

    cfg_sql_server = database.SQLServer.from_cmdline_args(
        vars(args), CONFIG_META, context.config_migration_root,
        interactive=True, env=check_env)

    LOG.info("Checking configuration database ...")
    db_status = cfg_sql_server.connect()
    db_status_msg = database_status.db_status_msg.get(db_status)
    LOG.info(db_status_msg)

    if db_status == DBStatus.SCHEMA_MISSING:
        LOG.debug("Config database schema is missing, initializing new.")
        db_status = cfg_sql_server.connect(init=True)
        if db_status != DBStatus.OK:
            LOG.error("Config database initialization failed!")
            LOG.error("Please check debug logs.")
            sys.exit(1)

    if db_status == DBStatus.SCHEMA_MISMATCH_NO:
        LOG.debug("Configuration database schema mismatch.")
        LOG.debug("No schema upgrade is possible.")
        sys.exit(1)

    force_upgrade = True if 'force_upgrade' in args else False

    if db_status == DBStatus.SCHEMA_MISMATCH_OK:
        LOG.debug("Configuration database schema mismatch.")
        LOG.debug("Schema upgrade is possible.")
        LOG.warning("Please note after migration only "
                    "newer CodeChecker versions can be used "
                    "to start the server")
        LOG.warning("It is advised to make a full backup of your "
                    "configuration database")

        LOG.warning(cfg_sql_server.get_db_location())

        question = 'Do you want to upgrade to the new schema?' \
                   ' Y(es)/n(o) '
        if force_upgrade or util.get_user_input(question):
            print("Upgrading schema ...")
            ret = cfg_sql_server.upgrade()
            msg = database_status.db_status_msg.get(
                ret, 'Unknown database status')
            print(msg)
            if ret != DBStatus.OK:
                LOG.error("Schema migration failed")
                sys.exit(ret)
        else:
            LOG.info("No schema migration was done.")
            sys.exit(0)

    if db_status == DBStatus.MISSING:
        LOG.error("Missing configuration database.")
        LOG.error("Server can not be started.")
        sys.exit(1)

    # Configuration database setup and check is needed before database
    # statuses can be checked.
    try:
        if args.status:
            ret = __db_status_check(cfg_sql_server, context, args.status)
            sys.exit(ret)
    except AttributeError:
        LOG.debug('Status was not in the arguments.')

    try:
        if args.product_to_upgrade:
            ret = __db_migration(cfg_sql_server, context,
                                 args.product_to_upgrade, force_upgrade)
            sys.exit(ret)
    except AttributeError:
        LOG.debug('Product upgrade was not in the arguments.')

    # Create the main database link from the arguments passed over the
    # command line.
    cfg_dir = os.path.abspath(args.config_directory)
    default_product_path = os.path.join(cfg_dir, 'Default.sqlite')
    create_default_product = 'sqlite' in args and \
                             not os.path.exists(default_product_path)

    if create_default_product:
        # Create a default product and add it to the configuration database.

        LOG.debug("Create default product...")
        LOG.debug("Configuring schema and migration...")

        prod_server = database.SQLiteDatabase(
            default_product_path, RUN_META,
            context.run_migration_root, check_env)

        LOG.debug("Checking 'Default' product database.")
        db_status = prod_server.connect()
        if db_status != DBStatus.MISSING:
            db_status = prod_server.connect(init=True)
            LOG.debug(database_status.db_status_msg.get(db_status))
            if db_status != DBStatus.OK:
                LOG.error("Failed to configure default product")
                sys.exit(1)

        product_conn_string = prod_server.get_connection_string()

        server.add_initial_run_database(
            cfg_sql_server, product_conn_string)

        LOG.info("Product 'Default' at '%s' created and set up.",
                 default_product_path)

    prod_statuses = check_product_db_status(cfg_sql_server, context)

    upgrade_available = {}
    for k, v in prod_statuses.items():
        db_status, _, _, _ = v
        if db_status == DBStatus.SCHEMA_MISMATCH_OK or \
                db_status == DBStatus.SCHEMA_MISSING:
            upgrade_available[k] = v

    if upgrade_available:
        print_prod_status(prod_statuses)
        LOG.warning("Multiple products can be upgraded, make a backup!")
        __db_migration(cfg_sql_server, context, 'all', force_upgrade)

    prod_statuses = check_product_db_status(cfg_sql_server, context)
    print_prod_status(prod_statuses)

    non_ok_db = False
    for k, v in prod_statuses.items():
        db_status, _, _, _ = v
        if db_status != DBStatus.OK:
            non_ok_db = True
            break

    if non_ok_db:
        msg = "There are some database issues. " \
              "Do you want to start the " \
              "server? Y(es)/n(o) "
        if not util.get_user_input(msg):
            sys.exit(1)

    # Start database viewer.
    checker_md_docs = os.path.join(context.doc_root, 'checker_md_docs')
    checker_md_docs_map = os.path.join(checker_md_docs,
                                       'checker_doc_map.json')
    checker_md_docs_map = util.load_json_or_empty(checker_md_docs_map, {})

    package_data = {'www_root': context.www_root,
                    'doc_root': context.doc_root,
                    'checker_md_docs': checker_md_docs,
                    'checker_md_docs_map': checker_md_docs_map,
                    'version': context.package_git_tag}

    suppr_handler = suppress_handler. \
        GenericSuppressHandler(None, False)

    try:
        server.start_server(args.config_directory,
                            package_data,
                            args.view_port,
                            cfg_sql_server,
                            suppr_handler,
                            args.listen_address,
                            'force_auth' in args,
                            args.skip_db_cleanup,
                            context,
                            check_env)
    except socket.error as err:
        if err.errno == errno.EADDRINUSE:
            LOG.error("Server can't be started, maybe the given port number "
                      "(%s) is already used. Check the connection "
                      "parameters.", args.view_port)
            sys.exit(1)
        else:
            raise
Example #49
def main(args):
    """
    Perform analysis on the given logfiles and store the results in a machine-
    readable format.
    """
    logger.setup_logger(args.verbose if 'verbose' in args else None)

    if len(args.logfile) != 1:
        LOG.warning("Only one log file can be processed right now!")
        sys.exit(1)

    args.output_path = os.path.abspath(args.output_path)
    if os.path.exists(args.output_path) and \
            not os.path.isdir(args.output_path):
        LOG.error("The given output path is not a directory: " +
                  args.output_path)
        sys.exit(1)

    if 'enable_all' in args:
        LOG.info("'--enable-all' was supplied for this analysis.")

    # We clear the output directory in the following cases.
    ctu_dir = os.path.join(args.output_path, 'ctu-dir')
    if 'ctu_phases' in args and args.ctu_phases[0] and \
            os.path.isdir(ctu_dir):
        # Clear the CTU-dir if the user turned on the collection phase.
        LOG.debug("Previous CTU contents have been deleted.")
        shutil.rmtree(ctu_dir)

    if 'clean' in args and os.path.isdir(args.output_path):
        LOG.info("Previous analysis results in '%s' have been removed, "
                 "overwriting with current result", args.output_path)
        shutil.rmtree(args.output_path)

    if not os.path.exists(args.output_path):
        os.makedirs(args.output_path)

    LOG.debug("args: " + str(args))
    LOG.debug("Output will be stored to: '" + args.output_path + "'")

    # Process the skip list if present.
    skip_handler = __get_skip_handler(args)

    # Enable alpha uniqueing by default if ctu analysis is used.
    if 'none' in args.compile_uniqueing and 'ctu_phases' in args:
        args.compile_uniqueing = "alpha"

    compiler_info_file = None
    if 'compiler_info_file' in args:
        LOG.debug("Compiler info is read from: %s", args.compiler_info_file)
        if not os.path.exists(args.compiler_info_file):
            LOG.error("Compiler info file %s does not exist",
                      args.compiler_info_file)
            sys.exit(1)
        compiler_info_file = args.compiler_info_file

    report_dir = args.output_path

    # Parse the JSON CCDBs and retrieve the compile commands.
    actions = []
    for log_file in args.logfile:
        if not os.path.exists(log_file):
            LOG.error("The specified logfile '%s' does not exist!",
                      log_file)
            continue

        actions += log_parser.parse_unique_log(
            load_json_or_empty(log_file),
            report_dir,
            args.compile_uniqueing,
            skip_handler,
            compiler_info_file
            )

    if not actions:
        LOG.info("None of the specified build log files contained "
                 "valid compilation commands. No analysis needed...")
        sys.exit(1)

    uniqued_compilation_db_file = os.path.join(
        args.output_path, "unique_compile_commands.json")
    with open(uniqued_compilation_db_file, 'w') as f:
        json.dump(actions, f,
                  cls=log_parser.CompileCommandEncoder)

    context = analyzer_context.get_context()
    metadata = {'action_num': len(actions),
                'command': sys.argv,
                'versions': {
                    'codechecker': "{0} ({1})".format(
                        context.package_git_tag,
                        context.package_git_hash)},
                'working_directory': os.getcwd(),
                'output_path': args.output_path,
                'result_source_files': {}}

    if 'name' in args:
        metadata['name'] = args.name

    # Update metadata dictionary with old values.
    metadata_file = os.path.join(args.output_path, 'metadata.json')
    if os.path.exists(metadata_file):
        metadata_prev = load_json_or_empty(metadata_file)
        metadata['result_source_files'] = \
            metadata_prev.get('result_source_files', {})

    analyzer.perform_analysis(args, skip_handler, context, actions, metadata)

    __update_skip_file(args)

    LOG.debug("Analysis metadata write to '%s'", metadata_file)
    with open(metadata_file, 'w') as metafile:
        json.dump(metadata, metafile)

    # WARNING: the 'store' command will look for this file!
    compile_cmd_json = os.path.join(args.output_path, 'compile_cmd.json')
    try:
        source = os.path.abspath(args.logfile[0])
        target = os.path.abspath(compile_cmd_json)

        if source != target:
            shutil.copyfile(source, target)
    except shutil.Error:
        LOG.debug("Compilation database JSON file is the same.")
    except Exception:
        LOG.debug("Copying compilation database JSON file failed.")

    try:
        from codechecker_analyzer import analyzer_statistics
        analyzer_statistics.collect(metadata, "analyze")
    except Exception:
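        # Usage statistics collection is best-effort; never fail the run.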
        pass
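
A minimal sketch of the input the function above consumes: a single-entry
Clang-style JSON compilation database, fed through the same call that main()
makes. The paths, flags and literal arguments below are invented for
illustration; log_parser.parse_unique_log and load_json_or_empty are assumed
to be importable exactly as in the example above.

import json
import tempfile

# Hypothetical single-entry compilation database, in the format produced
# by 'CodeChecker log' or intercept-build.
ccdb = [{
    "directory": "/tmp",
    "command": "g++ -DNDEBUG -c /tmp/a.cpp",
    "file": "/tmp/a.cpp"
}]

with tempfile.NamedTemporaryFile('w', suffix='.json',
                                 delete=False) as log_file:
    json.dump(ccdb, log_file)

# Mirrors the call in the loop above; the skip handler and the compiler
# info file are optional, so None is passed for both.
actions = log_parser.parse_unique_log(
    load_json_or_empty(log_file.name),  # parsed contents of the CCDB
    '/tmp/reports',                     # report (output) directory
    'none',                             # compile command uniqueing mode
    None,                               # no skip handler
    None)                               # no compiler info file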
Example #50
def main(args):
    """
    Entry point for parsing some analysis results and printing them to the
    stdout in a human-readable format.
    """

    logger.setup_logger(args.verbose if 'verbose' in args else None)

    context = analyzer_context.get_context()

    # To ensure the help message prints the default folder properly,
    # the 'default' for 'args.input' is a string, not a list.
    # But the loop below needs a list to iterate over.
    if isinstance(args.input, str):
        args.input = [args.input]

    original_cwd = os.getcwd()

    suppr_handler = None
    if 'suppress' in args:
        __make_handler = False
        if not os.path.isfile(args.suppress):
            if 'create_suppress' in args:
                with open(args.suppress, 'w') as _:
                    # Just create the file.
                    __make_handler = True
                    LOG.info("Will write source-code suppressions to "
                             "suppress file.")
            else:
                LOG.warning("Suppress file '%s' given, but it does not exist"
                            " -- will not suppress anything.", args.suppress)
        else:
            __make_handler = True

        if __make_handler:
            suppr_handler = suppress_handler.\
                GenericSuppressHandler(args.suppress,
                                       'create_suppress' in args)
    elif 'create_suppress' in args:
        LOG.error("Can't use '--export-source-suppress' unless '--suppress "
                  "SUPPRESS_FILE' is also given.")
        sys.exit(2)

    processed_path_hashes = set()

    skip_handler = None
    if 'skipfile' in args:
        with open(args.skipfile, 'r') as skip_file:
            skip_handler = SkipListHandler(skip_file.read())

    trim_path_prefixes = args.trim_path_prefix if \
        'trim_path_prefix' in args else None

    def trim_path_prefixes_handler(source_file):
        """
        Callback to util.trim_path_prefixes to prevent module dependency
        of plist_to_html
        """
        return util.trim_path_prefixes(source_file, trim_path_prefixes)

    html_builder = None

    def skip_html_report_data_handler(report_hash, source_file, report_line,
                                      checker_name, diag, files):
        """
        Report handler which skips bugs which were suppressed by source code
        comments.
        """
        report = Report(None, diag['path'], files)
        path_hash = get_report_path_hash(report, files)
        if path_hash in processed_path_hashes:
            LOG.debug("Skip report because it is a deduplication of an "
                      "already processed report!")
            LOG.debug("Path hash: %s", path_hash)
            LOG.debug(diag)
            return True

        skip = plist_parser.skip_report(report_hash,
                                        source_file,
                                        report_line,
                                        checker_name,
                                        suppr_handler)
        if skip_handler:
            skip |= skip_handler.should_skip(source_file)

        if not skip:
            processed_path_hashes.add(path_hash)

        return skip

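    # Each item in args.input may be a single plist file or a whole report
    # directory. The HTML export branch below handles its input itself and
    # skips the plaintext path.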
    for input_path in args.input:

        input_path = os.path.abspath(input_path)
        os.chdir(original_cwd)
        LOG.debug("Parsing input argument: '%s'", input_path)

        export = args.export if 'export' in args else None
        if export == 'html':
            output_path = os.path.abspath(args.output_path)

            if not html_builder:
                html_builder = \
                    PlistToHtml.HtmlBuilder(context.path_plist_to_html_dist,
                                            context.severity_map)

            LOG.info("Generating html output files:")
            PlistToHtml.parse(input_path,
                              output_path,
                              context.path_plist_to_html_dist,
                              skip_html_report_data_handler,
                              html_builder,
                              trim_path_prefixes_handler)
            continue

        files = []
        metadata_dict = {}
        if os.path.isfile(input_path):
            files.append(input_path)

        elif os.path.isdir(input_path):
            metadata_file = os.path.join(input_path, "metadata.json")
            if os.path.exists(metadata_file):
                metadata_dict = util.load_json_or_empty(metadata_file)
                LOG.debug(metadata_dict)

                if 'working_directory' in metadata_dict:
                    working_dir = metadata_dict['working_directory']
                    try:
                        os.chdir(working_dir)
                    except OSError as oerr:
                        LOG.debug(oerr)
                        LOG.error("Working directory %s is missing.\n"
                                  "Can not parse reports safely.", working_dir)
                        sys.exit(1)

            _, _, file_names = next(os.walk(input_path), ([], [], []))
            files = [os.path.join(input_path, file_name) for file_name
                     in file_names]

        file_change = set()
        file_report_map = defaultdict(list)

        rh = plist_parser.PlistToPlaintextFormatter(suppr_handler,
                                                    skip_handler,
                                                    context.severity_map,
                                                    processed_path_hashes,
                                                    trim_path_prefixes)
        rh.print_steps = 'print_steps' in args

        for file_path in files:
            f_change = parse(file_path, metadata_dict, rh, file_report_map)
            file_change = file_change.union(f_change)

        report_stats = rh.write(file_report_map)
        severity_stats = report_stats.get('severity')
        file_stats = report_stats.get('files')
        reports_stats = report_stats.get('reports', {})

        print("\n----==== Summary ====----")
        if file_stats:
            vals = [[os.path.basename(k), v] for k, v in
                    dict(file_stats).items()]
            keys = ['Filename', 'Report count']
            table = twodim_to_str('table', keys, vals, 1, True)
            print(table)

        if severity_stats:
            vals = [[k, v] for k, v in dict(severity_stats).items()]
            keys = ['Severity', 'Report count']
            table = twodim_to_str('table', keys, vals, 1, True)
            print(table)

        report_count = reports_stats.get("report_count", 0)
        print("----=================----")
        print("Total number of reports: {}".format(report_count))
        print("----=================----")

        if file_change:
            changed_files = '\n'.join(' - ' + f for f in file_change)
            LOG.warning("The contents of the following source files have "
                        "changed since the latest analysis:\n%s\n"
                        "Reports affected by these changes were not shown "
                        "and were left out of the statistics. Please "
                        "analyze your project again to update the "
                        "reports!", changed_files)

    os.chdir(original_cwd)

    # Create index.html and statistics.html for the generated html files.
    if html_builder:
        html_builder.create_index_html(args.output_path)
        html_builder.create_statistics_html(args.output_path)

        print('\nTo view statistics in a browser run:\n> firefox {0}'.format(
            os.path.join(args.output_path, 'statistics.html')))

        print('\nTo view the results in a browser run:\n> firefox {0}'.format(
            os.path.join(args.output_path, 'index.html')))
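
The report deduplication used by skip_html_report_data_handler above reduces
each report to a hash of its bug path and keeps a set of hashes already seen.
The self-contained sketch below illustrates only the idea; the real
get_report_path_hash takes a Report object and hashes richer data, so the
helper here is a hypothetical stand-in.

import hashlib

def path_hash(path_events):
    """Hypothetical stand-in for get_report_path_hash: hash the ordered
    (file, line, column) locations of a bug path."""
    digest = hashlib.sha1()
    for file_name, line, col in path_events:
        digest.update("{0}:{1}:{2}|".format(file_name, line, col).encode())
    return digest.hexdigest()

processed_path_hashes = set()

def is_duplicate(path_events):
    """Return True if an equivalent bug path was already processed."""
    h = path_hash(path_events)
    if h in processed_path_hashes:
        return True
    processed_path_hashes.add(h)
    return False

# Two reports with identical bug paths collapse into one.
assert not is_duplicate([("a.cpp", 10, 4), ("a.cpp", 12, 8)])
assert is_duplicate([("a.cpp", 10, 4), ("a.cpp", 12, 8)])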