Code example #1
    def test_new_intercept_build(self):
        """
        Test log file parsing escape behaviour with upstream (GitHub)
        intercept-build.
        """
        logfile = os.path.join(self.__test_files, "intercept-new.json")

        # Upstream scan-build-py creates an argument vector, as opposed to a
        # command string. This argument vector contains the define as a
        # single element in the following format:
        # -DVARIABLE=\"some value\"
        # and the target triplet, e.g.:
        # --target=x86_64-linux-gnu
        #
        # The define is passed to the analyzer properly.

        build_action = log_parser.parse_log(load_json_or_empty(logfile))[0]

        self.assertEqual(build_action.source, r'/tmp/a.cpp')
        self.assertEqual(len(build_action.analyzer_options), 1)
        self.assertTrue(len(build_action.target) > 0)
        self.assertEqual(build_action.analyzer_options[0],
                         r'-DVARIABLE="\"some value"\"')

        # Test source file with spaces.
        logfile = os.path.join(self.__test_files, "intercept-new-space.json")

        build_action = log_parser.parse_log(load_json_or_empty(logfile))[0]

        self.assertEqual(build_action.source, r'/tmp/a\ b.cpp')
        self.assertEqual(build_action.lang, 'c++')
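
Every snippet on this page calls the same helper, load_json_or_empty, whose definition is not shown. As a rough sketch only (the fcntl-based shared lock and the exact log wording are assumptions, not taken from the CodeChecker source), a compatible implementation could look like this:

import fcntl  # POSIX-only; the real helper may use a portable lock instead
import json
import logging

LOG = logging.getLogger(__name__)


def load_json_or_empty(path, default=None, kind=None, lock=False):
    """
    Parse the file at 'path' as JSON and return its value, or 'default'
    if the file cannot be read or does not contain valid JSON.
    """
    try:
        with open(path, 'r') as handle:
            if lock:
                # Hold a shared advisory lock while reading so a writer
                # holding an exclusive lock cannot interleave with us.
                fcntl.flock(handle, fcntl.LOCK_SH)
            try:
                return json.load(handle)
            finally:
                if lock:
                    fcntl.flock(handle, fcntl.LOCK_UN)
    except (IOError, OSError, ValueError) as ex:
        LOG.warning("Failed to load %s file: %s", kind or 'JSON', path)
        LOG.warning(ex)
        return default

This covers every call shape seen below: a bare path, a path with a {} default, an extra descriptive kind string, and lock=True for files shared between processes.
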
Code example #2
    def test_old_intercept_build(self):
        """
        Test log file parsing escape behaviour with clang-5.0 intercept-build.
        """
        logfile = os.path.join(self.__test_files, "intercept-old.json")

        # The scan-build-py shipped with clang-5.0 produces a logfile
        # that contains:
        # -DVARIABLE=\"some value\" and --target=x86_64-linux-gnu
        #
        # The define is passed to the analyzer properly.

        build_action = log_parser.parse_log(load_json_or_empty(logfile))[0]

        self.assertEqual(build_action.source, r'/tmp/a.cpp')
        self.assertEqual(len(build_action.analyzer_options), 1)
        self.assertTrue(len(build_action.target) > 0)
        self.assertEqual(build_action.analyzer_options[0],
                         r'-DVARIABLE="\"some value"\"')

        # Test source file with spaces.
        logfile = os.path.join(self.__test_files, "intercept-old-space.json")

        build_action = log_parser.parse_log(load_json_or_empty(logfile))[0]

        self.assertEqual(build_action.source, r'/tmp/a\ b.cpp')
        self.assertEqual(build_action.lang, 'c++')
Code example #3
    def test_new_ldlogger(self):
        """
        Test log file parsing escape behaviour with after-#631 LD-LOGGER.
        """
        logfile = os.path.join(self.__test_files, "ldlogger-new.json")

        # LD-LOGGER versions after
        # http://github.com/Ericsson/codechecker/pull/631 properly log
        # multiword arguments. When these are parsed by the log_parser,
        # the define's value is passed to the analyzer intact.
        #
        # Logfile contains -DVARIABLE="some value"
        # and --target=x86_64-linux-gnu.

        build_action = log_parser.parse_log(load_json_or_empty(logfile))[0]

        self.assertEqual(build_action.source, r'/tmp/a.cpp')
        self.assertEqual(len(build_action.analyzer_options), 1)
        self.assertTrue(len(build_action.target) > 0)
        self.assertEqual(build_action.analyzer_options[0],
                         r'-DVARIABLE="\"some value"\"')

        # Test source file with spaces.
        logfile = os.path.join(self.__test_files, "ldlogger-new-space.json")

        build_action = log_parser.parse_log(load_json_or_empty(logfile))[0]

        self.assertEqual(build_action.source, r'/tmp/a\ b.cpp')
        self.assertEqual(build_action.lang, 'c++')
Code example #4
def get_context():
    LOG.debug('Loading package config.')

    package_root = os.environ['CC_PACKAGE_ROOT']

    pckg_config_file = os.path.join(package_root, "config", "config.json")
    LOG.debug('Reading config: ' + pckg_config_file)
    cfg_dict = load_json_or_empty(pckg_config_file)

    if not cfg_dict:
        sys.exit(1)

    LOG.debug(cfg_dict)

    LOG.debug('Loading layout config.')

    layout_cfg_file = os.path.join(package_root, "config",
                                   "package_layout.json")
    LOG.debug(layout_cfg_file)
    lcfg_dict = load_json_or_empty(layout_cfg_file)

    if not lcfg_dict:
        sys.exit(1)

    # Merge static and runtime layout.
    layout_config = lcfg_dict['static'].copy()
    layout_config.update(lcfg_dict['runtime'])

    LOG.debug(layout_config)

    try:
        return Context(package_root, layout_config, cfg_dict)
    except KeyError as kerr:
        LOG.error(kerr)
        sys.exit(1)
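
The static/runtime merge above means runtime keys override static ones. A toy illustration (the layout keys here are invented):

# Hypothetical layout data; runtime values win on key collisions.
lcfg_dict = {
    'static': {'www_root': 'www', 'docs': 'www/docs'},
    'runtime': {'docs': 'www/docs/generated'}
}

layout_config = lcfg_dict['static'].copy()
layout_config.update(lcfg_dict['runtime'])
# layout_config == {'www_root': 'www', 'docs': 'www/docs/generated'}
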
Code example #5
    def __init__(self, package_root, pckg_layout, cfg_dict):
        env_vars = cfg_dict['environment_variables']
        self.__checker_config = cfg_dict['checker_config']
        self.__available_profiles = cfg_dict['available_profiles']

        # Get the common environment variables.
        self.pckg_layout = pckg_layout
        self.env_vars = env_vars

        self._package_root = package_root
        self._severity_map = SeverityMap(
            load_json_or_empty(self.checkers_severity_map_file, {}))
        self.__package_version = None
        self.__package_build_date = None
        self.__package_git_hash = None
        self.__analyzers = {}

        self.logger_bin = None
        self.logger_file = None
        self.logger_compilers = None

        # Get package specific environment variables.
        self.set_env(env_vars)

        self.__set_version()
        self.__populate_analyzers()
Code example #6
    def __set_version(self):
        """
        Get the package version from the version config file.
        """
        vfile_data = load_json_or_empty(self.version_file)

        if not vfile_data:
            sys.exit(1)

        package_version = vfile_data['version']
        package_build_date = vfile_data['package_build_date']
        package_git_hash = vfile_data.get('git_hash')
        package_git_tag = vfile_data.get('git_describe', {}).get('tag')
        package_git_dirtytag = vfile_data.get('git_describe', {}).get('dirty')

        self.__package_version = package_version['major'] + '.' + \
            package_version['minor'] + '.' + \
            package_version['revision']

        self.__package_build_date = package_build_date
        self.__package_git_hash = package_git_hash

        self.__package_git_tag = package_git_tag
        if LOG.getEffectiveLevel() in (logger.DEBUG,
                                       logger.DEBUG_ANALYZER):
            self.__package_git_tag = package_git_dirtytag
Code example #7
    def __set_version(self):
        """
        Get the package version from the version config file.
        """
        vfile_data = load_json_or_empty(self.version_file)

        if not vfile_data:
            sys.exit(1)

        package_version = vfile_data['version']
        package_build_date = vfile_data['package_build_date']
        package_git_hash = vfile_data['git_hash']
        package_git_tag = vfile_data['git_describe']['tag']
        package_git_dirtytag = vfile_data['git_describe']['dirty']
        product_database_version = vfile_data['product_db_version']
        run_database_version = vfile_data['run_db_version']

        self.__package_version = package_version['major'] + '.' + \
            package_version['minor'] + '.' + \
            package_version['revision']
        self.__product_db_version_info = db_version.DBVersionInfo(
            product_database_version['major'],
            product_database_version['minor'])
        self.__run_db_version_info = db_version.DBVersionInfo(
            run_database_version['major'], run_database_version['minor'])

        self.__package_build_date = package_build_date
        self.__package_git_hash = package_git_hash

        self.__package_git_tag = package_git_tag
        if (LOG.getEffectiveLevel() == logger.DEBUG
                or LOG.getEffectiveLevel() == logger.DEBUG_ANALYZER):
            self.__package_git_tag = package_git_dirtytag
Code example #8
def enable_auth(workspace):
    """
    Create a dummy authentication-enabled configuration and
    an auth-enabled server.

    Running the tests only works if the initial value of
    authentication.enabled (in the package's server_config.json) is FALSE.
    """

    server_config_filename = "server_config.json"

    cc_package = codechecker_package()
    original_auth_cfg = os.path.join(cc_package, 'config',
                                     server_config_filename)

    shutil.copy(original_auth_cfg, workspace)

    server_cfg_file = os.path.join(workspace, server_config_filename)

    scfg_dict = util.load_json_or_empty(server_cfg_file, {})
    scfg_dict["authentication"]["enabled"] = True
    scfg_dict["authentication"]["method_dictionary"]["enabled"] = True
    scfg_dict["authentication"]["method_dictionary"]["auths"] = \
        ["cc:test", "john:doe", "admin:admin123"]

    with open(server_cfg_file, 'w') as scfg:
        json.dump(scfg_dict, scfg, indent=2, sort_keys=True)

    # Create a root user.
    root_file = os.path.join(workspace, 'root.user')
    with open(root_file, 'w') as rootf:
        rootf.write(sha256("root:root").hexdigest())
    os.chmod(root_file, stat.S_IRUSR | stat.S_IWUSR)
Code example #9
def metadata_info(metadata_file):
    check_commands = []
    check_durations = []
    cc_version = None
    analyzer_statistics = {}

    if not os.path.isfile(metadata_file):
        return check_commands, check_durations, cc_version, analyzer_statistics

    metadata_dict = load_json_or_empty(metadata_file, {})

    if 'command' in metadata_dict:
        check_commands.append(metadata_dict['command'])
    if 'timestamps' in metadata_dict:
        check_durations.append(
            float(metadata_dict['timestamps']['end'] -
                  metadata_dict['timestamps']['begin']))

    # Get CodeChecker version.
    cc_version = metadata_dict.get('versions', {}).get('codechecker')

    # Get analyzer statistics.
    analyzer_statistics = metadata_dict.get('analyzer_statistics', {})

    return check_commands, check_durations, cc_version, analyzer_statistics
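
The metadata.json consumed by metadata_info() is a flat dictionary. A minimal file for exercising the function could look like this (the key names come from the reads above; the values are invented):

import json
import tempfile

sample = {
    'command': ['CodeChecker', 'analyze', 'compile_cmd.json'],
    'timestamps': {'begin': 1520000000.0, 'end': 1520000042.5},
    'versions': {'codechecker': '6.x (abcdef0)'},
    'analyzer_statistics': {'clangsa': {'successful': 10, 'failed': 0}}
}

with tempfile.NamedTemporaryFile('w', suffix='.json',
                                 delete=False) as tmp:
    json.dump(sample, tmp)

commands, durations, version, stats = metadata_info(tmp.name)
# durations == [42.5], version == '6.x (abcdef0)'
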
Code example #10
def load_compiler_info(filename, compiler, attr):
    data = load_json_or_empty(filename, {})
    value = data.get(compiler)
    if value is None:
        LOG.error("Could not find compiler %s in file %s" %
                  (compiler, filename))
    return value.get(attr) if isinstance(value, dict) else value
Code example #11
def get_instances(folder=None):
    """Returns the list of running servers for the current user."""

    # This method does NOT write the descriptor file.

    descriptor = __get_instance_descriptor_path(folder)
    instances = load_json_or_empty(descriptor, {}, lock=True)

    return [i for i in instances if __check_instance(i['hostname'], i['pid'])]
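
The comprehension above implies that the descriptor is a JSON list of objects carrying at least 'hostname' and 'pid'. An illustrative descriptor (the path and values are invented; real entries may carry more keys) could be written like this:

import json

instances = [
    {'hostname': 'localhost', 'pid': 12345},
    {'hostname': 'localhost', 'pid': 23456}
]

with open('/tmp/instance_descriptor.json', 'w') as f:
    json.dump(instances, f)
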
Code example #12
def dump_compiler_info(filename, compiler, attr, data):
    all_data = dict()
    if os.path.exists(filename):
        all_data = load_json_or_empty(filename)
    if compiler not in all_data:
        all_data[compiler] = dict()
    all_data[compiler].update({attr: data})
    with open(filename, 'w') as f:
        json.dump(all_data, f)
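
A quick roundtrip shows how this writer pairs with load_compiler_info() from code example #10 (the file path, compiler path, and attribute names are made up for the illustration):

info_file = '/tmp/compiler_info.json'

dump_compiler_info(info_file, '/usr/bin/gcc', 'default_standard', 'gnu++14')
dump_compiler_info(info_file, '/usr/bin/gcc', 'includes', ['/usr/include'])

# Read back the nested attribute stored above.
standard = load_compiler_info(info_file, '/usr/bin/gcc', 'default_standard')
# standard == 'gnu++14'
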
Code example #13
    def test_include_rel_to_abs(self):
        """
        Test working directory prepending to relative include paths.
        """
        logfile = os.path.join(self.__test_files, "include.json")

        build_action = log_parser.parse_log(load_json_or_empty(logfile))[0]

        self.assertEqual(len(build_action.analyzer_options), 4)
        self.assertEqual(build_action.analyzer_options[0], '-I')
        self.assertEqual(build_action.analyzer_options[1], '/include')
        self.assertEqual(build_action.analyzer_options[2], '-I/include')
        self.assertEqual(build_action.analyzer_options[3], '-I/tmp')
Code example #14
    def __init__(self):
        LOG.debug("Loading clientside session config.")

        # Check whether user's configuration exists.
        user_home = os.path.expanduser("~")
        session_cfg_file = os.path.join(user_home,
                                        ".codechecker.passwords.json")
        LOG.debug(session_cfg_file)

        scfg_dict = {}
        if os.path.exists(session_cfg_file):
            scfg_dict = load_json_or_empty(session_cfg_file, {},
                                           "user authentication")
            check_file_owner_rw(session_cfg_file)

        if not scfg_dict.get('credentials'):
            scfg_dict['credentials'] = {}

        self.__save = scfg_dict
        self.__autologin = scfg_dict.get('client_autologin', True)

        # Check and load token storage for user.
        self.token_file = os.path.join(user_home, ".codechecker.session.json")
        LOG.debug(self.token_file)

        if os.path.exists(self.token_file):
            token_dict = load_json_or_empty(self.token_file, {},
                                            "user authentication")
            check_file_owner_rw(self.token_file)

            self.__tokens = token_dict.get('tokens')
        else:
            with open(self.token_file, 'w') as f:
                json.dump({'tokens': {}}, f)
            os.chmod(self.token_file, stat.S_IRUSR | stat.S_IWUSR)

            self.__tokens = {}
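
For reference, the credentials file read above is a plain JSON object. An illustrative ~/.codechecker.passwords.json could be written as below; the "host:port" to "username:password" mapping inside 'credentials' is an assumption made for this sketch:

import json
import os
import stat

cfg = {
    'client_autologin': True,
    'credentials': {
        'localhost:8001': 'cc:test'  # assumed "host:port": "user:pass"
    }
}

path = os.path.expanduser('~/.codechecker.passwords.json')
with open(path, 'w') as f:
    json.dump(cfg, f, indent=2)
# Keep the file owner-accessible only; the constructor above checks this.
os.chmod(path, stat.S_IRUSR | stat.S_IWUSR)
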
Code example #15
    def __init__(self, package_root, pckg_layout, cfg_dict):
        # Get the common environment variables.
        self.pckg_layout = pckg_layout

        self._package_root = package_root
        self._severity_map = SeverityMap(
            load_json_or_empty(self.checkers_severity_map_file, {}))
        self.__package_version = None
        self.__product_db_version_info = None
        self.__run_db_version_info = None
        self.__package_build_date = None
        self.__package_git_hash = None

        self.__set_version()
Code example #16
File: store_handler.py Project: redNixon/codechecker
def metadata_info(metadata_file):
    check_commands = []
    check_durations = []

    if not os.path.isfile(metadata_file):
        return check_commands, check_durations

    metadata_dict = load_json_or_empty(metadata_file, {})

    if 'command' in metadata_dict:
        check_commands.append(metadata_dict['command'])
    if 'timestamps' in metadata_dict:
        check_durations.append(
            float(metadata_dict['timestamps']['end'] -
                  metadata_dict['timestamps']['begin']))
    return check_commands, check_durations
Code example #17
    def __get_config_dict(self):
        """
        Get server config information from the configuration file. Raise
        ValueError if the configuration file is invalid.
        """
        LOG.debug(self.__configuration_file)
        cfg_dict = load_json_or_empty(self.__configuration_file, {},
                                      'server configuration')
        if cfg_dict != {}:
            check_file_owner_rw(self.__configuration_file)
        else:
            # An empty configuration dict means no JSON could be parsed
            # from the file.
            raise ValueError("Server configuration file was invalid, or "
                             "empty.")
        return cfg_dict
Code example #18
    def set_env(self, env_vars):
        """
        Get the environment variables.
        """
        self._package_root = os.environ.get(env_vars['env_package_root'])

        self.codechecker_workspace = os.environ.get('codechecker_workspace')

        self._severity_map = SeverityMap(
            load_json_or_empty(self.checkers_severity_map_file, {}))

        # Get generic package specific environment variables.
        self.logger_bin = os.environ.get(env_vars['cc_logger_bin'])
        self.logger_file = os.environ.get(env_vars['cc_logger_file'])
        self.logger_compilers = os.environ.get(env_vars['cc_logger_compiles'])
        self.ld_preload = os.environ.get(env_vars['ld_preload'])
        self.ld_lib_path = env_vars['env_ld_lib_path']
Code example #19
File: store.py Project: jarv-git/codechecker
def __get_run_name(input_list):
    """Create a runname for the stored analysis from the input list."""

    # Try to create a name from the metadata JSON(s).
    names = []
    for input_path in input_list:
        metafile = os.path.join(input_path, "metadata.json")
        if os.path.isdir(input_path) and os.path.exists(metafile):
            metajson = util.load_json_or_empty(metafile)

            if 'name' in metajson:
                names.append(metajson['name'])
            else:
                names.append("unnamed result folder")

    if len(names) == 1 and names[0] != "unnamed result folder":
        return names[0]
    elif len(names) > 1:
        return "multiple projects: " + ', '.join(names)
    else:
        return False
Code example #20
def parse_log(logfilepath, parseLogOptions):
    """
    logfilepath: the compile command json file which should be parsed.
    """
    LOG.debug('Parsing log file: ' + logfilepath)

    try:
        data = load_json_or_empty(logfilepath, {})
        actions = parse_compile_commands_json(data, parseLogOptions)
    except (ValueError, KeyError, TypeError) as ex:
        if os.stat(logfilepath).st_size == 0:
            LOG.error('The compile database is empty.')
        else:
            LOG.error('The compile database is not valid.')
        LOG.debug(traceback.format_exc())
        LOG.debug(ex)
        sys.exit(1)

    LOG.debug('Parsing log file done.')

    return actions
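
parse_log() consumes a Clang JSON compilation database. A minimal database that the function accepts can be produced like this (the g++ command line is only an example):

import json

# One entry per compiler invocation, in the standard Clang JSON
# compilation database format.
compile_commands = [
    {'directory': '/tmp',
     'command': 'g++ -DVARIABLE="some value" -I/include -c /tmp/a.cpp',
     'file': '/tmp/a.cpp'}
]

with open('/tmp/compile_cmd.json', 'w') as f:
    json.dump(compile_commands, f)

# actions = parse_log('/tmp/compile_cmd.json', parse_log_options)

The commented call assumes a ParseLogOptions instance named parse_log_options, built elsewhere from the command-line arguments.
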
Code example #21
File: env.py Project: redNixon/codechecker
def enable_storage_of_analysis_statistics(workspace):
    """
    Enables storing analysis statistics information for the server.
    """

    server_config_filename = "server_config.json"

    cc_package = codechecker_package()
    original_auth_cfg = os.path.join(cc_package, 'config',
                                     server_config_filename)

    shutil.copy(original_auth_cfg, workspace)

    server_cfg_file = os.path.join(workspace, server_config_filename)

    scfg_dict = util.load_json_or_empty(server_cfg_file, {})
    scfg_dict["store"]["analysis_statistics_dir"] = \
        os.path.join(workspace, 'analysis_statistics')

    with open(server_cfg_file, 'w') as scfg:
        json.dump(scfg_dict, scfg, indent=2, sort_keys=True)
Code example #22
    def test_old_ldlogger(self):
        """
        Test log file parsing escape behaviour with pre-2017 Q2 LD-LOGGER.
        """
        logfile = os.path.join(self.__test_files, "ldlogger-old.json")

        # LD-LOGGER before http://github.com/Ericsson/codechecker/pull/631
        # used an escape mechanism that, when the log was parsed via shlex,
        # made CodeChecker treat arguments containing multiword string
        # literals as "files" (instead of compile options). These arguments
        # were eventually ignored by the command builder, lessening analysis
        # accuracy, as the defines were lost.
        #
        # Logfile contains "-DVARIABLE="some value"".
        #
        # There is no good way to back-and-forth convert in log_parser or
        # option_parser, so here we aim for a non-failing stalemate of the
        # define being considered a file and ignored, for now.

        build_action = log_parser.parse_log(load_json_or_empty(logfile))[0]

        self.assertEqual(build_action.source, r'/tmp/a.cpp')
        self.assertEqual(len(build_action.analyzer_options), 0)
Code example #23
File: server.py Project: jarv-git/codechecker
def server_init_start(args):
    """
    Start or manage a CodeChecker report server.
    """

    if 'list' in args or 'stop' in args or 'stop_all' in args:
        __instance_management(args)
        sys.exit(0)

    if 'reload' in args:
        __reload_config(args)
        sys.exit(0)

    # Actual server starting from this point.
    if not host_check.check_zlib():
        raise Exception("zlib is not available on the system!")

    # WARNING!
    # In the case of SQLite, the default value of args.dbaddress is used,
    # for which is_localhost() should return True.
    if util.is_localhost(args.dbaddress) and \
            not os.path.exists(args.config_directory):
        os.makedirs(args.config_directory)

    # Make sure the SQLite file can be created if it does not exist.
    if 'sqlite' in args and \
            not os.path.isdir(os.path.dirname(args.sqlite)):
        os.makedirs(os.path.dirname(args.sqlite))

    if 'reset_root' in args:
        try:
            os.remove(os.path.join(args.config_directory, 'root.user'))
            LOG.info("Master superuser (root) credentials invalidated and "
                     "deleted. New ones will be generated...")
        except OSError:
            # File doesn't exist.
            pass

    if 'force_auth' in args:
        LOG.info("'--force-authentication' was passed as a command-line "
                 "option. The server will ask for users to authenticate!")

    context = package_context.get_context()
    context.codechecker_workspace = args.config_directory
    context.db_username = args.dbusername

    check_env = analyzer_env.get_check_env(context.path_env_extra,
                                           context.ld_lib_path_extra)

    cfg_sql_server = database.SQLServer.from_cmdline_args(
        vars(args), CONFIG_META, context.config_migration_root,
        interactive=True, env=check_env)

    LOG.info("Checking configuration database ...")
    db_status = cfg_sql_server.connect()
    db_status_msg = database_status.db_status_msg.get(db_status)
    LOG.info(db_status_msg)

    if db_status == DBStatus.SCHEMA_MISSING:
        LOG.debug("Config database schema is missing, initializing new.")
        db_status = cfg_sql_server.connect(init=True)
        if db_status != DBStatus.OK:
            LOG.error("Config database initialization failed!")
            LOG.error("Please check debug logs.")
            sys.exit(1)

    if db_status == DBStatus.SCHEMA_MISMATCH_NO:
        LOG.debug("Configuration database schema mismatch.")
        LOG.debug("No schema upgrade is possible.")
        sys.exit(1)

    force_upgrade = 'force_upgrade' in args

    if db_status == DBStatus.SCHEMA_MISMATCH_OK:
        LOG.debug("Configuration database schema mismatch.")
        LOG.debug("Schema upgrade is possible.")
        LOG.warning("Please note after migration only "
                    "newer CodeChecker versions can be used"
                    "to start the server")
        LOG.warning("It is advised to make a full backup of your "
                    "configuration database")

        LOG.warning(cfg_sql_server.get_db_location())

        question = 'Do you want to upgrade to the new schema?' \
                   ' Y(es)/n(o) '
        if force_upgrade or util.get_user_input(question):
            print("Upgrading schema ...")
            ret = cfg_sql_server.upgrade()
            msg = database_status.db_status_msg.get(
                ret, 'Unknown database status')
            print(msg)
            if ret != DBStatus.OK:
                LOG.error("Schema migration failed")
                sys.exit(ret)
        else:
            LOG.info("No schema migration was done.")
            sys.exit(0)

    if db_status == DBStatus.MISSING:
        LOG.error("Missing configuration database.")
        LOG.error("Server can not be started.")
        sys.exit(1)

    # Configuration database setup and check is needed before database
    # statuses can be checked.
    try:
        if args.status:
            ret = __db_status_check(cfg_sql_server, context, args.status)
            sys.exit(ret)
    except AttributeError:
        LOG.debug('Status was not in the arguments.')

    try:
        if args.product_to_upgrade:
            ret = __db_migration(cfg_sql_server, context,
                                 args.product_to_upgrade, force_upgrade)
            sys.exit(ret)
    except AttributeError:
        LOG.debug('Product upgrade was not in the arguments.')

    # Create the main database link from the arguments passed over the
    # command line.
    cfg_dir = os.path.abspath(args.config_directory)
    default_product_path = os.path.join(cfg_dir, 'Default.sqlite')
    create_default_product = 'sqlite' in args and \
                             not os.path.exists(default_product_path)

    if create_default_product:
        # Create a default product and add it to the configuration database.

        LOG.debug("Create default product...")
        LOG.debug("Configuring schema and migration...")

        prod_server = database.SQLiteDatabase(
            default_product_path, RUN_META,
            context.run_migration_root, check_env)

        LOG.debug("Checking 'Default' product database.")
        db_status = prod_server.connect()
        if db_status != DBStatus.MISSING:
            db_status = prod_server.connect(init=True)
            LOG.debug(database_status.db_status_msg.get(db_status))
            if db_status != DBStatus.OK:
                LOG.error("Failed to configure default product")
                sys.exit(1)

        product_conn_string = prod_server.get_connection_string()

        server.add_initial_run_database(
            cfg_sql_server, product_conn_string)

        LOG.info("Product 'Default' at '{0}' created and set up."
                 .format(default_product_path))

    prod_statuses = check_product_db_status(cfg_sql_server, context)

    upgrade_available = {}
    for k, v in prod_statuses.items():
        db_status, _, _, _ = v
        if db_status == DBStatus.SCHEMA_MISMATCH_OK or \
                db_status == DBStatus.SCHEMA_MISSING:
            upgrade_available[k] = v

    if upgrade_available:
        print_prod_status(prod_statuses)
        LOG.warning("Multiple products can be upgraded, make a backup!")
        __db_migration(cfg_sql_server, context, 'all', force_upgrade)

    prod_statuses = check_product_db_status(cfg_sql_server, context)
    print_prod_status(prod_statuses)

    non_ok_db = False
    for k, v in prod_statuses.items():
        db_status, _, _, _ = v
        if db_status != DBStatus.OK:
            non_ok_db = True
            break

    if non_ok_db:
        msg = "There are some database issues. " \
              "Do you want to start the " \
              "server? Y(es)/n(o) "
        if not util.get_user_input(msg):
            sys.exit(1)

    # Start database viewer.
    checker_md_docs = os.path.join(context.doc_root, 'checker_md_docs')
    checker_md_docs_map = os.path.join(checker_md_docs,
                                       'checker_doc_map.json')
    checker_md_docs_map = util.load_json_or_empty(checker_md_docs_map, {})

    package_data = {'www_root': context.www_root,
                    'doc_root': context.doc_root,
                    'checker_md_docs': checker_md_docs,
                    'checker_md_docs_map': checker_md_docs_map,
                    'version': context.package_git_tag}

    suppr_handler = suppress_handler. \
        GenericSuppressHandler(None, False)

    try:
        server.start_server(args.config_directory,
                            package_data,
                            args.view_port,
                            cfg_sql_server,
                            suppr_handler,
                            args.listen_address,
                            'force_auth' in args,
                            args.skip_db_cleanup,
                            context,
                            check_env)
    except socket.error as err:
        if err.errno == errno.EADDRINUSE:
            LOG.error("Server can't be started, maybe the given port number "
                      "({}) is already used. Check the connection "
                      "parameters.".format(args.view_port))
            sys.exit(1)
        else:
            raise
Code example #24
File: parse.py Project: kirr/codechecker
def main(args):
    """
    Entry point for parsing some analysis results and printing them to the
    stdout in a human-readable format.
    """

    logger.setup_logger(args.verbose if 'verbose' in args else None)

    context = package_context.get_context()

    # To ensure the help message prints the default folder properly,
    # the 'default' for 'args.input' is a string, not a list.
    # But a list is needed for the iteration below to work.
    if isinstance(args.input, str):
        args.input = [args.input]

    original_cwd = os.getcwd()

    suppr_handler = None
    if 'suppress' in args:
        __make_handler = False
        if not os.path.isfile(args.suppress):
            if 'create_suppress' in args:
                with open(args.suppress, 'w') as _:
                    # Just create the file.
                    __make_handler = True
                    LOG.info("Will write source-code suppressions to "
                             "suppress file.")
            else:
                LOG.warning("Suppress file '" + args.suppress + "' given, but "
                            "it does not exist -- will not suppress anything.")
        else:
            __make_handler = True

        if __make_handler:
            suppr_handler = suppress_handler.\
                GenericSuppressHandler(args.suppress,
                                       'create_suppress' in args)
    elif 'create_suppress' in args:
        LOG.error("Can't use '--export-source-suppress' unless '--suppress "
                  "SUPPRESS_FILE' is also given.")
        sys.exit(2)

    processed_path_hashes = set()

    def skip_html_report_data_handler(report_hash, source_file, report_line,
                                      checker_name, diag, files):
        """
        Report handler which skips bugs which were suppressed by source code
        comments.
        """
        report = Report(None, diag['path'], files)
        path_hash = get_report_path_hash(report, files)
        if path_hash in processed_path_hashes:
            LOG.debug("Skip report because it is a deduplication of an "
                      "already processed report!")
            LOG.debug("Path hash: %s", path_hash)
            LOG.debug(diag)
            return True

        skip = plist_parser.skip_report(report_hash, source_file, report_line,
                                        checker_name, suppr_handler)
        if not skip:
            processed_path_hashes.add(path_hash)

        return skip

    skip_handler = None
    if 'skipfile' in args:
        with open(args.skipfile, 'r') as skip_file:
            skip_handler = SkipListHandler(skip_file.read())

    html_builder = None

    for input_path in args.input:

        input_path = os.path.abspath(input_path)
        os.chdir(original_cwd)
        LOG.debug("Parsing input argument: '" + input_path + "'")

        export = args.export if 'export' in args else None
        if export == 'html':
            output_path = os.path.abspath(args.output_path)

            if not html_builder:
                html_builder = \
                    PlistToHtml.HtmlBuilder(context.path_plist_to_html_dist,
                                            context.severity_map)

            LOG.info("Generating html output files:")
            PlistToHtml.parse(input_path, output_path,
                              context.path_plist_to_html_dist,
                              skip_html_report_data_handler, html_builder)
            continue

        files = []
        metadata_dict = {}
        if os.path.isfile(input_path):
            files.append(input_path)

        elif os.path.isdir(input_path):
            metadata_file = os.path.join(input_path, "metadata.json")
            if os.path.exists(metadata_file):
                metadata_dict = util.load_json_or_empty(metadata_file)
                LOG.debug(metadata_dict)

                if 'working_directory' in metadata_dict:
                    working_dir = metadata_dict['working_directory']
                    try:
                        os.chdir(working_dir)
                    except OSError as oerr:
                        LOG.debug(oerr)
                        LOG.error(
                            "Working directory %s is missing.\n"
                            "Can not parse reports safely.", working_dir)
                        sys.exit(1)

            _, _, file_names = next(os.walk(input_path), ([], [], []))
            files = [
                os.path.join(input_path, file_name) for file_name in file_names
            ]

        file_change = set()
        file_report_map = defaultdict(list)

        rh = plist_parser.PlistToPlaintextFormatter(suppr_handler,
                                                    skip_handler,
                                                    context.severity_map,
                                                    processed_path_hashes)
        rh.print_steps = 'print_steps' in args

        for file_path in files:
            f_change = parse(file_path, metadata_dict, rh, file_report_map)
            file_change = file_change.union(f_change)

        report_stats = rh.write(file_report_map)
        severity_stats = report_stats.get('severity')
        file_stats = report_stats.get('files')
        reports_stats = report_stats.get('reports')

        print("\n----==== Summary ====----")
        if file_stats:
            vals = [[os.path.basename(k), v]
                    for k, v in dict(file_stats).items()]
            keys = ['Filename', 'Report count']
            table = twodim_to_str('table', keys, vals, 1, True)
            print(table)

        if severity_stats:
            vals = [[k, v] for k, v in dict(severity_stats).items()]
            keys = ['Severity', 'Report count']
            table = twodim_to_str('table', keys, vals, 1, True)
            print(table)

        report_count = reports_stats.get("report_count", 0)
        print("----=================----")
        print("Total number of reports: {}".format(report_count))
        print("----=================----")

        if file_change:
            changed_files = '\n'.join([' - ' + f for f in file_change])
            LOG.warning("The following source file contents changed since the "
                        "latest analysis:\n{0}\nMultiple reports were not "
                        "shown and skipped from the statistics. Please "
                        "analyze your project again to update the "
                        "reports!".format(changed_files))

    os.chdir(original_cwd)

    # Create index.html for the generated html files.
    if html_builder:
        html_builder.create_index_html(args.output_path)
Code example #25
    def __init__(self,
                 configuration_file,
                 session_salt,
                 root_sha,
                 force_auth=False):
        """
        Initialise a new Session Manager on the server.

        :param configuration_file: The configuration file to read
            authentication backends from.
        :param session_salt: An initial salt that will be used in hashing
            the session to the database.
        :param root_sha: The SHA-256 hash of the root user's authentication.
        :param force_auth: If True, the manager will be enabled even if the
            configuration file disables authentication.
        """
        self.__database_connection = None
        self.__logins_since_prune = 0
        self.__sessions = []
        self.__session_salt = hashlib.sha1(session_salt).hexdigest()

        LOG.debug(configuration_file)
        scfg_dict = load_json_or_empty(configuration_file, {},
                                       'server configuration')
        if scfg_dict != {}:
            check_file_owner_rw(configuration_file)
        else:
            # An empty configuration dict means no JSON could be parsed
            # from the file.
            raise ValueError("Server configuration file was invalid, or "
                             "empty.")

        # FIXME: Refactor this. This is irrelevant to authentication config,
        # so it should NOT be handled by session_manager. A separate config
        # handler for the server's stuff should be created, that can properly
        # instantiate SessionManager with the found configuration.
        self.__max_run_count = scfg_dict.get('max_run_count')

        self.__auth_config = scfg_dict['authentication']

        if force_auth:
            LOG.debug("Authentication was force-enabled.")
            self.__auth_config['enabled'] = True

        if 'soft_expire' in self.__auth_config:
            LOG.debug("Found deprecated argument 'soft_expire' in "
                      "server_config.authentication.")

        self.__refresh_time = self.__auth_config.get('refresh_time')

        # Save the root SHA into the configuration (but only in memory!)
        self.__auth_config['method_root'] = root_sha

        # If no methods are configured as enabled, disable authentication.
        if scfg_dict['authentication'].get('enabled'):
            found_auth_method = False

            if 'method_dictionary' in self.__auth_config and \
                    self.__auth_config['method_dictionary'].get('enabled'):
                found_auth_method = True

            if 'method_ldap' in self.__auth_config and \
                    self.__auth_config['method_ldap'].get('enabled'):
                if 'ldap' not in UNSUPPORTED_METHODS:
                    found_auth_method = True
                else:
                    LOG.warning("LDAP authentication was enabled but "
                                "prerequisites are NOT installed on the system"
                                "... Disabling LDAP authentication.")
                    self.__auth_config['method_ldap']['enabled'] = False

            if 'method_pam' in self.__auth_config and \
                    self.__auth_config['method_pam'].get('enabled'):
                if 'pam' not in UNSUPPORTED_METHODS:
                    found_auth_method = True
                else:
                    LOG.warning("PAM authentication was enabled but "
                                "prerequisites are NOT installed on the system"
                                "... Disabling PAM authentication.")
                    self.__auth_config['method_pam']['enabled'] = False

            if not found_auth_method:
                if force_auth:
                    LOG.warning("Authentication was manually enabled, but no "
                                "valid authentication backends are "
                                "configured... The server will only allow "
                                "the master superuser (root) access.")
                else:
                    LOG.warning("Authentication is enabled but no valid "
                                "authentication backends are configured... "
                                "Falling back to no authentication.")
                    self.__auth_config['enabled'] = False
Code example #26
def main(args):
    """
    Perform analysis on the given logfiles and store the results in a machine-
    readable format.
    """
    logger.setup_logger(args.verbose if 'verbose' in args else None)

    if len(args.logfile) != 1:
        LOG.warning("Only one log file can be processed right now!")
        sys.exit(1)

    args.output_path = os.path.abspath(args.output_path)
    if os.path.exists(args.output_path) and \
            not os.path.isdir(args.output_path):
        LOG.error("The given output path is not a directory: " +
                  args.output_path)
        sys.exit(1)

    if 'enable_all' in args:
        LOG.info("'--enable-all' was supplied for this analysis.")

    # We clear the output directory in the following cases.
    ctu_dir = os.path.join(args.output_path, 'ctu-dir')
    if 'ctu_phases' in args and args.ctu_phases[0] and \
            os.path.isdir(ctu_dir):
        # Clear the CTU-dir if the user turned on the collection phase.
        LOG.debug("Previous CTU contents have been deleted.")
        shutil.rmtree(ctu_dir)

    if 'clean' in args and os.path.isdir(args.output_path):
        LOG.info("Previous analysis results in '{0}' have been removed, "
                 "overwriting with current result".format(args.output_path))
        shutil.rmtree(args.output_path)

    if not os.path.exists(args.output_path):
        os.makedirs(args.output_path)

    LOG.debug("args: " + str(args))
    LOG.debug("Output will be stored to: '" + args.output_path + "'")

    # Process the skip list if present.
    skip_handler = __get_skip_handler(args)

    # Parse the JSON CCDBs and retrieve the compile commands.
    actions = []
    for log_file in args.logfile:
        if not os.path.exists(log_file):
            LOG.error("The specified logfile '" + log_file + "' does not "
                      "exist!")
            continue

        parseLogOptions = ParseLogOptions(args)
        actions += log_parser.parse_log(log_file, parseLogOptions,
                                        skip_handler)
    if not actions:
        LOG.info("None of the specified build log files contained "
                 "valid compilation commands. No analysis needed...")
        sys.exit(1)

    context = package_context.get_context()
    metadata = {
        'action_num': len(actions),
        'command': sys.argv,
        'versions': {
            'codechecker':
            "{0} ({1})".format(context.package_git_tag,
                               context.package_git_hash)
        },
        'working_directory': os.getcwd(),
        'output_path': args.output_path,
        'result_source_files': {}
    }

    if 'name' in args:
        metadata['name'] = args.name

    # Update metadata dictionary with old values.
    metadata_file = os.path.join(args.output_path, 'metadata.json')
    if os.path.exists(metadata_file):
        metadata_prev = load_json_or_empty(metadata_file)
        metadata['result_source_files'] = \
            metadata_prev['result_source_files']

    analyzer.perform_analysis(args, skip_handler, context, actions, metadata)

    __update_skip_file(args)

    LOG.debug("Analysis metadata write to '" + metadata_file + "'")
    with open(metadata_file, 'w') as metafile:
        json.dump(metadata, metafile)

    # WARN: store command will search for this file!!!!
    compile_cmd_json = os.path.join(args.output_path, 'compile_cmd.json')
    try:
        source = os.path.abspath(args.logfile[0])
        target = os.path.abspath(compile_cmd_json)

        if source != target:
            shutil.copyfile(source, target)
    except shutil.Error:
        LOG.debug("Compilation database JSON file is the same.")
    except Exception:
        LOG.debug("Copying compilation database JSON file failed.")