Example 1
def __build_clang_tidy_config_handler(args, context):
    """
    Build the config handler for clang tidy analyzer.
    Handle config options from the command line and config files.
    """

    config_handler = config_handler_clang_tidy.ClangTidyConfigHandler()
    config_handler.analyzer_binary = context.analyzer_binaries.get(CLANG_TIDY)

    # FIXME: We cannot get the resource dir from the clang-tidy binary,
    # therefore we currently use a clang binary which is a sibling of
    # clang-tidy.
    # TODO: Support "clang-tidy -print-resource-dir".
    check_env = analyzer_env.get_check_env(context.path_env_extra,
                                           context.ld_lib_path_extra)
    # Overwrite PATH to contain only the parent of the clang binary.
    if os.path.isabs(config_handler.analyzer_binary):
        check_env['PATH'] = os.path.dirname(config_handler.analyzer_binary)
    clang_bin = analyzer_clangsa.ClangSA.resolve_missing_binary(
        'clang', check_env)
    if os.path.isfile(clang_bin):
        config_handler.compiler_resource_dir =\
            __get_compiler_resource_dir(context, clang_bin)
    else:
        config_handler.compiler_resource_dir =\
            __get_compiler_resource_dir(context,
                                        config_handler.analyzer_binary)

    try:
        with open(args.tidy_args_cfg_file, 'rb') as tidy_cfg:
            config_handler.analyzer_extra_arguments = \
                re.sub(r'\$\((.*?)\)', __replace_env_var,
                       tidy_cfg.read().strip())
    except IOError as ioerr:
        LOG.debug_analyzer(ioerr)
    except AttributeError as aerr:
        # No clang tidy arguments file was given in the command line.
        LOG.debug_analyzer(aerr)

    analyzer = construct_analyzer_type(CLANG_TIDY, config_handler, None)
    check_env = analyzer_env.get_check_env(context.path_env_extra,
                                           context.ld_lib_path_extra)

    checkers = analyzer.get_analyzer_checkers(config_handler, check_env)

    # Read clang-tidy checkers from the config file.
    clang_tidy_checkers = context.checker_config.get(CLANG_TIDY + '_checkers')

    try:
        cmdline_checkers = args.ordered_checkers
    except AttributeError:
        LOG.debug_analyzer('No checkers were defined in '
                           'the command line for ' + CLANG_TIDY)
        cmdline_checkers = None

    initialize_checkers(config_handler, context.available_profiles,
                        context.package_root, checkers, clang_tidy_checkers,
                        cmdline_checkers, 'enable_all' in args
                        and args.enable_all)

    return config_handler
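
The __replace_env_var helper used above as the re.sub replacement is not shown in this example. A minimal sketch of such a callback, assuming "$(VAR)" placeholders are meant to expand to environment variables (the project's actual implementation may differ):

import os
import re

def __replace_env_var(match):
    # Expand a "$(VAR)" placeholder to the value of the VAR environment
    # variable; leave the placeholder untouched if VAR is not set.
    var_name = match.group(1)
    return os.environ.get(var_name, match.group(0))

print(re.sub(r'\$\((.*?)\)', __replace_env_var, '--sysroot=$(SYSROOT)'))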
Example 2
def collect_build_action(params):
    """ Preprocess sources by generating all data needed by CTU analysis. """

    action, context, analyzer_config_map, skip_handler, \
        ctu_temp_fnmap_folder = params

    try:
        for source in action.sources:
            if skip_handler and skip_handler.should_skip(source):
                continue
            if action.analyzer_type != analyzer_types.CLANG_SA:
                continue
            config = analyzer_config_map.get(analyzer_types.CLANG_SA)
            analyzer_environment = analyzer_env.get_check_env(
                context.path_env_extra, context.ld_lib_path_extra)
            triple_arch = ctu_triple_arch.get_triple_arch(
                action, source, config, analyzer_environment)
            if not config.ctu_in_memory:
                generate_ast(triple_arch, action, source, config,
                             analyzer_environment)
            map_functions(triple_arch, action, source, config,
                          analyzer_environment, context.ctu_func_map_cmd,
                          ctu_temp_fnmap_folder)
    except Exception as ex:
        LOG.debug_analyzer(str(ex))
        traceback.print_exc(file=sys.stdout)
        raise
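
A sketch of how collect_build_action might be driven from a worker pool; the tuple layout must match the unpacking at the top of the function (the driver name and job count here are illustrative, not the project's code):

import multiprocessing

def run_ctu_pre_analysis(actions, context, analyzer_config_map,
                         skip_handler, ctu_temp_fnmap_folder, jobs=4):
    # One parameter tuple per build action, in exactly the order
    # collect_build_action unpacks them.
    params = [(action, context, analyzer_config_map,
               skip_handler, ctu_temp_fnmap_folder)
              for action in actions]
    pool = multiprocessing.Pool(jobs)
    try:
        pool.map(collect_build_action, params)
    finally:
        pool.close()
        pool.join()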
Example 3
def check_supported_analyzers(analyzers, context):
    """
    Check if the selected analyzers are supported.
    """

    check_env = analyzer_env.get_check_env(context.path_env_extra,
                                           context.ld_lib_path_extra)

    analyzer_binaries = context.analyzer_binaries

    enabled_analyzers = set()

    for analyzer_name in analyzers:
        if analyzer_name not in supported_analyzers:
            LOG.error('Unsupported analyzer ' + analyzer_name + '!')
            sys.exit(1)

        # Get the analyzer binary to check if it can run.
        available_analyzer = True
        analyzer_bin = analyzer_binaries.get(analyzer_name)
        if not analyzer_bin:
            LOG.warning('Failed to detect analyzer binary ' + analyzer_name)
            available_analyzer = False
        elif not host_check.check_clang(analyzer_bin, check_env):
            LOG.warning('Failed to run analyzer ' + analyzer_name + '!')
            available_analyzer = False
        if available_analyzer:
            enabled_analyzers.add(analyzer_name)

    return enabled_analyzers
Example 4
def handle_check(args):
    """
    Runs the original build and logs the buildactions.
    Based on the log runs the analysis.
    """
    log_file = None
    set_in_cmdline = False
    try:
        if not host_check.check_zlib():
            sys.exit(1)

        args.workspace = os.path.abspath(args.workspace)
        if not os.path.isdir(args.workspace):
            os.mkdir(args.workspace)

        context = generic_package_context.get_context()
        context.codechecker_workspace = args.workspace
        context.db_username = args.dbusername

        log_file, set_in_cmdline = build_manager.check_log_file(args, context)

        if not log_file:
            LOG.error("Failed to generate compilation command file.")
            sys.exit(1)

        actions = log_parser.parse_log(log_file, args.add_compiler_defaults)

        check_env = analyzer_env.get_check_env(context.path_env_extra,
                                               context.ld_lib_path_extra)

        sql_server = SQLServer.from_cmdline_args(args,
                                                 context.codechecker_workspace,
                                                 context.migration_root,
                                                 check_env)

        conn_mgr = client.ConnectionManager(sql_server, 'localhost',
                                            util.get_free_port())

        sql_server.start(context.db_version_info,
                         wait_for_start=True,
                         init=True)

        conn_mgr.start_report_server()

        LOG.debug("Checker server started.")

        analyzer.run_check(args, actions, context)

        LOG.info("Analysis has finished.")

        log_startserver_hint(args)

    except Exception as ex:
        LOG.error(ex)
        import traceback
        print(traceback.format_exc())
    finally:
        if not args.keep_tmp:
            if log_file and not set_in_cmdline:
                LOG.debug('Removing temporary log file: ' + log_file)
                os.remove(log_file)
Example 5
def is_statistics_capable():
    """ Detects if the current clang is Statistics compatible. """
    context = generic_package_context.get_context()

    analyzer = "clangsa"
    enabled_analyzers = [analyzer]
    cfg_handlers = analyzer_types.build_config_handlers({}, context,
                                                        enabled_analyzers)

    clangsa_cfg = cfg_handlers[analyzer]
    analyzer = analyzer_types.construct_analyzer_type(analyzer, clangsa_cfg,
                                                      None)

    check_env = analyzer_env.get_check_env(context.path_env_extra,
                                           context.ld_lib_path_extra)

    checkers = analyzer.get_analyzer_checkers(clangsa_cfg, check_env)

    stat_checkers_pattern = re.compile(r'.+statisticscollector.+')

    for checker_name, _ in checkers:
        if stat_checkers_pattern.match(checker_name):
            return True

    return False
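
For reference, the pattern only accepts names with at least one character on each side of 'statisticscollector'. A quick check against hypothetical checker names (the real list comes from get_analyzer_checkers):

import re

stat_checkers_pattern = re.compile(r'.+statisticscollector.+')

checkers = [('core.DivideZero', 'Division by zero checker'),
            ('alpha.statisticscollector.ReturnValueCheck', '...')]

print(any(stat_checkers_pattern.match(name) for name, _ in checkers))
# Prints True: the second name matches, the first does not.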
Example 6
def __build_clangsa_config_handler(args, context):
    """
    Build the config handler for clang static analyzer.
    Handle config options from the command line and config files.
    """

    config_handler = config_handler_clangsa.ClangSAConfigHandler()
    config_handler.analyzer_plugins_dir = context.checker_plugin
    config_handler.analyzer_binary = context.analyzer_binaries.get(CLANG_SA)
    config_handler.compiler_resource_dir =\
        __get_compiler_resource_dir(context, config_handler.analyzer_binary)

    check_env = analyzer_env.get_check_env(context.path_env_extra,
                                           context.ld_lib_path_extra)

    if 'ctu_phases' in args:
        config_handler.ctu_dir = os.path.join(args.output_path, args.ctu_dir)

        config_handler.ctu_has_analyzer_display_ctu_progress = \
            host_check.has_analyzer_feature(
                context.analyzer_binaries.get(CLANG_SA),
                '-analyzer-display-ctu-progress',
                check_env)
        config_handler.log_file = args.logfile
        config_handler.path_env_extra = context.path_env_extra
        config_handler.ld_lib_path_extra = context.ld_lib_path_extra

    try:
        with open(args.clangsa_args_cfg_file, 'rb') as sa_cfg:
            config_handler.analyzer_extra_arguments = \
                re.sub(r'\$\((.*?)\)',
                       __replace_env_var(args.clangsa_args_cfg_file),
                       sa_cfg.read().strip())
    except IOError as ioerr:
        LOG.debug_analyzer(ioerr)
    except AttributeError as aerr:
        # No clangsa arguments file was given in the command line.
        LOG.debug_analyzer(aerr)

    analyzer = construct_analyzer_type(CLANG_SA, config_handler, None)

    checkers = analyzer.get_analyzer_checkers(config_handler, check_env)

    # Read clang-sa checkers from the config file.
    clang_sa_checkers = context.checker_config.get(CLANG_SA + '_checkers')

    try:
        cmdline_checkers = args.ordered_checkers
    except AttributeError:
        LOG.debug_analyzer('No checkers were defined in '
                           'the command line for ' + CLANG_SA)
        cmdline_checkers = None

    initialize_checkers(config_handler, context.available_profiles,
                        context.package_root, checkers, clang_sa_checkers,
                        cmdline_checkers, 'enable_all' in args
                        and args.enable_all)

    return config_handler
Example 7
    def construct_config_handler(cls, args, context):
        handler = config_handler_clangsa.ClangSAConfigHandler()
        handler.analyzer_plugins_dir = context.checker_plugin
        handler.analyzer_binary = context.analyzer_binaries.get(
            cls.ANALYZER_NAME)
        handler.compiler_resource_dir = \
            host_check.get_resource_dir(handler.analyzer_binary, context)

        handler.report_hash = args.report_hash \
            if 'report_hash' in args else None

        check_env = analyzer_env.get_check_env(context.path_env_extra,
                                               context.ld_lib_path_extra)

        if 'ctu_phases' in args:
            handler.ctu_dir = os.path.join(args.output_path, args.ctu_dir)

            handler.ctu_has_analyzer_display_ctu_progress = \
                host_check.has_analyzer_feature(
                    context.analyzer_binaries.get(cls.ANALYZER_NAME),
                    '-analyzer-display-ctu-progress',
                    check_env)
            handler.log_file = args.logfile
            handler.path_env_extra = context.path_env_extra
            handler.ld_lib_path_extra = context.ld_lib_path_extra

        try:
            with open(args.clangsa_args_cfg_file, 'rb') as sa_cfg:
                handler.analyzer_extra_arguments = \
                    re.sub(r'\$\((.*?)\)',
                           replace_env_var(args.clangsa_args_cfg_file),
                           sa_cfg.read().strip())
        except IOError as ioerr:
            LOG.debug_analyzer(ioerr)
        except AttributeError as aerr:
            # No clangsa arguments file was given in the command line.
            LOG.debug_analyzer(aerr)

        checkers = ClangSA.get_analyzer_checkers(handler, check_env)

        # Read clang-sa checkers from the config file.
        clang_sa_checkers = context.checker_config.get(cls.ANALYZER_NAME +
                                                       '_checkers')

        try:
            cmdline_checkers = args.ordered_checkers
        except AttributeError:
            LOG.debug_analyzer('No checkers were defined in '
                               'the command line for ' + cls.ANALYZER_NAME)
            cmdline_checkers = None

        handler.initialize_checkers(context.available_profiles,
                                    context.package_root, checkers,
                                    clang_sa_checkers, cmdline_checkers,
                                    'enable_all' in args and args.enable_all)

        return handler
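
Unlike Example 1, where __replace_env_var is passed directly as the re.sub callback, here replace_env_var(args.clangsa_args_cfg_file) is called first and its return value is used as the replacement, which suggests a closure factory. A minimal sketch under that assumption:

import os

def replace_env_var(cfg_file):
    # Bind the config file name so a failed expansion can be
    # reported against the file it came from.
    def replacer(matchobj):
        env_var = matchobj.group(1)
        if env_var not in os.environ:
            raise ValueError('{0} environment variable is not set, '
                             'required by {1}'.format(env_var, cfg_file))
        return os.environ[env_var]
    return replacer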
Example 8
def pre_analyze(params):
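    """
    Run the pre-analysis steps (CTU data generation and statistics
    collection) for every source file of a single build action.
    """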

    action, context, analyzer_config_map, skip_handler, \
        ctu_data, statistics_data = params

    analyzer_environment = analyzer_env.get_check_env(
        context.path_env_extra, context.ld_lib_path_extra)

    progress_checked_num.value += 1

    for source in action.sources:
        if skip_handler and skip_handler.should_skip(source):
            continue
        if action.analyzer_type != analyzer_types.CLANG_SA:
            continue

        _, source_filename = os.path.split(source)

        source = util.escape_source_path(source)

        LOG.info("[%d/%d] %s" % (progress_checked_num.value,
                                 progress_actions.value, source_filename))

        config = analyzer_config_map.get(analyzer_types.CLANG_SA)

        try:
            if ctu_data:
                LOG.debug("running CTU pre analysis")
                ctu_temp_fnmap_folder = ctu_data.get('ctu_temp_fnmap_folder')

                triple_arch = \
                    ctu_triple_arch.get_triple_arch(action, source,
                                                    config,
                                                    analyzer_environment)
                if not config.ctu_in_memory:
                    ctu_manager.generate_ast(triple_arch, action, source,
                                             config, analyzer_environment)
                ctu_manager.map_functions(triple_arch, action, source, config,
                                          analyzer_environment,
                                          context.ctu_func_map_cmd,
                                          ctu_temp_fnmap_folder)

        except Exception as ex:
            LOG.debug_analyzer(str(ex))
            traceback.print_exc(file=sys.stdout)
            raise

        try:
            if statistics_data:
                LOG.debug("running statistics pre analysis")
                collect_statistics(action, source, config,
                                   analyzer_environment, statistics_data)

        except Exception as ex:
            LOG.debug_analyzer(str(ex))
            traceback.print_exc(file=sys.stdout)
            raise
Example 9
    def get_ctu_dir(self):
        """
        Returns the path of the ctu directory (containing the triple).
        """
        config = self.config_handler
        env = analyzer_env.get_check_env(config.path_env_extra,
                                         config.ld_lib_path_extra)
        triple_arch = ctu_triple_arch.get_triple_arch(self.buildaction,
                                                      self.source_file,
                                                      config, env)
        ctu_dir = os.path.join(config.ctu_dir, triple_arch)
        return ctu_dir
Example 10
def check_product_db_status(cfg_sql_server, context):
    """
    Check the products for database statuses.

    :returns: dictionary of product endpoints with database statuses
    """

    migration_root = context.run_migration_root

    engine = cfg_sql_server.create_engine()
    config_session = sessionmaker(bind=engine)
    sess = config_session()

    try:
        products = sess.query(ORMProduct).all()
    except Exception as ex:
        LOG.debug(ex)
        LOG.error("Failed to get product configurations from the database.")
        LOG.error("Please check your command arguments.")
        sys.exit(1)

    package_schema = get_schema_version_from_package(migration_root)

    db_errors = [DBStatus.FAILED_TO_CONNECT,
                 DBStatus.MISSING,
                 DBStatus.SCHEMA_INIT_ERROR,
                 DBStatus.SCHEMA_MISSING]

    cc_env = analyzer_env.get_check_env(context.path_env_extra,
                                        context.ld_lib_path_extra)
    prod_status = {}
    for pd in products:
        db = database.SQLServer.from_connection_string(pd.connection,
                                                       RUN_META,
                                                       migration_root,
                                                       interactive=False,
                                                       env=cc_env)
        db_location = db.get_db_location()
        ret = db.connect()
        s_ver = db.get_schema_version()
        if s_ver in db_errors:
            s_ver = None
        prod_status[pd.endpoint] = (ret, s_ver, package_schema, db_location)

    sess.commit()
    sess.close()
    engine.dispose()

    return prod_status
Example 11
def __build_clang_tidy_config_handler(args, context):
    """
    Build the config handler for clang tidy analyzer.
    Handle config options from the command line and config files.
    """

    config_handler = config_handler_clang_tidy.ClangTidyConfigHandler()
    config_handler.analyzer_binary = context.analyzer_binaries.get(CLANG_TIDY)
    config_handler.compiler_resource_dir = context.compiler_resource_dir

    try:
        with open(args.tidy_args_cfg_file, 'rb') as tidy_cfg:
            config_handler.analyzer_extra_arguments = \
                re.sub(r'\$\((.*?)\)', __replace_env_var,
                       tidy_cfg.read().strip())
    except IOError as ioerr:
        LOG.debug_analyzer(ioerr)
    except AttributeError as aerr:
        # No clang tidy arguments file was given in the command line.
        LOG.debug_analyzer(aerr)

    analyzer = construct_analyzer_type(CLANG_TIDY, config_handler, None)
    check_env = analyzer_env.get_check_env(context.path_env_extra,
                                           context.ld_lib_path_extra)

    checkers = analyzer.get_analyzer_checkers(config_handler, check_env)

    # Read clang-tidy checkers from the config file.
    clang_tidy_checkers = context.checker_config.get(CLANG_TIDY + '_checkers')

    try:
        cmdline_checkers = args.ordered_checkers
    except AttributeError:
        LOG.debug_analyzer('No checkers were defined in '
                           'the command line for ' +
                           CLANG_TIDY)
        cmdline_checkers = None

    initialize_checkers(config_handler,
                        context.available_profiles,
                        context.package_root,
                        checkers,
                        clang_tidy_checkers,
                        cmdline_checkers,
                        'enable_all' in args and args.enable_all)

    return config_handler
Example 12
def handle_list_checkers(args):
    """
    List the supported checkers by the analyzers.
    List the default enabled and disabled checkers in the config.
    """
    context = generic_package_context.get_context()
    # If nothing is set, list checkers for all supported analyzers.
    analyzers = args.analyzers or analyzer_types.supported_analyzers
    enabled_analyzers, _ = analyzer_types\
        .check_supported_analyzers(analyzers, context)
    analyzer_environment = analyzer_env.get_check_env(
        context.path_env_extra,
        context.ld_lib_path_extra)

    for ea in enabled_analyzers:
        if ea not in analyzer_types.supported_analyzers:
            LOG.error('Unsupported analyzer ' + str(ea))
            sys.exit(1)

    analyzer_config_map = \
        analyzer_types.build_config_handlers(args,
                                             context,
                                             enabled_analyzers)

    for ea in enabled_analyzers:
        # Get the config.
        config_handler = analyzer_config_map.get(ea)
        source_analyzer = \
            analyzer_types.construct_analyzer_type(ea,
                                                   config_handler,
                                                   None)

        checkers = source_analyzer.get_analyzer_checkers(config_handler,
                                                         analyzer_environment)

        default_checker_cfg = context.default_checkers_config.get(
            ea + '_checkers')

        analyzer_types.initialize_checkers(config_handler,
                                           checkers,
                                           default_checker_cfg)
        for checker_name, value in config_handler.checks().items():
            enabled, description = value
            if enabled:
                print(' + {0:50} {1}'.format(checker_name, description))
            else:
                print(' - {0:50} {1}'.format(checker_name, description))
Example 13
def __build_clangsa_config_handler(args, context):
    """
    Build the config handler for clang static analyzer.
    Handle config options from the command line and config files.
    """

    config_handler = config_handler_clangsa.ClangSAConfigHandler()
    config_handler.analyzer_plugins_dir = context.checker_plugin
    config_handler.analyzer_binary = context.analyzer_binaries.get(CLANG_SA)
    config_handler.compiler_resource_dir = context.compiler_resource_dir
    config_handler.compiler_sysroot = context.compiler_sysroot
    config_handler.system_includes = context.extra_system_includes
    config_handler.includes = context.extra_includes
    try:
        with open(args.clangsa_args_cfg_file, 'rb') as sa_cfg:
            config_handler.analyzer_extra_arguments = \
                re.sub(r'\$\((.*?)\)',
                       __replace_env_var(args.clangsa_args_cfg_file),
                       sa_cfg.read().strip())
    except IOError as ioerr:
        LOG.debug_analyzer(ioerr)
    except AttributeError as aerr:
        # No clangsa arguments file was given in the command line.
        LOG.debug_analyzer(aerr)

    analyzer = construct_analyzer_type(CLANG_SA, config_handler, None)

    check_env = analyzer_env.get_check_env(context.path_env_extra,
                                           context.ld_lib_path_extra)

    checkers = analyzer.get_analyzer_checkers(config_handler, check_env)

    # Read clang-sa checkers from the config file.
    clang_sa_checkers = context.default_checkers_config.get(CLANG_SA +
                                                            '_checkers')
    try:
        cmdline_checkers = args.ordered_checkers
    except AttributeError:
        LOG.debug_analyzer('No checkers were defined in '
                           'the command line for ' + CLANG_SA)
        cmdline_checkers = None

    initialize_checkers(config_handler, checkers, clang_sa_checkers,
                        cmdline_checkers, 'enable_all' in args
                        and args.enable_all)

    return config_handler
Example 14
def handle_plist(args):
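    """
    Process the plist report files of the given directory and store
    the results, unless printing to stdout was requested.
    """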
    context = generic_package_context.get_context()
    context.codechecker_workspace = args.workspace
    context.db_username = args.dbusername

    if not args.stdout:
        args.workspace = os.path.realpath(args.workspace)
        if not os.path.isdir(args.workspace):
            os.mkdir(args.workspace)

        check_env = analyzer_env.get_check_env(context.path_env_extra,
                                               context.ld_lib_path_extra)

        sql_server = SQLServer.from_cmdline_args(args,
                                                 context.codechecker_workspace,
                                                 context.migration_root,
                                                 check_env)

        conn_mgr = client.ConnectionManager(sql_server, 'localhost',
                                            util.get_free_port())

        sql_server.start(context.db_version_info,
                         wait_for_start=True,
                         init=True)

        conn_mgr.start_report_server()

        with client.get_connection() as connection:
            context.run_id = connection.add_checker_run(
                ' '.join(sys.argv), args.name, context.version, args.force)

    pool = multiprocessing.Pool(args.jobs)

    try:
        items = [(plist, args, context)
                 for plist in os.listdir(args.directory)]
        pool.map_async(consume_plist, items, 1).get(float('inf'))
        pool.close()
    except Exception:
        pool.terminate()
        raise
    finally:
        pool.join()

    if not args.stdout:
        log_startserver_hint(args)
Example 15
def is_statistics_capable(context):
    """ Detects if the current clang is Statistics compatible. """
    # Resolve potentially missing binaries.
    check_supported_analyzers([ClangSA.ANALYZER_NAME], context)
    clangsa_cfg = ClangSA.construct_config_handler([], context)

    check_env = analyzer_env.get_check_env(context.path_env_extra,
                                           context.ld_lib_path_extra)

    checkers = ClangSA.get_analyzer_checkers(clangsa_cfg, check_env)

    stat_checkers_pattern = re.compile(r'.+statisticscollector.+')

    for checker_name, _ in checkers:
        if stat_checkers_pattern.match(checker_name):
            return True

    return False
Example 16
def __print_analyzer_version(context, analyzer_config_map):
    """
    Print the path and the version of the analyzer binary.
    """
    check_env = analyzer_env.get_check_env(context.path_env_extra,
                                           context.ld_lib_path_extra)

    # Get the analyzer binaries from the config_map which
    # contains only the checked and available analyzers.
    for analyzer_name, analyzer_cfg in analyzer_config_map.items():
        LOG.info("Using analyzer:")
        analyzer_bin = analyzer_cfg.analyzer_binary
        print(analyzer_bin)
        version = [analyzer_bin, u' --version']
        try:
            subprocess.call(shlex.split(' '.join(version)), env=check_env)
        except OSError as oerr:
            LOG.warning("Failed to get analyzer version: " + ' '.join(version))
            LOG.warning(oerr.strerror)
Example 17
def handle_debug(args):
    """
    Runs a debug command on the buildactions where the analysis
    failed for some reason.
    """
    context = generic_package_context.get_context()

    context.codechecker_workspace = args.workspace
    context.db_username = args.dbusername

    check_env = analyzer_env.get_check_env(context.path_env_extra,
                                           context.ld_lib_path_extra)

    sql_server = SQLServer.from_cmdline_args(args,
                                             context.migration_root,
                                             check_env)
    sql_server.start(context.db_version_info, wait_for_start=True, init=False)

    debug_reporter.debug(context, sql_server.get_connection_string(),
                         args.force)
Example 18
def __get_analyzer_version(context, analyzer_config_map):
    """
    Get the path and the version of the analyzer binaries.
    """
    check_env = analyzer_env.get_check_env(context.path_env_extra,
                                           context.ld_lib_path_extra)

    # Get the analyzer binaries from the config_map which
    # contains only the checked and available analyzers.
    versions = {}
    for _, analyzer_cfg in analyzer_config_map.items():
        analyzer_bin = analyzer_cfg.analyzer_binary
        version = [analyzer_bin, u' --version']
        try:
            output = subprocess.check_output(shlex.split(' '.join(version)),
                                             env=check_env)
            versions[analyzer_bin] = output
        except (subprocess.CalledProcessError, OSError) as oerr:
            LOG.warning("Failed to get analyzer version: " + ' '.join(version))
            LOG.warning(str(oerr))

    return versions
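
Joining the binary path and the flag into one string and re-splitting it with shlex breaks when the binary path contains spaces. A more direct variant (a sketch, not the project's code) passes the argument list unmodified:

import subprocess

def get_analyzer_version(analyzer_bin, check_env):
    # Invoke "<analyzer> --version" without the join/split round trip,
    # so binary paths containing spaces survive intact.
    try:
        return subprocess.check_output([analyzer_bin, '--version'],
                                       env=check_env)
    except (subprocess.CalledProcessError, OSError):
        return None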
Example 19
def start_workers(actions_map, actions, context, analyzer_config_map,
                  jobs, output_path, skip_handler, metadata,
                  quiet_analyze, capture_analysis_output, timeout,
                  ctu_reanalyze_on_failure, statistics_data, manager):
    """
    Start the workers in the process pool.
    For every build action there is worker which makes the analysis.
    """

    # Handle SIGINT to stop this script running.
    def signal_handler(*arg, **kwarg):
        try:
            pool.terminate()
            manager.shutdown()
        finally:
            sys.exit(1)

    signal.signal(signal.SIGINT, signal_handler)

    # Start checking in parallel.
    checked_var = multiprocessing.Value('i', 1)
    actions_num = multiprocessing.Value('i', len(actions))
    pool = multiprocessing.Pool(jobs,
                                initializer=init_worker,
                                initargs=(checked_var,
                                          actions_num))

    failed_dir = os.path.join(output_path, "failed")
    # Results of failed analysis runs are stored here to help debugging.
    if not os.path.exists(failed_dir):
        os.makedirs(failed_dir)

    success_dir = os.path.join(output_path, "success")

    # Results of successful analysis runs are stored here.
    if not os.path.exists(success_dir):
        os.makedirs(success_dir)

    output_dirs = {'success': success_dir,
                   'failed': failed_dir}

    # Construct analyzer env.
    analyzer_environment = analyzer_env.get_check_env(
        context.path_env_extra,
        context.ld_lib_path_extra)

    try:
        # Workaround, equivalent of map.
        # The main script does not receive the signal
        # while the map or map_async function is running.
        # It is a Python bug; this does not happen if a timeout is
        # specified, in which case the interrupt is received immediately.

        analyzed_actions = [(actions_map,
                             build_action,
                             context,
                             analyzer_config_map,
                             output_path,
                             skip_handler,
                             quiet_analyze,
                             capture_analysis_output,
                             timeout,
                             analyzer_environment,
                             ctu_reanalyze_on_failure,
                             output_dirs,
                             statistics_data)
                            for build_action in actions]

        pool.map_async(check,
                       analyzed_actions,
                       1,
                       callback=lambda results: worker_result_handler(
                           results, metadata, output_path,
                           context.analyzer_binaries)
                       ).get(float('inf'))

        pool.close()
    except Exception:
        pool.terminate()
        raise
    finally:
        pool.join()

    if not os.listdir(success_dir):
        shutil.rmtree(success_dir)

    if not os.listdir(failed_dir):
        shutil.rmtree(failed_dir)
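
The map_async(...).get(float('inf')) idiom above exists because a plain Pool.map call can block SIGINT from reaching the main process in older CPython versions; any timeout passed to get() keeps the main thread interruptible. A stripped-down sketch of the same pattern:

import multiprocessing

def work(item):
    return item * item

if __name__ == '__main__':
    pool = multiprocessing.Pool(4)
    try:
        # get() with a timeout (instead of a bare map()) lets a
        # KeyboardInterrupt be delivered to the main process promptly.
        results = pool.map_async(work, range(10), 1).get(float('inf'))
        pool.close()
    except KeyboardInterrupt:
        pool.terminate()
        raise
    finally:
        pool.join()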
Example 20
def check(check_data):
    """
    Invoke clang with an action which called by processes.
    Different analyzer object belongs to for each build action.

    skiplist handler is None if no skip file was configured.
    """
    args, action, context, analyzer_config_map, skp_handler, \
        report_output_dir, use_db = check_data

    skipped = False
    try:
        # If one analysis fails the check fails.
        return_codes = 0
        skipped = False
        for source in action.sources:

            # If there is no skip list handler, no skip list file was
            # given on the command line.
            # C++ file skipping is handled here.
            _, source_file_name = ntpath.split(source)

            if skp_handler and skp_handler.should_skip(source):
                LOG.debug_analyzer(source_file_name + ' is skipped')
                skipped = True
                continue

            # Construct analyzer env.
            analyzer_environment = analyzer_env.get_check_env(
                context.path_env_extra, context.ld_lib_path_extra)
            run_id = context.run_id

            rh = analyzer_types.construct_result_handler(
                args, action, run_id, report_output_dir, context.severity_map,
                skp_handler, progress_lock, use_db)

            # Create a source analyzer.
            source_analyzer = \
                analyzer_types.construct_analyzer(action,
                                                  analyzer_config_map)

            # Source is the currently analyzed source file;
            # there can be more than one in a build action.
            source_analyzer.source_file = source

            # Fills up the result handler with the analyzer information.
            source_analyzer.analyze(rh, analyzer_environment)

            if rh.analyzer_returncode == 0:
                # Analysis was successful, process the results.
                if rh.analyzer_stdout != '':
                    LOG.debug_analyzer('\n' + rh.analyzer_stdout)
                if rh.analyzer_stderr != '':
                    LOG.debug_analyzer('\n' + rh.analyzer_stderr)
                rh.postprocess_result()
                rh.handle_results()

                LOG.info("[%d/%d] %s analyzed %s successfully." %
                         (progress_checked_num.value, progress_actions.value,
                          action.analyzer_type, source_file_name))
            else:
                # Analysis failed.
                LOG.error('Analyzing ' + source_file_name + ' with ' +
                          action.analyzer_type + ' failed.')
                if rh.analyzer_stdout != '':
                    LOG.error(rh.analyzer_stdout)
                if rh.analyzer_stderr != '':
                    LOG.error(rh.analyzer_stderr)
                return_codes = rh.analyzer_returncode

            if not args.keep_tmp:
                rh.clean_results()

        progress_checked_num.value += 1

        return return_codes, skipped, action.analyzer_type

    except Exception as e:
        LOG.debug_analyzer(str(e))
        traceback.print_exc(file=sys.stdout)
        return 1, skipped, action.analyzer_type
Example 21
def main(args):
    """
    List the checkers available in the specified (or all supported) analyzers
    alongside with their description or enabled status in various formats.
    """

    logger.setup_logger(args.verbose if 'verbose' in args else None)

    # If nothing is set, list checkers for all supported analyzers.
    analyzers = args.analyzers \
        if 'analyzers' in args \
        else analyzer_types.supported_analyzers

    context = package_context.get_context()
    working, errored = analyzer_types.check_supported_analyzers(
        analyzers, context)

    analyzer_environment = analyzer_env.get_check_env(
        context.path_env_extra, context.ld_lib_path_extra)

    analyzer_config_map = analyzer_types.build_config_handlers(
        args, context, working)
    # List available checker profiles.
    if 'profile' in args and args.profile == 'list':
        if 'details' not in args:
            if args.output_format not in ['csv', 'json']:
                header = ['Profile name']
            else:
                header = ['profile_name']
        else:
            if args.output_format not in ['csv', 'json']:
                header = ['Profile name', 'Description']
            else:
                header = ['profile_name', 'description']

        rows = []
        for (profile, description) in context.available_profiles.items():
            if 'details' not in args:
                rows.append([profile])
            else:
                rows.append([profile, description])

        print(output_formatters.twodim_to_str(args.output_format, header,
                                              rows))
        return

    # Use different, nicer-looking headers based on the output format.
    if 'details' not in args:
        if args.output_format not in ['csv', 'json']:
            header = ['Name']
        else:
            header = ['name']
    else:
        if args.output_format not in ['csv', 'json']:
            header = ['', 'Name', 'Analyzer', 'Severity', 'Description']
        else:
            header = ['enabled', 'name', 'analyzer', 'severity', 'description']

    rows = []
    for analyzer in working:
        config_handler = analyzer_config_map.get(analyzer)
        analyzer_class = \
            analyzer_types.supported_analyzers[analyzer]

        checkers = analyzer_class.get_analyzer_checkers(
            config_handler, analyzer_environment)
        default_checker_cfg = context.checker_config.get(analyzer +
                                                         '_checkers')

        profile_checkers = None
        if 'profile' in args:
            if args.profile not in context.available_profiles:
                LOG.error("Checker profile '" + args.profile +
                          "' does not exist!")
                LOG.error("To list available profiles, use '--profile list'.")
                return

            profile_checkers = [(args.profile, True)]

        config_handler.initialize_checkers(context.available_profiles,
                                           context.package_root, checkers,
                                           default_checker_cfg,
                                           profile_checkers)

        for checker_name, value in config_handler.checks().items():
            enabled, description = value

            if not enabled and 'profile' in args:
                continue

            if enabled and 'only_disabled' in args:
                continue
            elif not enabled and 'only_enabled' in args:
                continue

            if args.output_format != 'json':
                enabled = '+' if enabled else '-'

            if 'details' not in args:
                rows.append([checker_name])
            else:
                severity = context.severity_map.get(checker_name)
                rows.append(
                    [enabled, checker_name, analyzer, severity, description])

    if len(rows) > 0:
        print(output_formatters.twodim_to_str(args.output_format, header,
                                              rows))

    for analyzer_binary, reason in errored:
        LOG.error("Failed to get checkers for '" + analyzer_binary +
                  "'! The error reason was: '" + reason + "'")
        LOG.error("Please check your installation and the "
                  "'config/package_layout.json' file!")
Example 22
def debug(context, connection_string, force):
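    """
    Generate gdb debug dump files for every failed build action
    of the latest run stored in the database.
    """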
    try:
        engine = database_handler.SQLServer.create_engine(connection_string)
        session = sqlalchemy.orm.scoped_session(
            sqlalchemy.orm.sessionmaker(bind=engine))

        # Get latest run id.
        last_run = session.query(Run).order_by(Run.id.desc()).first()

        # Get all failed actions.
        actions = session.query(BuildAction).filter(
            and_(BuildAction.run_id == last_run.id,
                 sqlalchemy.sql.func.length(BuildAction.failure_txt) != 0))

        debug_env = analyzer_env.get_check_env(context.path_env_extra,
                                               context.ld_lib_path_extra)

        crash_handler = analyzer_crash_handler.AnalyzerCrashHandler(
            context, debug_env)

        dumps_dir = context.dump_output_dir
        if not os.path.exists(dumps_dir):
            os.mkdir(dumps_dir)

        LOG.info('Generating gdb dump files to: ' + dumps_dir)

        for action in actions:
            LOG.info('Processing action ' + str(action.id) + '.')
            debug_log_file = \
                os.path.join(dumps_dir,
                             get_dump_file_name(last_run.id, action.id))

            if not force and os.path.exists(debug_log_file):
                LOG.info('This file already exists.')
                continue

            LOG.info('Generating stacktrace with gdb.')

            gdb_result = \
                crash_handler.get_crash_info(str(action.check_cmd).split())

            LOG.info('Writing debug info to file.')

            with open(debug_log_file, 'w') as log_file:
                log_file.write('========================\n')
                log_file.write('Build command hash: \n')
                log_file.write('========================\n')
                log_file.write(action.build_cmd_hash + '\n')
                log_file.write('===============\n')
                log_file.write('Check command: \n')
                log_file.write('===============\n')
                log_file.write(action.check_cmd + '\n')
                log_file.write('==============\n')
                log_file.write('Failure text: \n')
                log_file.write('==============\n')
                log_file.write(action.failure_txt + '\n')
                log_file.write('==========\n')
                log_file.write('GDB info: \n')
                log_file.write('==========\n')
                log_file.write(gdb_result)

        LOG.info('All new debug files are placed in ' + dumps_dir)

    except KeyboardInterrupt as kb_exc:
        LOG.error(str(kb_exc))
        sys.exit(1)
Example 23
def main(args):
    """
    Start or manage a CodeChecker report server.
    """

    if 'list' in args or 'stop' in args or 'stop_all' in args:
        __instance_management(args)
        sys.exit(0)

    # Actual server starting from this point.
    if not host_check.check_zlib():
        raise Exception("zlib is not available on the system!")

    # WARNING
    # In case of SQLite the default value of args.dbaddress is used,
    # for which is_localhost() should return True.
    if util.is_localhost(args.dbaddress) and \
            not os.path.exists(args.config_directory):
        os.makedirs(args.config_directory)

    # Make sure the SQLite file can be created if it does not exist.
    if 'sqlite' in args and \
            not os.path.isdir(os.path.dirname(args.sqlite)):
        os.makedirs(os.path.dirname(args.sqlite))

    suppress_handler = generic_package_suppress_handler. \
        GenericSuppressHandler(None, False)

    if 'reset_root' in args:
        try:
            os.remove(os.path.join(args.config_directory, 'root.user'))
            LOG.info("Master superuser (root) credentials invalidated and "
                     "deleted. New ones will be generated...")
        except OSError:
            # File doesn't exist.
            pass

    if 'force_auth' in args:
        LOG.info("'--force-authentication' was passed as a command-line "
                 "option. The server will ask for users to authenticate!")

    context = generic_package_context.get_context()
    context.codechecker_workspace = args.config_directory
    session_manager.SessionManager.CodeChecker_Workspace = \
        args.config_directory
    context.db_username = args.dbusername

    check_env = analyzer_env.get_check_env(context.path_env_extra,
                                           context.ld_lib_path_extra)

    # Create the main database link from the arguments passed over the
    # command line.
    default_product_path = os.path.join(args.config_directory,
                                        'Default.sqlite')
    create_default_product = 'sqlite' in args and \
                             not os.path.exists(args.sqlite) and \
                             not os.path.exists(default_product_path)

    sql_server = database.SQLServer.from_cmdline_args(
        vars(args), CONFIG_META, context.config_migration_root,
        interactive=True, env=check_env)

    LOG.debug("Connecting to product configuration database.")
    sql_server.connect(context.product_db_version_info, init=True)

    if create_default_product:
        # Create a default product and add it to the configuration database.

        LOG.debug("Create default product...")
        LOG.debug("Configuring schema and migration...")

        prod_server = database.SQLiteDatabase(
            default_product_path, RUN_META,
            context.run_migration_root, check_env)
        prod_server.connect(context.run_db_version_info, init=True)

        LOG.debug("Connecting database engine for default product")
        product_conn_string = prod_server.get_connection_string()
        LOG.debug("Default database created and connected.")

        server.add_initial_run_database(
            sql_server, product_conn_string)

        LOG.info("Product 'Default' at '{0}' created and set up."
                 .format(default_product_path))

    # Start database viewer.
    checker_md_docs = os.path.join(context.doc_root, 'checker_md_docs')
    checker_md_docs_map = os.path.join(checker_md_docs,
                                       'checker_doc_map.json')
    with open(checker_md_docs_map, 'r') as dFile:
        checker_md_docs_map = json.load(dFile)

    package_data = {'www_root': context.www_root,
                    'doc_root': context.doc_root,
                    'checker_md_docs': checker_md_docs,
                    'checker_md_docs_map': checker_md_docs_map,
                    'version': context.package_git_tag}

    try:
        server.start_server(args.config_directory,
                            package_data,
                            args.view_port,
                            sql_server,
                            suppress_handler,
                            args.listen_address,
                            'force_auth' in args,
                            'skip_db_cleanup' not in args,
                            context,
                            check_env)
    except socket.error as err:
        if err.errno == errno.EADDRINUSE:
            LOG.error("Server can't be started, maybe the given port number "
                      "({}) is already used. Check the connection "
                      "parameters.".format(args.view_port))
            sys.exit(1)
        else:
            raise
Example 24
def handle_server(args):
    """
    Starts the report viewer server.
    """
    if not host_check.check_zlib():
        sys.exit(1)

    workspace = args.workspace

    if (args.list or args.stop or args.stop_all) and \
            not (args.list ^ args.stop ^ args.stop_all):
        print("CodeChecker server: error: argument -l/--list and -s/--stop"
              "and --stop-all are mutually exclusive.")
        sys.exit(2)

    if args.list:
        instances = instance_manager.list()

        instances_on_multiple_hosts = \
            any(inst['hostname'] != socket.gethostname()
                for inst in instances)
        if not instances_on_multiple_hosts:
            rows = [('Workspace', 'View port')]
        else:
            rows = [('Workspace', 'Computer host', 'View port')]

        for instance in instances:
            if not instances_on_multiple_hosts:
                rows.append((instance['workspace'], str(instance['port'])))
            else:
                rows.append((instance['workspace'],
                             instance['hostname']
                             if instance['hostname'] != socket.gethostname()
                             else '',
                             str(instance['port'])))

        print("Your running CodeChecker servers:")
        print(util.twodim_to_table(rows))
        sys.exit(0)
    elif args.stop or args.stop_all:
        for i in instance_manager.list():
            # A STOP only stops the server associated with the given workspace
            # and view-port.
            if i['hostname'] != socket.gethostname() or (
                args.stop and not (i['port'] == args.view_port and
                                   os.path.abspath(i['workspace']) ==
                                   os.path.abspath(workspace))):
                continue

            try:
                util.kill_process_tree(i['pid'])
                LOG.info("Stopped CodeChecker server running on port {0} "
                         "in workspace {1} (PID: {2})".
                         format(i['port'], i['workspace'], i['pid']))
            except Exception:
                # Let the exception propagate if the command fails.
                LOG.error("Couldn't stop process PID #" + str(i['pid']))
                raise
        sys.exit(0)

    # WARNING
    # In case of SQLite the default value of args.dbaddress is used,
    # for which is_localhost() should return True.
    if util.is_localhost(args.dbaddress) and not os.path.exists(workspace):
        os.makedirs(workspace)

    suppress_handler = generic_package_suppress_handler.\
        GenericSuppressHandler(None)
    if args.suppress is None:
        LOG.warning('No suppress file was given, suppressed results will '
                    'be only stored in the database.')
    else:
        if not os.path.exists(args.suppress):
            LOG.error('Suppress file ' + args.suppress + ' not found!')
            sys.exit(1)

    context = generic_package_context.get_context()
    context.codechecker_workspace = workspace
    session_manager.SessionManager.CodeChecker_Workspace = workspace
    context.db_username = args.dbusername

    check_env = analyzer_env.get_check_env(context.path_env_extra,
                                           context.ld_lib_path_extra)

    sql_server = SQLServer.from_cmdline_args(args,
                                             context.migration_root,
                                             check_env)
    conn_mgr = client.ConnectionManager(sql_server, args.check_address,
                                        args.check_port)
    if args.check_port:
        LOG.debug('Starting CodeChecker server and database server.')
        sql_server.start(context.db_version_info, wait_for_start=True,
                         init=True)
        conn_mgr.start_report_server()
    else:
        LOG.debug('Starting database.')
        sql_server.start(context.db_version_info, wait_for_start=True,
                         init=True)

    # Start database viewer.
    db_connection_string = sql_server.get_connection_string()

    suppress_handler.suppress_file = args.suppress
    LOG.debug('Using suppress file: ' + str(suppress_handler.suppress_file))

    checker_md_docs = os.path.join(context.doc_root, 'checker_md_docs')
    checker_md_docs_map = os.path.join(checker_md_docs,
                                       'checker_doc_map.json')
    with open(checker_md_docs_map, 'r') as dFile:
        checker_md_docs_map = json.load(dFile)

    package_data = {'www_root': context.www_root,
                    'doc_root': context.doc_root,
                    'checker_md_docs': checker_md_docs,
                    'checker_md_docs_map': checker_md_docs_map,
                    'version': context.package_git_tag}

    try:
        client_db_access_server.start_server(package_data,
                                             args.view_port,
                                             db_connection_string,
                                             suppress_handler,
                                             args.not_host_only,
                                             context)
    except socket.error as err:
        if err.errno == errno.EADDRINUSE:
            LOG.error("Server can't be started, maybe the given port number "
                      "({}) is already used. Check the connection "
                      "parameters.".format(args.view_port))
            sys.exit(1)
        else:
            raise
Example 25
    def construct_analyzer_cmd(self, res_handler):
        """
        Called by the analyzer method.
        Construct the analyzer command.
        """
        try:
            # Get an output file from the result handler.
            analyzer_output_file = res_handler.analyzer_result_file

            # Get the checkers list from the config_handler.
            # Checker order matters.
            config = self.config_handler

            analyzer_cmd = [config.analyzer_binary]

            analyzer_cmd.extend(self.buildaction.compiler_defines)
            analyzer_cmd.extend(self.buildaction.compiler_includes)

            if len(config.compiler_resource_dir) > 0:
                analyzer_cmd.extend([
                    '-resource-dir', config.compiler_resource_dir, '-isystem',
                    config.compiler_resource_dir
                ])

            # Compiling is enough.
            analyzer_cmd.append('-c')

            analyzer_cmd.append('--analyze')

            # Turn off clang hardcoded checkers list.
            analyzer_cmd.append('--analyzer-no-default-checks')

            for plugin in config.analyzer_plugins:
                analyzer_cmd.extend([
                    "-Xclang", "-plugin", "-Xclang", "checkercfg", "-Xclang",
                    "-load", "-Xclang", plugin
                ])

            analyzer_mode = 'plist-multi-file'
            analyzer_cmd.extend([
                '-Xclang', '-analyzer-opt-analyze-headers', '-Xclang',
                '-analyzer-output=' + analyzer_mode
            ])

            if config.compiler_sysroot:
                analyzer_cmd.extend(['--sysroot', config.compiler_sysroot])

            for path in config.system_includes:
                analyzer_cmd.extend(['-isystem', path])

            for path in config.includes:
                analyzer_cmd.extend(['-I', path])

            analyzer_cmd.extend(['-o', analyzer_output_file])

            # Config handler stores which checkers are enabled or disabled.
            for checker_name, value in config.checks().items():
                enabled, _ = value
                if enabled:
                    analyzer_cmd.extend(
                        ['-Xclang', '-analyzer-checker=' + checker_name])
                else:
                    analyzer_cmd.extend([
                        '-Xclang', '-analyzer-disable-checker', '-Xclang',
                        checker_name
                    ])

            if config.ctu_dir:
                env = analyzer_env.get_check_env(config.path_env_extra,
                                                 config.ld_lib_path_extra)
                triple_arch = ctu_triple_arch.get_triple_arch(
                    self.buildaction, self.source_file, config, env)
                analyzer_cmd.extend([
                    '-Xclang', '-analyzer-config', '-Xclang',
                    'xtu-dir=' + os.path.join(config.ctu_dir, triple_arch),
                    '-Xclang', '-analyzer-config', '-Xclang',
                    'reanalyze-xtu-visited=true'
                ])
                if config.ctu_in_memory:
                    analyzer_cmd.extend([
                        '-Xclang', '-analyzer-config', '-Xclang',
                        'xtu-reparse=' + os.path.abspath(config.log_file[0])
                    ])

            # Set language.
            analyzer_cmd.extend(['-x', self.buildaction.lang])

            analyzer_cmd.append(config.analyzer_extra_arguments)

            analyzer_cmd.extend(self.buildaction.analyzer_options)

            analyzer_cmd.append(self.source_file)

            return analyzer_cmd

        except Exception as ex:
            LOG.error(ex)
            return []
Example 26
def server_init_start(args):
    """
    Start or manage a CodeChecker report server.
    """

    if 'list' in args or 'stop' in args or 'stop_all' in args:
        __instance_management(args)
        sys.exit(0)

    if 'reload' in args:
        __reload_config(args)
        sys.exit(0)

    # Actual server starting from this point.
    if not host_check.check_zlib():
        raise Exception("zlib is not available on the system!")

    # WARNING
    # In case of SQLite the default value of args.dbaddress is used,
    # for which is_localhost() should return True.
    if util.is_localhost(args.dbaddress) and \
            not os.path.exists(args.config_directory):
        os.makedirs(args.config_directory)

    # Make sure the SQLite file can be created if it does not exist.
    if 'sqlite' in args and \
            not os.path.isdir(os.path.dirname(args.sqlite)):
        os.makedirs(os.path.dirname(args.sqlite))

    if 'reset_root' in args:
        try:
            os.remove(os.path.join(args.config_directory, 'root.user'))
            LOG.info("Master superuser (root) credentials invalidated and "
                     "deleted. New ones will be generated...")
        except OSError:
            # File doesn't exist.
            pass

    if 'force_auth' in args:
        LOG.info("'--force-authentication' was passed as a command-line "
                 "option. The server will ask for users to authenticate!")

    context = package_context.get_context()
    context.codechecker_workspace = args.config_directory
    context.db_username = args.dbusername

    check_env = analyzer_env.get_check_env(context.path_env_extra,
                                           context.ld_lib_path_extra)

    cfg_sql_server = database.SQLServer.from_cmdline_args(
        vars(args), CONFIG_META, context.config_migration_root,
        interactive=True, env=check_env)

    LOG.info("Checking configuration database ...")
    db_status = cfg_sql_server.connect()
    db_status_msg = database_status.db_status_msg.get(db_status)
    LOG.info(db_status_msg)

    if db_status == DBStatus.SCHEMA_MISSING:
        LOG.debug("Config database schema is missing, initializing new.")
        db_status = cfg_sql_server.connect(init=True)
        if db_status != DBStatus.OK:
            LOG.error("Config database initialization failed!")
            LOG.error("Please check debug logs.")
            sys.exit(1)

    if db_status == DBStatus.SCHEMA_MISMATCH_NO:
        LOG.debug("Configuration database schema mismatch.")
        LOG.debug("No schema upgrade is possible.")
        sys.exit(1)

    force_upgrade = 'force_upgrade' in args

    if db_status == DBStatus.SCHEMA_MISMATCH_OK:
        LOG.debug("Configuration database schema mismatch.")
        LOG.debug("Schema upgrade is possible.")
        LOG.warning("Please note after migration only "
                    "newer CodeChecker versions can be used"
                    "to start the server")
        LOG.warning("It is advised to make a full backup of your "
                    "configuration database")

        LOG.warning(cfg_sql_server.get_db_location())

        question = 'Do you want to upgrade to the new schema?' \
                   ' Y(es)/n(o) '
        if force_upgrade or util.get_user_input(question):
            print("Upgrading schema ...")
            ret = cfg_sql_server.upgrade()
            msg = database_status.db_status_msg.get(
                ret, 'Unknown database status')
            print(msg)
            if ret != DBStatus.OK:
                LOG.error("Schema migration failed")
                sys.exit(ret)
        else:
            LOG.info("No schema migration was done.")
            sys.exit(0)

    if db_status == DBStatus.MISSING:
        LOG.error("Missing configuration database.")
        LOG.error("Server can not be started.")
        sys.exit(1)

    # Configuration database setup and check is needed before database
    # statuses can be checked.
    try:
        if args.status:
            ret = __db_status_check(cfg_sql_server, context, args.status)
            sys.exit(ret)
    except AttributeError:
        LOG.debug('Status was not in the arguments.')

    try:
        if args.product_to_upgrade:
            ret = __db_migration(cfg_sql_server, context,
                                 args.product_to_upgrade, force_upgrade)
            sys.exit(ret)
    except AttributeError:
        LOG.debug('Product upgrade was not in the arguments.')

    # Create the main database link from the arguments passed over the
    # command line.
    cfg_dir = os.path.abspath(args.config_directory)
    default_product_path = os.path.join(cfg_dir, 'Default.sqlite')
    create_default_product = 'sqlite' in args and \
                             not os.path.exists(default_product_path)

    if create_default_product:
        # Create a default product and add it to the configuration database.

        LOG.debug("Create default product...")
        LOG.debug("Configuring schema and migration...")

        prod_server = database.SQLiteDatabase(
            default_product_path, RUN_META,
            context.run_migration_root, check_env)

        LOG.debug("Checking 'Default' product database.")
        db_status = prod_server.connect()
        if db_status != DBStatus.MISSING:
            db_status = prod_server.connect(init=True)
            LOG.debug(database_status.db_status_msg.get(db_status))
            if db_status != DBStatus.OK:
                LOG.error("Failed to configure default product")
                sys.exit(1)

        product_conn_string = prod_server.get_connection_string()

        server.add_initial_run_database(
            cfg_sql_server, product_conn_string)

        LOG.info("Product 'Default' at '{0}' created and set up."
                 .format(default_product_path))

    prod_statuses = check_product_db_status(cfg_sql_server, context)

    upgrade_available = {}
    for k, v in prod_statuses.items():
        db_status, _, _, _ = v
        if db_status == DBStatus.SCHEMA_MISMATCH_OK or \
                db_status == DBStatus.SCHEMA_MISSING:
            upgrade_available[k] = v
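    # Equivalent comprehension form of the loop above:
    #   upgrade_available = {k: v for k, v in prod_statuses.items()
    #                        if v[0] in (DBStatus.SCHEMA_MISMATCH_OK,
    #                                    DBStatus.SCHEMA_MISSING)}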

    if upgrade_available:
        print_prod_status(prod_statuses)
        LOG.warning("Multiple products can be upgraded, make a backup!")
        __db_migration(cfg_sql_server, context, 'all', force_upgrade)

    prod_statuses = check_product_db_status(cfg_sql_server, context)
    print_prod_status(prod_statuses)

    non_ok_db = False
    for k, v in prod_statuses.items():
        db_status, _, _, _ = v
        if db_status != DBStatus.OK:
            non_ok_db = True
            break

    if non_ok_db:
        msg = "There are some database issues. " \
              "Do you want to start the " \
              "server? Y(es)/n(o) "
        if not util.get_user_input(msg):
            sys.exit(1)

    # Start database viewer.
    checker_md_docs = os.path.join(context.doc_root, 'checker_md_docs')
    checker_md_docs_map = os.path.join(checker_md_docs,
                                       'checker_doc_map.json')
    checker_md_docs_map = util.load_json_or_empty(checker_md_docs_map, {})

    package_data = {'www_root': context.www_root,
                    'doc_root': context.doc_root,
                    'checker_md_docs': checker_md_docs,
                    'checker_md_docs_map': checker_md_docs_map,
                    'version': context.package_git_tag}

    suppr_handler = suppress_handler.GenericSuppressHandler(None, False)

    try:
        server.start_server(args.config_directory,
                            package_data,
                            args.view_port,
                            cfg_sql_server,
                            suppr_handler,
                            args.listen_address,
                            'force_auth' in args,
                            args.skip_db_cleanup,
                            context,
                            check_env)
    except socket.error as err:
        if err.errno == errno.EADDRINUSE:
            LOG.error("Server can't be started, maybe the given port number "
                      "({}) is already used. Check the connection "
                      "parameters.".format(args.view_port))
            sys.exit(1)
        else:
            raise
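
The socket.error handling above distinguishes a port-already-in-use condition from every other failure by its errno. A standalone sketch of the same check (try_bind is a hypothetical helper, not part of CodeChecker):

import errno
import socket

def try_bind(port, address=''):
    """Return True if the port is free, False if it is already in use;
    re-raise any other socket error."""
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    try:
        sock.bind((address, port))
        return True
    except socket.error as err:
        if err.errno == errno.EADDRINUSE:
            return False
        raise
    finally:
        sock.close()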
Example no. 27
def __db_migration(cfg_sql_server, context, product_to_upgrade='all',
                   force_upgrade=False):
    """
    Handle database management: schema checking and migration.
    """
    LOG.info("Preparing schema upgrade for " + str(product_to_upgrade))
    product_name = product_to_upgrade

    prod_statuses = check_product_db_status(cfg_sql_server, context)
    prod_to_upgrade = []

    if product_name != 'all':
        avail = prod_statuses.get(product_name)
        if not avail:
            LOG.error("No product was found with this endpoint: " +
                      product_name)
            return 1
        prod_to_upgrade.append(product_name)
    else:
        prod_to_upgrade = list(prod_statuses.keys())

    migration_root = context.run_migration_root

    LOG.warning("Please note after migration only "
                "newer CodeChecker versions can be used "
                "to start the server")
    LOG.warning("It is advised to make a full backup of your "
                "run databases.")

    cc_env = analyzer_env.get_check_env(context.path_env_extra,
                                        context.ld_lib_path_extra)
    for prod in prod_to_upgrade:
        LOG.info("========================")
        LOG.info("Checking: " + prod)
        engine = cfg_sql_server.create_engine()
        config_session = sessionmaker(bind=engine)
        sess = config_session()

        product = sess.query(ORMProduct).filter(
                ORMProduct.endpoint == prod).first()
        db = database.SQLServer.from_connection_string(product.connection,
                                                       RUN_META,
                                                       migration_root,
                                                       interactive=False,
                                                       env=cc_env)

        db_status = db.connect()

        msg = database_status.db_status_msg.get(db_status,
                                                'Unknown database status')

        LOG.info(msg)
        if db_status == DBStatus.SCHEMA_MISSING:
            question = 'Do you want to initialize a new schema for ' \
                        + product.endpoint + '? Y(es)/n(o) '
            if force_upgrade or util.get_user_input(question):
                ret = db.connect(init=True)
                msg = database_status.db_status_msg.get(
                    ret, 'Unknown database status')
            else:
                LOG.info("No schema initialization was done.")

        elif db_status == DBStatus.SCHEMA_MISMATCH_OK:
            question = 'Do you want to upgrade to new schema for ' \
                        + product.endpoint + '? Y(es)/n(o) '
            if force_upgrade or util.get_user_input(question):
                LOG.info("Upgrading schema ...")
                ret = db.upgrade()
                LOG.info("Done.")
                msg = database_status.db_status_msg.get(
                    ret, 'Unknown database status')
            else:
                LOG.info("No schema migration was done.")

        sess.commit()
        sess.close()
        engine.dispose()
        LOG.info("========================")
    return 0
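
Each product in the loop above goes through the standard SQLAlchemy engine/session lifecycle: create an engine, bind a sessionmaker, query, commit, then close the session and dispose of the engine. A minimal standalone sketch of that lifecycle (the connection string is a placeholder, for illustration only):

from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker

engine = create_engine('sqlite:///config.sqlite')  # placeholder URL
Session = sessionmaker(bind=engine)
session = Session()
try:
    # ... query and update ORM objects here ...
    session.commit()
finally:
    session.close()
    engine.dispose()  # release pooled connections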
Example no. 28
def check(check_data):
    """
    Invoke clang with an action which called by processes.
    Different analyzer object belongs to for each build action.

    skiplist handler is None if no skip file was configured.
    """

    action, context, analyzer_config_map, \
        output_dir, skip_handler, quiet_output_on_stdout, \
        capture_analysis_output = check_data

    skipped = False
    reanalyzed = False
    try:
        # If one analysis fails the check fails.
        return_codes = 0
        skipped = False

        result_file = ''
        for source in action.sources:

            # If there is no skiplist handler there was no skip list file
            # in the command line.
            # C++ file skipping is handled here.
            source_file_name = os.path.basename(source)

            if skip_handler and skip_handler.should_skip(source):
                LOG.debug_analyzer(source_file_name + ' is skipped')
                skipped = True
                continue

            # Escape the spaces in the source path, but make sure not to
            # over-escape already escaped spaces.
            if ' ' in source:
                space_locations = [i for i, c in enumerate(source) if c == ' ']
                # If a \ is added to the text, the following indexes must be
                # shifted by one.
                rolling_offset = 0

                for orig_idx in space_locations:
                    idx = orig_idx + rolling_offset
                    if idx != 0 and source[idx - 1] != '\\':
                        source = source[:idx] + '\\ ' + source[idx + 1:]
                        rolling_offset += 1

            # Construct analyzer env.
            analyzer_environment = analyzer_env.get_check_env(
                context.path_env_extra, context.ld_lib_path_extra)

            # Create a source analyzer.
            source_analyzer = \
                analyzer_types.construct_analyzer(action,
                                                  analyzer_config_map)

            # Source is the currently analyzed source file;
            # there can be more than one in a build action.
            source_analyzer.source_file = source

            # The result handler for analysis is an empty result handler
            # which only returns metadata, but can't process the results.
            rh = analyzer_types.construct_analyze_handler(
                action, output_dir, context.severity_map, skip_handler)

            rh.analyzed_source_file = source
            if os.path.exists(rh.analyzer_result_file):
                reanalyzed = True

            # Fills up the result handler with the analyzer information.
            source_analyzer.analyze(rh, analyzer_environment)

            # If the source file contains escaped spaces ("\ " tokens),
            # clang SA writes the plist file with this escape sequence
            # removed, whereas clang-tidy does not. We rewrite the file
            # names to contain no escape sequences for every analyzer.
            result_file = rh.analyzer_result_file.replace(r'\ ', ' ')
            result_base = os.path.basename(result_file)
            failed_dir = os.path.join(output_dir, "failed")

            if rh.analyzer_returncode == 0:
                # Analysis was successful; process the results.
                if capture_analysis_output:
                    success_dir = os.path.join(output_dir, "success")
                    if not os.path.exists(success_dir):
                        os.makedirs(success_dir)

                if len(rh.analyzer_stdout) > 0:
                    if not quiet_output_on_stdout:
                        LOG.debug_analyzer('\n' + rh.analyzer_stdout)

                    if capture_analysis_output:
                        with open(
                                os.path.join(success_dir, result_base) +
                                ".stdout.txt", 'w') as outf:
                            outf.write(rh.analyzer_stdout)

                if len(rh.analyzer_stderr) > 0:
                    if not quiet_output_on_stdout:
                        LOG.debug_analyzer('\n' + rh.analyzer_stderr)

                    if capture_analysis_output:
                        with open(
                                os.path.join(success_dir, result_base) +
                                ".stderr.txt", 'w') as outf:
                            outf.write(rh.analyzer_stderr)

                rh.postprocess_result()
                # Generated reports will be handled separately at store.

                # Save some extra information next to the plist, .source
                # acting as an extra metadata file.
                with open(result_file + ".source", 'w') as orig:
                    orig.write(
                        rh.analyzed_source_file.replace(r'\ ', ' ') + "\n")

                if os.path.exists(rh.analyzer_result_file) and \
                        not os.path.exists(result_file):
                    os.rename(rh.analyzer_result_file, result_file)

                LOG.info("[%d/%d] %s analyzed %s successfully." %
                         (progress_checked_num.value, progress_actions.value,
                          action.analyzer_type, source_file_name))

                # Remove the previously generated error file.
                if os.path.exists(failed_dir):
                    err_file = os.path.join(failed_dir, result_base + '.zip')
                    if os.path.exists(err_file):
                        os.remove(err_file)

                if skip_handler:
                    # We need to check the plist content because skipping
                    # reports in headers can be done only this way.
                    plist_parser.skip_report_from_plist(
                        result_file, skip_handler)

            else:
                # If the analysis failed, collect information to help
                # debugging.
                if not os.path.exists(failed_dir):
                    os.makedirs(failed_dir)
                LOG.debug("Writing error debugging to '" + failed_dir + "'")

                zip_file = result_base + '.zip'
                with zipfile.ZipFile(os.path.join(failed_dir, zip_file),
                                     'w') as archive:
                    if len(rh.analyzer_stdout) > 0:
                        LOG.debug("[ZIP] Writing analyzer STDOUT to /stdout")
                        archive.writestr("stdout", rh.analyzer_stdout)

                        if not quiet_output_on_stdout:
                            LOG.debug_analyzer('\n' + rh.analyzer_stdout)

                    if len(rh.analyzer_stderr) > 0:
                        LOG.debug("[ZIP] Writing analyzer STDERR to /stderr")
                        archive.writestr("stderr", rh.analyzer_stderr)

                        if not quiet_output_on_stdout:
                            LOG.debug_analyzer('\n' + rh.analyzer_stderr)

                    LOG.debug("Generating dependent headers via compiler...")
                    try:
                        dependencies = set(create_dependencies(rh.buildaction))
                    except Exception as ex:
                        LOG.debug("Couldn't create dependencies:")
                        LOG.debug(str(ex))
                        archive.writestr("no-sources", str(ex))
                        dependencies = set()

                    LOG.debug("Fetching other dependent files from analyzer "
                              "output...")
                    try:
                        other_files = set()
                        if len(rh.analyzer_stdout) > 0:
                            other_files.update(
                                source_analyzer.get_analyzer_mentioned_files(
                                    rh.analyzer_stdout))

                        if len(rh.analyzer_stderr) > 0:
                            other_files.update(
                                source_analyzer.get_analyzer_mentioned_files(
                                    rh.analyzer_stderr))
                    except Exception as ex:
                        LOG.debug("Couldn't generate list of other files "
                                  "from analyzer output:")
                        LOG.debug(str(ex))
                        other_files = set()

                    dependencies.update(other_files)

                    LOG.debug("Writing dependent files to archive.")
                    for dependent_source in dependencies:
                        dependent_source = os.path.join(
                            action.directory, dependent_source)
                        if not os.path.isabs(dependent_source):
                            dependent_source = \
                                os.path.abspath(dependent_source)
                        LOG.debug("[ZIP] Writing '" + dependent_source + "' "
                                  "to the archive.")
                        archive_path = dependent_source.lstrip('/')

                        try:
                            archive.write(
                                dependent_source,
                                os.path.join("sources-root", archive_path),
                                zipfile.ZIP_DEFLATED)
                        except Exception as ex:
                            # In certain cases, the output could contain
                            # invalid tokens (such as error messages that were
                            # printed even though the dependency generation
                            # returned 0).
                            LOG.debug("[ZIP] Couldn't write, because " +
                                      str(ex))
                            archive.writestr(
                                os.path.join("failed-sources-root",
                                             archive_path),
                                "Couldn't write this file, because:\n" +
                                str(ex))

                    LOG.debug("[ZIP] Writing extra information...")

                    archive.writestr("build-action",
                                     rh.buildaction.original_command)
                    archive.writestr("analyzer-command",
                                     ' '.join(rh.analyzer_cmd))
                    archive.writestr("return-code",
                                     str(rh.analyzer_returncode))

                LOG.debug("ZIP file written at '" +
                          os.path.join(failed_dir, zip_file) + "'")
                LOG.error("Analyzing '" + source_file_name + "' with " +
                          action.analyzer_type + " failed.")
                if rh.analyzer_stdout != '' and not quiet_output_on_stdout:
                    LOG.error(rh.analyzer_stdout)
                if rh.analyzer_stderr != '' and not quiet_output_on_stdout:
                    LOG.error(rh.analyzer_stderr)
                return_codes = rh.analyzer_returncode

                # Remove files that were successfully analyzed earlier on.
                plist_file = result_base + ".plist"
                if os.path.exists(plist_file):
                    os.remove(plist_file)

        progress_checked_num.value += 1

        return return_codes, skipped, reanalyzed, action.analyzer_type, \
            result_file

    except Exception as e:
        LOG.debug_analyzer(str(e))
        traceback.print_exc(file=sys.stdout)
        return 1, skipped, reanalyzed, action.analyzer_type, None
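
The index-shifting loop in check() that escapes spaces in source paths can also be written as a single regular expression with a negative lookbehind, which avoids the manual offset bookkeeping. A sketch of that alternative:

import re

def escape_spaces(path):
    """Escape every space that is not already preceded by a backslash."""
    return re.sub(r'(?<!\\) ', r'\\ ', path)

# escape_spaces('a b')  ->  'a\ b'  (a backslash inserted before the space)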
Example no. 29
def check_supported_analyzers(analyzers, context):
    """
    Checks the given analyzers in the current context for their executability
    and support in CodeChecker.

    This method also updates the given context.analyzer_binaries if the
    context's configuration is bogus but could be resolved.

    :return: (enabled, failed) where enabled is a set of analyzer names
     and failed is a set of (analyzer, reason) tuples.
    """

    check_env = analyzer_env.get_check_env(context.path_env_extra,
                                           context.ld_lib_path_extra)

    analyzer_binaries = context.analyzer_binaries

    enabled_analyzers = set()
    failed_analyzers = set()

    for analyzer_name in analyzers:
        if analyzer_name not in supported_analyzers:
            failed_analyzers.add(
                (analyzer_name, "Analyzer unsupported by CodeChecker."))
            continue

        # Get the analyzer binary to check if it can run.
        available_analyzer = True
        analyzer_bin = analyzer_binaries.get(analyzer_name)
        if not analyzer_bin:
            failed_analyzers.add(
                (analyzer_name, "Failed to detect analyzer binary."))
            available_analyzer = False
        elif not os.path.isabs(analyzer_bin):
            # If the analyzer is not in an absolute path, try to find it...
            found_bin = supported_analyzers[analyzer_name].\
                resolve_missing_binary(analyzer_bin, check_env)

            # found_bin is an absolute path, an executable in one of the
            # PATH folders.
            # If found_bin is the same as the original binary, i.e., normally
            # calling the binary without any search would have resulted in
            # the same binary being called, it's NOT a "not found".
            if found_bin and os.path.basename(found_bin) != analyzer_bin:
                LOG.debug(
                    "Configured binary '{0}' for analyzer '{1}' was "
                    "not found, but environment PATH contains '{2}'.".format(
                        analyzer_bin, analyzer_name, found_bin))
                context.analyzer_binaries[analyzer_name] = found_bin

            analyzer_bin = found_bin

        if not analyzer_bin or \
           not host_check.check_clang(analyzer_bin, check_env):
            # An analyzer that is unavailable at its configured absolute
            # path is deliberately treated as a configuration problem.
            failed_analyzers.add(
                (analyzer_name, "Cannot execute analyzer binary."))
            available_analyzer = False

        if available_analyzer:
            enabled_analyzers.add(analyzer_name)

    return enabled_analyzers, failed_analyzers
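
resolve_missing_binary ultimately performs a PATH lookup for the analyzer. On Python 3 the standard library provides this directly; a minimal sketch using shutil.which (the resolve_on_path wrapper is hypothetical):

import os
import shutil

def resolve_on_path(binary, env=None):
    """Return the absolute path of the binary found on PATH,
    or None if it is not available (requires Python >= 3.3)."""
    path = (env or os.environ).get('PATH')
    return shutil.which(binary, path=path)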
Example no. 30
def main(args):
    """
    List the checkers available in the specified (or all supported) analyzers
    along with their description or enabled status in various formats.
    """

    # If nothing is set, list checkers for all supported analyzers.
    analyzers = args.analyzers \
        if 'analyzers' in args \
        else analyzer_types.supported_analyzers

    context = generic_package_context.get_context()
    working, errored = analyzer_types.check_supported_analyzers(analyzers,
                                                                context)

    analyzer_environment = analyzer_env.get_check_env(
        context.path_env_extra, context.ld_lib_path_extra)

    analyzer_config_map = analyzer_types.build_config_handlers(args,
                                                               context,
                                                               working)

    # Use different, better looking headers based on the output format.
    if not args.details:
        if args.output_format not in ['csv', 'json']:
            header = ['Name']
        else:
            header = ['name']
    else:
        if args.output_format not in ['csv', 'json']:
            header = ['', 'Name', 'Analyzer', 'Severity', 'Description']
        else:
            header = ['enabled', 'name', 'analyzer', 'severity', 'description']

    rows = []
    for analyzer in working:
        config_handler = analyzer_config_map.get(analyzer)
        source_analyzer = \
            analyzer_types.construct_analyzer_type(analyzer,
                                                   config_handler,
                                                   None)

        checkers = source_analyzer.get_analyzer_checkers(config_handler,
                                                         analyzer_environment)
        default_checker_cfg = context.default_checkers_config.get(
            analyzer + '_checkers')
        analyzer_types.initialize_checkers(config_handler,
                                           checkers,
                                           default_checker_cfg)

        for checker_name, value in config_handler.checks().items():
            enabled, description = value

            if enabled and args.only_disabled:
                continue
            elif not enabled and args.only_enabled:
                continue

            if args.output_format != 'json':
                enabled = '+' if enabled else '-'

            if not args.details:
                rows.append([checker_name])
            else:
                severity = context.severity_map.get(checker_name,
                                                    'UNSPECIFIED')
                rows.append([enabled, checker_name, analyzer,
                             severity, description])

    if len(rows) > 0:
        print(output_formatters.twodim_to_str(args.output_format,
                                              header, rows))

    for analyzer_binary, reason in errored:
        LOG.error("Failed to get checkers for '" + analyzer_binary +
                  "'! The error reason was: '" + reason + "'")
        LOG.error("Please check your installation and the "
                  "'config/package_layout.json' file!")
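
output_formatters.twodim_to_str renders the header/rows pair in the requested output format. A minimal CSV-only sketch of the same idea (an illustration, not CodeChecker's actual implementation):

import csv
import io

def twodim_to_csv(header, rows):
    """Render a header row and data rows as CSV text."""
    out = io.StringIO()
    writer = csv.writer(out)
    writer.writerow(header)
    writer.writerows(rows)
    return out.getvalue()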