Example #1
def handle_list_run_histories(args):
    init_logger(args.verbose if 'verbose' in args else None)

    client = setup_client(args.product_url)
    run_ids = None
    if 'names' in args:
        runs = get_runs(client, args.names)
        run_ids = [r.runId for r in runs]

    run_history = client.getRunHistory(run_ids, None, None, None)

    if args.output_format == 'json':
        print(CmdLineOutputEncoder().encode(run_history))
    else:  # plaintext, csv
        header = ['Date', 'Run name', 'Version tag', 'User',
                  'CodeChecker version', 'Analyzer statistics']
        rows = []
        for h in run_history:
            analyzer_statistics = []
            for analyzer in h.analyzerStatistics:
                stat = h.analyzerStatistics[analyzer]
                num_of_all_files = stat.successful + stat.failed
                analyzer_statistics.append(analyzer + ' (' +
                                           str(num_of_all_files) + '/' +
                                           str(stat.successful) + ')')

            rows.append((h.time,
                         h.runName,
                         h.versionTag if h.versionTag else '',
                         h.user,
                         h.codeCheckerVersion if h.codeCheckerVersion else '',
                         ', '.join(analyzer_statistics)))

        print(twodim_to_str(args.output_format, header, rows))
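
A minimal sketch of the 'Analyzer statistics' cell formatting above, using a hypothetical Stat stand-in for the Thrift AnalyzerStatistics objects:

class Stat:
    # Hypothetical stand-in for the Thrift AnalyzerStatistics type.
    def __init__(self, successful, failed):
        self.successful = successful
        self.failed = failed

analyzer_statistics = []
for analyzer, stat in {'clangsa': Stat(10, 2), 'clang-tidy': Stat(8, 0)}.items():
    num_of_all_files = stat.successful + stat.failed
    analyzer_statistics.append(analyzer + ' (' + str(num_of_all_files) + '/' +
                               str(stat.successful) + ')')

print(', '.join(analyzer_statistics))  # clangsa (12/10), clang-tidy (8/8)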
Example #2
def print_prod_status(prod_status):
    """
    Print the database statuses for each of the products.
    """

    header = [
        'Product endpoint', 'Database status', 'Database location',
        'Schema version in the database', 'Schema version in the package'
    ]
    rows = []

    for k, v in prod_status.items():
        db_status, schema_ver, package_ver, db_location = v
        db_status_msg = database_status.db_status_msg.get(db_status)
        if schema_ver == package_ver:
            schema_ver += " (up to date)"
        rows.append(
            [k, db_status_msg, db_location,
             str(schema_ver), package_ver])

    prod_status = output_formatters.twodim_to_str('table',
                                                  header,
                                                  rows,
                                                  sort_by_column_number=0)
    LOG.info('Status of products:\n%s', prod_status)
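
print_prod_status above unpacks each value as a 4-tuple; a hedged sketch of the mapping it expects, with hypothetical status and schema values:

prod_status = {  # hypothetical input for print_prod_status
    'myproduct': (0, '82ca43f1c979', '82ca43f1c979', '/path/to/run.sqlite'),
}
db_status, schema_ver, package_ver, db_location = prod_status['myproduct']
print(schema_ver == package_ver)  # True -> shown as '... (up to date)'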
Example #3
def handle_list_components(args):
    # If the given output format is not 'table', redirect logger's output to
    # the stderr.
    stream = None
    if 'output_format' in args and args.output_format != 'table':
        stream = 'stderr'

    init_logger(args.verbose if 'verbose' in args else None, stream)

    client = setup_client(args.product_url)
    components = client.getSourceComponents(None)

    if args.output_format == 'json':
        print(CmdLineOutputEncoder().encode(components))
    else:  # plaintext, csv
        header = ['Name', 'Value', 'Description']
        rows = []
        for res in components:
            for idx, value in enumerate(res.value.split('\n')):
                name = res.name if idx == 0 else ''
                description = res.description \
                    if idx == 0 and res.description else ''
                rows.append((name, value, description))

        print(twodim_to_str(args.output_format, header, rows))
Example #4
def handle_list_run_histories(args):
    init_logger(args.verbose if 'verbose' in args else None)

    client = setup_client(args.product_url)
    run_ids = None
    if 'names' in args:
        runs = get_runs(client, args.names)
        run_ids = [r.runId for r in runs]

    run_history = client.getRunHistory(run_ids, None, None, None)

    if args.output_format == 'json':
        print(CmdLineOutputEncoder().encode(run_history))
    else:  # plaintext, csv
        header = [
            'Date', 'Run name', 'Version tag', 'User', 'CodeChecker version',
            'Analyzer statistics'
        ]
        rows = []
        for h in run_history:
            analyzer_statistics = []
            for analyzer in h.analyzerStatistics:
                stat = h.analyzerStatistics[analyzer]
                num_of_all_files = stat.successful + stat.failed
                analyzer_statistics.append(analyzer + ' (' +
                                           str(num_of_all_files) + '/' +
                                           str(stat.successful) + ')')

            rows.append(
                (h.time, h.runName, h.versionTag if h.versionTag else '',
                 h.user, h.codeCheckerVersion if h.codeCheckerVersion else '',
                 ', '.join(analyzer_statistics)))

        print(twodim_to_str(args.output_format, header, rows))
Example #5
def handle_list_tokens(args):
    """
    List personal access tokens of the currently logged in user.
    """
    # If the given output format is not 'table', redirect logger's output to
    # the stderr.
    stream = None
    if 'output_format' in args and args.output_format != 'table':
        stream = 'stderr'

    init_logger(args.verbose if 'verbose' in args else None, stream)

    protocol, host, port = split_server_url(args.server_url)
    client, _ = setup_auth_client(protocol, host, port)
    tokens = client.getTokens()

    if args.output_format == 'json':
        print(CmdLineOutputEncoder().encode(tokens))
    else:  # plaintext, csv
        header = ['Token', 'Description', 'Last access']
        rows = []
        for res in tokens:
            rows.append((res.token, res.description if res.description else '',
                         res.lastAccess))

        print(twodim_to_str(args.output_format, header, rows))
Example #6
def print_version(output_format=None):
    """
    Print web server version information in the given format.
    """
    context = webserver_context.get_context()

    server_versions = ['{0}.{1}'.format(major, minor) for
                       major, minor in version.SUPPORTED_VERSIONS.items()]

    if output_format != 'json':
        server_versions = ', '.join(server_versions)

    rows = [
        ("Base package version", context.version),
        ("Package build date", context.package_build_date),
        ("Git commit ID (hash)", context.package_git_hash),
        ("Git tag information", context.package_git_tag),
        ("Configuration schema", str(context.product_db_version_info)),
        ("Database schema", str(context.run_db_version_info)),
        ("Server supported API (Thrift)", server_versions),
        ("Client API (Thrift)", version.CLIENT_API)
    ]

    if output_format != "json":
        print(output_formatters.twodim_to_str(output_format,
                                              ["Kind", "Version"],
                                              rows))
    elif output_format == "json":
        # Use a special JSON format here, instead of
        # [ {"kind": "something", "version": "0.0.0"}, {"kind": "foo", ... } ]
        # do
        # { "something": "0.0.0", "foo": ... }
        print(json.dumps(dict(rows)))
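
The comment above describes the JSON shape produced by dict() over the list of 2-tuples; illustrated on hypothetical version strings:

import json

rows = [("Base package version", "6.11.0"), ("Client API (Thrift)", "6.24")]
print(json.dumps(dict(rows)))
# {"Base package version": "6.11.0", "Client API (Thrift)": "6.24"}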
Example #7
def handle_list_products(args):

    init_logger(args.verbose if 'verbose' in args else None)

    protocol, host, port = split_server_url(args.server_url)
    client = setup_product_client(protocol, host, port)
    products = client.getProducts(None, None)

    if args.output_format == 'json':
        results = []
        for product in products:
            results.append({product.endpoint: product})
        print(CmdLineOutputEncoder().encode(results))
    else:  # plaintext, csv
        header = ['Database status', 'Endpoint', 'Name', 'Description']
        rows = []
        for product in products:
            name = base64.b64decode(product.displayedName_b64) \
                if product.displayedName_b64 else ''
            description = base64.b64decode(product.description_b64) \
                if product.description_b64 else ''

            if not product.accessible:
                db_status_msg = 'No access.'
            else:
                db_status = product.databaseStatus
                db_status_msg = database_status.db_status_msg.get(
                    db_status, 'Unknown database status')

            rows.append((db_status_msg, product.endpoint, name, description))

        print(twodim_to_str(args.output_format, header, rows))
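
Note that base64.b64decode returns bytes under Python 3, so the decoded name and description land in the table as bytes objects unless decoded further; a minimal sketch with a hypothetical product name (Example #22 below uses a convert.from_b64 helper instead):

import base64

displayed_name_b64 = base64.b64encode(b'My Product')  # hypothetical value
name = base64.b64decode(displayed_name_b64)
print(name)           # b'My Product' -- bytes, not str
print(name.decode())  # My Product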
Example #8
def main(args):
    """
    List basic information about the analyzers supported by CodeChecker.
    """
    logger.setup_logger(args.verbose if 'verbose' in args else None)

    context = analyzer_context.get_context()
    working, errored = \
        analyzer_types.check_supported_analyzers(
            analyzer_types.supported_analyzers,
            context)

    if args.dump_config:
        binary = context.analyzer_binaries.get(args.dump_config)

        if args.dump_config == 'clang-tidy':
            subprocess.call([binary, '-dump-config', '-checks=*'])
        elif args.dump_config == 'clangsa':
            # TODO: Not supported by ClangSA yet!
            LOG.warning("'--dump-config clangsa' is not supported yet.")

        return

    if args.output_format not in ['csv', 'json']:
        if 'details' not in args:
            header = ['Name']
        else:
            header = ['Name', 'Path', 'Version']
    else:
        if 'details' not in args:
            header = ['name']
        else:
            header = ['name', 'path', 'version_string']

    rows = []
    for analyzer in working:
        if 'details' not in args:
            rows.append([analyzer])
        else:
            binary = context.analyzer_binaries.get(analyzer)
            try:
                version = subprocess.check_output([binary, '--version'])
            except (subprocess.CalledProcessError, OSError):
                version = 'ERROR'

            rows.append([analyzer, binary, version])

    if 'all' in args:
        for analyzer, err_reason in errored:
            if 'details' not in args:
                rows.append([analyzer])
            else:
                rows.append([
                    analyzer,
                    context.analyzer_binaries.get(analyzer), err_reason
                ])

    if len(rows) > 0:
        print(output_formatters.twodim_to_str(args.output_format, header,
                                              rows))
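
subprocess.check_output returns bytes by default, so the 'Version' cell prints with a b'...' prefix under Python 3; Example #25 below passes encoding='utf-8' to get text. A sketch with a hypothetical binary path:

import subprocess

binary = '/usr/bin/clang'  # hypothetical analyzer binary path
try:
    version = subprocess.check_output([binary, '--version'],
                                      encoding='utf-8', errors='ignore')
except (subprocess.CalledProcessError, OSError):
    version = 'ERROR'
print(version)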
Example #9
def handle_list_products(args):

    init_logger(args.verbose if 'verbose' in args else None)

    protocol, host, port = split_server_url(args.server_url)
    client = setup_product_client(protocol, host, port)
    products = client.getProducts(None, None)

    if args.output_format == 'json':
        results = []
        for product in products:
            results.append({product.endpoint: product})
        print(CmdLineOutputEncoder().encode(results))
    else:  # plaintext, csv
        header = ['Database status', 'Endpoint', 'Name', 'Description']
        rows = []
        for product in products:
            name = base64.b64decode(product.displayedName_b64) \
                if product.displayedName_b64 else ''
            description = base64.b64decode(product.description_b64) \
                if product.description_b64 else ''

            if not product.accessible:
                db_status_msg = 'No access.'
            else:
                db_status = product.databaseStatus
                db_status_msg = database_status.db_status_msg.get(
                    db_status, 'Unknown database status')

            rows.append((db_status_msg,
                         product.endpoint, name, description))

        print(twodim_to_str(args.output_format, header, rows))
Example #10
def print_version(output_format=None):
    """
    Print web server version information in the given format.
    """
    context = webserver_context.get_context()

    server_versions = [
        '{0}.{1}'.format(major, minor)
        for major, minor in version.SUPPORTED_VERSIONS.items()
    ]

    if output_format != 'json':
        server_versions = ', '.join(server_versions)

    rows = [("Base package version", context.version),
            ("Package build date", context.package_build_date),
            ("Git commit ID (hash)", context.package_git_hash),
            ("Git tag information", context.package_git_tag),
            ("Server supported API (Thrift)", server_versions),
            ("Client API (Thrift)", version.CLIENT_API)]

    if output_format != "json":
        print(
            output_formatters.twodim_to_str(output_format, ["Kind", "Version"],
                                            rows))
    elif output_format == "json":
        # Use a special JSON format here, instead of
        # [ {"kind": "something", "version": "0.0.0"}, {"kind": "foo", ... } ]
        # do
        # { "something": "0.0.0", "foo": ... }
        print(json.dumps(dict(rows)))
Example #11
def handle_list_results(args):
    init_logger(args.verbose if 'verbose' in args else None)
    check_deprecated_arg_usage(args)

    client = setup_client(args.product_url)

    run_names = map(lambda x: x.strip(), args.name.split(':'))
    run_ids = [run.runId for run in get_runs(client, run_names)]

    if not len(run_ids):
        LOG.warning("No runs were found!")
        sys.exit(1)

    limit = constants.MAX_QUERY_SIZE
    offset = 0

    report_filter = ttypes.ReportFilter()

    add_filter_conditions(client, report_filter, args)

    all_results = []
    results = client.getRunResults(run_ids, limit, offset, None, report_filter,
                                   None)

    while results:
        all_results.extend(results)
        offset += limit
        results = client.getRunResults(run_ids, limit, offset, None,
                                       report_filter, None)

    if args.output_format == 'json':
        print(CmdLineOutputEncoder().encode(all_results))
    else:
        header = [
            'File', 'Checker', 'Severity', 'Msg', 'Review status',
            'Detection status'
        ]

        rows = []
        for res in all_results:
            bug_line = res.line
            checked_file = res.checkedFile
            if bug_line is not None:
                checked_file += ' @ ' + str(bug_line)

            sev = ttypes.Severity._VALUES_TO_NAMES[res.severity]
            rw_status = \
                ttypes.ReviewStatus._VALUES_TO_NAMES[res.reviewData.status]

            dt_status = 'N/A'

            status = res.detectionStatus
            if status is not None:
                dt_status = ttypes.DetectionStatus._VALUES_TO_NAMES[status]

            rows.append((checked_file, res.checkerId, sev, res.checkerMsg,
                         rw_status, dt_status))

        print(twodim_to_str(args.output_format, header, rows))
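
The offset/limit loop above is a generic pagination pattern; a self-contained sketch with a hypothetical fetch(limit, offset) callable standing in for client.getRunResults:

def fetch_all(fetch, limit):
    """Collect every page from fetch(limit, offset) until a page is empty."""
    all_results = []
    offset = 0
    results = fetch(limit, offset)
    while results:
        all_results.extend(results)
        offset += limit
        results = fetch(limit, offset)
    return all_results

data = list(range(10))
print(fetch_all(lambda limit, offset: data[offset:offset + limit], 4))
# [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]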
Example #12
def handle_list_results(args):
    init_logger(args.verbose if 'verbose' in args else None)
    check_deprecated_arg_usage(args)

    client = setup_client(args.product_url)

    run_names = map(lambda x: x.strip(), args.name.split(':'))
    run_ids = [run.runId for run in get_runs(client, run_names)]

    if not len(run_ids):
        LOG.warning("No runs were found!")
        sys.exit(1)

    limit = constants.MAX_QUERY_SIZE
    offset = 0

    report_filter = ttypes.ReportFilter()

    add_filter_conditions(client, report_filter, args)

    all_results = []
    results = client.getRunResults(run_ids, limit, offset, None,
                                   report_filter, None)

    while results:
        all_results.extend(results)
        offset += limit
        results = client.getRunResults(run_ids, limit, offset, None,
                                       report_filter, None)

    if args.output_format == 'json':
        print(CmdLineOutputEncoder().encode(all_results))
    else:
        header = ['File', 'Checker', 'Severity', 'Msg', 'Review status',
                  'Detection status']

        rows = []
        for res in all_results:
            bug_line = res.line
            checked_file = res.checkedFile
            if bug_line is not None:
                checked_file += ' @ ' + str(bug_line)

            sev = ttypes.Severity._VALUES_TO_NAMES[res.severity]
            rw_status = \
                ttypes.ReviewStatus._VALUES_TO_NAMES[res.reviewData.status]

            dt_status = 'N/A'

            status = res.detectionStatus
            if status is not None:
                dt_status = ttypes.DetectionStatus._VALUES_TO_NAMES[status]

            rows.append((checked_file, res.checkerId, sev,
                         res.checkerMsg, rw_status, dt_status))

        print(twodim_to_str(args.output_format, header, rows))
Example #13
def handle_list_runs(args):
    # If the given output format is not 'table', redirect logger's output to
    # the stderr.
    stream = None
    if 'output_format' in args and args.output_format != 'table':
        stream = 'stderr'

    init_logger(args.verbose if 'verbose' in args else None, stream)

    client = setup_client(args.product_url)

    run_filter = None
    if 'names' in args:
        run_filter = ttypes.RunFilter()
        run_filter.names = args.names

    runs = client.getRunData(run_filter)

    if args.output_format == 'json':
        results = []
        for run in runs:
            results.append({run.name: run})
        print(CmdLineOutputEncoder().encode(results))

    else:  # plaintext, csv
        header = ['Name', 'Number of unresolved reports',
                  'Analyzer statistics', 'Storage date', 'Version tag',
                  'Duration', 'CodeChecker version']
        rows = []
        for run in runs:
            duration = str(timedelta(seconds=run.duration)) \
                if run.duration > -1 else 'Not finished'

            analyzer_statistics = []
            for analyzer in run.analyzerStatistics:
                stat = run.analyzerStatistics[analyzer]
                num_of_all_files = stat.successful + stat.failed
                analyzer_statistics.append(analyzer + ' (' +
                                           str(num_of_all_files) + '/' +
                                           str(stat.successful) + ')')

            codechecker_version = run.codeCheckerVersion \
                if run.codeCheckerVersion else ''

            rows.append((run.name,
                         str(run.resultCount),
                         ', '.join(analyzer_statistics),
                         run.runDate,
                         run.versionTag if run.versionTag else '',
                         duration,
                         codechecker_version))

        print(twodim_to_str(args.output_format, header, rows))
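
The 'Duration' cell relies on str(timedelta(...)), with -1 as the sentinel for unfinished runs; a quick check of the formatting:

from datetime import timedelta

for seconds in (125, -1):
    print(str(timedelta(seconds=seconds)) if seconds > -1 else 'Not finished')
# 0:02:05
# Not finished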
Example #14
def __instance_management(args):
    """Handles the instance-manager commands --list/--stop/--stop-all."""

    # TODO: The server stopping and listing must be revised on its invocation
    # once "workspace", as a concept, is removed.
    # QUESTION: What is the best way here to identify a server for the user?
    if 'list' in args:
        instances = instance_manager.get_instances()

        instances_on_multiple_hosts = any(True for inst in instances
                                          if inst['hostname'] !=
                                          socket.gethostname())
        if not instances_on_multiple_hosts:
            head = ['Workspace', 'View port']
        else:
            head = ['Workspace', 'Computer host', 'View port']

        rows = []
        for instance in instances:
            if not instances_on_multiple_hosts:
                rows.append((instance['workspace'], str(instance['port'])))
            else:
                rows.append((instance['workspace'],
                             instance['hostname']
                             if instance['hostname'] != socket.gethostname()
                             else '',
                             str(instance['port'])))

        print("Your running CodeChecker servers:")
        print(output_formatters.twodim_to_str('table', head, rows))
    elif 'stop' in args or 'stop_all' in args:
        for i in instance_manager.get_instances():
            if i['hostname'] != socket.gethostname():
                continue

            # A STOP only stops the server associated with the given workspace
            # and view-port.
            if 'stop' in args and \
                not (i['port'] == args.view_port and
                     os.path.abspath(i['workspace']) ==
                     os.path.abspath(args.config_directory)):
                continue

            try:
                util.kill_process_tree(i['pid'])
                LOG.info("Stopped CodeChecker server running on port %s "
                         "in workspace %s (PID: %s)",
                         i['port'], i['workspace'], i['pid'])
            except Exception:
                # Let the exception come out if the commands fail
                LOG.error("Couldn't stop process PID #%s", str(i['pid']))
                raise
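
The loop above touches only a few keys of each instance descriptor; a hedged sketch of the minimal shape instance_manager.get_instances() must yield (the real entries may carry more fields):

import socket

instance = {  # hypothetical descriptor
    'hostname': socket.gethostname(),
    'workspace': '/home/user/.codechecker',
    'port': 8001,
    'pid': 12345,
}
print((instance['workspace'], str(instance['port'])))  # single-host row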
Example #15
def __instance_management(args):
    """Handles the instance-manager commands --list/--stop/--stop-all."""

    # TODO: The server stopping and listing must be revised on its invocation
    # once "workspace", as a concept, is removed.
    # QUESTION: What is the best way here to identify a server for the user?
    if 'list' in args:
        instances = instance_manager.get_instances()

        instances_on_multiple_hosts = any(
            True for inst in instances
            if inst['hostname'] != socket.gethostname())
        if not instances_on_multiple_hosts:
            head = ['Workspace', 'View port']
        else:
            head = ['Workspace', 'Computer host', 'View port']

        rows = []
        for instance in instances:
            if not instances_on_multiple_hosts:
                rows.append((instance['workspace'], str(instance['port'])))
            else:
                rows.append(
                    (instance['workspace'], instance['hostname']
                     if instance['hostname'] != socket.gethostname() else '',
                     str(instance['port'])))

        print("Your running CodeChecker servers:")
        print(output_formatters.twodim_to_str('table', head, rows))
    elif 'stop' in args or 'stop_all' in args:
        for i in instance_manager.get_instances():
            if i['hostname'] != socket.gethostname():
                continue

            # A STOP only stops the server associated with the given workspace
            # and view-port.
            if 'stop' in args and \
                not (i['port'] == args.view_port and
                     os.path.abspath(i['workspace']) ==
                     os.path.abspath(args.config_directory)):
                continue

            try:
                util.kill_process_tree(i['pid'])
                LOG.info(
                    "Stopped CodeChecker server running on port %s "
                    "in workspace %s (PID: %s)", i['port'], i['workspace'],
                    i['pid'])
            except Exception:
                # Let the exception come out if the commands fail
                LOG.error("Couldn't stop process PID #%s", str(i['pid']))
                raise
Example #16
def handle_list_runs(args):

    init_logger(args.verbose if 'verbose' in args else None)

    client = setup_client(args.product_url)

    run_filter = None
    if 'names' in args:
        run_filter = ttypes.RunFilter()
        run_filter.names = args.names

    runs = client.getRunData(run_filter)

    if args.output_format == 'json':
        results = []
        for run in runs:
            results.append({run.name: run})
        print(CmdLineOutputEncoder().encode(results))

    else:  # plaintext, csv
        header = ['Name', 'Number of unresolved reports',
                  'Analyzer statistics', 'Storage date', 'Version tag',
                  'Duration', 'CodeChecker version']
        rows = []
        for run in runs:
            duration = str(timedelta(seconds=run.duration)) \
                if run.duration > -1 else 'Not finished'

            analyzer_statistics = []
            for analyzer in run.analyzerStatistics:
                stat = run.analyzerStatistics[analyzer]
                num_of_all_files = stat.successful + stat.failed
                analyzer_statistics.append(analyzer + ' (' +
                                           str(num_of_all_files) + '/' +
                                           str(stat.successful) + ')')

            codechecker_version = run.codeCheckerVersion \
                if run.codeCheckerVersion else ''

            rows.append((run.name,
                         str(run.resultCount),
                         ', '.join(analyzer_statistics),
                         run.runDate,
                         run.versionTag if run.versionTag else '',
                         duration,
                         codechecker_version))

        print(twodim_to_str(args.output_format, header, rows))
Example #17
def handle_list_components(args):
    init_logger(args.verbose if 'verbose' in args else None)

    client = setup_client(args.product_url)
    components = client.getSourceComponents(None)

    if args.output_format == 'json':
        print(CmdLineOutputEncoder().encode(components))
    else:  # plaintext, csv
        header = ['Name', 'Value', 'Description']
        rows = []
        for res in components:
            for idx, value in enumerate(res.value.split('\n')):
                name = res.name if idx == 0 else ''
                description = res.description \
                    if idx == 0 and res.description else ''
                rows.append((name, value, description))

        print(twodim_to_str(args.output_format, header, rows))
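
A component value can span several lines; the loop above attaches the name and description only to the first row. Illustrated on a hypothetical component:

value = '+*/src/lib/*\n-*/src/lib/tests/*'  # hypothetical component value
rows = []
for idx, line in enumerate(value.split('\n')):
    name = 'my_component' if idx == 0 else ''
    description = 'Library sources' if idx == 0 else ''
    rows.append((name, line, description))

print(rows)
# [('my_component', '+*/src/lib/*', 'Library sources'),
#  ('', '-*/src/lib/tests/*', '')]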
Example #18
def handle_list_components(args):
    init_logger(args.verbose if 'verbose' in args else None)

    client = setup_client(args.product_url)
    components = client.getSourceComponents(None)

    if args.output_format == 'json':
        print(CmdLineOutputEncoder().encode(components))
    else:  # plaintext, csv
        header = ['Name', 'Value', 'Description']
        rows = []
        for res in components:
            for idx, value in enumerate(res.value.split('\n')):
                name = res.name if idx == 0 else ''
                description = res.description \
                    if idx == 0 and res.description else ''
                rows.append((name, value, description))

        print(twodim_to_str(args.output_format, header, rows))
Example #19
def handle_list_tokens(args):
    """
    List personal access tokens of the currently logged in user.
    """
    init_logger(args.verbose if 'verbose' in args else None)

    protocol, host, port = split_server_url(args.server_url)
    client, curr_token = setup_auth_client(protocol, host, port)
    tokens = client.getTokens()

    if args.output_format == 'json':
        print(CmdLineOutputEncoder().encode(tokens))
    else:  # plaintext, csv
        header = ['Token', 'Description', 'Last access']
        rows = []
        for res in tokens:
            rows.append((res.token, res.description if res.description else '',
                         res.lastAccess))

        print(twodim_to_str(args.output_format, header, rows))
Example #20
def print_version(output_format=None):
    """
    Print analyzer version information in the given format.
    """
    context = analyzer_context.get_context()

    rows = [("Base package version", context.version),
            ("Package build date", context.package_build_date),
            ("Git commit ID (hash)", context.package_git_hash),
            ("Git tag information", context.package_git_tag)]

    if output_format != "json":
        print(
            output_formatters.twodim_to_str(output_format, ["Kind", "Version"],
                                            rows))
    elif output_format == "json":
        # Use a special JSON format here, instead of
        # [ {"kind": "something", "version": "0.0.0"}, {"kind": "foo", ... } ]
        # do
        # { "something": "0.0.0", "foo": ... }
        print(json.dumps(dict(rows)))
Example #21
def handle_list_tokens(args):
    """
    List personal access tokens of the currently logged in user.
    """
    init_logger(args.verbose if 'verbose' in args else None)

    protocol, host, port = split_server_url(args.server_url)
    client, curr_token = setup_auth_client(protocol, host, port)
    tokens = client.getTokens()

    if args.output_format == 'json':
        print(CmdLineOutputEncoder().encode(tokens))
    else:  # plaintext, csv
        header = ['Token', 'Description', 'Last access']
        rows = []
        for res in tokens:
            rows.append((res.token,
                         res.description if res.description else '',
                         res.lastAccess))

        print(twodim_to_str(args.output_format, header, rows))
Example #22
def handle_list_products(args):

    # If the given output format is not 'table', redirect logger's output to
    # the stderr.
    stream = None
    if 'output_format' in args and args.output_format != 'table':
        stream = 'stderr'

    init_logger(args.verbose if 'verbose' in args else None, stream)

    protocol, host, port = split_server_url(args.server_url)
    client = setup_product_client(protocol, host, port)
    products = client.getProducts(None, None)

    if args.output_format == 'json':
        results = []
        for product in products:
            results.append({product.endpoint: product})
        print(CmdLineOutputEncoder().encode(results))
    else:  # plaintext, csv
        header = ['Database status', 'Endpoint', 'Name', 'Description']
        rows = []
        for product in products:
            name = convert.from_b64(product.displayedName_b64) \
                if product.displayedName_b64 else ''
            description = convert.from_b64(product.description_b64) \
                if product.description_b64 else ''

            if not product.accessible:
                db_status_msg = 'No access.'
            else:
                db_status = product.databaseStatus
                db_status_msg = database_status.db_status_msg.get(
                    db_status, 'Unknown database status')

            rows.append((db_status_msg,
                         product.endpoint, name, description))

        print(twodim_to_str(args.output_format, header, rows))
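
Unlike Example #7, this variant decodes through convert.from_b64; a hedged sketch of what such a helper presumably does (the actual convert module may differ):

import base64

def from_b64(value):
    # Hypothetical equivalent of convert.from_b64: base64 -> str.
    return base64.b64decode(value).decode('utf-8')

print(from_b64(base64.b64encode(b'My Product')))  # My Product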
Example #23
def print_prod_status(prod_status):
    """
    Print the database statuses for each of the products.
    """

    header = ['Product endpoint', 'Database status',
              'Database location',
              'Schema version in the database',
              'Schema version in the package']
    rows = []

    for k, v in prod_status.items():
        db_status, schema_ver, package_ver, db_location = v
        db_status_msg = database_status.db_status_msg.get(db_status)
        if schema_ver == package_ver:
            schema_ver += " (up to date)"
        rows.append([k, db_status_msg, db_location, schema_ver, package_ver])

    prod_status = output_formatters.twodim_to_str('table',
                                                  header,
                                                  rows,
                                                  sort_by_column_number=0)
    LOG.info('Status of products:\n%s', prod_status)
Example #24
def handle_list_result_types(args):

    init_logger(args.verbose if 'verbose' in args else None)
    check_deprecated_arg_usage(args)

    if 'disable_unique' in args:
        LOG.warning("--disable-unique option is deprecated. Please use the "
                    "'--uniqueing on' option to get uniqueing results.")
        args.uniqueing = 'off'

    def get_statistics(client, run_ids, field, values):
        report_filter = ttypes.ReportFilter()
        add_filter_conditions(client, report_filter, args)

        setattr(report_filter, field, values)
        checkers = client.getCheckerCounts(run_ids,
                                           report_filter,
                                           None,
                                           None,
                                           0)

        return dict((res.name, res.count) for res in checkers)

    def checker_count(checker_dict, key):
        return checker_dict.get(key, 0)

    client = setup_client(args.product_url)

    run_ids = None
    if 'all_results' not in args:
        run_ids = [run.runId for run in get_runs(client, args.names)]
        if not len(run_ids):
            LOG.warning("No runs were found!")
            sys.exit(1)

    all_checkers_report_filter = ttypes.ReportFilter()
    add_filter_conditions(client, all_checkers_report_filter, args)

    all_checkers = client.getCheckerCounts(run_ids,
                                           all_checkers_report_filter,
                                           None,
                                           None,
                                           0)
    all_checkers_dict = dict((res.name, res) for res in all_checkers)

    unrev_checkers = get_statistics(client, run_ids, 'reviewStatus',
                                    [ttypes.ReviewStatus.UNREVIEWED])

    confirmed_checkers = get_statistics(client, run_ids, 'reviewStatus',
                                        [ttypes.ReviewStatus.CONFIRMED])

    false_checkers = get_statistics(client, run_ids, 'reviewStatus',
                                    [ttypes.ReviewStatus.FALSE_POSITIVE])

    intentional_checkers = get_statistics(client, run_ids, 'reviewStatus',
                                          [ttypes.ReviewStatus.INTENTIONAL])

    resolved_checkers = get_statistics(client, run_ids, 'detectionStatus',
                                       [ttypes.DetectionStatus.RESOLVED])

    # Get severity counts
    report_filter = ttypes.ReportFilter()
    add_filter_conditions(client, report_filter, args)

    sev_count = client.getSeverityCounts(run_ids, report_filter, None)
    severities = []
    severity_total = 0
    for key, count in sorted(sev_count.items(),
                             reverse=True):
        severities.append(dict(
            severity=ttypes.Severity._VALUES_TO_NAMES[key],
            reports=count))
        severity_total += count

    all_results = []
    total = defaultdict(int)
    for key, checker_data in sorted(all_checkers_dict.items(),
                                    key=lambda x: x[1].severity,
                                    reverse=True):
        all_results.append(dict(
            checker=key,
            severity=ttypes.Severity._VALUES_TO_NAMES[checker_data.severity],
            reports=checker_data.count,
            unreviewed=checker_count(unrev_checkers, key),
            confirmed=checker_count(confirmed_checkers, key),
            false_positive=checker_count(false_checkers, key),
            intentional=checker_count(intentional_checkers, key),
            resolved=checker_count(resolved_checkers, key),
        ))
        total['total_reports'] += checker_data.count
        total['total_resolved'] += checker_count(resolved_checkers, key)
        total['total_unreviewed'] += checker_count(unrev_checkers, key)
        total['total_confirmed'] += checker_count(confirmed_checkers, key)
        total['total_false_positive'] += checker_count(false_checkers, key)
        total['total_intentional'] += checker_count(intentional_checkers, key)

    if args.output_format == 'json':
        print(CmdLineOutputEncoder().encode(all_results))
    else:
        header = ['Checker', 'Severity', 'All reports', 'Resolved',
                  'Unreviewed', 'Confirmed', 'False positive', "Intentional"]

        rows = []
        for stat in all_results:
            rows.append((stat['checker'],
                         stat['severity'],
                         str(stat['reports']),
                         str(stat['resolved']),
                         str(stat['unreviewed']),
                         str(stat['confirmed']),
                         str(stat['false_positive']),
                         str(stat['intentional'])))

        rows.append(('Total', '-', str(total['total_reports']),
                     str(total['total_resolved']),
                     str(total['total_unreviewed']),
                     str(total['total_confirmed']),
                     str(total['total_false_positive']),
                     str(total['total_intentional'])))

        print(twodim_to_str(args.output_format, header, rows,
                            separate_footer=True))

        # Print severity counts
        header = ['Severity', 'All reports']

        rows = []
        for stat in severities:
            rows.append((stat['severity'],
                         str(stat['reports'])))

        rows.append(('Total', str(severity_total)))

        print(twodim_to_str(args.output_format, header, rows,
                            separate_footer=True))
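
checker_count above is dict.get with a default, and the totals accumulate in a defaultdict(int); both patterns on hypothetical counts:

from collections import defaultdict

unrev_checkers = {'core.DivideZero': 3}  # hypothetical per-checker counts
total = defaultdict(int)
for key in ('core.DivideZero', 'deadcode.DeadStores'):
    total['total_unreviewed'] += unrev_checkers.get(key, 0)

print(total['total_unreviewed'])  # 3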
Example #25
def main(args):
    """
    List basic information about the analyzers supported by CodeChecker.
    """
    # If the given output format is not 'table', redirect logger's output to
    # the stderr.
    stream = None
    if 'output_format' in args and args.output_format != 'table':
        stream = 'stderr'

    logger.setup_logger(args.verbose if 'verbose' in args else None, stream)

    context = analyzer_context.get_context()
    working_analyzers, errored = \
        analyzer_types.check_supported_analyzers(
            analyzer_types.supported_analyzers,
            context)

    if args.dump_config:
        binary = context.analyzer_binaries.get(args.dump_config)

        if args.dump_config == 'clang-tidy':
            subprocess.call([binary, '-dump-config', '-checks=*'],
                            encoding="utf-8", errors="ignore")
        elif args.dump_config == 'clangsa':
            ret = subprocess.call([binary,
                                   '-cc1',
                                   '-analyzer-checker-option-help',
                                   '-analyzer-checker-option-help-alpha'],
                                  stderr=subprocess.PIPE,
                                  encoding="utf-8",
                                  errors="ignore")

            if ret:
                # This flag is supported from Clang 9.
                LOG.warning("'--dump-config clangsa' is not supported yet. "
                            "Please make sure that you are using Clang 9 or "
                            "newer.")

        return

    analyzer_environment = env.extend(context.path_env_extra,
                                      context.ld_lib_path_extra)
    analyzer_config_map = analyzer_types.build_config_handlers(
        args, context, working_analyzers)

    def uglify(text):
        """
        csv and json output use a non-human-readable header string:
        no CamelCase and no spaces.
        """
        return text.lower().replace(' ', '_')

    if 'analyzer_config' in args:
        if 'details' in args:
            header = ['Option', 'Description']
        else:
            header = ['Option']

        if args.output_format in ['csv', 'json']:
            header = list(map(uglify, header))

        analyzer = args.analyzer_config
        config_handler = analyzer_config_map.get(analyzer)
        analyzer_class = analyzer_types.supported_analyzers[analyzer]

        configs = analyzer_class.get_analyzer_config(config_handler,
                                                     analyzer_environment)
        rows = [(':'.join((analyzer, c[0])), c[1]) if 'details' in args
                else (':'.join((analyzer, c[0])),) for c in configs]

        print(output_formatters.twodim_to_str(args.output_format,
                                              header, rows))

        return

    if 'details' in args:
        header = ['Name', 'Path', 'Version']
    else:
        header = ['Name']

    if args.output_format in ['csv', 'json']:
        header = list(map(uglify, header))

    rows = []
    for analyzer in working_analyzers:
        if 'details' not in args:
            rows.append([analyzer])
        else:
            binary = context.analyzer_binaries.get(analyzer)
            try:
                version = subprocess.check_output(
                    [binary, '--version'], encoding="utf-8", errors="ignore")
            except (subprocess.CalledProcessError, OSError):
                version = 'ERROR'

            rows.append([analyzer,
                         binary,
                         version])

    if 'all' in args:
        for analyzer, err_reason in errored:
            if 'details' not in args:
                rows.append([analyzer])
            else:
                rows.append([analyzer,
                             context.analyzer_binaries.get(analyzer),
                             err_reason])

    if rows:
        print(output_formatters.twodim_to_str(args.output_format,
                                              header, rows))
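
What uglify does to the table headers, per its docstring:

def uglify(text):
    return text.lower().replace(' ', '_')

print(list(map(uglify, ['Name', 'Path', 'Version'])))
# ['name', 'path', 'version']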
Example #26
def main(args):
    """
    List the checkers available in the specified (or all supported) analyzers
    along with their description or enabled status in various formats.
    """

    # If the given output format is not 'table', redirect logger's output to
    # the stderr.
    logger.setup_logger(args.verbose if 'verbose' in args else None,
                        None if args.output_format == 'table' else 'stderr')

    context = analyzer_context.get_context()
    working_analyzers, errored = analyzer_types.check_supported_analyzers(
        args.analyzers, context)

    analyzer_environment = env.extend(context.path_env_extra,
                                      context.ld_lib_path_extra)

    analyzer_config_map = analyzer_types.build_config_handlers(
        args, context, working_analyzers)

    def uglify(text):
        """
        csv and json output use a non-human-readable header string:
        no CamelCase and no spaces.
        """
        return text.lower().replace(' ', '_')

    def match_guideline(checker_name, selected_guidelines):
        """
        Returns True if checker_name gives reports related to any of the
        selected guideline rules.
        checker_name -- A full checker name.
        selected_guidelines -- A list of guideline names or guideline rule IDs.
        """
        guideline = context.guideline_map.get(checker_name, {})
        guideline_set = set(guideline)
        for value in guideline.values():
            guideline_set |= set(value)

        return any(g in guideline_set for g in selected_guidelines)

    def format_guideline(guideline):
        """
        Convert guideline rules to human-readable format.
        guideline -- Dictionary in the following format:
                     {"guideline_1": ["rule_1", "rule_2"]}
        """
        return ' '.join('Related {} rules: {}'.format(g, ', '.join(r))
                        for g, r in guideline.items())

    # List available checker profiles.
    if 'profile' in args and args.profile == 'list':
        if 'details' in args:
            header = ['Profile name', 'Description']
            rows = context.available_profiles.items()
        else:
            header = ['Profile name']
            rows = [(key, "") for key in context.available_profiles.keys()]

        if args.output_format in ['csv', 'json']:
            header = list(map(uglify, header))

        print(output_formatters.twodim_to_str(args.output_format, header,
                                              rows))
        return

    # List checker config options.
    if 'checker_config' in args:
        if 'details' in args:
            header = ['Option', 'Description']
        else:
            header = ['Option']

        if args.output_format in ['csv', 'json']:
            header = list(map(uglify, header))

        rows = []
        for analyzer in working_analyzers:
            config_handler = analyzer_config_map.get(analyzer)
            analyzer_class = analyzer_types.supported_analyzers[analyzer]

            configs = analyzer_class.get_checker_config(
                config_handler, analyzer_environment)
            rows.extend(
                (':'.join((analyzer, c[0])),
                 c[1]) if 'details' in args else (':'.join((analyzer, c[0])), )
                for c in configs)

        print(output_formatters.twodim_to_str(args.output_format, header,
                                              rows))
        return

    if args.guideline is not None and len(args.guideline) == 0:
        result = defaultdict(set)

        for _, guidelines in context.guideline_map.items():
            for guideline, rules in guidelines.items():
                result[guideline] |= set(rules)

        header = ['Guideline', 'Rules']
        if args.output_format in ['csv', 'json']:
            header = list(map(uglify, header))

        if args.output_format == 'json':
            rows = [(g, sorted(list(r))) for g, r in result.items()]
        else:
            rows = [(g, ', '.join(sorted(r))) for g, r in result.items()]

        if args.output_format == 'rows':
            for row in rows:
                print('Guideline: {}'.format(row[0]))
                print('Rules: {}'.format(row[1]))
        else:
            print(
                output_formatters.twodim_to_str(args.output_format, header,
                                                rows))
        return

    # List available checkers.
    if 'details' in args:
        header = [
            'Enabled', 'Name', 'Analyzer', 'Severity', 'Guideline',
            'Description'
        ]
    else:
        header = ['Name']

    if args.output_format in ['csv', 'json']:
        header = list(map(uglify, header))

    rows = []
    for analyzer in working_analyzers:
        config_handler = analyzer_config_map.get(analyzer)
        analyzer_class = analyzer_types.supported_analyzers[analyzer]

        checkers = analyzer_class.get_analyzer_checkers(
            config_handler, analyzer_environment)
        default_checker_cfg = context.checker_config.get(analyzer +
                                                         '_checkers')

        profile_checkers = None
        if 'profile' in args:
            if args.profile not in context.available_profiles:
                LOG.error("Checker profile '%s' does not exist!", args.profile)
                LOG.error("To list available profiles, use '--profile list'.")
                sys.exit(1)

            profile_checkers = [(args.profile, True)]

        config_handler.initialize_checkers(context.available_profiles,
                                           context.package_root, checkers,
                                           default_checker_cfg,
                                           profile_checkers)

        for checker_name, value in config_handler.checks().items():
            state, description = value

            if state != CheckerState.enabled and 'profile' in args:
                continue

            if state == CheckerState.enabled and 'only_disabled' in args:
                continue
            elif state != CheckerState.enabled and 'only_enabled' in args:
                continue

            if args.output_format == 'json':
                state = state == CheckerState.enabled
            else:
                state = '+' if state == CheckerState.enabled else '-'

            if args.guideline is not None:
                if not match_guideline(checker_name, args.guideline):
                    continue

            if 'details' in args:
                severity = context.severity_map.get(checker_name)
                guideline = context.guideline_map.get(checker_name, {})
                if args.output_format != 'json':
                    guideline = format_guideline(guideline)
                rows.append([
                    state, checker_name, analyzer, severity, guideline,
                    description
                ])
            else:
                rows.append([checker_name])

    if 'show_warnings' in args:
        severity = context.severity_map.get('clang-diagnostic-')
        for warning in get_warnings(analyzer_environment):
            warning = 'clang-diagnostic-' + warning[2:]

            if 'guideline' in args:
                if not match_guideline(warning, args.guideline):
                    continue

            guideline = context.guideline_map.get(warning, {})
            if args.output_format != 'json':
                guideline = format_guideline(guideline)

            if 'details' in args:
                rows.append(['', warning, '-', severity, guideline, '-'])
            else:
                rows.append([warning])

    if rows:
        print(output_formatters.twodim_to_str(args.output_format, header,
                                              rows))

    for analyzer_binary, reason in errored:
        LOG.error(
            "Failed to get checkers for '%s'!"
            "The error reason was: '%s'", analyzer_binary, reason)
        LOG.error("Please check your installation and the "
                  "'config/package_layout.json' file!")
Example #27
    def print_reports(client, reports, output_format):
        output_dir = args.export_dir if 'export_dir' in args else None
        if 'clean' in args and os.path.isdir(output_dir):
            print("Previous analysis results in '{0}' have been removed, "
                  "overwriting with current results.".format(output_dir))
            shutil.rmtree(output_dir)

        if output_format == 'json':
            output = []
            for report in reports:
                if isinstance(report, Report):
                    output.append(report.main)
                else:
                    output.append(report)
            print(CmdLineOutputEncoder().encode(output))
            return

        if output_format == 'html':
            output_dir = args.export_dir
            if not os.path.exists(output_dir):
                os.makedirs(output_dir)

            print("Generating HTML output files to file://{0} directory:\n"
                  .format(output_dir))

            report_to_html(client, reports, output_dir)

            print('\nTo view the results in a browser run:\n'
                  '  $ firefox {0}'.format(os.path.join(args.export_dir,
                                                        'index.html')))
            return

        header = ['File', 'Checker', 'Severity', 'Msg', 'Source']
        rows = []

        source_lines = defaultdict(set)
        for report in reports:
            if not isinstance(report, Report) and report.line is not None:
                source_lines[report.fileId].add(report.line)

        lines_in_files_requested = []
        for key in source_lines:
            lines_in_files_requested.append(
                ttypes.LinesInFilesRequested(fileId=key,
                                             lines=source_lines[key]))

        for report in reports:
            source_line = ''
            if isinstance(report, Report):
                # report is coming from a plist file.
                bug_line = report.main['location']['line']
                bug_col = report.main['location']['col']
                checked_file = report.main['location']['file_name']\
                    + ':' + str(bug_line) + ":" + str(bug_col)
                check_name = report.main['check_name']
                sev = context.severity_map.get(check_name)
                check_msg = report.main['description']
                source_line =\
                    get_line_from_file(report.main['location']['file_name'],
                                       bug_line)
            else:
                # report is of ReportData type coming from CodeChecker server.
                bug_line = report.line
                bug_col = report.column
                sev = ttypes.Severity._VALUES_TO_NAMES[report.severity]
                checked_file = report.checkedFile
                if bug_line is not None:
                    checked_file += ':' + str(bug_line) + ":" + str(bug_col)

                check_name = report.checkerId
                check_msg = report.checkerMsg

                if lines_in_files_requested:
                    source_line_contents = client.getLinesInSourceFileContents(
                        lines_in_files_requested, ttypes.Encoding.BASE64)
                    source_line = base64.b64decode(
                        source_line_contents[report.fileId][bug_line])
            rows.append(
                (sev, checked_file, check_msg, check_name, source_line))
        if output_format == 'plaintext':
            for row in rows:
                print("[{0}] {1}: {2} [{3}]\n{4}\n".format(
                    row[0], row[1], row[2], row[3], row[4]))
        else:
            print(twodim_to_str(output_format, header, rows))
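
The defaultdict(set) above groups the requested line numbers per file id before the single bulk getLinesInSourceFileContents call; the grouping step on hypothetical (fileId, line) pairs:

from collections import defaultdict

source_lines = defaultdict(set)
for file_id, line in [(1, 10), (1, 12), (2, 3)]:  # hypothetical reports
    source_lines[file_id].add(line)

print(dict(source_lines))  # {1: {10, 12}, 2: {3}}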
Example #28
def main(args):
    """
    Store the defect results in the specified input list as bug reports in the
    database.
    """
    logger.setup_logger(args.verbose if 'verbose' in args else None)

    try:
        cmd_config.check_config_file(args)
    except FileNotFoundError as fnerr:
        LOG.error(fnerr)
        sys.exit(1)

    if not host_check.check_zlib():
        raise Exception("zlib is not available on the system!")

    # To ensure the help message prints the default folder properly,
    # the 'default' for 'args.input' is a string, not a list.
    # But we need lists for the foreach here to work.
    if isinstance(args.input, str):
        args.input = [args.input]

    if 'name' not in args:
        LOG.debug("Generating name for analysis...")
        generated = __get_run_name(args.input)
        if generated:
            setattr(args, 'name', generated)
        else:
            LOG.error("No suitable name was found in the inputs for the "
                      "analysis run. Please specify one by passing argument "
                      "--name run_name in the invocation.")
            sys.exit(2)  # argparse returns error code 2 for bad invocations.

    LOG.info("Storing analysis results for run '" + args.name + "'")

    if 'force' in args:
        LOG.info("argument --force was specified: the run with name '" +
                 args.name + "' will be deleted.")

    # Setup connection to the remote server.
    client = libclient.setup_client(args.product_url)

    _, zip_file = tempfile.mkstemp('.zip')
    LOG.debug("Will write mass store ZIP to '%s'...", zip_file)

    try:
        LOG.debug("Assembling zip file.")
        try:
            assemble_zip(args.input, zip_file, client)
        except Exception as ex:
            print(ex)
            import traceback
            traceback.print_stack()

        zip_size = os.stat(zip_file).st_size
        LOG.debug("Assembling zip done, size is %s",
                  sizeof_fmt(zip_size))

        if zip_size > MAX_UPLOAD_SIZE:
            LOG.error("The result list to upload is too big (max: %s).",
                      sizeof_fmt(MAX_UPLOAD_SIZE))
            sys.exit(1)

        with open(zip_file, 'rb') as zf:
            b64zip = base64.b64encode(zf.read()).decode("utf-8")

        context = webserver_context.get_context()

        trim_path_prefixes = args.trim_path_prefix if \
            'trim_path_prefix' in args else None

        description = args.description if 'description' in args else None

        LOG.info("Storing results to the server...")
        client.massStoreRun(args.name,
                            args.tag if 'tag' in args else None,
                            str(context.version),
                            b64zip,
                            'force' in args,
                            trim_path_prefixes,
                            description)

        # Storing analysis statistics if the server allows them.
        if client.allowsStoringAnalysisStatistics():
            storing_analysis_statistics(client, args.input, args.name)

        LOG.info("Storage finished successfully.")
    except RequestFailed as reqfail:
        if reqfail.errorCode == ErrorCode.SOURCE_FILE:
            header = ['File', 'Line', 'Checker name']
            table = twodim_to_str('table',
                                  header,
                                  [c.split('|') for c in reqfail.extraInfo])
            LOG.warning("Setting the review statuses for some reports failed "
                        "because of non valid source code comments: "
                        "%s\n %s", reqfail.message, table)
        sys.exit(1)
    except Exception as ex:
        import traceback
        traceback.print_stack()
        LOG.info("Storage failed: %s", str(ex))
        sys.exit(1)
    finally:
        os.remove(zip_file)
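
The upload step base64-encodes the whole ZIP and decodes the result to str for the Thrift call; the round-trip on hypothetical ZIP bytes:

import base64

zip_bytes = b'PK\x03\x04 hypothetical zip payload'
b64zip = base64.b64encode(zip_bytes).decode('utf-8')
print(type(b64zip).__name__)  # str -- safe to pass to massStoreRun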
Example #29
def main(args):
    """
    Store the defect results in the specified input list as bug reports in the
    database.
    """
    logger.setup_logger(args.verbose if 'verbose' in args else None)

    if not host_check.check_zlib():
        raise Exception("zlib is not available on the system!")

    # To ensure the help message prints the default folder properly,
    # the 'default' for 'args.input' is a string, not a list.
    # But we need lists for the foreach here to work.
    if isinstance(args.input, str):
        args.input = [args.input]

    if 'name' not in args:
        LOG.debug("Generating name for analysis...")
        generated = __get_run_name(args.input)
        if generated:
            setattr(args, 'name', generated)
        else:
            LOG.error("No suitable name was found in the inputs for the "
                      "analysis run. Please specify one by passing argument "
                      "--name run_name in the invocation.")
            sys.exit(2)  # argparse returns error code 2 for bad invocations.

    LOG.info("Storing analysis results for run '" + args.name + "'")

    if 'force' in args:
        LOG.info("argument --force was specified: the run with name '" +
                 args.name + "' will be deleted.")

    protocol, host, port, product_name = split_product_url(args.product_url)

    # Before any transmission happens, check if we have the PRODUCT_STORE
    # permission to prevent a possibly long ZIP operation only to get an
    # error later on.
    product_client = libclient.setup_product_client(protocol,
                                                    host, port, product_name)
    product_id = product_client.getCurrentProduct().id

    auth_client, _ = libclient.setup_auth_client(protocol, host, port)
    has_perm = libclient.check_permission(
        auth_client, Permission.PRODUCT_STORE, {'productID': product_id})
    if not has_perm:
        LOG.error("You are not authorised to store analysis results in "
                  "product '%s'", product_name)
        sys.exit(1)

    # Setup connection to the remote server.
    client = libclient.setup_client(args.product_url, product_client=False)

    LOG.debug("Initializing client connecting to %s:%d/%s done.",
              host, port, product_name)

    zip_fd, zip_file = tempfile.mkstemp('.zip')
    os.close(zip_fd)  # Close the unused descriptor; only the path is needed.
    LOG.debug("Will write mass store ZIP to '%s'...", zip_file)

    try:
        assemble_zip(args.input, zip_file, client)

        if os.stat(zip_file).st_size > MAX_UPLOAD_SIZE:
            LOG.error("The result list to upload is too big (max: %s).",
                      sizeof_fmt(MAX_UPLOAD_SIZE))
            sys.exit(1)

        with open(zip_file, 'rb') as zf:
            b64zip = base64.b64encode(zf.read()).decode("utf-8")

        context = webserver_context.get_context()

        trim_path_prefixes = args.trim_path_prefix if \
            'trim_path_prefix' in args else None

        client.massStoreRun(args.name,
                            args.tag if 'tag' in args else None,
                            str(context.version),
                            b64zip,
                            'force' in args,
                            trim_path_prefixes)

        # Store analysis statistics if the server allows it.
        if client.allowsStoringAnalysisStatistics():
            storing_analysis_statistics(client, args.input, args.name)

        LOG.info("Storage finished successfully.")
    except RequestFailed as reqfail:
        if reqfail.errorCode == ErrorCode.SOURCE_FILE:
            header = ['File', 'Line', 'Checker name']
            table = twodim_to_str('table',
                                  header,
                                  [c.split('|') for c in reqfail.extraInfo])
            LOG.warning("Setting the review statuses for some reports failed "
                        "because of non valid source code comments: "
                        "%s\n %s", reqfail.message, table)
        sys.exit(1)
    except Exception as ex:
        LOG.error("Storage failed: %s", str(ex))
        sys.exit(1)
    finally:
        os.remove(zip_file)
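
Both store variants above pretty-print byte counts with sizeof_fmt. A common implementation of such a helper is sketched below; this is an assumption for illustration, not necessarily CodeChecker's exact code:

def sizeof_fmt(num, suffix='B'):
    """Format a byte count with binary prefixes, e.g. 1048576 -> '1.0MiB'."""
    for unit in ['', 'Ki', 'Mi', 'Gi', 'Ti']:
        if abs(num) < 1024.0:
            return "%3.1f%s%s" % (num, unit, suffix)
        num /= 1024.0
    return "%.1f%s%s" % (num, 'Pi', suffix)


print(sizeof_fmt(1048576))  # ' 1.0MiB'
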
Example #30
def main(args):
    """
    List the checkers available in the specified (or all supported) analyzers
    along with their description or enabled status, in various formats.
    """

    logger.setup_logger(args.verbose if 'verbose' in args else None)

    # If nothing is set, list checkers for all supported analyzers.
    analyzers = args.analyzers \
        if 'analyzers' in args \
        else analyzer_types.supported_analyzers

    context = analyzer_context.get_context()
    working, errored = analyzer_types.check_supported_analyzers(analyzers,
                                                                context)

    analyzer_environment = get_check_env(context.path_env_extra,
                                         context.ld_lib_path_extra)

    analyzer_config_map = analyzer_types.build_config_handlers(args,
                                                               context,
                                                               working)
    # List available checker profiles.
    if 'profile' in args and args.profile == 'list':
        if 'details' not in args:
            if args.output_format not in ['csv', 'json']:
                header = ['Profile name']
            else:
                header = ['profile_name']
        else:
            if args.output_format not in ['csv', 'json']:
                header = ['Profile name', 'Description']
            else:
                header = ['profile_name', 'description']

        rows = []
        for (profile, description) in context.available_profiles.items():
            if 'details' not in args:
                rows.append([profile])
            else:
                rows.append([profile, description])

        print(output_formatters.twodim_to_str(args.output_format,
                                              header, rows))
        return

    # Use different, human-readable headers depending on the output format.
    if 'details' not in args:
        if args.output_format not in ['csv', 'json']:
            header = ['Name']
        else:
            header = ['name']
    else:
        if args.output_format not in ['csv', 'json']:
            header = ['', 'Name', 'Analyzer', 'Severity', 'Description']
        else:
            header = ['enabled', 'name', 'analyzer', 'severity', 'description']

    rows = []
    for analyzer in working:
        config_handler = analyzer_config_map.get(analyzer)
        analyzer_class = \
            analyzer_types.supported_analyzers[analyzer]

        checkers = analyzer_class.get_analyzer_checkers(config_handler,
                                                        analyzer_environment)
        default_checker_cfg = context.checker_config.get(
            analyzer + '_checkers')

        profile_checkers = None
        if 'profile' in args:
            if args.profile not in context.available_profiles:
                LOG.error("Checker profile '%s' does not exist!",
                          args.profile)
                LOG.error("To list available profiles, use '--profile list'.")
                sys.exit(1)

            profile_checkers = [(args.profile, True)]

        config_handler.initialize_checkers(context.available_profiles,
                                           context.package_root,
                                           checkers,
                                           default_checker_cfg,
                                           profile_checkers)

        for checker_name, value in config_handler.checks().items():
            enabled, description = value

            if not enabled and 'profile' in args:
                continue

            if enabled and 'only_disabled' in args:
                continue
            elif not enabled and 'only_enabled' in args:
                continue

            if args.output_format != 'json':
                enabled = '+' if enabled else '-'

            if 'details' not in args:
                rows.append([checker_name])
            else:
                severity = context.severity_map.get(checker_name)
                rows.append([enabled, checker_name, analyzer,
                             severity, description])

    if rows:
        print(output_formatters.twodim_to_str(args.output_format,
                                              header, rows))

    for analyzer_binary, reason in errored:
        LOG.error("Failed to get checkers for '%s'!"
                  "The error reason was: '%s'", analyzer_binary, reason)
        LOG.error("Please check your installation and the "
                  "'config/package_layout.json' file!")
Example #31
def main(args):
    """
    Entry point for parsing analysis results and printing them to stdout
    in a human-readable format.
    """

    logger.setup_logger(args.verbose if 'verbose' in args else None)

    context = analyzer_context.get_context()

    # To ensure the help message prints the default folder properly,
    # the 'default' for 'args.input' is a string, not a list.
    # But we need lists for the foreach here to work.
    if isinstance(args.input, str):
        args.input = [args.input]

    original_cwd = os.getcwd()

    suppr_handler = None
    if 'suppress' in args:
        __make_handler = False
        if not os.path.isfile(args.suppress):
            if 'create_suppress' in args:
                with open(args.suppress, 'w') as _:
                    # Just create the file.
                    __make_handler = True
                    LOG.info("Will write source-code suppressions to "
                             "suppress file: %s", args.suppress)
            else:
                LOG.warning("Suppress file '%s' given, but it does not exist"
                            " -- will not suppress anything.", args.suppress)
        else:
            __make_handler = True

        if __make_handler:
            suppr_handler = suppress_handler.\
                GenericSuppressHandler(args.suppress,
                                       'create_suppress' in args)
    elif 'create_suppress' in args:
        LOG.error("Can't use '--export-source-suppress' unless '--suppress "
                  "SUPPRESS_FILE' is also given.")
        sys.exit(2)

    processed_path_hashes = set()

    skip_handler = None
    if 'skipfile' in args:
        with open(args.skipfile, 'r') as skip_file:
            skip_handler = SkipListHandler(skip_file.read())

    trim_path_prefixes = args.trim_path_prefix if \
        'trim_path_prefix' in args else None

    def trim_path_prefixes_handler(source_file):
        """
        Callback to util.trim_path_prefixes to prevent module dependency
        of plist_to_html
        """
        return util.trim_path_prefixes(source_file, trim_path_prefixes)

    html_builder = None

    def skip_html_report_data_handler(report_hash, source_file, report_line,
                                      checker_name, diag, files):
        """
        Report handler that skips bugs suppressed by source code comments.
        """
        report = Report(None, diag['path'], files)
        path_hash = get_report_path_hash(report, files)
        if path_hash in processed_path_hashes:
            LOG.debug("Skip report because it is a deduplication of an "
                      "already processed report!")
            LOG.debug("Path hash: %s", path_hash)
            LOG.debug(diag)
            return True

        skip = skip_report(report_hash,
                           source_file,
                           report_line,
                           checker_name,
                           suppr_handler)
        if skip_handler:
            skip |= skip_handler.should_skip(source_file)

        if not skip:
            processed_path_hashes.add(path_hash)

        return skip

    file_change = set()
    severity_stats = defaultdict(int)
    file_stats = defaultdict(int)
    report_count = 0

    for input_path in args.input:

        input_path = os.path.abspath(input_path)
        os.chdir(original_cwd)
        LOG.debug("Parsing input argument: '%s'", input_path)

        export = args.export if 'export' in args else None
        if export is not None and export == 'html':
            output_path = os.path.abspath(args.output_path)

            if not html_builder:
                html_builder = \
                    PlistToHtml.HtmlBuilder(context.path_plist_to_html_dist,
                                            context.severity_map)

            LOG.info("Generating html output files:")
            PlistToHtml.parse(input_path,
                              output_path,
                              context.path_plist_to_html_dist,
                              skip_html_report_data_handler,
                              html_builder,
                              trim_path_prefixes_handler)
            continue

        files = []
        metadata_dict = {}
        if os.path.isfile(input_path):
            files.append(input_path)

        elif os.path.isdir(input_path):
            metadata_file = os.path.join(input_path, "metadata.json")
            if os.path.exists(metadata_file):
                metadata_dict = util.load_json_or_empty(metadata_file)
                LOG.debug(metadata_dict)

                if 'working_directory' in metadata_dict:
                    working_dir = metadata_dict['working_directory']
                    try:
                        os.chdir(working_dir)
                    except OSError as oerr:
                        LOG.debug(oerr)
                        LOG.error("Working directory %s is missing.\n"
                                  "Can not parse reports safely.", working_dir)
                        sys.exit(1)

            _, _, file_names = next(os.walk(input_path), ([], [], []))
            files = [os.path.join(input_path, file_name) for file_name
                     in file_names]

        file_report_map = defaultdict(list)

        rh = PlistToPlaintextFormatter(suppr_handler,
                                       skip_handler,
                                       context.severity_map,
                                       processed_path_hashes,
                                       trim_path_prefixes)
        rh.print_steps = 'print_steps' in args

        for file_path in files:
            f_change = parse(file_path, metadata_dict, rh, file_report_map)
            file_change = file_change.union(f_change)

        report_stats = rh.write(file_report_map)
        sev_stats = report_stats.get('severity')
        for severity in sev_stats:
            severity_stats[severity] += sev_stats[severity]

        f_stats = report_stats.get('files')
        for file_path in f_stats:
            file_stats[file_path] += f_stats[file_path]

        rep_stats = report_stats.get('reports')
        report_count += rep_stats.get("report_count", 0)

    print("\n----==== Summary ====----")
    if file_stats:
        vals = [[os.path.basename(k), v] for k, v in
                dict(file_stats).items()]
        keys = ['Filename', 'Report count']
        table = twodim_to_str('table', keys, vals, 1, True)
        print(table)

    if severity_stats:
        vals = [[k, v] for k, v in dict(severity_stats).items()]
        keys = ['Severity', 'Report count']
        table = twodim_to_str('table', keys, vals, 1, True)
        print(table)

    print("----=================----")
    print("Total number of reports: {}".format(report_count))
    print("----=================----")

    if file_change:
        changed_files = '\n'.join([' - ' + f for f in file_change])
        LOG.warning("The following source file contents changed since the "
                    "latest analysis:\n%s\nMultiple reports were not "
                    "shown and skipped from the statistics. Please "
                    "analyze your project again to update the "
                    "reports!", changed_files)

    os.chdir(original_cwd)

    # Create index.html and statistics.html for the generated html files.
    if html_builder:
        html_builder.create_index_html(args.output_path)
        html_builder.create_statistics_html(args.output_path)

        print('\nTo view statistics in a browser run:\n> firefox {0}'.format(
            os.path.join(args.output_path, 'statistics.html')))

        print('\nTo view the results in a browser run:\n> firefox {0}'.format(
            os.path.join(args.output_path, 'index.html')))
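
Example #31 deduplicates reports by recording the path hash of every report it has already handled. The same pattern in isolation; hash_report is a hypothetical stand-in for get_report_path_hash:

def deduplicate(reports, hash_report):
    """Yield each report whose path hash has not been seen before."""
    seen = set()
    for report in reports:
        path_hash = hash_report(report)
        if path_hash in seen:
            continue
        seen.add(path_hash)
        yield report


reports = [{'hash': 'a'}, {'hash': 'b'}, {'hash': 'a'}]
print(list(deduplicate(reports, lambda r: r['hash'])))  # 'a' kept once
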
Example #32
    def print_reports(client, reports, output_format):
        output_dir = args.export_dir if 'export_dir' in args else None
        if 'clean' in args and output_dir and os.path.isdir(output_dir):
            print("Previous analysis results in '{0}' have been removed, "
                  "overwriting with current results.".format(output_dir))
            shutil.rmtree(output_dir)

        if output_format == 'json':
            output = []
            for report in reports:
                if isinstance(report, Report):
                    output.append(report.main)
                else:
                    output.append(report)
            print(CmdLineOutputEncoder().encode(output))
            return

        if output_format == 'html':
            output_dir = args.export_dir
            if not os.path.exists(output_dir):
                os.makedirs(output_dir)

            print("Generating HTML output files to file://{0} directory:\n".
                  format(output_dir))

            report_to_html(client, reports, output_dir)

            print('\nTo view the results in a browser run:\n'
                  '  $ firefox {0}'.format(
                      os.path.join(args.export_dir, 'index.html')))
            return

        # The column order must match the row tuples built below:
        # (severity, file, message, checker name, source line).
        header = ['Severity', 'File', 'Msg', 'Checker', 'Source']
        rows = []

        source_lines = defaultdict(set)
        for report in reports:
            if not isinstance(report, Report) and report.line is not None:
                source_lines[report.fileId].add(report.line)

        lines_in_files_requested = []
        for key in source_lines:
            lines_in_files_requested.append(
                ttypes.LinesInFilesRequested(fileId=key,
                                             lines=source_lines[key]))

        # Fetch the requested source lines once, instead of once per report.
        source_line_contents = {}
        if lines_in_files_requested:
            source_line_contents = client.getLinesInSourceFileContents(
                lines_in_files_requested, ttypes.Encoding.BASE64)

        for report in reports:
            source_line = ''
            if isinstance(report, Report):
                # report is coming from a plist file.
                bug_line = report.main['location']['line']
                bug_col = report.main['location']['col']
                checked_file = report.main['location']['file_name']\
                    + ':' + str(bug_line) + ":" + str(bug_col)
                check_name = report.main['check_name']
                sev = context.severity_map.get(check_name)
                check_msg = report.main['description']
                source_line =\
                    get_line_from_file(report.main['location']['file_name'],
                                       bug_line)
            else:
                # report is of ReportData type coming from CodeChecker server.
                bug_line = report.line
                bug_col = report.column
                sev = ttypes.Severity._VALUES_TO_NAMES[report.severity]
                checked_file = report.checkedFile
                if bug_line is not None:
                    checked_file += ':' + str(bug_line) + ":" + str(bug_col)

                check_name = report.checkerId
                check_msg = report.checkerMsg

                if source_line_contents and bug_line is not None:
                    # b64decode returns bytes; decode for printable output.
                    source_line = base64.b64decode(
                        source_line_contents[report.fileId][bug_line]).decode()
            rows.append(
                (sev, checked_file, check_msg, check_name, source_line))
        if output_format == 'plaintext':
            for row in rows:
                print("[{0}] {1}: {2} [{3}]\n{4}\n".format(
                    row[0], row[1], row[2], row[3], row[4]))
        else:
            print(twodim_to_str(output_format, header, rows))
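
Nearly every example renders its output through twodim_to_str. The real implementation also supports csv, json, sorting and separate footers; the sketch below reproduces only the plain 'table' case, to illustrate what the call sites expect:

def twodim_to_str_sketch(header, rows):
    """Render a header and rows as a fixed-width plain-text table."""
    table = [[str(cell) for cell in header]]
    table += [[str(cell) for cell in row] for row in rows]
    widths = [max(len(row[i]) for row in table) for i in range(len(header))]
    lines = [' | '.join(cell.ljust(width)
                        for cell, width in zip(row, widths))
             for row in table]
    lines.insert(1, '-+-'.join('-' * width for width in widths))
    return '\n'.join(lines)


print(twodim_to_str_sketch(['Name', 'Count'], [('clangsa', 12)]))
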
Example #33
def main(args):
    """
    List basic information about the analyzers supported by CodeChecker.
    """
    logger.setup_logger(args.verbose if 'verbose' in args else None)

    context = analyzer_context.get_context()
    working, errored = \
        analyzer_types.check_supported_analyzers(
            analyzer_types.supported_analyzers,
            context)

    if args.dump_config:
        binary = context.analyzer_binaries.get(args.dump_config)

        if args.dump_config == 'clang-tidy':
            subprocess.call([binary, '-dump-config', '-checks=*'])
        elif args.dump_config == 'clangsa':
            # TODO: Not supported by ClangSA yet!
            LOG.warning("'--dump-config clangsa' is not supported yet.")

        return

    if args.output_format not in ['csv', 'json']:
        if 'details' not in args:
            header = ['Name']
        else:
            header = ['Name', 'Path', 'Version']
    else:
        if 'details' not in args:
            header = ['name']
        else:
            header = ['name', 'path', 'version_string']

    rows = []
    for analyzer in working:
        if 'details' not in args:
            rows.append([analyzer])
        else:
            binary = context.analyzer_binaries.get(analyzer)
            try:
                version = subprocess.check_output(
                    [binary, '--version']).decode().strip()
            except (subprocess.CalledProcessError, OSError):
                version = 'ERROR'

            rows.append([analyzer,
                         binary,
                         version])

    if 'all' in args:
        for analyzer, err_reason in errored:
            if 'details' not in args:
                rows.append([analyzer])
            else:
                rows.append([analyzer,
                             context.analyzer_binaries.get(analyzer),
                             err_reason])

    if rows:
        print(output_formatters.twodim_to_str(args.output_format,
                                              header, rows))
Example #34
def handle_list_result_types(args):

    init_logger(args.verbose if 'verbose' in args else None)
    check_deprecated_arg_usage(args)

    if 'disable_unique' in args:
        LOG.warning("The --disable-unique option is deprecated. Please use "
                    "'--uniqueing off' to get non-uniqued results.")
        args.uniqueing = 'off'

    def get_statistics(client, run_ids, field, values):
        report_filter = ttypes.ReportFilter()
        add_filter_conditions(client, report_filter, args)

        setattr(report_filter, field, values)
        checkers = client.getCheckerCounts(run_ids, report_filter, None, None,
                                           0)

        return dict((res.name, res.count) for res in checkers)

    def checker_count(checker_dict, key):
        return checker_dict.get(key, 0)

    client = setup_client(args.product_url)

    run_ids = None
    if 'all_results' not in args:
        run_ids = [run.runId for run in get_runs(client, args.names)]
        if not run_ids:
            LOG.warning("No runs were found!")
            sys.exit(1)

    all_checkers_report_filter = ttypes.ReportFilter()
    add_filter_conditions(client, all_checkers_report_filter, args)

    all_checkers = client.getCheckerCounts(run_ids, all_checkers_report_filter,
                                           None, None, 0)
    all_checkers_dict = dict((res.name, res) for res in all_checkers)

    unrev_checkers = get_statistics(client, run_ids, 'reviewStatus',
                                    [ttypes.ReviewStatus.UNREVIEWED])

    confirmed_checkers = get_statistics(client, run_ids, 'reviewStatus',
                                        [ttypes.ReviewStatus.CONFIRMED])

    false_checkers = get_statistics(client, run_ids, 'reviewStatus',
                                    [ttypes.ReviewStatus.FALSE_POSITIVE])

    intentional_checkers = get_statistics(client, run_ids, 'reviewStatus',
                                          [ttypes.ReviewStatus.INTENTIONAL])

    resolved_checkers = get_statistics(client, run_ids, 'detectionStatus',
                                       [ttypes.DetectionStatus.RESOLVED])

    # Get severity counts
    report_filter = ttypes.ReportFilter()
    add_filter_conditions(client, report_filter, args)

    sev_count = client.getSeverityCounts(run_ids, report_filter, None)
    severities = []
    severity_total = 0
    for key, count in sorted(sev_count.items(), reverse=True):
        severities.append(
            dict(severity=ttypes.Severity._VALUES_TO_NAMES[key],
                 reports=count))
        severity_total += count

    all_results = []
    total = defaultdict(int)
    for key, checker_data in sorted(all_checkers_dict.items(),
                                    key=lambda x: x[1].severity,
                                    reverse=True):
        all_results.append(
            dict(
                checker=key,
                severity=ttypes.Severity._VALUES_TO_NAMES[
                    checker_data.severity],
                reports=checker_data.count,
                unreviewed=checker_count(unrev_checkers, key),
                confirmed=checker_count(confirmed_checkers, key),
                false_positive=checker_count(false_checkers, key),
                intentional=checker_count(intentional_checkers, key),
                resolved=checker_count(resolved_checkers, key),
            ))
        total['total_reports'] += checker_data.count
        total['total_resolved'] += checker_count(resolved_checkers, key)
        total['total_unreviewed'] += checker_count(unrev_checkers, key)
        total['total_confirmed'] += checker_count(confirmed_checkers, key)
        total['total_false_positive'] += checker_count(false_checkers, key)
        total['total_intentional'] += checker_count(intentional_checkers, key)

    if args.output_format == 'json':
        print(CmdLineOutputEncoder().encode(all_results))
    else:
        header = [
            'Checker', 'Severity', 'All reports', 'Resolved', 'Unreviewed',
            'Confirmed', 'False positive', 'Intentional'
        ]

        rows = []
        for stat in all_results:
            rows.append(
                (stat['checker'], stat['severity'], str(stat['reports']),
                 str(stat['resolved']), str(stat['unreviewed']),
                 str(stat['confirmed']), str(stat['false_positive']),
                 str(stat['intentional'])))

        rows.append(
            ('Total', '-', str(total['total_reports']),
             str(total['total_resolved']), str(total['total_unreviewed']),
             str(total['total_confirmed']), str(total['total_false_positive']),
             str(total['total_intentional'])))

        print(
            twodim_to_str(args.output_format,
                          header,
                          rows,
                          separate_footer=True))

        # Print severity counts
        header = ['Severity', 'All reports']

        rows = []
        for stat in severities:
            rows.append((stat['severity'], str(stat['reports'])))

        rows.append(('Total', str(severity_total)))

        print(
            twodim_to_str(args.output_format,
                          header,
                          rows,
                          separate_footer=True))
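
The totals row in Example #34 is accumulated with collections.defaultdict(int), which avoids initialising every counter up front. The same aggregation pattern, reduced to its essentials:

from collections import defaultdict

stats = [{'reports': 3, 'resolved': 1},
         {'reports': 2, 'resolved': 0}]

total = defaultdict(int)
for stat in stats:
    for key, count in stat.items():
        total['total_' + key] += count

print(dict(total))  # {'total_reports': 5, 'total_resolved': 1}
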
Example #35
def main(args):
    """
    Entry point for parsing analysis results and printing them to stdout
    in a human-readable format.
    """

    logger.setup_logger(args.verbose if 'verbose' in args else None)

    context = analyzer_context.get_context()

    # To ensure the help message prints the default folder properly,
    # the 'default' for 'args.input' is a string, not a list.
    # But we need lists for the foreach here to work.
    if isinstance(args.input, str):
        args.input = [args.input]

    original_cwd = os.getcwd()

    suppr_handler = None
    if 'suppress' in args:
        __make_handler = False
        if not os.path.isfile(args.suppress):
            if 'create_suppress' in args:
                with open(args.suppress, 'w') as _:
                    # Just create the file.
                    __make_handler = True
                    LOG.info("Will write source-code suppressions to "
                             "suppress file.")
            else:
                LOG.warning("Suppress file '%s' given, but it does not exist"
                            " -- will not suppress anything.", args.suppress)
        else:
            __make_handler = True

        if __make_handler:
            suppr_handler = suppress_handler.\
                GenericSuppressHandler(args.suppress,
                                       'create_suppress' in args)
    elif 'create_suppress' in args:
        LOG.error("Can't use '--export-source-suppress' unless '--suppress "
                  "SUPPRESS_FILE' is also given.")
        sys.exit(2)

    processed_path_hashes = set()

    skip_handler = None
    if 'skipfile' in args:
        with open(args.skipfile, 'r') as skip_file:
            skip_handler = SkipListHandler(skip_file.read())

    trim_path_prefixes = args.trim_path_prefix if \
        'trim_path_prefix' in args else None

    def trim_path_prefixes_handler(source_file):
        """
        Callback to util.trim_path_prefixes to prevent module dependency
        of plist_to_html
        """
        return util.trim_path_prefixes(source_file, trim_path_prefixes)

    html_builder = None

    def skip_html_report_data_handler(report_hash, source_file, report_line,
                                      checker_name, diag, files):
        """
        Report handler that skips bugs suppressed by source code comments.
        """
        report = Report(None, diag['path'], files)
        path_hash = get_report_path_hash(report, files)
        if path_hash in processed_path_hashes:
            LOG.debug("Skip report because it is a deduplication of an "
                      "already processed report!")
            LOG.debug("Path hash: %s", path_hash)
            LOG.debug(diag)
            return True

        skip = plist_parser.skip_report(report_hash,
                                        source_file,
                                        report_line,
                                        checker_name,
                                        suppr_handler)
        if skip_handler:
            skip |= skip_handler.should_skip(source_file)

        if not skip:
            processed_path_hashes.add(path_hash)

        return skip

    for input_path in args.input:

        input_path = os.path.abspath(input_path)
        os.chdir(original_cwd)
        LOG.debug("Parsing input argument: '%s'", input_path)

        export = args.export if 'export' in args else None
        if export is not None and export == 'html':
            output_path = os.path.abspath(args.output_path)

            if not html_builder:
                html_builder = \
                    PlistToHtml.HtmlBuilder(context.path_plist_to_html_dist,
                                            context.severity_map)

            LOG.info("Generating html output files:")
            PlistToHtml.parse(input_path,
                              output_path,
                              context.path_plist_to_html_dist,
                              skip_html_report_data_handler,
                              html_builder,
                              trim_path_prefixes_handler)
            continue

        files = []
        metadata_dict = {}
        if os.path.isfile(input_path):
            files.append(input_path)

        elif os.path.isdir(input_path):
            metadata_file = os.path.join(input_path, "metadata.json")
            if os.path.exists(metadata_file):
                metadata_dict = util.load_json_or_empty(metadata_file)
                LOG.debug(metadata_dict)

                if 'working_directory' in metadata_dict:
                    working_dir = metadata_dict['working_directory']
                    try:
                        os.chdir(working_dir)
                    except OSError as oerr:
                        LOG.debug(oerr)
                        LOG.error("Working directory %s is missing.\n"
                                  "Can not parse reports safely.", working_dir)
                        sys.exit(1)

            _, _, file_names = next(os.walk(input_path), ([], [], []))
            files = [os.path.join(input_path, file_name) for file_name
                     in file_names]

        file_change = set()
        file_report_map = defaultdict(list)

        rh = plist_parser.PlistToPlaintextFormatter(suppr_handler,
                                                    skip_handler,
                                                    context.severity_map,
                                                    processed_path_hashes,
                                                    trim_path_prefixes)
        rh.print_steps = 'print_steps' in args

        for file_path in files:
            f_change = parse(file_path, metadata_dict, rh, file_report_map)
            file_change = file_change.union(f_change)

        report_stats = rh.write(file_report_map)
        severity_stats = report_stats.get('severity')
        file_stats = report_stats.get('files')
        reports_stats = report_stats.get('reports')

        print("\n----==== Summary ====----")
        if file_stats:
            vals = [[os.path.basename(k), v] for k, v in
                    dict(file_stats).items()]
            keys = ['Filename', 'Report count']
            table = twodim_to_str('table', keys, vals, 1, True)
            print(table)

        if severity_stats:
            vals = [[k, v] for k, v in dict(severity_stats).items()]
            keys = ['Severity', 'Report count']
            table = twodim_to_str('table', keys, vals, 1, True)
            print(table)

        report_count = reports_stats.get("report_count", 0)
        print("----=================----")
        print("Total number of reports: {}".format(report_count))
        print("----=================----")

        if file_change:
            changed_files = '\n'.join([' - ' + f for f in file_change])
            LOG.warning("The following source file contents changed since the "
                        "latest analysis:\n%s\nMultiple reports were not "
                        "shown and skipped from the statistics. Please "
                        "analyze your project again to update the "
                        "reports!", changed_files)

    os.chdir(original_cwd)

    # Create index.html and statistics.html for the generated html files.
    if html_builder:
        html_builder.create_index_html(args.output_path)
        html_builder.create_statistics_html(args.output_path)

        print('\nTo view statistics in a browser run:\n> firefox {0}'.format(
            os.path.join(args.output_path, 'statistics.html')))

        print('\nTo view the results in a browser run:\n> firefox {0}'.format(
            os.path.join(args.output_path, 'index.html')))
Example #36
def main(args):
    """
    List basic information about the analyzers supported by CodeChecker.
    """
    # If the given output format is not 'table', redirect logger's output to
    # the stderr.
    stream = None
    if 'output_format' in args and args.output_format != 'table':
        stream = 'stderr'

    logger.setup_logger(args.verbose if 'verbose' in args else None, stream)

    context = analyzer_context.get_context()
    working, errored = \
        analyzer_types.check_supported_analyzers(
            analyzer_types.supported_analyzers,
            context)

    if args.dump_config:
        binary = context.analyzer_binaries.get(args.dump_config)

        if args.dump_config == 'clang-tidy':
            subprocess.call([binary, '-dump-config', '-checks=*'])
        elif args.dump_config == 'clangsa':
            ret = subprocess.call([binary, '-cc1',
                                   '-analyzer-checker-option-help',
                                   '-analyzer-checker-option-help-alpha'],
                                  stderr=subprocess.PIPE)

            if ret:
                # This flag is supported from Clang 9.
                LOG.warning("'--dump-config clangsa' is not supported yet. "
                            "Please make sure that you are using Clang 9 or "
                            "newer.")

        return

    if args.output_format not in ['csv', 'json']:
        if 'details' not in args:
            header = ['Name']
        else:
            header = ['Name', 'Path', 'Version']
    else:
        if 'details' not in args:
            header = ['name']
        else:
            header = ['name', 'path', 'version_string']

    rows = []
    for analyzer in working:
        if 'details' not in args:
            rows.append([analyzer])
        else:
            binary = context.analyzer_binaries.get(analyzer)
            try:
                version = subprocess.check_output(
                    [binary, '--version']).decode().strip()
            except (subprocess.CalledProcessError, OSError):
                version = 'ERROR'

            rows.append([analyzer,
                         binary,
                         version])

    if 'all' in args:
        for analyzer, err_reason in errored:
            if 'details' not in args:
                rows.append([analyzer])
            else:
                rows.append([analyzer,
                             context.analyzer_binaries.get(analyzer),
                             err_reason])

    if rows:
        print(output_formatters.twodim_to_str(args.output_format,
                                              header, rows))
Example #37
def main(args):
    """
    List the checkers available in the specified (or all supported) analyzers
    along with their description or enabled status, in various formats.
    """

    # If the given output format is not 'table', redirect logger's output to
    # the stderr.
    logger.setup_logger(args.verbose if 'verbose' in args else None,
                        None if args.output_format == 'table' else 'stderr')

    context = analyzer_context.get_context()
    working_analyzers, errored = analyzer_types.check_supported_analyzers(
        args.analyzers, context)

    analyzer_environment = env.extend(context.path_env_extra,
                                      context.ld_lib_path_extra)

    analyzer_config_map = analyzer_types.build_config_handlers(
        args, context, working_analyzers)

    def uglify(text):
        """
        csv and json output use machine-readable header strings:
        lowercase, with spaces replaced by underscores.
        """
        return text.lower().replace(' ', '_')

    # List available checker profiles.
    if 'profile' in args and args.profile == 'list':
        if 'details' in args:
            header = ['Profile name', 'Description']
            rows = context.available_profiles.items()
        else:
            header = ['Profile name']
            rows = [(key, "") for key in context.available_profiles.keys()]

        if args.output_format in ['csv', 'json']:
            header = list(map(uglify, header))

        print(output_formatters.twodim_to_str(args.output_format, header,
                                              rows))
        return

    # List checker config options.
    if 'checker_config' in args:
        if 'details' in args:
            header = ['Option', 'Description']
        else:
            header = ['Option']

        if args.output_format in ['csv', 'json']:
            header = list(map(uglify, header))

        rows = []
        for analyzer in working_analyzers:
            config_handler = analyzer_config_map.get(analyzer)
            analyzer_class = analyzer_types.supported_analyzers[analyzer]

            configs = analyzer_class.get_checker_config(
                config_handler, analyzer_environment)
            rows.extend(
                (':'.join((analyzer, c[0])),
                 c[1]) if 'details' in args else (':'.join((analyzer, c[0])), )
                for c in configs)

        print(output_formatters.twodim_to_str(args.output_format, header,
                                              rows))
        return

    # List available checkers.
    if 'details' in args:
        header = ['Enabled', 'Name', 'Analyzer', 'Severity', 'Description']
    else:
        header = ['Name']

    if args.output_format in ['csv', 'json']:
        header = list(map(uglify, header))

    rows = []
    for analyzer in working_analyzers:
        config_handler = analyzer_config_map.get(analyzer)
        analyzer_class = analyzer_types.supported_analyzers[analyzer]

        checkers = analyzer_class.get_analyzer_checkers(
            config_handler, analyzer_environment)
        default_checker_cfg = context.checker_config.get(analyzer +
                                                         '_checkers')

        profile_checkers = None
        if 'profile' in args:
            if args.profile not in context.available_profiles:
                LOG.error("Checker profile '%s' does not exist!", args.profile)
                LOG.error("To list available profiles, use '--profile list'.")
                sys.exit(1)

            profile_checkers = [(args.profile, True)]

        config_handler.initialize_checkers(context.available_profiles,
                                           context.package_root, checkers,
                                           default_checker_cfg,
                                           profile_checkers)

        for checker_name, value in config_handler.checks().items():
            state, description = value

            if state != CheckerState.enabled and 'profile' in args:
                continue

            if state == CheckerState.enabled and 'only_disabled' in args:
                continue
            elif state != CheckerState.enabled and 'only_enabled' in args:
                continue

            if args.output_format == 'json':
                state = state == CheckerState.enabled
            else:
                state = '+' if state == CheckerState.enabled else '-'

            if 'details' in args:
                severity = context.severity_map.get(checker_name)
                rows.append(
                    [state, checker_name, analyzer, severity, description])
            else:
                rows.append([checker_name])

        if 'show_warnings' in args:
            severity = context.severity_map.get('clang-diagnostic-')
            for warning in get_warnings(analyzer_environment):
                if 'details' in args:
                    rows.append(['', warning, '-', severity, '-'])
                else:
                    rows.append([warning])

    if rows:
        print(output_formatters.twodim_to_str(args.output_format, header,
                                              rows))

    for analyzer_binary, reason in errored:
        LOG.error(
            "Failed to get checkers for '%s'! "
            "The error reason was: '%s'", analyzer_binary, reason)
        LOG.error("Please check your installation and the "
                  "'config/package_layout.json' file!")
Example #38
def main(args):
    """
    List the checkers available in the specified (or all supported) analyzers
    along with their description or enabled status, in various formats.
    """

    logger.setup_logger(args.verbose if 'verbose' in args else None)

    # If nothing is set, list checkers for all supported analyzers.
    analyzers = args.analyzers \
        if 'analyzers' in args \
        else analyzer_types.supported_analyzers

    context = analyzer_context.get_context()
    working, errored = analyzer_types.check_supported_analyzers(analyzers,
                                                                context)

    analyzer_environment = get_check_env(context.path_env_extra,
                                         context.ld_lib_path_extra)

    analyzer_config_map = analyzer_types.build_config_handlers(args,
                                                               context,
                                                               working)
    # List available checker profiles.
    if 'profile' in args and args.profile == 'list':
        if 'details' not in args:
            if args.output_format not in ['csv', 'json']:
                header = ['Profile name']
            else:
                header = ['profile_name']
        else:
            if args.output_format not in ['csv', 'json']:
                header = ['Profile name', 'Description']
            else:
                header = ['profile_name', 'description']

        rows = []
        for (profile, description) in context.available_profiles.items():
            if 'details' not in args:
                rows.append([profile])
            else:
                rows.append([profile, description])

        print(output_formatters.twodim_to_str(args.output_format,
                                              header, rows))
        return

    # Use different, human-readable headers depending on the output format.
    if 'details' not in args:
        if args.output_format not in ['csv', 'json']:
            header = ['Name']
        else:
            header = ['name']
    else:
        if args.output_format not in ['csv', 'json']:
            header = ['', 'Name', 'Analyzer', 'Severity', 'Description']
        else:
            header = ['enabled', 'name', 'analyzer', 'severity', 'description']

    rows = []
    for analyzer in working:
        config_handler = analyzer_config_map.get(analyzer)
        analyzer_class = \
            analyzer_types.supported_analyzers[analyzer]

        checkers = analyzer_class.get_analyzer_checkers(config_handler,
                                                        analyzer_environment)
        default_checker_cfg = context.checker_config.get(
            analyzer + '_checkers')

        profile_checkers = None
        if 'profile' in args:
            if args.profile not in context.available_profiles:
                LOG.error("Checker profile '%s' does not exist!",
                          args.profile)
                LOG.error("To list available profiles, use '--profile list'.")
                return

            profile_checkers = [(args.profile, True)]

        config_handler.initialize_checkers(context.available_profiles,
                                           context.package_root,
                                           checkers,
                                           default_checker_cfg,
                                           profile_checkers)

        for checker_name, value in config_handler.checks().items():
            enabled, description = value

            if not enabled and 'profile' in args:
                continue

            if enabled and 'only_disabled' in args:
                continue
            elif not enabled and 'only_enabled' in args:
                continue

            if args.output_format != 'json':
                enabled = '+' if enabled else '-'

            if 'details' not in args:
                rows.append([checker_name])
            else:
                severity = context.severity_map.get(checker_name)
                rows.append([enabled, checker_name, analyzer,
                             severity, description])

    if rows:
        print(output_formatters.twodim_to_str(args.output_format,
                                              header, rows))

    for analyzer_binary, reason in errored:
        LOG.error("Failed to get checkers for '%s'!"
                  "The error reason was: '%s'", analyzer_binary, reason)
        LOG.error("Please check your installation and the "
                  "'config/package_layout.json' file!")