Example #1
def check_patch(fname, verbose=False, show_types=False):
    """Run checkpatch.pl on a file and parse the results.

    Args:
        fname: Filename to check
        verbose: True to print out every line of the checkpatch output as it is
            parsed
        show_types: Tell checkpatch to show the type (number) of each message

    Returns:
        namedtuple containing:
            ok: False=failure, True=ok
            problems: List of problems, each a dict:
                'type': error or warning
                'msg': text message
                'file': filename
                'line': line number
            errors: Number of errors
            warnings: Number of warnings
            checks: Number of checks
            lines: Number of lines
            stdout: Full output of checkpatch
    """
    chk = find_check_patch()
    args = [chk, '--no-tree']
    if show_types:
        args.append('--show-types')
    output = command.output(*args, fname, raise_on_error=False)

    return check_patch_parse(output, verbose)
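
A rough usage sketch (the patch filename is hypothetical, and it assumes checkpatch.pl can be located by find_check_patch()), showing how the returned namedtuple might be consumed:

result = check_patch('0001-sample.patch', show_types=True)
if not result.ok:
    for problem in result.problems:
        # Each problem is a dict with 'type', 'msg', 'file' and 'line' keys
        print('%s:%s: %s: %s' % (problem['file'], problem['line'],
                                 problem['type'], problem['msg']))
    print('%d error(s), %d warning(s), %d check(s)' %
          (result.errors, result.warnings, result.checks))
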
Example #2
    def Unpack(self, fname, dest):
        """Unpack a tar file

        Args:
            fname: Filename to unpack
            dest: Destination directory
        Returns:
            Directory name of the first entry in the archive, without the
            trailing /
        """
        stdout = command.output('tar', 'xvfJ', fname, '-C', dest)
        dirs = stdout.splitlines()[1].split('/')[:2]
        return '/'.join(dirs)
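
A minimal usage sketch, assuming the usual os/tempfile imports (the filenames are hypothetical and obj stands for an instance of the class that defines Unpack; the archive is assumed to be an .xz tarball whose entries share a single top-level directory):

dest = tempfile.mkdtemp()
# topdir is the top-level directory entry of the archive, without a trailing /
topdir = obj.Unpack('gcc-arm-none-linux.tar.xz', dest)
print(os.path.join(dest, topdir))
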
Example #3
    def testMakeElf(self):
        """Test for the MakeElf function"""
        outdir = tempfile.mkdtemp(prefix='elf.')
        expected_text = b'1234'
        expected_data = b'wxyz'
        elf_fname = os.path.join(outdir, 'elf')
        bin_fname = os.path.join(outdir, 'bin')

        # Make an ELF file and then convert it to a flat binary file. This
        # should produce the original data.
        elf.MakeElf(elf_fname, expected_text, expected_data)
        objcopy, args = tools.get_target_compile_tool('objcopy')
        args += ['-O', 'binary', elf_fname, bin_fname]
        stdout = command.output(objcopy, *args)
        with open(bin_fname, 'rb') as fd:
            data = fd.read()
        self.assertEqual(expected_text + expected_data, data)
        shutil.rmtree(outdir)
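
One possible refinement, shown only as a sketch rather than as a change to the original test: wrapping the body in try/finally so the temporary directory is removed even if MakeElf or an assertion fails:

outdir = tempfile.mkdtemp(prefix='elf.')
try:
    elf_fname = os.path.join(outdir, 'elf')
    bin_fname = os.path.join(outdir, 'bin')
    elf.MakeElf(elf_fname, b'1234', b'wxyz')
    objcopy, args = tools.get_target_compile_tool('objcopy')
    command.output(objcopy, *(args + ['-O', 'binary', elf_fname, bin_fname]))
finally:
    # Clean up even when one of the steps above raises
    shutil.rmtree(outdir)
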
Example #4
def get_maintainer(dir_list, fname, verbose=False):
    """Run get_maintainer.pl on a file if we find it.

    We look for get_maintainer.pl in the 'scripts' directory at the top of
    git.  If we find it we'll run it.  If we don't find get_maintainer.pl
    then we fail silently.

    Args:
        dir_list: List of directories to try for the get_maintainer.pl script
        fname: Path to the patch file to run get_maintainer.pl on.
        verbose: True to print a warning if get_maintainer.pl cannot be found

    Returns:
        A list of email addresses to CC to.
    """
    get_maintainer = find_get_maintainer(dir_list)
    if not get_maintainer:
        if verbose:
            print("WARNING: Couldn't find get_maintainer.pl")
        return []

    stdout = command.output(get_maintainer, '--norolestats', fname)
    lines = stdout.splitlines()
    return [x.replace('"', '') for x in lines]
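
A hedged usage sketch (the directory list and patch filename are hypothetical; it assumes get_maintainer.pl can be found under one of the listed directories):

cc_list = get_maintainer(['scripts'], '0001-sample.patch', verbose=True)
for addr in cc_list:
    print('Cc:', addr)
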
Example #5
def email_patches(series,
                  cover_fname,
                  args,
                  dry_run,
                  warn_on_error,
                  cc_fname,
                  self_only=False,
                  alias=None,
                  in_reply_to=None,
                  thread=False,
                  smtp_server=None):
    """Email a patch series.

    Args:
        series: Series object containing destination info
        cover_fname: filename of cover letter
        args: list of filenames of patch files
        dry_run: Just return the command that would be run
        warn_on_error: True to print a warning when an alias fails to match,
                False to ignore it.
        cc_fname: Filename of Cc file for per-commit Cc
        self_only: True to just email to yourself as a test
        alias: Dictionary of aliases, mapping each alias name to a list of
            email addresses or other alias names
        in_reply_to: If set we'll pass this to git as --in-reply-to.
            Should be a message ID that this is in reply to.
        thread: True to add --thread to git send-email (make
            all patches reply to cover-letter or first patch in series)
        smtp_server: SMTP server to use to send patches

    Returns:
        Git command that was/would be run

    # For the duration of this doctest pretend that we ran patman with ./patman
    >>> _old_argv0 = sys.argv[0]
    >>> sys.argv[0] = './patman'

    >>> alias = {}
    >>> alias['fred'] = ['*****@*****.**']
    >>> alias['john'] = ['*****@*****.**']
    >>> alias['mary'] = ['*****@*****.**']
    >>> alias['boys'] = ['fred', ' john']
    >>> alias['all'] = ['fred ', 'john', '   mary   ']
    >>> alias[os.getenv('USER')] = ['*****@*****.**']
    >>> series = {}
    >>> series['to'] = ['fred']
    >>> series['cc'] = ['mary']
    >>> email_patches(series, 'cover', ['p1', 'p2'], True, True, 'cc-fname', \
            False, alias)
    'git send-email --annotate --to "*****@*****.**" --cc \
"*****@*****.**" --cc-cmd "./patman send --cc-cmd cc-fname" cover p1 p2'
    >>> email_patches(series, None, ['p1'], True, True, 'cc-fname', False, \
            alias)
    'git send-email --annotate --to "*****@*****.**" --cc \
"*****@*****.**" --cc-cmd "./patman send --cc-cmd cc-fname" p1'
    >>> series['cc'] = ['all']
    >>> email_patches(series, 'cover', ['p1', 'p2'], True, True, 'cc-fname', \
            True, alias)
    'git send-email --annotate --to "*****@*****.**" --cc-cmd "./patman \
send --cc-cmd cc-fname" cover p1 p2'
    >>> email_patches(series, 'cover', ['p1', 'p2'], True, True, 'cc-fname', \
            False, alias)
    'git send-email --annotate --to "*****@*****.**" --cc \
"*****@*****.**" --cc "*****@*****.**" --cc \
"*****@*****.**" --cc-cmd "./patman send --cc-cmd cc-fname" cover p1 p2'

    # Restore argv[0] since we clobbered it.
    >>> sys.argv[0] = _old_argv0
    """
    to = build_email_list(series.get('to'), '--to', alias, warn_on_error)
    if not to:
        git_config_to = command.output('git',
                                       'config',
                                       'sendemail.to',
                                       raise_on_error=False)
        if not git_config_to:
            print("No recipient.\n"
                  "Please add something like this to a commit\n"
                  "Series-to: Fred Bloggs <*****@*****.**>\n"
                  "Or do something like this\n"
                  "git config sendemail.to [email protected]")
            return
    cc = build_email_list(list(set(series.get('cc')) - set(series.get('to'))),
                          '--cc', alias, warn_on_error)
    if self_only:
        to = build_email_list([os.getenv('USER')], '--to', alias,
                              warn_on_error)
        cc = []
    cmd = ['git', 'send-email', '--annotate']
    if smtp_server:
        cmd.append('--smtp-server=%s' % smtp_server)
    if in_reply_to:
        cmd.append('--in-reply-to="%s"' % in_reply_to)
    if thread:
        cmd.append('--thread')

    cmd += to
    cmd += cc
    cmd += ['--cc-cmd', '"%s send --cc-cmd %s"' % (sys.argv[0], cc_fname)]
    if cover_fname:
        cmd.append(cover_fname)
    cmd += args
    cmdstr = ' '.join(cmd)
    if not dry_run:
        os.system(cmdstr)
    return cmdstr
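
Outside of the doctest, a dry run is a convenient way to preview the git send-email command line before anything is sent. The series, alias and filenames below are placeholders rather than real data:

series = {'to': ['maintainer'], 'cc': []}
alias = {'maintainer': ['[email protected]']}   # placeholder address
cmd = email_patches(series, None, ['0001-sample.patch'], dry_run=True,
                    warn_on_error=True, cc_fname='cc-file', alias=alias)
print(cmd)   # the git send-email invocation that would have been run
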
Example #6
def MakeElf(elf_fname, text, data):
    """Make an elf file with the given data in a single section

    The output file has a several section including '.text' and '.data',
    containing the info provided in arguments.

    Args:
        elf_fname: Output filename
        text: Text (code) to put in the file's .text section
        data: Data to put in the file's .data section
    """
    outdir = tempfile.mkdtemp(prefix='binman.elf.')
    s_file = os.path.join(outdir, 'elf.S')

    # Split the text into two parts so that we can make the entry point two
    # bytes after the start of the text section
    text_bytes1 = ['\t.byte\t%#x' % byte for byte in text[:2]]
    text_bytes2 = ['\t.byte\t%#x' % byte for byte in text[2:]]
    data_bytes = ['\t.byte\t%#x' % byte for byte in data]
    with open(s_file, 'w') as fd:
        print(
            '''/* Auto-generated assembly program to produce an ELF file for testing */

.section .text
.code32
.globl _start
.type _start, @function
%s
_start:
%s
.ident "comment"

.comm fred,8,4

.section .empty
.globl _empty
_empty:
.byte 1

.globl ernie
.data
.type ernie, @object
.size ernie, 4
ernie:
%s
''' % ('\n'.join(text_bytes1), '\n'.join(text_bytes2), '\n'.join(data_bytes)),
            file=fd)
    lds_file = os.path.join(outdir, 'elf.lds')

    # Use a linker script to set the alignment and text address.
    with open(lds_file, 'w') as fd:
        print(
            '''/* Auto-generated linker script to produce an ELF file for testing */

PHDRS
{
    text PT_LOAD ;
    data PT_LOAD ;
    empty PT_LOAD FLAGS ( 6 ) ;
    note PT_NOTE ;
}

SECTIONS
{
    . = 0xfef20000;
    ENTRY(_start)
    .text . : SUBALIGN(0)
    {
        *(.text)
    } :text
    .data : {
        *(.data)
    } :data
    _bss_start = .;
    .empty : {
        *(.empty)
    } :empty
    /DISCARD/ : {
        *(.note.gnu.property)
    }
    .note : {
        *(.comment)
    } :note
    .bss _bss_start  (OVERLAY) : {
        *(.bss)
    }
}
''',
            file=fd)
    # -static: Avoid requiring any shared libraries
    # -nostdlib: Don't link with C library
    # -Wl,--build-id=none: Don't generate a build ID, so that we just get the
    #   text section at the start
    # -m32: Build for 32-bit x86
    # -T...: Specifies the link script, which sets the start address
    cc, args = tools.get_target_compile_tool('cc')
    args += [
        '-static', '-nostdlib', '-Wl,--build-id=none', '-m32', '-T', lds_file,
        '-o', elf_fname, s_file
    ]
    stdout = command.output(cc, *args)
    shutil.rmtree(outdir)
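
A minimal usage sketch (the output path and byte values are hypothetical; a 32-bit-capable cross compiler must be resolvable via tools.get_target_compile_tool()). Since the entry point is placed two bytes into .text, the first two text bytes sit before _start:

text = bytes([0x00, 0x00, 0xf4])          # two padding bytes, then x86 'hlt'
data = bytes([0x12, 0x34, 0x56, 0x78])
MakeElf('/tmp/test.elf', text, data)
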
Example #7
def DoBuildman(options,
               args,
               toolchains=None,
               make_func=None,
               boards=None,
               clean_dir=False,
               test_thread_exceptions=False):
    """The main control code for buildman

    Args:
        options: Command line options object
        args: Command line arguments (list of strings)
        toolchains: Toolchains to use - this should be a Toolchains()
                object. If None, then it will be created and scanned
        make_func: Make function to use for the builder. This is called
                to execute 'make'. If this is None, the normal function
                will be used, which calls the 'make' tool with suitable
                arguments. This setting is useful for tests.
        boards: Boards() object to use, containing a list of available
                boards. If this is None it will be created and scanned.
        clean_dir: Used for tests only, indicates that the existing output_dir
            should be removed before starting the build
        test_thread_exceptions: Used for tests only, True to make the threads
            raise an exception instead of reporting their result. This
            simulates a failure in the code somewhere

    Returns:
        0 if all is well, 100 if one or more boards failed to build, 101 if
        there were warnings (and they are not being ignored), 102 if a test
        thread raised an exception
    """
    global builder

    if options.full_help:
        tools.print_full_help(
            os.path.join(os.path.dirname(os.path.realpath(sys.argv[0])),
                         'README'))
        return 0

    gitutil.setup()
    col = terminal.Color()

    options.git_dir = os.path.join(options.git, '.git')

    no_toolchains = toolchains is None
    if no_toolchains:
        toolchains = toolchain.Toolchains(options.override_toolchain)

    if options.fetch_arch:
        if options.fetch_arch == 'list':
            sorted_list = toolchains.ListArchs()
            print(
                col.build(
                    col.BLUE,
                    'Available architectures: %s\n' % ' '.join(sorted_list)))
            return 0
        else:
            fetch_arch = options.fetch_arch
            if fetch_arch == 'all':
                fetch_arch = ','.join(toolchains.ListArchs())
                print(
                    col.build(col.CYAN,
                              '\nDownloading toolchains: %s' % fetch_arch))
            for arch in fetch_arch.split(','):
                print()
                ret = toolchains.FetchAndInstall(arch)
                if ret:
                    return ret
            return 0

    if no_toolchains:
        toolchains.GetSettings()
        toolchains.Scan(options.list_tool_chains and options.verbose)
    if options.list_tool_chains:
        toolchains.List()
        print()
        return 0

    if options.incremental:
        print(
            col.build(col.RED,
                      'Warning: -I has been removed. See documentation'))
    if not options.output_dir:
        if options.work_in_output:
            sys.exit(col.build(col.RED, '-w requires that you specify -o'))
        options.output_dir = '..'

    # Work out what subset of the boards we are building
    if not boards:
        if not os.path.exists(options.output_dir):
            os.makedirs(options.output_dir)
        board_file = os.path.join(options.output_dir, 'boards.cfg')
        our_path = os.path.dirname(os.path.realpath(__file__))
        genboardscfg = os.path.join(our_path, '../genboardscfg.py')
        if not os.path.exists(genboardscfg):
            genboardscfg = os.path.join(options.git, 'tools/genboardscfg.py')
        status = subprocess.call([genboardscfg, '-q', '-o', board_file])
        if status != 0:
            # Older versions don't support -q
            status = subprocess.call([genboardscfg, '-o', board_file])
            if status != 0:
                sys.exit("Failed to generate boards.cfg")

        boards = board.Boards()
        boards.ReadBoards(board_file)

    exclude = []
    if options.exclude:
        for arg in options.exclude:
            exclude += arg.split(',')

    if options.boards:
        requested_boards = []
        for b in options.boards:
            requested_boards += b.split(',')
    else:
        requested_boards = None
    why_selected, board_warnings = boards.SelectBoards(args, exclude,
                                                       requested_boards)
    selected = boards.GetSelected()
    if not len(selected):
        sys.exit(col.build(col.RED, 'No matching boards found'))

    if options.print_prefix:
        err = ShowToolchainPrefix(boards, toolchains)
        if err:
            sys.exit(col.build(col.RED, err))
        return 0

    # Work out how many commits to build. We want to build everything on the
    # branch. We also build the upstream commit as a control so we can see
    # problems introduced by the first commit on the branch.
    count = options.count
    has_range = options.branch and '..' in options.branch
    if count == -1:
        if not options.branch:
            count = 1
        else:
            if has_range:
                count, msg = gitutil.count_commits_in_range(
                    options.git_dir, options.branch)
            else:
                count, msg = gitutil.count_commits_in_branch(
                    options.git_dir, options.branch)
            if count is None:
                sys.exit(col.build(col.RED, msg))
            elif count == 0:
                sys.exit(
                    col.build(col.RED,
                              "Range '%s' has no commits" % options.branch))
            if msg:
                print(col.build(col.YELLOW, msg))
            count += 1  # Build upstream commit also

    if not count:
        msg = ("No commits found to process in branch '%s': "
               "set branch's upstream or use -c flag" % options.branch)
        sys.exit(col.build(col.RED, msg))
    if options.work_in_output:
        if len(selected) != 1:
            sys.exit(
                col.build(col.RED, '-w can only be used with a single board'))
        if count != 1:
            sys.exit(
                col.build(col.RED, '-w can only be used with a single commit'))

    # Read the metadata from the commits. First look at the upstream commit,
    # then the ones in the branch. We would like to do something like
    # upstream/master~..branch but that isn't possible if upstream/master is
    # a merge commit (it will list all the commits that form part of the
    # merge)
    # Conflicting tags are not a problem for buildman, since it does not use
    # them. For example, Series-version is not useful for buildman. On the
    # other hand conflicting tags will cause an error. So allow later tags
    # to overwrite earlier ones by setting allow_overwrite=True
    if options.branch:
        if count == -1:
            if has_range:
                range_expr = options.branch
            else:
                range_expr = gitutil.get_range_in_branch(
                    options.git_dir, options.branch)
            upstream_commit = gitutil.get_upstream(options.git_dir,
                                                   options.branch)
            series = patchstream.get_metadata_for_list(upstream_commit,
                                                       options.git_dir,
                                                       1,
                                                       series=None,
                                                       allow_overwrite=True)

            series = patchstream.get_metadata_for_list(range_expr,
                                                       options.git_dir,
                                                       None,
                                                       series,
                                                       allow_overwrite=True)
        else:
            # Honour the count
            series = patchstream.get_metadata_for_list(options.branch,
                                                       options.git_dir,
                                                       count,
                                                       series=None,
                                                       allow_overwrite=True)
    else:
        series = None
        if not options.dry_run:
            options.verbose = True
            if not options.summary:
                options.show_errors = True

    # By default we have one thread per CPU. But if there are not enough jobs
    # we can have fewer threads and use a high '-j' value for make.
    if options.threads is None:
        options.threads = min(multiprocessing.cpu_count(), len(selected))
    if not options.jobs:
        options.jobs = max(1,
                           (multiprocessing.cpu_count() + len(selected) - 1) //
                           len(selected))

    if not options.step:
        options.step = len(series.commits) - 1

    gnu_make = command.output(os.path.join(options.git,
                                           'scripts/show-gnu-make'),
                              raise_on_error=False).rstrip()
    if not gnu_make:
        sys.exit('GNU Make not found')

    # Create a new builder with the selected options.
    output_dir = options.output_dir
    if options.branch:
        dirname = options.branch.replace('/', '_')
        # As a special case allow the board directory to be placed in the
        # output directory itself rather than any subdirectory.
        if not options.no_subdirs:
            output_dir = os.path.join(options.output_dir, dirname)
        if clean_dir and os.path.exists(output_dir):
            shutil.rmtree(output_dir)
    adjust_cfg = cfgutil.convert_list_to_dict(options.adjust_cfg)

    builder = Builder(toolchains,
                      output_dir,
                      options.git_dir,
                      options.threads,
                      options.jobs,
                      gnu_make=gnu_make,
                      checkout=True,
                      show_unknown=options.show_unknown,
                      step=options.step,
                      no_subdirs=options.no_subdirs,
                      full_path=options.full_path,
                      verbose_build=options.verbose_build,
                      mrproper=options.mrproper,
                      per_board_out_dir=options.per_board_out_dir,
                      config_only=options.config_only,
                      squash_config_y=not options.preserve_config_y,
                      warnings_as_errors=options.warnings_as_errors,
                      work_in_output=options.work_in_output,
                      test_thread_exceptions=test_thread_exceptions,
                      adjust_cfg=adjust_cfg)
    builder.force_config_on_failure = not options.quick
    if make_func:
        builder.do_make = make_func

    # For a dry run, just show our actions as a sanity check
    if options.dry_run:
        ShowActions(series, why_selected, selected, builder, options,
                    board_warnings)
    else:
        builder.force_build = options.force_build
        builder.force_build_failures = options.force_build_failures
        builder.force_reconfig = options.force_reconfig
        builder.in_tree = options.in_tree

        # Work out which boards to build
        board_selected = boards.GetSelectedDict()

        if series:
            commits = series.commits
            # Number the commits for test purposes
            for commit in range(len(commits)):
                commits[commit].sequence = commit
        else:
            commits = None

        tprint(
            GetActionSummary(options.summary, commits, board_selected,
                             options))

        # We can't show function sizes without board details at present
        if options.show_bloat:
            options.show_detail = True
        builder.SetDisplayOptions(
            options.show_errors, options.show_sizes, options.show_detail,
            options.show_bloat, options.list_error_boards, options.show_config,
            options.show_environment, options.filter_dtb_warnings,
            options.filter_migration_warnings)
        if options.summary:
            builder.ShowSummary(commits, board_selected)
        else:
            fail, warned, excs = builder.BuildBoards(commits, board_selected,
                                                     options.keep_outputs,
                                                     options.verbose)
            if excs:
                return 102
            elif fail:
                return 100
            elif warned and not options.ignore_warnings:
                return 101
    return 0
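
In normal use DoBuildman() is driven by buildman's own option parser; the sketch below follows that pattern, but the parser call and the injected test objects (fake_make, test_boards) are assumptions for illustration, not part of this example:

options, args = cmdline.ParseArgs()        # assumed buildman argument parser
ret_code = DoBuildman(options, args, toolchains=None, make_func=fake_make,
                      boards=test_boards, clean_dir=True)
sys.exit(ret_code)
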
Example #8
def run_test_coverage(prog,
                      filter_fname,
                      exclude_list,
                      build_dir,
                      required=None,
                      extra_args=None):
    """Run tests and check that we get 100% coverage

    Args:
        prog: Program to run (will be passed a '-t' argument to run tests)
        filter_fname: Normally all *.py files in the program's directory will
            be included. If this is not None, then it is used to filter the
            list so that only filenames that don't contain filter_fname are
            included.
        exclude_list: List of file patterns to exclude from the coverage
            calculation
        build_dir: Build directory, used to locate libfdt.py
        required: Set of module names which must be in the coverage report
        extra_args (str): Extra arguments to pass to the tool before the -t/test
            arg

    Raises:
        ValueError if the code coverage is not 100%
    """
    # This uses the build output from sandbox_spl to get _libfdt.so
    path = os.path.dirname(prog)
    if filter_fname:
        glob_list = glob.glob(os.path.join(path, '*.py'))
        glob_list = [fname for fname in glob_list if filter_fname in fname]
    else:
        glob_list = []
    glob_list += exclude_list
    glob_list += ['*libfdt.py', '*site-packages*', '*dist-packages*']
    glob_list += ['*concurrencytest*']
    test_cmd = 'test' if 'binman' in prog or 'patman' in prog else '-t'
    prefix = ''
    if build_dir:
        prefix = 'PYTHONPATH=$PYTHONPATH:%s/sandbox_spl/tools ' % build_dir
    cmd = ('%spython3-coverage run '
           '--omit "%s" %s %s %s -P1' %
           (prefix, ','.join(glob_list), prog, extra_args or '', test_cmd))
    os.system(cmd)
    stdout = command.output('python3-coverage', 'report')
    lines = stdout.splitlines()
    ok = True
    if required:
        # Convert '/path/to/name.py' to just the module name 'name'
        test_set = set([
            os.path.splitext(os.path.basename(line.split()[0]))[0]
            for line in lines if '/etype/' in line
        ])
        missing_list = set(required)  # copy so the caller's set is not changed
        missing_list.discard('__init__')
        missing_list.difference_update(test_set)
        if missing_list:
            print('Missing tests for %s' % (', '.join(missing_list)))
            print(stdout)
            ok = False

    coverage = lines[-1].split(' ')[-1]
    print(coverage)
    if coverage != '100%':
        print(stdout)
        print("Type 'python3-coverage html' to get a report in "
              'htmlcov/index.html')
        print('Coverage error: %s, but should be 100%%' % coverage)
        ok = False
    if not ok:
        raise ValueError('Test coverage failure')
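
A hedged usage sketch (the program path, omit patterns and required set are illustrative only; python3-coverage must be installed and the program must accept the expected test argument):

run_test_coverage('tools/binman/binman', None,
                  ['tools/patman/*', 'tools/dtoc/*'],
                  '/tmp/b/sandbox_spl',
                  required={'section', 'blob'})   # example etype module names
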