Example #1
def CheckPatch(fname, verbose=False, show_types=False):
    """Run checkpatch.pl on a file and parse the results.

    Args:
        fname: Filename to check
        verbose: True to print out every line of the checkpatch output as it is
            parsed
        show_types: Tell checkpatch to show the type (number) of each message

    Returns:
        namedtuple containing:
            ok: False=failure, True=ok
            problems: List of problems, each a dict:
                'type': error or warning
                'msg': text message
                'file': filename
                'line': line number
            errors: Number of errors
            warnings: Number of warnings
            checks: Number of checks
            lines: Number of lines
            stdout: Full output of checkpatch
    """
    chk = FindCheckPatch()
    args = [chk, '--no-tree']
    if show_types:
        args.append('--show-types')
    output = command.Output(*args, fname, raise_on_error=False)

    return CheckPatchParse(output, verbose)
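A minimal usage sketch, not part of the original source: it assumes the module above is importable as 'checkpatch' and that '0001-example.patch' is a patch file you supply.

# Hedged usage sketch: the module name and patch filename are assumptions.
import checkpatch

result = checkpatch.CheckPatch('0001-example.patch', show_types=True)
if not result.ok:
    for item in result.problems:
        # Each problem dict has 'type' and 'msg', and usually 'file'/'line'
        print('%s: %s:%s: %s' % (item.get('type'), item.get('file'),
                                 item.get('line'), item.get('msg')))
print('%d errors, %d warnings, %d checks in %d lines' %
      (result.errors, result.warnings, result.checks, result.lines))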
Example #2
    def Unpack(self, fname, dest):
        """Unpack a tar file

        Args:
            fname: Filename to unpack
            dest: Destination directory
        Returns:
            Directory name of the first entry in the archive, without the
            trailing /
        """
        stdout = command.Output('tar', 'xvfJ', fname, '-C', dest)
        dirs = stdout.splitlines()[1].split('/')[:2]
        return '/'.join(dirs)
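A small, self-contained illustration (not from the original source) of the parsing above: the sample 'tar xv' listing is made up, and shows that the method keeps the first two path components of the second listed entry.

# The archive names below are invented for illustration only.
sample = 'gcc-9.2.0-nolibc/\ngcc-9.2.0-nolibc/aarch64-linux/\n'
dirs = sample.splitlines()[1].split('/')[:2]
assert '/'.join(dirs) == 'gcc-9.2.0-nolibc/aarch64-linux'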
Example #3
    def testMakeElf(self):
        """Test for the MakeElf function"""
        outdir = tempfile.mkdtemp(prefix='elf.')
        expected_text = b'1234'
        expected_data = b'wxyz'
        elf_fname = os.path.join(outdir, 'elf')
        bin_fname = os.path.join(outdir, 'bin')

        # Make an ELF file and then convert it to a flat binary file. This
        # should produce the original data.
        elf.MakeElf(elf_fname, expected_text, expected_data)
        stdout = command.Output('objcopy', '-O', 'binary', elf_fname,
                                bin_fname)
        with open(bin_fname, 'rb') as fd:
            data = fd.read()
        self.assertEqual(expected_text + expected_data, data)
        shutil.rmtree(outdir)
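A short sketch (not from the original tests) of an alternative cleanup pattern: registering the temporary directory with addCleanup removes it even if an assertion fails, whereas the shutil.rmtree() call above is skipped on failure. The class and test names here are placeholders.

import os
import shutil
import tempfile
import unittest

class TempDirExample(unittest.TestCase):
    def testCleanup(self):
        outdir = tempfile.mkdtemp(prefix='elf.')
        # Registered cleanups run even if the assertions below fail
        self.addCleanup(shutil.rmtree, outdir)
        bin_fname = os.path.join(outdir, 'bin')
        with open(bin_fname, 'wb') as fd:
            fd.write(b'1234wxyz')
        with open(bin_fname, 'rb') as fd:
            self.assertEqual(b'1234wxyz', fd.read())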
Example #4
def GetMaintainer(fname, verbose=False):
    """Run get_maintainer.pl on a file if we find it.

    We look for get_maintainer.pl in the 'scripts' directory at the top of
    git.  If we find it we'll run it.  If we don't find get_maintainer.pl
    then we fail silently.

    Args:
        fname: Path to the patch file to run get_maintainer.pl on.
        verbose: True to print a warning if get_maintainer.pl cannot be found

    Returns:
        A list of email addresses to CC to.
    """
    get_maintainer = FindGetMaintainer()
    if not get_maintainer:
        if verbose:
            print("WARNING: Couldn't find get_maintainer.pl")
        return []

    stdout = command.Output(get_maintainer, '--norolestats', fname)
    lines = stdout.splitlines()
    return [x.replace('"', '') for x in lines]
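A hedged usage sketch, not from the original source; the module name and the patch filename are assumptions.

import get_maintainer   # hypothetical module holding GetMaintainer()

cc_list = get_maintainer.GetMaintainer('0001-example.patch', verbose=True)
for addr in cc_list:
    print('Cc:', addr)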
Example #5
def CheckPatch(fname, verbose=False):
    """Run checkpatch.pl on a file.

    Returns:
        namedtuple containing:
            ok: False=failure, True=ok
            problems: List of problems, each a dict:
                'type': error or warning
                'msg': text message
                'file': filename
                'line': line number
            errors: Number of errors
            warnings: Number of warnings
            checks: Number of checks
            lines: Number of lines
            stdout: Full output of checkpatch
    """
    fields = ['ok', 'problems', 'errors', 'warnings', 'checks', 'lines',
              'stdout']
    result = collections.namedtuple('CheckPatchResult', fields)
    result.ok = False
    result.errors, result.warnings, result.checks = 0, 0, 0
    result.lines = 0
    result.problems = []
    chk = FindCheckPatch()
    item = {}
    result.stdout = command.Output(chk, '--no-tree', fname,
                                   raise_on_error=False)
    #pipe = subprocess.Popen(cmd, stdout=subprocess.PIPE)
    #stdout, stderr = pipe.communicate()

    # total: 0 errors, 0 warnings, 159 lines checked
    # or:
    # total: 0 errors, 2 warnings, 7 checks, 473 lines checked
    emacs_prefix = r'(?:[0-9]{4}.*\.patch:[0-9]+: )?'
    emacs_stats = r'(?:[0-9]{4}.*\.patch )?'
    re_stats = re.compile(emacs_stats +
                          r'total: (\d+) errors, (\d+) warnings, (\d+)')
    re_stats_full = re.compile(emacs_stats +
                               r'total: (\d+) errors, (\d+) warnings, (\d+)'
                               r' checks, (\d+)')
    re_ok = re.compile('.*has no obvious style problems')
    re_bad = re.compile('.*has style problems, please review')
    re_error = re.compile('ERROR: (.*)')
    re_warning = re.compile(emacs_prefix + 'WARNING:(?:[A-Z_]+:)? (.*)')
    re_check = re.compile('CHECK: (.*)')
    re_file = re.compile(r'#\d+: FILE: ([^:]*):(\d+):')
    re_note = re.compile('NOTE: (.*)')
    indent = ' ' * 6
    for line in result.stdout.splitlines():
        if verbose:
            print(line)

        # A blank line indicates the end of a message
        if not line:
            if item:
                result.problems.append(item)
                item = {}
            continue
        if re_note.match(line):
            continue
        # Skip lines which quote code
        if line.startswith(indent):
            continue
        # Skip code quotes and #<n>
        if line.startswith('+') or line.startswith('#'):
            continue
        match = re_stats_full.match(line)
        if not match:
            match = re_stats.match(line)
        if match:
            result.errors = int(match.group(1))
            result.warnings = int(match.group(2))
            if len(match.groups()) == 4:
                result.checks = int(match.group(3))
                result.lines = int(match.group(4))
            else:
                result.lines = int(match.group(3))
            continue
        elif re_ok.match(line):
            result.ok = True
            continue
        elif re_bad.match(line):
            result.ok = False
            continue
        err_match = re_error.match(line)
        warn_match = re_warning.match(line)
        file_match = re_file.match(line)
        check_match = re_check.match(line)
        subject_match = line.startswith('Subject:')
        if err_match:
            item['msg'] = err_match.group(1)
            item['type'] = 'error'
        elif warn_match:
            item['msg'] = warn_match.group(1)
            item['type'] = 'warning'
        elif check_match:
            item['msg'] = check_match.group(1)
            item['type'] = 'check'
        elif file_match:
            item['file'] = file_match.group(1)
            item['line'] = int(file_match.group(2))
        elif subject_match:
            item['file'] = '<patch subject>'
            item['line'] = None
        else:
            print('bad line "%s", %d' % (line, len(line)))

    return result
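A self-contained illustration (not from the original source) of the 'total:' summary parsing above; the checkpatch output line is made up but follows the format shown in the comments.

import re

re_stats_full = re.compile(r'total: (\d+) errors, (\d+) warnings, (\d+)'
                           r' checks, (\d+)')
line = 'total: 0 errors, 2 warnings, 7 checks, 473 lines checked'
match = re_stats_full.match(line)
assert match is not None
assert [int(g) for g in match.groups()] == [0, 2, 7, 473]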
Example #6
def RunTestCoverage(prog,
                    filter_fname,
                    exclude_list,
                    build_dir,
                    required=None,
                    extra_args=None):
    """Run tests and check that we get 100% coverage

    Args:
        prog: Program to run (will be passed a '-t' argument to run tests)
        filter_fname: Normally all *.py files in the program's directory will
            be included. If this is not None, then it is used to filter the
            list so that only filenames that don't contain filter_fname are
            included.
        exclude_list: List of file patterns to exclude from the coverage
            calculation
        build_dir: Build directory, used to locate libfdt.py
        required: List of modules which must be in the coverage report
        extra_args (str): Extra arguments to pass to the tool before the -t/test
            arg

    Raises:
        ValueError if the code coverage is not 100%
    """
    # This uses the build output from sandbox_spl to get _libfdt.so
    path = os.path.dirname(prog)
    if filter_fname:
        glob_list = glob.glob(os.path.join(path, '*.py'))
        glob_list = [fname for fname in glob_list if filter_fname in fname]
    else:
        glob_list = []
    glob_list += exclude_list
    glob_list += ['*libfdt.py', '*site-packages*', '*dist-packages*']
    glob_list += ['*concurrencytest*']
    test_cmd = 'test' if 'binman' in prog or 'patman' in prog else '-t'
    prefix = ''
    if build_dir:
        prefix = 'PYTHONPATH=$PYTHONPATH:%s/sandbox_spl/tools ' % build_dir
    cmd = ('%spython3-coverage run '
           '--omit "%s" %s %s %s -P1' %
           (prefix, ','.join(glob_list), prog, extra_args or '', test_cmd))
    os.system(cmd)
    stdout = command.Output('python3-coverage', 'report')
    lines = stdout.splitlines()
    ok = True
    if required:
        # Convert '/path/to/name.py' to just the module name 'name'
        test_set = set([
            os.path.splitext(os.path.basename(line.split()[0]))[0]
            for line in lines if '/etype/' in line
        ])
        missing_list = required
        missing_list.discard('__init__')
        missing_list.difference_update(test_set)
        if missing_list:
            print('Missing tests for %s' % (', '.join(missing_list)))
            print(stdout)
            ok = False

    coverage = lines[-1].split(' ')[-1]
    print(coverage)
    if coverage != '100%':
        print(stdout)
        print("Type 'python3-coverage html' to get a report in "
              'htmlcov/index.html')
        print('Coverage error: %s, but should be 100%%' % coverage)
        ok = False
    if not ok:
        raise ValueError('Test coverage failure')
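A small illustration (not from the original source) of how the coverage figure is taken from the last line of 'python3-coverage report'; the report text here is invented.

sample_report = ('Name        Stmts   Miss  Cover\n'
                 'TOTAL        1234      0   100%')
lines = sample_report.splitlines()
coverage = lines[-1].split(' ')[-1]
assert coverage == '100%'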
Example #7
def EmailPatches(series,
                 cover_fname,
                 args,
                 dry_run,
                 raise_on_error,
                 cc_fname,
                 self_only=False,
                 alias=None,
                 in_reply_to=None,
                 thread=False,
                 smtp_server=None):
    """Email a patch series.

    Args:
        series: Series object containing destination info
        cover_fname: filename of cover letter
        args: list of filenames of patch files
        dry_run: Just return the command that would be run
        raise_on_error: True to raise an error when an alias fails to match,
                False to just print a message.
        cc_fname: Filename of Cc file for per-commit Cc
        self_only: True to just email to yourself as a test
        alias: Alias dictionary, mapping an alias name to a list of email
            addresses or further alias names
        in_reply_to: If set we'll pass this to git as --in-reply-to.
            Should be a message ID that this is in reply to.
        thread: True to add --thread to git send-email (make
            all patches reply to cover-letter or first patch in series)
        smtp_server: SMTP server to use to send patches

    Returns:
        Git command that was/would be run

    # For the duration of this doctest pretend that we ran patman with ./patman
    >>> _old_argv0 = sys.argv[0]
    >>> sys.argv[0] = './patman'

    >>> alias = {}
    >>> alias['fred'] = ['*****@*****.**']
    >>> alias['john'] = ['*****@*****.**']
    >>> alias['mary'] = ['*****@*****.**']
    >>> alias['boys'] = ['fred', ' john']
    >>> alias['all'] = ['fred ', 'john', '   mary   ']
    >>> alias[os.getenv('USER')] = ['*****@*****.**']
    >>> series = {}
    >>> series['to'] = ['fred']
    >>> series['cc'] = ['mary']
    >>> EmailPatches(series, 'cover', ['p1', 'p2'], True, True, 'cc-fname', \
            False, alias)
    'git send-email --annotate --to "*****@*****.**" --cc \
"*****@*****.**" --cc-cmd "./patman --cc-cmd cc-fname" cover p1 p2'
    >>> EmailPatches(series, None, ['p1'], True, True, 'cc-fname', False, \
            alias)
    'git send-email --annotate --to "*****@*****.**" --cc \
"*****@*****.**" --cc-cmd "./patman --cc-cmd cc-fname" p1'
    >>> series['cc'] = ['all']
    >>> EmailPatches(series, 'cover', ['p1', 'p2'], True, True, 'cc-fname', \
            True, alias)
    'git send-email --annotate --to "*****@*****.**" --cc-cmd "./patman \
--cc-cmd cc-fname" cover p1 p2'
    >>> EmailPatches(series, 'cover', ['p1', 'p2'], True, True, 'cc-fname', \
            False, alias)
    'git send-email --annotate --to "*****@*****.**" --cc \
"*****@*****.**" --cc "*****@*****.**" --cc \
"*****@*****.**" --cc-cmd "./patman --cc-cmd cc-fname" cover p1 p2'

    # Restore argv[0] since we clobbered it.
    >>> sys.argv[0] = _old_argv0
    """
    to = BuildEmailList(series.get('to'), '--to', alias, raise_on_error)
    if not to:
        git_config_to = command.Output('git',
                                       'config',
                                       'sendemail.to',
                                       raise_on_error=False)
        if not git_config_to:
            print("No recipient.\n"
                  "Please add something like this to a commit\n"
                  "Series-to: Fred Bloggs <*****@*****.**>\n"
                  "Or do something like this\n"
                  "git config sendemail.to [email protected]")
            return
    cc = BuildEmailList(list(set(series.get('cc')) - set(series.get('to'))),
                        '--cc', alias, raise_on_error)
    if self_only:
        to = BuildEmailList([os.getenv('USER')], '--to', alias, raise_on_error)
        cc = []
    cmd = ['git', 'send-email', '--annotate']
    if smtp_server:
        cmd.append('--smtp-server=%s' % smtp_server)
    if in_reply_to:
        cmd.append('--in-reply-to="%s"' % tools.FromUnicode(in_reply_to))
    if thread:
        cmd.append('--thread')

    cmd += to
    cmd += cc
    cmd += ['--cc-cmd', '"%s --cc-cmd %s"' % (sys.argv[0], cc_fname)]
    if cover_fname:
        cmd.append(cover_fname)
    cmd += args
    cmdstr = ' '.join(cmd)
    if not dry_run:
        os.system(cmdstr)
    return cmdstr
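A hedged dry-run sketch, not from the original source: the alias and series shapes mirror the doctest above, the name and address are placeholders, and nothing is sent because dry_run is True.

alias = {'fred': ['fred@example.com']}
series = {'to': ['fred'], 'cc': []}
cmd = EmailPatches(series, 'cover', ['p1', 'p2'], dry_run=True,
                   raise_on_error=True, cc_fname='cc-fname', alias=alias)
print(cmd)   # the 'git send-email ...' command that would have been run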
Example #8
def MakeElf(elf_fname, text, data):
    """Make an elf file with the given data in a single section

    The output file has several sections, including '.text' and '.data',
    containing the data provided in the arguments.

    Args:
        elf_fname: Output filename
        text: Text (code) to put in the file's .text section
        data: Data to put in the file's .data section
    """
    outdir = tempfile.mkdtemp(prefix='binman.elf.')
    s_file = os.path.join(outdir, 'elf.S')

    # Split the text into two parts so that we can make the entry point two
    # bytes after the start of the text section
    text_bytes1 = ['\t.byte\t%#x' % tools.ToByte(byte) for byte in text[:2]]
    text_bytes2 = ['\t.byte\t%#x' % tools.ToByte(byte) for byte in text[2:]]
    data_bytes = ['\t.byte\t%#x' % tools.ToByte(byte) for byte in data]
    with open(s_file, 'w') as fd:
        print(
            '''/* Auto-generated C program to produce an ELF file for testing */

.section .text
.code32
.globl _start
.type _start, @function
%s
_start:
%s
.ident "comment"

.comm fred,8,4

.section .empty
.globl _empty
_empty:
.byte 1

.globl ernie
.data
.type ernie, @object
.size ernie, 4
ernie:
%s
''' % ('\n'.join(text_bytes1), '\n'.join(text_bytes2), '\n'.join(data_bytes)),
            file=fd)
    lds_file = os.path.join(outdir, 'elf.lds')

    # Use a linker script to set the alignment and text address.
    with open(lds_file, 'w') as fd:
        print(
            '''/* Auto-generated linker script to produce an ELF file for testing */

PHDRS
{
    text PT_LOAD ;
    data PT_LOAD ;
    empty PT_LOAD FLAGS ( 6 ) ;
    note PT_NOTE ;
}

SECTIONS
{
    . = 0xfef20000;
    ENTRY(_start)
    .text . : SUBALIGN(0)
    {
        *(.text)
    } :text
    .data : {
        *(.data)
    } :data
    _bss_start = .;
    .empty : {
        *(.empty)
    } :empty
    /DISCARD/ : {
        *(.note.gnu.property)
    }
    .note : {
        *(.comment)
    } :note
    .bss _bss_start  (OVERLAY) : {
        *(.bss)
    }
}
''',
            file=fd)
    # -static: Avoid requiring any shared libraries
    # -nostdlib: Don't link with C library
    # -Wl,--build-id=none: Don't generate a build ID, so that we just get the
    #   text section at the start
    # -m32: Build for 32-bit x86
    # -T...: Specifies the link script, which sets the start address
    cc, args = tools.GetTargetCompileTool('cc')
    args += [
        '-static', '-nostdlib', '-Wl,--build-id=none', '-m32', '-T', lds_file,
        '-o', elf_fname, s_file
    ]
    stdout = command.Output(cc, *args)
    shutil.rmtree(outdir)
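A small illustration (not from the original source) of the '.byte' line generation above; it skips tools.ToByte, since iterating a bytes object in Python 3 already yields integers, and the byte values are arbitrary.

text = b'\x01\x02'
byte_lines = ['\t.byte\t%#x' % byte for byte in text]
assert byte_lines == ['\t.byte\t0x1', '\t.byte\t0x2']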
Example #9
def DoBuildman(options,
               args,
               toolchains=None,
               make_func=None,
               boards=None,
               clean_dir=False):
    """The main control code for buildman

    Args:
        options: Command line options object
        args: Command line arguments (list of strings)
        toolchains: Toolchains to use - this should be a Toolchains()
                object. If None, then it will be created and scanned
        make_func: Make function to use for the builder. This is called
                to execute 'make'. If this is None, the normal function
                will be used, which calls the 'make' tool with suitable
                arguments. This setting is useful for tests.
        boards: Boards() object to use, containing a list of available
                boards. If this is None it will be created and scanned.
        clean_dir: True to remove any existing output directory before
                building
    """
    global builder

    if options.full_help:
        pager = os.getenv('PAGER')
        if not pager:
            pager = 'more'
        fname = os.path.join(os.path.dirname(os.path.realpath(sys.argv[0])),
                             'README')
        command.Run(pager, fname)
        return 0

    gitutil.Setup()
    col = terminal.Color()

    options.git_dir = os.path.join(options.git, '.git')

    no_toolchains = toolchains is None
    if no_toolchains:
        toolchains = toolchain.Toolchains(options.override_toolchain)

    if options.fetch_arch:
        if options.fetch_arch == 'list':
            sorted_list = toolchains.ListArchs()
            print(
                col.Color(
                    col.BLUE,
                    'Available architectures: %s\n' % ' '.join(sorted_list)))
            return 0
        else:
            fetch_arch = options.fetch_arch
            if fetch_arch == 'all':
                fetch_arch = ','.join(toolchains.ListArchs())
                print(
                    col.Color(col.CYAN,
                              '\nDownloading toolchains: %s' % fetch_arch))
            for arch in fetch_arch.split(','):
                print()
                ret = toolchains.FetchAndInstall(arch)
                if ret:
                    return ret
            return 0

    if no_toolchains:
        toolchains.GetSettings()
        toolchains.Scan(options.list_tool_chains and options.verbose)
    if options.list_tool_chains:
        toolchains.List()
        print()
        return 0

    if options.incremental:
        print(
            col.Color(col.RED,
                      'Warning: -I has been removed. See documentation'))
    if not options.output_dir:
        if options.work_in_output:
            sys.exit(col.Color(col.RED, '-w requires that you specify -o'))
        options.output_dir = '..'

    # Work out what subset of the boards we are building
    if not boards:
        if not os.path.exists(options.output_dir):
            os.makedirs(options.output_dir)
        board_file = os.path.join(options.output_dir, 'boards.cfg')
        genboardscfg = os.path.join(options.git, 'tools/genboardscfg.py')
        status = subprocess.call([genboardscfg, '-q', '-o', board_file])
        if status != 0:
            sys.exit("Failed to generate boards.cfg")

        boards = board.Boards()
        boards.ReadBoards(board_file)

    exclude = []
    if options.exclude:
        for arg in options.exclude:
            exclude += arg.split(',')

    if options.boards:
        requested_boards = []
        for b in options.boards:
            requested_boards += b.split(',')
    else:
        requested_boards = None
    why_selected, board_warnings = boards.SelectBoards(args, exclude,
                                                       requested_boards)
    selected = boards.GetSelected()
    if not len(selected):
        sys.exit(col.Color(col.RED, 'No matching boards found'))

    if options.print_prefix:
        err = ShowToolchainPrefix(boards, toolchains)
        if err:
            sys.exit(col.Color(col.RED, err))
        return 0

    # Work out how many commits to build. We want to build everything on the
    # branch. We also build the upstream commit as a control so we can see
    # problems introduced by the first commit on the branch.
    count = options.count
    has_range = options.branch and '..' in options.branch
    if count == -1:
        if not options.branch:
            count = 1
        else:
            if has_range:
                count, msg = gitutil.CountCommitsInRange(
                    options.git_dir, options.branch)
            else:
                count, msg = gitutil.CountCommitsInBranch(
                    options.git_dir, options.branch)
            if count is None:
                sys.exit(col.Color(col.RED, msg))
            elif count == 0:
                sys.exit(
                    col.Color(col.RED,
                              "Range '%s' has no commits" % options.branch))
            if msg:
                print(col.Color(col.YELLOW, msg))
            count += 1  # Build upstream commit also

    if not count:
        str = ("No commits found to process in branch '%s': "
               "set branch's upstream or use -c flag" % options.branch)
        sys.exit(col.Color(col.RED, str))
    if options.work_in_output:
        if len(selected) != 1:
            sys.exit(
                col.Color(col.RED, '-w can only be used with a single board'))
        if count != 1:
            sys.exit(
                col.Color(col.RED, '-w can only be used with a single commit'))

    # Read the metadata from the commits. First look at the upstream commit,
    # then the ones in the branch. We would like to do something like
    # upstream/master~..branch but that isn't possible if upstream/master is
    # a merge commit (it will list all the commits that form part of the
    # merge)
    # Conflicting tags are not a problem for buildman, since it does not use
    # them. For example, Series-version is not useful for buildman. On the
    # other hand conflicting tags will cause an error. So allow later tags
    # to overwrite earlier ones by setting allow_overwrite=True
    if options.branch:
        if count == -1:
            if has_range:
                range_expr = options.branch
            else:
                range_expr = gitutil.GetRangeInBranch(options.git_dir,
                                                      options.branch)
            upstream_commit = gitutil.GetUpstream(options.git_dir,
                                                  options.branch)
            series = patchstream.GetMetaDataForList(upstream_commit,
                                                    options.git_dir,
                                                    1,
                                                    series=None,
                                                    allow_overwrite=True)

            series = patchstream.GetMetaDataForList(range_expr,
                                                    options.git_dir,
                                                    None,
                                                    series,
                                                    allow_overwrite=True)
        else:
            # Honour the count
            series = patchstream.GetMetaDataForList(options.branch,
                                                    options.git_dir,
                                                    count,
                                                    series=None,
                                                    allow_overwrite=True)
    else:
        series = None
        if not options.dry_run:
            options.verbose = True
            if not options.summary:
                options.show_errors = True

    # By default we have one thread per CPU. But if there are not enough jobs
    # we can have fewer threads and use a high '-j' value for make.
    if not options.threads:
        options.threads = min(multiprocessing.cpu_count(), len(selected))
    if not options.jobs:
        options.jobs = max(1,
                           (multiprocessing.cpu_count() + len(selected) - 1) //
                           len(selected))

    if not options.step:
        options.step = len(series.commits) - 1

    gnu_make = command.Output(os.path.join(options.git,
                                           'scripts/show-gnu-make'),
                              raise_on_error=False).rstrip()
    if not gnu_make:
        sys.exit('GNU Make not found')

    # Create a new builder with the selected options.
    output_dir = options.output_dir
    if options.branch:
        dirname = options.branch.replace('/', '_')
        # As a special case allow the board directory to be placed in the
        # output directory itself rather than any subdirectory.
        if not options.no_subdirs:
            output_dir = os.path.join(options.output_dir, dirname)
        if clean_dir and os.path.exists(output_dir):
            shutil.rmtree(output_dir)
    builder = Builder(toolchains,
                      output_dir,
                      options.git_dir,
                      options.threads,
                      options.jobs,
                      gnu_make=gnu_make,
                      checkout=True,
                      show_unknown=options.show_unknown,
                      step=options.step,
                      no_subdirs=options.no_subdirs,
                      full_path=options.full_path,
                      verbose_build=options.verbose_build,
                      mrproper=options.mrproper,
                      per_board_out_dir=options.per_board_out_dir,
                      config_only=options.config_only,
                      squash_config_y=not options.preserve_config_y,
                      warnings_as_errors=options.warnings_as_errors,
                      work_in_output=options.work_in_output)
    builder.force_config_on_failure = not options.quick
    if make_func:
        builder.do_make = make_func

    # For a dry run, just show our actions as a sanity check
    if options.dry_run:
        ShowActions(series, why_selected, selected, builder, options,
                    board_warnings)
    else:
        builder.force_build = options.force_build
        builder.force_build_failures = options.force_build_failures
        builder.force_reconfig = options.force_reconfig
        builder.in_tree = options.in_tree

        # Work out which boards to build
        board_selected = boards.GetSelectedDict()

        if series:
            commits = series.commits
            # Number the commits for test purposes
            for commit in range(len(commits)):
                commits[commit].sequence = commit
        else:
            commits = None

        Print(
            GetActionSummary(options.summary, commits, board_selected,
                             options))

        # We can't show function sizes without board details at present
        if options.show_bloat:
            options.show_detail = True
        builder.SetDisplayOptions(
            options.show_errors, options.show_sizes, options.show_detail,
            options.show_bloat, options.list_error_boards, options.show_config,
            options.show_environment, options.filter_dtb_warnings,
            options.filter_migration_warnings)
        if options.summary:
            builder.ShowSummary(commits, board_selected)
        else:
            fail, warned = builder.BuildBoards(commits, board_selected,
                                               options.keep_outputs,
                                               options.verbose)
            if fail:
                return 100
            elif warned and not options.ignore_warnings:
                return 101
    return 0
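A self-contained illustration (not from the original source) of the default thread/job calculation above, using made-up numbers in place of multiprocessing.cpu_count() and the real selected-board count.

cpus, num_boards = 8, 3
threads = min(cpus, num_boards)
jobs = max(1, (cpus + num_boards - 1) // num_boards)
assert (threads, jobs) == (3, 3)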