Example #1
def RunTestCoverage(prog,
                    filter_fname,
                    exclude_list,
                    build_dir,
                    required=None):
    """Run tests and check that we get 100% coverage

    Args:
        prog: Program to run (it will be passed a '-t' argument to run tests)
        filter_fname: Normally all *.py files in the program's directory will
            be included. If this is not None, then it is used to filter the
            list so that only filenames that don't contain filter_fname are
            included.
        exclude_list: List of file patterns to exclude from the coverage
            calculation
        build_dir: Build directory, used to locate libfdt.py
        required: List of modules which must be in the coverage report

    Raises:
        ValueError if the code coverage is not 100%
    """
    # This uses the build output from sandbox_spl to get _libfdt.so
    path = os.path.dirname(prog)
    if filter_fname:
        glob_list = glob.glob(os.path.join(path, '*.py'))
        glob_list = [fname for fname in glob_list if filter_fname in fname]
    else:
        glob_list = []
    glob_list += exclude_list
    glob_list += ['*libfdt.py', '*site-packages*', '*dist-packages*']
    cmd = ('PYTHONPATH=$PYTHONPATH:%s/sandbox_spl/tools %s-coverage run '
           '--omit "%s" %s -P1 -t' %
           (build_dir, PYTHON, ','.join(glob_list), prog))
    os.system(cmd)
    stdout = command.Output('%s-coverage' % PYTHON, 'report')
    lines = stdout.splitlines()
    ok = True
    if required:
        # Convert '/path/to/name.py' to just the module name 'name'
        test_set = set([
            os.path.splitext(os.path.basename(line.split()[0]))[0]
            for line in lines if '/etype/' in line
        ])
        missing_list = set(required)
        missing_list.difference_update(test_set)
        if missing_list:
            print('Missing tests for %s' % (', '.join(missing_list)))
            print(stdout)
            ok = False

    coverage = lines[-1].split(' ')[-1]
    print(coverage)
    if coverage != '100%':
        print(stdout)
        print("Type '%s-coverage html' to get a report in "
              'htmlcov/index.html' % PYTHON)
        print('Coverage error: %s, but should be 100%%' % coverage)
        ok = False
    if not ok:
        raise ValueError('Test coverage failure')
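The check above relies on the last line of `coverage report` ending with the total percentage. A minimal, self-contained sketch of just that parsing step (the report text below is invented for illustration):

sample_report = '''Name                           Stmts   Miss  Cover
----------------------------------------------------
tools/binman/etype/section.py     120      0   100%
----------------------------------------------------
TOTAL                             120      0   100%'''

lines = sample_report.splitlines()
coverage = lines[-1].split(' ')[-1]   # last whitespace-separated field
print(coverage)                       # prints '100%'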
Example #2
def RunTestCoverage():
    """Run the tests and check that we get 100% coverage"""
    # This uses the build output from sandbox_spl to get _libfdt.so
    cmd = ('PYTHONPATH=$PYTHONPATH:%s/sandbox_spl/tools coverage run '
           '--include "tools/binman/*.py" --omit "*test*,*binman.py" '
           'tools/binman/binman.py -t' % options.build_dir)
    os.system(cmd)
    stdout = command.Output('coverage', 'report')
    lines = stdout.splitlines()

    test_set = set([
        os.path.basename(line.split()[0]) for line in lines
        if '/etype/' in line
    ])
    glob_list = glob.glob(os.path.join(our_path, 'etype/*.py'))
    all_set = set([os.path.basename(item) for item in glob_list])
    missing_list = all_set
    missing_list.difference_update(test_set)
    missing_list.remove('_testing.py')
    coverage = lines[-1].split(' ')[-1]
    ok = True
    if missing_list:
        print('Missing tests for %s' % ', '.join(missing_list))
        ok = False
    if coverage != '100%':
        print(stdout)
        print("Type 'coverage html' to get a report in htmlcov/index.html")
        print('Coverage error: %s, but should be 100%%' % coverage)
        ok = False
    if not ok:
        raise ValueError('Test coverage failure')
Example #3
    def GetProp(self, node, prop, default=None, typespec=None):
        """Get a property from a device tree.

        This looks up the given node and property, and returns the value as a
        string.

        If the node or property does not exist, this will return the default
        value.

        Args:
            node: Full path to node to look up.
            prop: Property name to look up.
            default: Default value to return if nothing is present in the fdt,
                or None to raise in this case. This will be converted to a
                string.
            typespec: Type character to use (None for default, 's' for string)

        Returns:
            string containing the property value.

        Raises:
            CmdError: if the property does not exist and no default is provided.
        """
        args = [self.fname, node, prop, '-t', 'bx']
        if default is not None:
            args += ['-d', str(default)]
        if typespec is not None:
            args += ['-t%s' % typespec]
        out = command.Output('fdtget', *args)
        return out.strip()
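For comparison, the same fdtget call can be made directly with the standard library; a minimal sketch, assuming the device-tree-compiler's fdtget tool is on PATH and that 'u-boot.dtb' exists (both assumptions, not part of the example above):

import subprocess

def get_prop(dtb, node, prop, default=None):
    # Mirrors GetProp() above, minus the command.Output wrapper
    args = ['fdtget', dtb, node, prop]
    if default is not None:
        args += ['-d', str(default)]
    out = subprocess.run(args, capture_output=True, text=True, check=True)
    return out.stdout.strip()

# e.g. get_prop('u-boot.dtb', '/', 'compatible', default='unknown')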
Example #4
    def Unpack(self, fname, dest):
        """Unpack a tar file

        Args:
            fname: Filename to unpack
            dest: Destination directory
        Returns:
            Directory name of the first entry in the archive, without the
            trailing /
        """
        stdout = command.Output('tar', 'xvfJ', fname, '-C', dest)
        return stdout.splitlines()[0][:-1]
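An equivalent, fully self-contained sketch using the standard-library tarfile module instead of shelling out to tar (it builds a throwaway .tar.xz first, so nothing here depends on the example's inputs):

import os
import tarfile
import tempfile

with tempfile.TemporaryDirectory() as tmp:
    src = os.path.join(tmp, 'toolchain-1.0')
    os.makedirs(src)
    open(os.path.join(src, 'README'), 'w').close()

    fname = os.path.join(tmp, 'toolchain.tar.xz')
    with tarfile.open(fname, 'w:xz') as tar:
        tar.add(src, arcname='toolchain-1.0')

    dest = os.path.join(tmp, 'out')
    os.makedirs(dest)
    with tarfile.open(fname) as tar:
        first = tar.getnames()[0]       # first entry, no trailing '/'
        tar.extractall(dest)
    print(first)                        # 'toolchain-1.0'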
Example #5
def make_dtb():
    """Make a sample .dts file and compile it to a .dtb

    Returns:
        Filename of .dtb file created
    """
    src = make_fname('u-boot.dts')
    dtb = make_fname('u-boot.dtb')
    with open(src, 'w') as fd:
        print(base_fdt, file=fd)
    command.Output('dtc', src, '-O', 'dtb', '-o', dtb)
    return dtb
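A standalone sketch of the dtc step, assuming the device-tree-compiler's dtc tool is on PATH (the tiny source below is invented and unrelated to base_fdt):

import os
import subprocess
import tempfile

src_text = '/dts-v1/;\n/ { model = "sandbox"; };\n'
tmp = tempfile.mkdtemp(prefix='dtb.')
src = os.path.join(tmp, 'u-boot.dts')
dtb = os.path.join(tmp, 'u-boot.dtb')
with open(src, 'w') as fd:
    fd.write(src_text)
subprocess.run(['dtc', src, '-O', 'dtb', '-o', dtb], check=True)
print(os.path.getsize(dtb), 'bytes')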
Example #6
    def DeleteProp(self, prop_name):
        """Delete a property of a node

        The property is deleted using fdtput.

        Args:
            prop_name: Name of the property to delete
        Raises:
            CommandError if the property does not exist
        """
        args = [self._fdt._fname, '-d', self.path, prop_name]
        command.Output('fdtput', *args)
        del self.props[prop_name]
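The same deletion can be sketched directly with subprocess; 'test.dtb', '/chosen' and 'bootargs' are placeholders, and fdtput (device-tree-compiler) is assumed to be on PATH:

import subprocess

# fdtput exits non-zero if the property does not exist; check=True turns
# that into a CalledProcessError, much like the CommandError noted above.
subprocess.run(['fdtput', 'test.dtb', '-d', '/chosen', 'bootargs'], check=True)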
Example #7
def RunTestCoverage():
    """Run the tests and check that we get 100% coverage"""
    # This uses the build output from sandbox_spl to get _libfdt.so
    cmd = ('PYTHONPATH=%s/sandbox_spl/tools coverage run '
           '--include "tools/binman/*.py" --omit "*test*,*binman.py" '
           'tools/binman/binman.py -t' % options.build_dir)
    os.system(cmd)
    stdout = command.Output('coverage', 'report')
    coverage = stdout.splitlines()[-1].split(' ')[-1]
    if coverage != '100%':
        print(stdout)
        print("Type 'coverage html' to get a report in htmlcov/index.html")
        raise ValueError('Coverage error: %s, but should be 100%%' % coverage)
Example #8
    def GetSubNodes(self, node):
        """Returns a list of sub-nodes of a given node

        Args:
            node: Node name to return children from

        Returns:
            List of children in the node (each a string node name)

        Raises:
            CmdError: if the node does not exist.
        """
        out = command.Output('fdtget', self.fname, '-l', node)
        return out.strip().splitlines()
Example #9
    def testMakeElf(self):
        """Test for the MakeElf function"""
        outdir = tempfile.mkdtemp(prefix='elf.')
        expected_text = b'1234'
        expected_data = b'wxyz'
        elf_fname = os.path.join(outdir, 'elf')
        bin_fname = os.path.join(outdir, 'bin')

        # Make an ELF file and then convert it to a flat binary file. This
        # should produce the original data.
        elf.MakeElf(elf_fname, expected_text, expected_data)
        stdout = command.Output('objcopy', '-O', 'binary', elf_fname, bin_fname)
        with open(bin_fname, 'rb') as fd:
            data = fd.read()
        self.assertEqual(expected_text + expected_data, data)
        shutil.rmtree(outdir)
Example #10
def make_fit(mkimage, params):
    """Make a sample .fit file ready for loading

    This creates a .its script with the selected parameters and uses mkimage to
    turn this into a .fit image.

    Args:
        mkimage: Filename of 'mkimage' utility
        params: Dictionary containing parameters to embed in the %() strings
    Returns:
        Filename of .fit file created
    """
    fit = make_fname('test.fit')
    its = make_its(params)
    command.Output(mkimage, '-f', its, fit)
    with open(make_fname('u-boot.dts'), 'w') as fd:
        print(base_fdt, file=fd)
    return fit
Example #11
def GetSymbols(fname, patterns):
    """Get the symbols from an ELF file

    Args:
        fname: Filename of the ELF file to read
        patterns: List of regex patterns to search for, each a string

    Returns:
        None, if the file does not exist, or Dict:
          key: Name of symbol
          value: Hex value of symbol
    """
    stdout = command.Output('objdump', '-t', fname, raise_on_error=False)
    lines = stdout.splitlines()
    if patterns:
        re_syms = re.compile('|'.join(patterns))
    else:
        re_syms = None
    syms = {}
    syms_started = False
    for line in lines:
        if not line or not syms_started:
            if 'SYMBOL TABLE' in line:
                syms_started = True
            line = None  # Otherwise code coverage complains about 'continue'
            continue
        if re_syms and not re_syms.search(line):
            continue

        space_pos = line.find(' ')
        value, rest = line[:space_pos], line[space_pos + 1:]
        flags = rest[:7]
        parts = rest[7:].split()
        section, size = parts[:2]
        if len(parts) > 2:
            name = parts[2]
            syms[name] = Symbol(section, int(value, 16), int(size, 16),
                                flags[1] == 'w')

    # Sort dict by address
    return OrderedDict(sorted(syms.items(), key=lambda x: x[1].address))
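The fixed-width split above can be checked in isolation; a self-contained sketch against one invented `objdump -t` symbol line:

line = '0000000000001040 g     F .text\t0000000000000025              _start'

space_pos = line.find(' ')
value, rest = line[:space_pos], line[space_pos + 1:]
flags = rest[:7]                      # e.g. 'g     F'
parts = rest[7:].split()              # section, size, then optional name
section, size = parts[:2]
name = parts[2] if len(parts) > 2 else None
print(name, section, hex(int(value, 16)), int(size, 16), flags[1] == 'w')
# _start .text 0x1040 37 False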
Example #12
    def GetProps(self, node):
        """Get all properties from a node

        Args:
            node: full path to node name to look in

        Returns:
            A dictionary containing all the properties, indexed by property
            name. The entries are simply strings - no decoding of lists or
            numbers is done.

        Raises:
            CmdError: if the node does not exist.
        """
        out = command.Output('fdtget', self._fname, node, '-p')
        props = out.strip().splitlines()
        props_dict = {}
        for prop in props:
            name = prop
            props_dict[prop] = self.GetProp(node, name)
        return props_dict
Example #13
def GetMaintainer(fname, verbose=False):
    """Run get_maintainer.pl on a file if we find it.

    We look for get_maintainer.pl in the 'scripts' directory at the top of
    git.  If we find it we'll run it.  If we don't find get_maintainer.pl
    then we fail silently.

    Args:
        fname: Path to the patch file to run get_maintainer.pl on.

    Returns:
        A list of email addresses to CC to.
    """
    get_maintainer = FindGetMaintainer()
    if not get_maintainer:
        if verbose:
            print("WARNING: Couldn't find get_maintainer.pl")
        return []

    stdout = command.Output(get_maintainer, '--norolestats', fname)
    return stdout.splitlines()
Example #14
def EmailPatches(series,
                 cover_fname,
                 args,
                 dry_run,
                 raise_on_error,
                 cc_fname,
                 self_only=False,
                 alias=None,
                 in_reply_to=None):
    """Email a patch series.

    Args:
        series: Series object containing destination info
        cover_fname: filename of cover letter
        args: list of filenames of patch files
        dry_run: Just return the command that would be run
        raise_on_error: True to raise an error when an alias fails to match,
                False to just print a message.
        cc_fname: Filename of Cc file for per-commit Cc
        self_only: True to just email to yourself as a test
        alias: Alias dictionary, keyed by alias name, with each value a list
            of email addresses or further alias names (see the doctest below)
        in_reply_to: If set we'll pass this to git as --in-reply-to.
            Should be a message ID that this is in reply to.

    Returns:
        Git command that was/would be run

    # For the duration of this doctest pretend that we ran patman with ./patman
    >>> _old_argv0 = sys.argv[0]
    >>> sys.argv[0] = './patman'

    >>> alias = {}
    >>> alias['fred'] = ['*****@*****.**']
    >>> alias['john'] = ['*****@*****.**']
    >>> alias['mary'] = ['*****@*****.**']
    >>> alias['boys'] = ['fred', ' john']
    >>> alias['all'] = ['fred ', 'john', '   mary   ']
    >>> alias[os.getenv('USER')] = ['*****@*****.**']
    >>> series = series.Series()
    >>> series.to = ['fred']
    >>> series.cc = ['mary']
    >>> EmailPatches(series, 'cover', ['p1', 'p2'], True, True, 'cc-fname', \
            False, alias)
    'git send-email --annotate --to "*****@*****.**" --cc \
"*****@*****.**" --cc-cmd "./patman --cc-cmd cc-fname" cover p1 p2'
    >>> EmailPatches(series, None, ['p1'], True, True, 'cc-fname', False, \
            alias)
    'git send-email --annotate --to "*****@*****.**" --cc \
"*****@*****.**" --cc-cmd "./patman --cc-cmd cc-fname" p1'
    >>> series.cc = ['all']
    >>> EmailPatches(series, 'cover', ['p1', 'p2'], True, True, 'cc-fname', \
            True, alias)
    'git send-email --annotate --to "*****@*****.**" --cc-cmd "./patman \
--cc-cmd cc-fname" cover p1 p2'
    >>> EmailPatches(series, 'cover', ['p1', 'p2'], True, True, 'cc-fname', \
            False, alias)
    'git send-email --annotate --to "*****@*****.**" --cc \
"*****@*****.**" --cc "*****@*****.**" --cc \
"*****@*****.**" --cc-cmd "./patman --cc-cmd cc-fname" cover p1 p2'

    # Restore argv[0] since we clobbered it.
    >>> sys.argv[0] = _old_argv0
    """
    to = BuildEmailList(series.get('to'), '--to', alias, raise_on_error)
    if not to:
        git_config_to = command.Output('git', 'config', 'sendemail.to')
        if not git_config_to:
            print(
                "No recipient.\n"
                "Please add something like this to a commit\n"
                "Series-to: Fred Bloggs <*****@*****.**>\n"
                "Or do something like this\n"
                "git config sendemail.to [email protected]")
            return
    cc = BuildEmailList(series.get('cc'), '--cc', alias, raise_on_error)
    if self_only:
        to = BuildEmailList([os.getenv('USER')], '--to', alias, raise_on_error)
        cc = []
    cmd = ['git', 'send-email', '--annotate']
    if in_reply_to:
        cmd.append('--in-reply-to="%s"' % in_reply_to)

    cmd += to
    cmd += cc
    cmd += ['--cc-cmd', '"%s --cc-cmd %s"' % (sys.argv[0], cc_fname)]
    if cover_fname:
        cmd.append(cover_fname)
    cmd += args
    cmdstr = ' '.join(cmd)
    if not dry_run:
        os.system(cmdstr)
    return cmdstr
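For clarity, the final command assembly boils down to joining list fragments into one shell string; a self-contained sketch with placeholder addresses and filenames:

cmd = ['git', 'send-email', '--annotate']
cmd += ['--to', '"maintainer@example.com"']
cmd += ['--cc', '"reviewer@example.com"']
cmd += ['--cc-cmd', '"./patman --cc-cmd cc-fname"']
cmd += ['cover', 'p1', 'p2']
print(' '.join(cmd))
# git send-email --annotate --to "maintainer@example.com" ... cover p1 p2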
Example #15
def CheckPatch(fname, verbose=False):
    """Run checkpatch.pl on a file.

    Returns:
        namedtuple containing:
            ok: False=failure, True=ok
            problems: List of problems, each a dict:
                'type': error, warning or check
                'msg': text message
                'file' : filename
                'line': line number
            errors: Number of errors
            warnings: Number of warnings
            checks: Number of checks
            lines: Number of lines
            stdout: Full output of checkpatch
    """
    fields = [
        'ok', 'problems', 'errors', 'warnings', 'checks', 'lines', 'stdout'
    ]
    result = collections.namedtuple('CheckPatchResult', fields)
    result.ok = False
    result.errors, result.warnings, result.checks = 0, 0, 0
    result.lines = 0
    result.problems = []
    chk = FindCheckPatch()
    item = {}
    result.stdout = command.Output(chk, '--no-tree', fname)
    #pipe = subprocess.Popen(cmd, stdout=subprocess.PIPE)
    #stdout, stderr = pipe.communicate()

    # total: 0 errors, 0 warnings, 159 lines checked
    # or:
    # total: 0 errors, 2 warnings, 7 checks, 473 lines checked
    re_stats = re.compile(r'total: (\d+) errors, (\d+) warnings, (\d+)')
    re_stats_full = re.compile(r'total: (\d+) errors, (\d+) warnings, (\d+)'
                               r' checks, (\d+)')
    re_ok = re.compile('.*has no obvious style problems')
    re_bad = re.compile('.*has style problems, please review')
    re_error = re.compile('ERROR: (.*)')
    re_warning = re.compile('WARNING: (.*)')
    re_check = re.compile('CHECK: (.*)')
    re_file = re.compile(r'#\d+: FILE: ([^:]*):(\d+):')

    for line in result.stdout.splitlines():
        if verbose:
            print(line)

        # A blank line indicates the end of a message
        if not line and item:
            result.problems.append(item)
            item = {}
        match = re_stats_full.match(line)
        if not match:
            match = re_stats.match(line)
        if match:
            result.errors = int(match.group(1))
            result.warnings = int(match.group(2))
            if len(match.groups()) == 4:
                result.checks = int(match.group(3))
                result.lines = int(match.group(4))
            else:
                result.lines = int(match.group(3))
        elif re_ok.match(line):
            result.ok = True
        elif re_bad.match(line):
            result.ok = False
        err_match = re_error.match(line)
        warn_match = re_warning.match(line)
        file_match = re_file.match(line)
        check_match = re_check.match(line)
        if err_match:
            item['msg'] = err_match.group(1)
            item['type'] = 'error'
        elif warn_match:
            item['msg'] = warn_match.group(1)
            item['type'] = 'warning'
        elif check_match:
            item['msg'] = check_match.group(1)
            item['type'] = 'check'
        elif file_match:
            item['file'] = file_match.group(1)
            item['line'] = int(file_match.group(2))

    return result
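The two summary-line regexes can be exercised on their own; a self-contained sketch against invented checkpatch totals lines:

import re

re_stats = re.compile(r'total: (\d+) errors, (\d+) warnings, (\d+)')
re_stats_full = re.compile(r'total: (\d+) errors, (\d+) warnings, (\d+)'
                           r' checks, (\d+)')

for line in ('total: 0 errors, 0 warnings, 159 lines checked',
             'total: 0 errors, 2 warnings, 7 checks, 473 lines checked'):
    match = re_stats_full.match(line) or re_stats.match(line)
    print(match.groups())
# ('0', '0', '159')
# ('0', '2', '7', '473')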
Example #16
def DoBuildman(options,
               args,
               toolchains=None,
               make_func=None,
               boards=None,
               clean_dir=False):
    """The main control code for buildman

    Args:
        options: Command line options object
        args: Command line arguments (list of strings)
        toolchains: Toolchains to use - this should be a Toolchains()
                object. If None, then it will be created and scanned
        make_func: Make function to use for the builder. This is called
                to execute 'make'. If this is None, the normal function
                will be used, which calls the 'make' tool with suitable
                arguments. This setting is useful for tests.
        boards: Boards() object to use, containing a list of available
                boards. If this is None it will be created and scanned.
    """
    global builder

    if options.full_help:
        pager = os.getenv('PAGER')
        if not pager:
            pager = 'more'
        fname = os.path.join(os.path.dirname(os.path.realpath(sys.argv[0])),
                             'README')
        command.Run(pager, fname)
        return 0

    gitutil.Setup()
    col = terminal.Color()

    options.git_dir = os.path.join(options.git, '.git')

    no_toolchains = toolchains is None
    if no_toolchains:
        toolchains = toolchain.Toolchains(options.override_toolchain)

    if options.fetch_arch:
        if options.fetch_arch == 'list':
            sorted_list = toolchains.ListArchs()
            print(col.Color(
                col.BLUE,
                'Available architectures: %s\n' % ' '.join(sorted_list)))
            return 0
        else:
            fetch_arch = options.fetch_arch
            if fetch_arch == 'all':
                fetch_arch = ','.join(toolchains.ListArchs())
                print(col.Color(col.CYAN,
                                '\nDownloading toolchains: %s' % fetch_arch))
            for arch in fetch_arch.split(','):
                print()
                ret = toolchains.FetchAndInstall(arch)
                if ret:
                    return ret
            return 0

    if no_toolchains:
        toolchains.GetSettings()
        toolchains.Scan(options.list_tool_chains and options.verbose)
    if options.list_tool_chains:
        toolchains.List()
        print()
        return 0

    # Work out how many commits to build. We want to build everything on the
    # branch. We also build the upstream commit as a control so we can see
    # problems introduced by the first commit on the branch.
    count = options.count
    has_range = options.branch and '..' in options.branch
    if count == -1:
        if not options.branch:
            count = 1
        else:
            if has_range:
                count, msg = gitutil.CountCommitsInRange(
                    options.git_dir, options.branch)
            else:
                count, msg = gitutil.CountCommitsInBranch(
                    options.git_dir, options.branch)
            if count is None:
                sys.exit(col.Color(col.RED, msg))
            elif count == 0:
                sys.exit(
                    col.Color(col.RED,
                              "Range '%s' has no commits" % options.branch))
            if msg:
                print(col.Color(col.YELLOW, msg))
            count += 1  # Build upstream commit also

    if not count:
        str = ("No commits found to process in branch '%s': "
               "set branch's upstream or use -c flag" % options.branch)
        sys.exit(col.Color(col.RED, str))

    # Work out what subset of the boards we are building
    if not boards:
        board_file = os.path.join(options.output_dir, 'boards.cfg')
        genboardscfg = os.path.join(options.git, 'tools/genboardscfg.py')
        status = subprocess.call([genboardscfg, '-o', board_file])
        if status != 0:
            sys.exit("Failed to generate boards.cfg")

        boards = board.Boards()
        boards.ReadBoards(board_file)

    exclude = []
    if options.exclude:
        for arg in options.exclude:
            exclude += arg.split(',')

    if options.boards:
        requested_boards = []
        for b in options.boards:
            requested_boards += b.split(',')
    else:
        requested_boards = None
    why_selected, board_warnings = boards.SelectBoards(args, exclude,
                                                       requested_boards)
    selected = boards.GetSelected()
    if not len(selected):
        sys.exit(col.Color(col.RED, 'No matching boards found'))

    # Read the metadata from the commits. First look at the upstream commit,
    # then the ones in the branch. We would like to do something like
    # upstream/master~..branch but that isn't possible if upstream/master is
    # a merge commit (it will list all the commits that form part of the
    # merge)
    # Conflicting tags are not a problem for buildman, since it does not use
    # them. For example, Series-version is not useful for buildman. On the
    # other hand conflicting tags will cause an error. So allow later tags
    # to overwrite earlier ones by setting allow_overwrite=True
    if options.branch:
        if count == -1:
            if has_range:
                range_expr = options.branch
            else:
                range_expr = gitutil.GetRangeInBranch(options.git_dir,
                                                      options.branch)
            upstream_commit = gitutil.GetUpstream(options.git_dir,
                                                  options.branch)
            series = patchstream.GetMetaDataForList(upstream_commit,
                                                    options.git_dir,
                                                    1,
                                                    series=None,
                                                    allow_overwrite=True)

            series = patchstream.GetMetaDataForList(range_expr,
                                                    options.git_dir,
                                                    None,
                                                    series,
                                                    allow_overwrite=True)
        else:
            # Honour the count
            series = patchstream.GetMetaDataForList(options.branch,
                                                    options.git_dir,
                                                    count,
                                                    series=None,
                                                    allow_overwrite=True)
    else:
        series = None
        if not options.dry_run:
            options.verbose = True
            if not options.summary:
                options.show_errors = True

    # By default we have one thread per CPU. But if there are not enough jobs
    # we can have fewer threads and use a high '-j' value for make.
    if not options.threads:
        options.threads = min(multiprocessing.cpu_count(), len(selected))
    if not options.jobs:
        options.jobs = max(1,
                           (multiprocessing.cpu_count() + len(selected) - 1) //
                           len(selected))

    if not options.step:
        options.step = len(series.commits) - 1

    gnu_make = command.Output(os.path.join(options.git,
                                           'scripts/show-gnu-make'),
                              raise_on_error=False).rstrip()
    if not gnu_make:
        sys.exit('GNU Make not found')

    # Create a new builder with the selected options.
    output_dir = options.output_dir
    if options.branch:
        dirname = options.branch.replace('/', '_')
        # As a special case allow the board directory to be placed in the
        # output directory itself rather than any subdirectory.
        if not options.no_subdirs:
            output_dir = os.path.join(options.output_dir, dirname)
        if clean_dir and os.path.exists(output_dir):
            shutil.rmtree(output_dir)
    CheckOutputDir(output_dir)
    builder = Builder(toolchains,
                      output_dir,
                      options.git_dir,
                      options.threads,
                      options.jobs,
                      gnu_make=gnu_make,
                      checkout=True,
                      show_unknown=options.show_unknown,
                      step=options.step,
                      no_subdirs=options.no_subdirs,
                      full_path=options.full_path,
                      verbose_build=options.verbose_build,
                      incremental=options.incremental,
                      per_board_out_dir=options.per_board_out_dir,
                      config_only=options.config_only,
                      squash_config_y=not options.preserve_config_y,
                      warnings_as_errors=options.warnings_as_errors)
    builder.force_config_on_failure = not options.quick
    if make_func:
        builder.do_make = make_func

    # For a dry run, just show our actions as a sanity check
    if options.dry_run:
        ShowActions(series, why_selected, selected, builder, options,
                    board_warnings)
    else:
        builder.force_build = options.force_build
        builder.force_build_failures = options.force_build_failures
        builder.force_reconfig = options.force_reconfig
        builder.in_tree = options.in_tree

        # Work out which boards to build
        board_selected = boards.GetSelectedDict()

        if series:
            commits = series.commits
            # Number the commits for test purposes
            for commit in range(len(commits)):
                commits[commit].sequence = commit
        else:
            commits = None

        Print(
            GetActionSummary(options.summary, commits, board_selected,
                             options))

        # We can't show function sizes without board details at present
        if options.show_bloat:
            options.show_detail = True
        builder.SetDisplayOptions(options.show_errors, options.show_sizes,
                                  options.show_detail, options.show_bloat,
                                  options.list_error_boards,
                                  options.show_config,
                                  options.show_environment)
        if options.summary:
            builder.ShowSummary(commits, board_selected)
        else:
            fail, warned = builder.BuildBoards(commits, board_selected,
                                               options.keep_outputs,
                                               options.verbose)
            if fail:
                return 128
            elif warned:
                return 129
    return 0
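The threads/jobs defaults above are a ceiling division of CPUs over the selected boards; a self-contained sketch with an invented board count:

import multiprocessing

num_cpus = multiprocessing.cpu_count()
num_boards = 3                 # pretend three boards were selected
threads = min(num_cpus, num_boards)
jobs = max(1, (num_cpus + num_boards - 1) // num_boards)   # ceil(cpus / boards)
print(threads, jobs)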
Example #17
def DoBuildman(options, args):
    """The main control code for buildman

    Args:
        options: Command line options object
        args: Command line arguments (list of strings)
    """
    gitutil.Setup()

    bsettings.Setup(options.config_file)
    options.git_dir = os.path.join(options.git, '.git')

    toolchains = toolchain.Toolchains()
    toolchains.Scan(options.list_tool_chains)
    if options.list_tool_chains:
        toolchains.List()
        print()
        return

    # Work out how many commits to build. We want to build everything on the
    # branch. We also build the upstream commit as a control so we can see
    # problems introduced by the first commit on the branch.
    col = terminal.Color()
    count = options.count
    if count == -1:
        if not options.branch:
            count = 1
        else:
            count = gitutil.CountCommitsInBranch(options.git_dir,
                                                 options.branch)
            if count is None:
                str = ("Branch '%s' not found or has no upstream" %
                       options.branch)
                sys.exit(col.Color(col.RED, str))
            count += 1  # Build upstream commit also

    if not count:
        str = ("No commits found to process in branch '%s': "
               "set branch's upstream or use -c flag" % options.branch)
        sys.exit(col.Color(col.RED, str))

    # Work out what subset of the boards we are building
    board_file = os.path.join(options.git, 'boards.cfg')
    status = subprocess.call(
        [os.path.join(options.git, 'tools/genboardscfg.py')])
    if status != 0:
        sys.exit("Failed to generate boards.cfg")

    boards = board.Boards()
    boards.ReadBoards(os.path.join(options.git, 'boards.cfg'))
    why_selected = boards.SelectBoards(args)
    selected = boards.GetSelected()
    if not len(selected):
        sys.exit(col.Color(col.RED, 'No matching boards found'))

    # Read the metadata from the commits. First look at the upstream commit,
    # then the ones in the branch. We would like to do something like
    # upstream/master~..branch but that isn't possible if upstream/master is
    # a merge commit (it will list all the commits that form part of the
    # merge)
    if options.branch:
        if count == -1:
            range_expr = gitutil.GetRangeInBranch(options.git_dir,
                                                  options.branch)
            upstream_commit = gitutil.GetUpstream(options.git_dir,
                                                  options.branch)
            series = patchstream.GetMetaDataForList(upstream_commit,
                                                    options.git_dir, 1)

            # Conflicting tags are not a problem for buildman, since it does
            # not use them. For example, Series-version is not useful for
            # buildman. On the other hand conflicting tags will cause an
            # error. So allow later tags to overwrite earlier ones.
            series.allow_overwrite = True
            series = patchstream.GetMetaDataForList(range_expr,
                                                    options.git_dir, None,
                                                    series)
        else:
            # Honour the count
            series = patchstream.GetMetaDataForList(options.branch,
                                                    options.git_dir, count)
    else:
        series = None
        options.verbose = True
        options.show_errors = True

    # By default we have one thread per CPU. But if there are not enough jobs
    # we can have fewer threads and use a high '-j' value for make.
    if not options.threads:
        options.threads = min(multiprocessing.cpu_count(), len(selected))
    if not options.jobs:
        options.jobs = max(1,
                           (multiprocessing.cpu_count() + len(selected) - 1) //
                           len(selected))

    if not options.step:
        options.step = len(series.commits) - 1

    gnu_make = command.Output(
        os.path.join(options.git, 'scripts/show-gnu-make')).rstrip()
    if not gnu_make:
        sys.exit('GNU Make not found')

    # Create a new builder with the selected options
    if options.branch:
        dirname = options.branch
    else:
        dirname = 'current'
    output_dir = os.path.join(options.output_dir, dirname)
    builder = Builder(toolchains,
                      output_dir,
                      options.git_dir,
                      options.threads,
                      options.jobs,
                      gnu_make=gnu_make,
                      checkout=True,
                      show_unknown=options.show_unknown,
                      step=options.step)
    builder.force_config_on_failure = not options.quick

    # For a dry run, just show our actions as a sanity check
    if options.dry_run:
        ShowActions(series, why_selected, selected, builder, options)
    else:
        builder.force_build = options.force_build
        builder.force_build_failures = options.force_build_failures
        builder.force_reconfig = options.force_reconfig
        builder.in_tree = options.in_tree

        # Work out which boards to build
        board_selected = boards.GetSelectedDict()

        if series:
            commits = series.commits
        else:
            commits = None

        print(GetActionSummary(options.summary, commits, board_selected,
                               options))

        builder.SetDisplayOptions(options.show_errors, options.show_sizes,
                                  options.show_detail, options.show_bloat)
        if options.summary:
            # We can't show function sizes without board details at present
            if options.show_bloat:
                options.show_detail = True
            builder.ShowSummary(commits, board_selected)
        else:
            builder.BuildBoards(commits, board_selected, options.keep_outputs,
                                options.verbose)
Example #18
def run_fit_test(mkimage, u_boot):
    """Basic sanity check of FIT loading in U-Boot

    TODO: Almost everything:
       - hash algorithms - invalid hash/contents should be detected
       - signature algorithms - invalid sig/contents should be detected
       - compression
       - checking that errors are detected like:
            - image overwriting
            - missing images
            - invalid configurations
            - incorrect os/arch/type fields
            - empty data
            - images too large/small
            - invalid FDT (e.g. putting a random binary in instead)
       - default configuration selection
       - bootm command line parameters should have desired effect
       - run code coverage to make sure we are testing all the code
    """
    global test_name

    # Set up invariant files
    control_dtb = make_dtb()
    kernel = make_kernel('test-kernel.bin', 'kernel')
    ramdisk = make_ramdisk('test-ramdisk.bin', 'ramdisk')
    loadables1 = make_kernel('test-loadables1.bin', 'lenrek')
    loadables2 = make_ramdisk('test-loadables2.bin', 'ksidmar')
    kernel_out = make_fname('kernel-out.bin')
    fdt_out = make_fname('fdt-out.dtb')
    ramdisk_out = make_fname('ramdisk-out.bin')
    loadables1_out = make_fname('loadables1-out.bin')
    loadables2_out = make_fname('loadables2-out.bin')

    # Set up basic parameters with default values
    params = {
        'fit_addr': 0x1000,
        'kernel': kernel,
        'kernel_out': kernel_out,
        'kernel_addr': 0x40000,
        'kernel_size': filesize(kernel),
        'fdt_out': fdt_out,
        'fdt_addr': 0x80000,
        'fdt_size': filesize(control_dtb),
        'fdt_load': '',
        'ramdisk': ramdisk,
        'ramdisk_out': ramdisk_out,
        'ramdisk_addr': 0xc0000,
        'ramdisk_size': filesize(ramdisk),
        'ramdisk_load': '',
        'ramdisk_config': '',
        'loadables1': loadables1,
        'loadables1_out': loadables1_out,
        'loadables1_addr': 0x100000,
        'loadables1_size': filesize(loadables1),
        'loadables1_load': '',
        'loadables2': loadables2,
        'loadables2_out': loadables2_out,
        'loadables2_addr': 0x140000,
        'loadables2_size': filesize(loadables2),
        'loadables2_load': '',
        'loadables_config': '',
    }

    # Make a basic FIT and a script to load it
    fit = make_fit(mkimage, params)
    params['fit'] = fit
    cmd = base_script % params

    # First check that we can load a kernel
    # We could perhaps reduce duplication with some loss of readability
    set_test('Kernel load')
    stdout = command.Output(u_boot, '-d', control_dtb, '-c', cmd)
    debug_stdout(stdout)
    if read_file(kernel) != read_file(kernel_out):
        fail('Kernel not loaded', stdout)
    if read_file(control_dtb) == read_file(fdt_out):
        fail('FDT loaded but should be ignored', stdout)
    if read_file(ramdisk) == read_file(ramdisk_out):
        fail('Ramdisk loaded but should not be', stdout)

    # Find out the offset in the FIT where U-Boot has found the FDT
    line = find_matching(stdout, 'Booting using the fdt blob at ')
    fit_offset = int(line, 16) - params['fit_addr']
    fdt_magic = struct.pack('>L', 0xd00dfeed)
    data = read_file(fit)

    # Now find where it actually is in the FIT (skip the first word)
    real_fit_offset = data.find(fdt_magic, 4)
    if fit_offset != real_fit_offset:
        fail(
            'U-Boot loaded FDT from offset %#x, FDT is actually at %#x' %
            (fit_offset, real_fit_offset), stdout)

    # Now a kernel and an FDT
    set_test('Kernel + FDT load')
    params['fdt_load'] = 'load = <%#x>;' % params['fdt_addr']
    fit = make_fit(mkimage, params)
    stdout = command.Output(u_boot, '-d', control_dtb, '-c', cmd)
    debug_stdout(stdout)
    if read_file(kernel) != read_file(kernel_out):
        fail('Kernel not loaded', stdout)
    if read_file(control_dtb) != read_file(fdt_out):
        fail('FDT not loaded', stdout)
    if read_file(ramdisk) == read_file(ramdisk_out):
        fail('Ramdisk loaded but should not be', stdout)

    # Try a ramdisk
    set_test('Kernel + FDT + Ramdisk load')
    params['ramdisk_config'] = 'ramdisk = "ramdisk@1";'
    params['ramdisk_load'] = 'load = <%#x>;' % params['ramdisk_addr']
    fit = make_fit(mkimage, params)
    stdout = command.Output(u_boot, '-d', control_dtb, '-c', cmd)
    debug_stdout(stdout)
    if read_file(ramdisk) != read_file(ramdisk_out):
        fail('Ramdisk not loaded', stdout)

    # Configuration with some Loadables
    set_test('Kernel + FDT + Ramdisk load + Loadables')
    params['loadables_config'] = 'loadables = "kernel@2", "ramdisk@2";'
    params['loadables1_load'] = 'load = <%#x>;' % params['loadables1_addr']
    params['loadables2_load'] = 'load = <%#x>;' % params['loadables2_addr']
    fit = make_fit(mkimage, params)
    stdout = command.Output(u_boot, '-d', control_dtb, '-c', cmd)
    debug_stdout(stdout)
    if read_file(loadables1) != read_file(loadables1_out):
        fail('Loadables1 (kernel) not loaded', stdout)
    if read_file(loadables2) != read_file(loadables2_out):
        fail('Loadables2 (ramdisk) not loaded', stdout)
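The FDT-offset check near the top of this test can be sketched on synthetic data: find the device tree magic (0xd00dfeed, big-endian) while skipping the first word so the FIT's own FDT header is not matched:

import struct

fdt_magic = struct.pack('>L', 0xd00dfeed)
# Synthetic blob: the outer FIT header magic at offset 0, then an embedded
# FDT 64 bytes in (real FIT images are produced by mkimage, as above).
data = fdt_magic + b'\x00' * 60 + fdt_magic + b'payload'
real_fit_offset = data.find(fdt_magic, 4)   # skip the first word
print(real_fit_offset)                      # 64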
Example #19
def MakeElf(elf_fname, text, data):
    """Make an elf file with the given data in a single section

    The output file has several sections, including '.text' and '.data',
    containing the data provided in the arguments.

    Args:
        elf_fname: Output filename
        text: Text (code) to put in the file's .text section
        data: Data to put in the file's .data section
    """
    outdir = tempfile.mkdtemp(prefix='binman.elf.')
    s_file = os.path.join(outdir, 'elf.S')

    # Split the text into two parts so that we can make the entry point two
    # bytes after the start of the text section
    text_bytes1 = ['\t.byte\t%#x' % tools.ToByte(byte) for byte in text[:2]]
    text_bytes2 = ['\t.byte\t%#x' % tools.ToByte(byte) for byte in text[2:]]
    data_bytes = ['\t.byte\t%#x' % tools.ToByte(byte) for byte in data]
    with open(s_file, 'w') as fd:
        print(
            '''/* Auto-generated C program to produce an ELF file for testing */

.section .text
.code32
.globl _start
.type _start, @function
%s
_start:
%s
.ident "comment"

.comm fred,8,4

.section .empty
.globl _empty
_empty:
.byte 1

.globl ernie
.data
.type ernie, @object
.size ernie, 4
ernie:
%s
''' % ('\n'.join(text_bytes1), '\n'.join(text_bytes2), '\n'.join(data_bytes)),
            file=fd)
    lds_file = os.path.join(outdir, 'elf.lds')

    # Use a linker script to set the alignment and text address.
    with open(lds_file, 'w') as fd:
        print(
            '''/* Auto-generated linker script to produce an ELF file for testing */

PHDRS
{
    text PT_LOAD ;
    data PT_LOAD ;
    empty PT_LOAD FLAGS ( 6 ) ;
    note PT_NOTE ;
}

SECTIONS
{
    . = 0xfef20000;
    ENTRY(_start)
    .text . : SUBALIGN(0)
    {
        *(.text)
    } :text
    .data : {
        *(.data)
    } :data
    _bss_start = .;
    .empty : {
        *(.empty)
    } :empty
    .note : {
        *(.comment)
    } :note
    .bss _bss_start  (OVERLAY) : {
        *(.bss)
    }
}
''',
            file=fd)
    # -static: Avoid requiring any shared libraries
    # -nostdlib: Don't link with C library
    # -Wl,--build-id=none: Don't generate a build ID, so that we just get the
    #   text section at the start
    # -m32: Build for 32-bit x86
    # -T...: Specifies the link script, which sets the start address
    stdout = command.Output('cc', '-static', '-nostdlib',
                            '-Wl,--build-id=none', '-m32', '-T', lds_file,
                            '-o', elf_fname, s_file)
    shutil.rmtree(outdir)
Example #20
def CheckPatch(fname, verbose=False):
    """Run checkpatch.pl on a file.

    Returns:
        6-tuple containing:
            result: False=failure, True=ok
            problems: List of problems, each a dict:
                'type': error or warning
                'msg': text message
                'file' : filename
                'line': line number
            error_count: Number of errors
            warning_count: Number of warnings
            lines: Number of lines
            stdout: Full output of checkpatch
    """
    result = False
    error_count, warning_count, lines = 0, 0, 0
    problems = []
    chk = FindCheckPatch()
    if not chk:
        raise OSError('Cannot find checkpatch.pl - please put it in your '
                      '~/bin directory')
    item = {}
    stdout = command.Output(chk, '--no-tree', fname)
    #pipe = subprocess.Popen(cmd, stdout=subprocess.PIPE)
    #stdout, stderr = pipe.communicate()

    # total: 0 errors, 0 warnings, 159 lines checked
    re_stats = re.compile(r'total: (\d+) errors, (\d+) warnings, (\d+)')
    re_ok = re.compile('.*has no obvious style problems')
    re_bad = re.compile('.*has style problems, please review')
    re_error = re.compile('ERROR: (.*)')
    re_warning = re.compile('WARNING: (.*)')
    re_file = re.compile(r'#\d+: FILE: ([^:]*):(\d+):')

    for line in stdout.splitlines():
        if verbose:
            print(line)

        # A blank line indicates the end of a message
        if not line and item:
            problems.append(item)
            item = {}
        match = re_stats.match(line)
        if match:
            error_count = int(match.group(1))
            warning_count = int(match.group(2))
            lines = int(match.group(3))
        elif re_ok.match(line):
            result = True
        elif re_bad.match(line):
            result = False
        match = re_error.match(line)
        if match:
            item['msg'] = match.group(1)
            item['type'] = 'error'
        match = re_warning.match(line)
        if match:
            item['msg'] = match.group(1)
            item['type'] = 'warning'
        match = re_file.match(line)
        if match:
            item['file'] = match.group(1)
            item['line'] = int(match.group(2))

    return result, problems, error_count, warning_count, lines, stdout