Example #1
    def test_topological_order_packages(self):
        def create_mock(name, build_depends, run_depends, path):
            m = Mock()
            m.name = name
            m.build_depends = build_depends
            m.buildtool_depends = []
            m.run_depends = run_depends
            m.exports = []
            m.path = path
            return m

        mc = create_mock('c', [], [], 'pc')
        md = create_mock('d', [], [], 'pd')
        ma = create_mock('a', [mc], [md], 'pa')
        mb = create_mock('b', [ma], [], 'pb')

        packages = {ma.path: ma,
                    mb.path: mb,
                    mc.path: mc,
                    md.path: md}

        ordered_packages = topological_order_packages(packages, blacklisted=['c'])
        # d before b because of the run dependency from a to d
        # a before d only because of alphabetic order, a run depend on d should not influence the order
        self.assertEqual(['pa', 'pd', 'pb'], [path for path, _ in ordered_packages])

        ordered_packages = topological_order_packages(packages, whitelisted=['a', 'b', 'c'])
        # c before a because of the run dependency from a to c
        self.assertEqual(['pc', 'pa', 'pb'], [path for path, _ in ordered_packages])
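
Outside of the mock-based tests, the same API is typically driven by find_packages; a minimal sketch, assuming catkin_pkg is installed and ./src contains packages with package.xml files:

from catkin_pkg.packages import find_packages
from catkin_pkg.topological_order import topological_order_packages

packages = find_packages('src')  # dict mapping relative path -> Package
for path, package in topological_order_packages(packages):
    # dependencies are guaranteed to come before their dependents
    print(path, package.name)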
Example #2
    def test_topological_order_packages(self):
        def create_mock(name, build_depends, run_depends, path):
            m = Mock()
            m.name = name
            m.build_depends = build_depends
            m.buildtool_depends = []
            m.run_depends = run_depends
            m.test_depends = []
            m.exports = []
            m.path = path
            return m

        mc = create_mock("c", [], [], "pc")
        md = create_mock("d", [], [], "pd")
        ma = create_mock("a", [mc], [md], "pa")
        mb = create_mock("b", [ma], [], "pb")

        packages = {ma.path: ma, mb.path: mb, mc.path: mc, md.path: md}

        ordered_packages = topological_order_packages(packages, blacklisted=["c"])
        # d before b because of the run dependency from a to d
        # a before d only because of alphabetic order, a run depend on d should not influence the order
        self.assertEqual(["pa", "pd", "pb"], [path for path, _ in ordered_packages])

        ordered_packages = topological_order_packages(packages, whitelisted=["a", "b", "c"])
        # c before a because of the run dependency from a to c
        self.assertEqual(["pc", "pa", "pb"], [path for path, _ in ordered_packages])
Example #3
    def test_topological_order_packages_with_duplicates(self):
        pkg1 = create_mock('pkg', [], [], 'path/to/pkg1')
        pkg2_dep = create_mock('pkg_dep', [], [], 'path/to/pkg2_dep')
        pkg2 = create_mock('pkg', [pkg2_dep], [], 'path/to/pkg2')
        with self.assertRaisesRegexp(RuntimeError, 'Two packages with the same name "pkg" in the workspace'):
            topological_order_packages({
                pkg1.path: pkg1,
                pkg2_dep.path: pkg2_dep,
                pkg2.path: pkg2,
            })
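
The RuntimeError above comes from a duplicate-name check inside topological_order_packages; a minimal sketch of that kind of check (not the actual implementation):

from collections import Counter

def assert_unique_names(packages):
    counts = Counter(pkg.name for pkg in packages.values())
    duplicates = [name for name, count in counts.items() if count > 1]
    if duplicates:
        raise RuntimeError(
            'Two packages with the same name "%s" in the workspace' % duplicates[0])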
Example #4
def dry_run(context, packages, no_deps, start_with):
    # Print Summary
    log(context.summary())
    # Get all the packages in the context source space
    # Suppress warnings since this is a utility function
    workspace_packages = find_packages(context.source_space_abs, exclude_subspaces=True, warnings=[])
    # Find list of packages in the workspace
    packages_to_be_built, packages_to_be_built_deps, all_packages = determine_packages_to_be_built(
        packages, context, workspace_packages)
    # Assert start_with package is in the workspace
    verify_start_with_option(start_with, packages, all_packages, packages_to_be_built + packages_to_be_built_deps)
    if not no_deps:
        # Extend packages to be built to include their deps
        packages_to_be_built.extend(packages_to_be_built_deps)
        # Also re-sort topologically
        packages_to_be_built = topological_order_packages(dict(packages_to_be_built))
    # Print packages
    log("Packages to be built:")
    max_name_len = str(max([len(pkg.name) for pth, pkg in packages_to_be_built]))
    prefix = clr('@{pf}' + ('------ ' if start_with else '- ') + '@|')
    for pkg_path, pkg in packages_to_be_built:
        build_type = get_build_type(pkg)
        if build_type == 'catkin' and 'metapackage' in [e.tagname for e in pkg.exports]:
            build_type = 'metapackage'
        if start_with and pkg.name == start_with:
            start_with = None
        log(clr("{prefix}@{cf}{name:<" + max_name_len + "}@| (@{yf}{build_type}@|)")
            .format(prefix=clr('@!@{kf}(skip)@| ') if start_with else prefix, name=pkg.name, build_type=build_type))
    log("Total packages: " + str(len(packages_to_be_built)))
Example #5
def determine_packages_to_be_cleaned(context, include_dependents, packages):
    """Returns list of packages which should be cleaned, and those packages' deps.

    :param context: Workspace context
    :type context: :py:class:`catkin_tools.verbs.catkin_build.context.Context`
    :param include_dependents: if True, also clean packages which depend on the given packages
    :type include_dependents: bool
    :param packages: list of package names to be cleaned
    :type packages: list
    :returns: list of (path, package) tuples for the packages to be cleaned
    :rtype: list
    """

    # Get all the cached packages in the context source space
    workspace_packages = find_packages(context.package_metadata_path(),
                                       exclude_subspaces=True,
                                       warnings=[])
    # Order the packages by topology
    ordered_packages = topological_order_packages(workspace_packages)

    # Create a dict of all packages in the workspace by name
    workspace_packages_by_name = dict([(pkg.name, (path, pkg))
                                       for path, pkg in ordered_packages])

    # Initialize empty output
    packages_to_be_cleaned = set()

    # Expand glob patterns in packages
    expanded_packages = []
    for package_name in packages:
        expanded_packages.extend(
            expand_glob_package(package_name, workspace_packages_by_name))
    packages = expanded_packages

    # Expand metapackages into their constituents
    for package_name in packages:
        # This is ok if it's orphaned
        if package_name not in workspace_packages_by_name:
            packages_to_be_cleaned.add(package_name)
        else:
            # Get the package object
            package = workspace_packages_by_name[package_name][1]
            # If metapackage, include run depends which are in the workspace
            if 'metapackage' in [e.tagname for e in package.exports]:
                for rdep in package.run_depends:
                    if rdep.name in workspace_packages_by_name:
                        packages_to_be_cleaned.add(rdep.name)
            else:
                packages_to_be_cleaned.add(package_name)

    # Determine the packages that depend on the given packages
    if include_dependents:
        for package_name in list(packages_to_be_cleaned):
            # Get the packages that depend on the packages to be cleaned
            dependents = get_recursive_build_dependents_in_workspace(
                package_name, ordered_packages)
            packages_to_be_cleaned.update([pkg.name for _, pkg in dependents])

    return [
        workspace_packages_by_name[n] for n in packages_to_be_cleaned
        if n in workspace_packages_by_name
    ]
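
expand_glob_package is an external catkin_tools helper; below is a hypothetical fnmatch-based stand-in, purely for illustration (its behavior for non-matching patterns is an assumption here):

import fnmatch

def expand_glob_package(pattern, workspace_packages_by_name):
    # hypothetical stand-in: match the pattern against known package names
    matches = fnmatch.filter(workspace_packages_by_name.keys(), pattern)
    return matches if matches else [pattern]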
Example #6
    def test_topological_order_packages(self):
        mc = create_mock('c', [], [], 'pc')
        md = create_mock('d', [], [], 'pd')
        ma = create_mock('a', [mc], [md], 'pa')
        mb = create_mock('b', [ma], [], 'pb')

        packages = {ma.path: ma,
                    mb.path: mb,
                    mc.path: mc,
                    md.path: md}

        ordered_packages = topological_order_packages(packages, blacklisted=['c'])
        # d before b because of the run dependency from a to d
        # a before d only because of alphabetic order, a run depend on d should not influence the order
        self.assertEqual(['pa', 'pd', 'pb'], [path for path, _ in ordered_packages])

        ordered_packages = topological_order_packages(packages, whitelisted=['a', 'b', 'c'])
        # c before a because of the run dependency from a to c
        self.assertEqual(['pc', 'pa', 'pb'], [path for path, _ in ordered_packages])
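
This test references a module-level create_mock helper defined elsewhere in the test module; presumably it mirrors the inline version from Example #1, along these lines:

from unittest.mock import Mock

def create_mock(name, build_depends, run_depends, path):
    m = Mock()
    m.name = name
    m.build_depends = build_depends
    m.buildtool_depends = []
    m.run_depends = run_depends
    m.exports = []
    m.path = path
    return m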
Example #7
def determine_packages_to_be_built(packages, context):
    """Returns list of packages which should be built, and those package's deps.

    :param packages: list of packages to be built, if None all packages are built
    :type packages: list
    :param context: Workspace context
    :type context: :py:class:`catkin_tools.verbs.catkin_build.context.Context`
    :returns: tuple of packages to be built and those packages' deps
    :rtype: tuple
    """
    start = time.time()
    workspace_packages = find_packages(context.source_space_abs,
                                       exclude_subspaces=True)
    # If there are no packages, exit
    if not workspace_packages:
        sys.exit("No packages were found in the source space '{0}'".format(
            context.source_space_abs))
    log("Found '{0}' packages in {1}.".format(
        len(workspace_packages), format_time_delta(time.time() - start)))

    # Order the packages by topology
    ordered_packages = topological_order_packages(workspace_packages)
    # Set the packages in the workspace for the context
    context.packages = ordered_packages
    # Determine the packages which should be built
    packages_to_be_built = []
    packages_to_be_built_deps = []
    if packages:
        # First assert all of the packages given are in the workspace
        workspace_package_names = dict([(pkg.name, (path, pkg))
                                        for path, pkg in ordered_packages])
        for package in packages:
            if package not in workspace_package_names:
                sys.exit("Given package '{0}' is not in the workspace".format(
                    package))
            # If metapackage, include run depends which are in the workspace
            package_obj = workspace_package_names[package][1]
            if 'metapackage' in [e.tagname for e in package_obj.exports]:
                for rdep in package_obj.run_depends:
                    if rdep.name in workspace_package_names:
                        packages.append(rdep.name)
        # Limit the packages to be built to just the provided packages
        for pkg_path, package in ordered_packages:
            if package.name in packages:
                packages_to_be_built.append((pkg_path, package))
                # Get the recursive dependencies for each of these packages
                pkg_deps = get_cached_recursive_build_depends_in_workspace(
                    package, ordered_packages)
                packages_to_be_built_deps.extend(pkg_deps)
    else:
        packages_to_be_built = ordered_packages
    return packages_to_be_built, packages_to_be_built_deps, ordered_packages
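
Note the metapackage loop above appends to packages while iterating over it; in Python, appending to a list during iteration extends the iteration, which is what lets run dependencies that are themselves metapackages be expanded too. A self-contained illustration:

items = ['a']
for item in items:
    if item == 'a':
        items.append('b')  # 'b' is appended mid-loop and still gets visited
assert items == ['a', 'b']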
Example #8
def determine_packages_to_be_cleaned(context, include_dependents, packages):
    """Returns list of packages which should be cleaned, and those packages' deps.

    :param context: Workspace context
    :type context: :py:class:`catkin_tools.verbs.catkin_build.context.Context`
    :param include_dependents: if True, also clean packages which depend on the given packages
    :type include_dependents: bool
    :param packages: list of package names to be cleaned
    :type packages: list
    :returns: list of (path, package) tuples for the packages to be cleaned
    :rtype: list
    """

    # Get all the cached packages in the context source space
    workspace_packages = find_packages(context.package_metadata_path(), exclude_subspaces=True, warnings=[])
    # Order the packages by topology
    ordered_packages = topological_order_packages(workspace_packages)

    # Create a dict of all packages in the workspace by name
    workspace_packages_by_name = dict([(pkg.name, (path, pkg)) for path, pkg in ordered_packages])

    # Initialize empty output
    packages_to_be_cleaned = set()

    # Expand metapackages into their constituents
    for package_name in packages:
        # This is ok if it's orphaned
        if package_name not in workspace_packages_by_name:
            packages_to_be_cleaned.add(package_name)
        else:
            # Get the package object
            package = workspace_packages_by_name[package_name][1]
            # If metapackage, include run depends which are in the workspace
            if 'metapackage' in [e.tagname for e in package.exports]:
                for rdep in package.run_depends:
                    if rdep.name in workspace_packages_by_name:
                        packages_to_be_cleaned.add(rdep.name)
            else:
                packages_to_be_cleaned.add(package_name)

    # Determine the packages that depend on the given packages
    if include_dependents:
        for package_name in list(packages_to_be_cleaned):
            # Get the packages that depend on the packages to be cleaned
            dependents = get_recursive_build_dependents_in_workspace(package_name, ordered_packages)
            packages_to_be_cleaned.update([pkg.name for _, pkg in dependents])

    return [workspace_packages_by_name[n] for n in packages_to_be_cleaned if n in workspace_packages_by_name]
Example #9
def package_depends(pkg):  # pkg is string
    global pkg_map
    if pkg_map is None:
        pkg_map = get_pkg_map()
    depends = {}
    depends_impl = package_depends_impl(pkg)
    for d in depends_impl:
        try:
            pkg_obj = pkg_map[d]
            p_path = os.path.dirname(pkg_obj.filename)
            if (os.path.exists(os.path.join(p_path, "msg")) or
                    os.path.exists(os.path.join(p_path, "srv"))):
                depends[d] = pkg_obj
        except Exception as e:
            print(terminal_color.fmt(
                '@{yellow}[WARNING] path to %s is not found') % (d))
            print(e)
    return [p.name for _, p in topological_order.topological_order_packages(depends)]
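
The msg/srv filter embedded in the try block can be read as a standalone predicate; a sketch, assuming package objects expose the filename attribute used above:

import os

def provides_interfaces(pkg_obj):
    # True if the package ships message or service definitions
    p_path = os.path.dirname(pkg_obj.filename)
    return (os.path.exists(os.path.join(p_path, 'msg')) or
            os.path.exists(os.path.join(p_path, 'srv')))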
Example #10
    def test_topological_order_packages_cycles(self):
        def create_mock(name, build_depends, path):
            m = Mock()
            m.name = name
            m.build_depends = build_depends
            m.buildtool_depends = []
            m.test_depends = []
            m.run_depends = []
            m.exports = []
            m.path = path
            return m

        mc = create_mock("c", [], "pc")
        mb = create_mock("b", [mc], "pb")
        ma = create_mock("a", [mb], "pa")
        mc.build_depends = [ma]

        packages = {ma.path: ma, mb.path: mb, mc.path: mc}

        ordered_packages = topological_order_packages(packages)
        self.assertEqual([(None, "a, b, c")], ordered_packages)
Example #11
    def test_topological_order_packages_with_underlay(self):
        def create_mock(name, build_depends, path):
            m = Mock()
            m.name = name
            m.build_depends = build_depends
            m.buildtool_depends = []
            m.run_depends = []
            m.exports = []
            m.path = path
            return m

        mc = create_mock('c', [], 'pc')
        mb = create_mock('b', [mc], 'pb')
        ma = create_mock('a', [mb], 'pa')

        packages = {ma.path: ma,
                    mc.path: mc}
        underlay_packages = {mb.path: mb}

        ordered_packages = topological_order_packages(packages, underlay_packages=underlay_packages)
        # c before a because of the indirect dependency via b which is part of an underlay
        self.assertEqual(['pc', 'pa'], [path for path, _ in ordered_packages])
Example #12
    def test_topological_order_packages_cycles(self):
        def create_mock(name, build_depends, path):
            m = Mock()
            m.name = name
            m.build_depends = build_depends
            m.buildtool_depends = []
            m.test_depends = []
            m.run_depends = []
            m.group_depends = []
            m.exports = []
            m.path = path
            return m

        mc = create_mock('c', [], 'pc')
        mb = create_mock('b', [mc], 'pb')
        ma = create_mock('a', [mb], 'pa')
        mc.build_depends = [ma]

        packages = {ma.path: ma,
                    mb.path: mb,
                    mc.path: mc}

        ordered_packages = topological_order_packages(packages)
        self.assertEqual([(None, 'a, b, c')], ordered_packages)
Example #13
def build_workspace_isolated(
    workspace='.',
    sourcespace=None,
    buildspace=None,
    develspace=None,
    installspace=None,
    merge=False,
    install=False,
    force_cmake=False,
    colorize=True,
    build_packages=None,
    quiet=False,
    cmake_args=None,
    make_args=None,
    catkin_make_args=None,
    continue_from_pkg=False,
    only_pkg_with_deps=None,
    destdir=None,
    use_ninja=False,
    use_nmake=False,
    override_build_tool_check=False
):
    '''
    Runs ``cmake``, ``make`` and optionally ``make install`` for all
    catkin packages in sourcespace_dir.  It creates several folders
    in the current working directory. For non-catkin packages it runs
    ``cmake``, ``make`` and ``make install`` for each, installing it to
    the devel space or install space if the ``install`` option is specified.

    :param workspace: path to the current workspace, ``str``
    :param sourcespace: workspace folder containing catkin packages, ``str``
    :param buildspace: path to build space location, ``str``
    :param develspace: path to devel space location, ``str``
    :param installspace: path to install space (CMAKE_INSTALL_PREFIX), ``str``
    :param merge: if True, build each catkin package into the same
        devel space (not affecting plain cmake packages), ``bool``
    :param install: if True, install all packages to the install space,
        ``bool``
    :param force_cmake: (optional), if True calls cmake explicitly for each
        package, ``bool``
    :param colorize: if True, colorize cmake output and other messages,
        ``bool``
    :param build_packages: specific packages to build (all parent packages
        in the topological order must have been built before), ``str``
    :param quiet: if True, hides some build output, ``bool``
    :param cmake_args: additional arguments for cmake, ``[str]``
    :param make_args: additional arguments for make, ``[str]``
    :param catkin_make_args: additional arguments for make but only for catkin
        packages, ``[str]``
    :param continue_from_pkg: indicates whether or not cmi should continue
        when a package is reached, ``bool``
    :param only_pkg_with_deps: only consider the specific packages and their
        recursive dependencies and ignore all other packages in the workspace,
        ``[str]``
    :param destdir: define DESTDIR for the cmake invocation, ``str``
    :param use_ninja: if True, use ninja instead of make, ``bool``
    :param use_nmake: if True, use nmake instead of make, ``bool``
    :param override_build_tool_check: if True, build even if a space was built
        by another tool previously.
    '''
    if not colorize:
        disable_ANSI_colors()

    # Check workspace existence
    if not os.path.exists(workspace):
        sys.exit("Workspace path '{0}' does not exist.".format(workspace))
    workspace = os.path.abspath(workspace)

    # Check source space existence
    if sourcespace is None:
        sourcespace = os.path.join(workspace, 'src')
    if not os.path.exists(sourcespace):
        sys.exit('Could not find source space: {0}'.format(sourcespace))
    print('Base path: ' + str(workspace))
    print('Source space: ' + str(sourcespace))

    # Check build space
    if buildspace is None:
        buildspace = os.path.join(workspace, 'build_isolated')
    if not os.path.exists(buildspace):
        os.mkdir(buildspace)
    print('Build space: ' + str(buildspace))

    # ensure the build space was previously built by catkin_make_isolated
    previous_tool = get_previous_tool_used_on_the_space(buildspace)
    if previous_tool is not None and previous_tool != 'catkin_make_isolated':
        if override_build_tool_check:
            print(fmt(
                "@{yf}Warning: build space at '%s' was previously built by '%s', "
                "but --override-build-tool-check was passed so continuing anyways."
                % (buildspace, previous_tool)))
        else:
            sys.exit(fmt(
                "@{rf}The build space at '%s' was previously built by '%s'. "
                "Please remove the build space or pick a different build space."
                % (buildspace, previous_tool)))
    mark_space_as_built_by(buildspace, 'catkin_make_isolated')

    # Check devel space
    if develspace is None:
        develspace = os.path.join(workspace, 'devel_isolated')
    print('Devel space: ' + str(develspace))

    # ensure the devel space was previously built by catkin_make_isolated
    previous_tool = get_previous_tool_used_on_the_space(develspace)
    if previous_tool is not None and previous_tool != 'catkin_make_isolated':
        if override_build_tool_check:
            print(fmt(
                "@{yf}Warning: devel space at '%s' was previously built by '%s', "
                "but --override-build-tool-check was passed so continuing anyways."
                % (develspace, previous_tool)))
        else:
            sys.exit(fmt(
                "@{rf}The devel space at '%s' was previously built by '%s'. "
                "Please remove the devel space or pick a different devel space."
                % (develspace, previous_tool)))
    mark_space_as_built_by(develspace, 'catkin_make_isolated')

    # Check install space
    if installspace is None:
        installspace = os.path.join(workspace, 'install_isolated')
    print('Install space: ' + str(installspace))

    if cmake_args:
        print("Additional CMake Arguments: " + " ".join(cmake_args))
    else:
        cmake_args = []

    if not [arg for arg in cmake_args if arg.startswith('-G')]:
        if use_ninja:
            cmake_args += ['-G', 'Ninja']
        elif use_nmake:
            cmake_args += ['-G', 'NMake Makefiles']
        else:
            cmake_args += ['-G', 'Unix Makefiles']
    elif use_ninja or use_nmake:
        print(colorize_line("Error: either specify a generator using '-G...' or '--use-[ninja|nmake]' but not both"))
        sys.exit(1)

    if make_args:
        print("Additional make Arguments: " + " ".join(make_args))
    else:
        make_args = []

    if catkin_make_args:
        print("Additional make Arguments for catkin packages: " + " ".join(catkin_make_args))
    else:
        catkin_make_args = []

    # Find packages
    packages = find_packages(sourcespace, exclude_subspaces=True)
    if not packages:
        print(fmt("@{yf}No packages found in source space: %s@|" % sourcespace))

    # whitelist packages and their dependencies in workspace
    if only_pkg_with_deps:
        package_names = [p.name for p in packages.values()]
        unknown_packages = [name for name in only_pkg_with_deps if name not in package_names]
        if unknown_packages:
            sys.exit('Packages not found in the workspace: %s' % ', '.join(unknown_packages))

        whitelist_pkg_names = get_package_names_with_recursive_dependencies(packages, only_pkg_with_deps)
        print('Whitelisted packages: %s' % ', '.join(sorted(whitelist_pkg_names)))
        packages = {path: p for path, p in packages.items() if p.name in whitelist_pkg_names}

    # verify that the specified packages exist in the workspace
    if build_packages:
        packages_by_name = {p.name: path for path, p in packages.items()}
        unknown_packages = [p for p in build_packages if p not in packages_by_name]
        if unknown_packages:
            sys.exit('Packages not found in the workspace: %s' % ', '.join(unknown_packages))

    # Report topological ordering
    ordered_packages = topological_order_packages(packages)
    unknown_build_types = []
    msg = []
    msg.append('@{pf}~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~' + ('~' * len(str(len(ordered_packages)))))
    msg.append('@{pf}~~@|  traversing %d packages in topological order:' % len(ordered_packages))
    for path, package in ordered_packages:
        if path is None:
            print(fmt('@{rf}Error: Circular dependency in subset of packages: @!%s@|' % package))
            sys.exit('Can not build workspace with circular dependency')

        export_tags = [e.tagname for e in package.exports]
        if 'build_type' in export_tags:
            build_type_tag = [e.content for e in package.exports if e.tagname == 'build_type'][0]
        else:
            build_type_tag = 'catkin'
        if build_type_tag == 'catkin':
            msg.append('@{pf}~~@|  - @!@{bf}' + package.name + '@|')
        elif build_type_tag == 'cmake':
            msg.append(
                '@{pf}~~@|  - @!@{bf}' + package.name + '@|' +
                ' (@!@{cf}plain cmake@|)'
            )
        else:
            msg.append(
                '@{pf}~~@|  - @!@{bf}' + package.name + '@|' +
                ' (@{rf}unknown@|)'
            )
            unknown_build_types.append(package)
    msg.append('@{pf}~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~' + ('~' * len(str(len(ordered_packages)))))
    for index in range(len(msg)):
        msg[index] = fmt(msg[index])
    print('\n'.join(msg))

    # Error if there are packages with unknown build_types
    if unknown_build_types:
        print(colorize_line('Error: Packages with unknown build types exist'))
        sys.exit('Can not build workspace with packages of unknown build_type')

    # Check to see if the workspace has changed
    cmake_args_with_spaces = list(cmake_args)
    if develspace:
        cmake_args_with_spaces.append('-DCATKIN_DEVEL_PREFIX=' + develspace)
    if installspace:
        cmake_args_with_spaces.append('-DCMAKE_INSTALL_PREFIX=' + installspace)
    if (
        not force_cmake and
        cmake_input_changed(packages, buildspace, cmake_args=cmake_args_with_spaces, filename='catkin_make_isolated')
    ):
        print('The packages or cmake arguments have changed, forcing cmake invocation')
        force_cmake = True

    ensure_workspace_marker(workspace)

    # Build packages
    pkg_develspace = None
    last_env = None
    for index, path_package in enumerate(ordered_packages):
        path, package = path_package
        if merge:
            pkg_develspace = develspace
        else:
            pkg_develspace = os.path.join(develspace, package.name)
        if not build_packages or package.name in build_packages:
            if continue_from_pkg and build_packages and package.name in build_packages:
                build_packages = None
            try:
                print()
                last_env = build_package(
                    path, package,
                    workspace, buildspace, pkg_develspace, installspace,
                    install, force_cmake,
                    quiet, last_env, cmake_args, make_args, catkin_make_args,
                    destdir=destdir, use_ninja=use_ninja,
                    number=index + 1, of=len(ordered_packages)
                )
            except subprocess.CalledProcessError as e:
                _print_build_error(package, e)
                # Let users know how to reproduce
                # First add the cd to the build folder of the package
                cmd = 'cd ' + quote(os.path.join(buildspace, package.name)) + ' && '
                # Then reproduce the command called
                if isinstance(e.cmd, list):
                    # quote arguments to allow copy-n-paste of command
                    cmd += ' '.join([quote(arg) for arg in e.cmd])
                else:
                    cmd += e.cmd
                print(fmt("\n@{rf}Reproduce this error by running:"))
                print(fmt("@{gf}@!==> @|") + cmd + "\n")
                sys.exit('Command failed, exiting.')
            except Exception as e:
                print("Unhandled exception of type '{0}':".format(type(e).__name__))
                import traceback
                traceback.print_exc()
                _print_build_error(package, e)
                sys.exit('Command failed, exiting.')
        else:
            cprint("Skipping package: '@!@{bf}" + package.name + "@|'")
            last_env = get_new_env(package, pkg_develspace, installspace, install, last_env, destdir)

    # Provide a top level devel space environment setup script
    if not os.path.exists(develspace):
        os.makedirs(develspace)
    if not build_packages:
        generated_env_sh = os.path.join(develspace, 'env.sh')
        generated_setup_util_py = os.path.join(develspace, '_setup_util.py')
        if not merge and pkg_develspace:
            # generate env.sh and setup.sh|bash|zsh which relay to last devel space
            with open(generated_env_sh, 'w') as f:
                f.write("""\
#!/usr/bin/env sh
# generated from catkin.builder module

{0} "$@"
""".format(os.path.join(pkg_develspace, 'env.sh')))
            os.chmod(generated_env_sh, stat.S_IXUSR | stat.S_IWUSR | stat.S_IRUSR)

            for shell in ['sh', 'bash', 'zsh']:
                with open(os.path.join(develspace, 'setup.%s' % shell), 'w') as f:
                    f.write("""\
#!/usr/bin/env {1}
# generated from catkin.builder module

. "{0}/setup.{1}"
""".format(pkg_develspace, shell))

            # remove _setup_util.py file which might have been generated for an empty devel space before
            if os.path.exists(generated_setup_util_py):
                os.remove(generated_setup_util_py)

        elif not pkg_develspace:
            # generate env.sh and setup.sh|bash|zsh for an empty devel space
            if 'CMAKE_PREFIX_PATH' in os.environ.keys():
                variables = {
                    'CATKIN_GLOBAL_BIN_DESTINATION': 'bin',
                    'CATKIN_LIB_ENVIRONMENT_PATHS': "'lib'",
                    'CATKIN_PKGCONFIG_ENVIRONMENT_PATHS': "os.path.join('lib', 'pkgconfig')",
                    'CMAKE_PREFIX_PATH_AS_IS': ';'.join(os.environ['CMAKE_PREFIX_PATH'].split(os.pathsep)),
                    'PYTHON_EXECUTABLE': sys.executable,
                    'PYTHON_INSTALL_DIR': get_python_install_dir(),
                }
                with open(generated_setup_util_py, 'w') as f:
                    f.write(configure_file(
                        os.path.join(get_cmake_path(), 'templates', '_setup_util.py.in'),
                        variables))
                os.chmod(generated_setup_util_py, stat.S_IXUSR | stat.S_IWUSR | stat.S_IRUSR)
            else:
                sys.exit("Unable to process CMAKE_PREFIX_PATH from environment. Cannot generate environment files.")

            variables = {'SETUP_FILENAME': 'setup'}
            with open(generated_env_sh, 'w') as f:
                f.write(configure_file(os.path.join(get_cmake_path(), 'templates', 'env.sh.in'), variables))
            os.chmod(generated_env_sh, stat.S_IXUSR | stat.S_IWUSR | stat.S_IRUSR)

            variables = {'SETUP_DIR': develspace}
            for shell in ['sh', 'bash', 'zsh']:
                with open(os.path.join(develspace, 'setup.%s' % shell), 'w') as f:
                    f.write(configure_file(
                        os.path.join(get_cmake_path(), 'templates', 'setup.%s.in' % shell),
                        variables))
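
A hypothetical minimal invocation, assuming this function is imported from the catkin.builder module and the current directory is a catkin workspace with a src/ folder:

from catkin.builder import build_workspace_isolated

build_workspace_isolated(workspace='.', merge=False, install=False)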
Example #14
def build_isolated_workspace(context,
                             packages=None,
                             start_with=None,
                             no_deps=False,
                             unbuilt=False,
                             n_jobs=None,
                             force_cmake=False,
                             pre_clean=False,
                             force_color=False,
                             quiet=False,
                             interleave_output=False,
                             no_status=False,
                             limit_status_rate=10.0,
                             lock_install=False,
                             no_notify=False,
                             continue_on_failure=False,
                             summarize_build=None,
                             relaxed_constraints=False,
                             influx_url=None,
                             influx_db=None):
    """Builds a catkin workspace in isolation

    This function will find all of the packages in the source space, start
    some executors, feed them packages to build based on dependencies and
    topological ordering, and then monitor the output of the executors. It
    handles logging of the builds (starting, failing, and finishing packages)
    and shuts down the executors when appropriate.

    :param context: context in which to build the catkin workspace
    :type context: :py:class:`catkin_tools.verbs.catkin_build.context.Context`
    :param packages: list of packages to build, by default their dependencies will also be built
    :type packages: list
    :param start_with: package to start with, skipping all packages which precede it in the topological order
    :type start_with: str
    :param no_deps: If True, the dependencies of packages will not be built first
    :type no_deps: bool
    :param n_jobs: number of parallel package build jobs
    :type n_jobs: int
    :param force_cmake: forces invocation of CMake if True, default is False
    :type force_cmake: bool
    :param force_color: forces colored output even if the terminal does not support it
    :type force_color: bool
    :param quiet: suppresses the output of commands unless there is an error
    :type quiet: bool
    :param interleave_output: prints the output of commands as they are received
    :type interleave_output: bool
    :param no_status: disables status bar
    :type no_status: bool
    :param limit_status_rate: rate to which status updates are limited; 0 places no limit
    :type limit_status_rate: float
    :param lock_install: causes executors to synchronize on access of install commands
    :type lock_install: bool
    :param no_notify: suppresses system notifications
    :type no_notify: bool
    :param continue_on_failure: do not stop building other jobs on error
    :type continue_on_failure: bool
    :param summarize_build: if True, summarizes the build at the end; if None and continue_on_failure is True and
        the build fails, then the build will be summarized; if False, it never will be summarized.
    :type summarize_build: bool
    :param relaxed_constraints: if True, do not use exec_deps for topological ordering
    :type relaxed_constraints: bool
    :param influx_url: URL of the form user:password@host:port to access an InfluxDB instance
    :type influx_url: str
    :param influx_db: database name in InfluxDB
    :type influx_db: str

    :raises: SystemExit if buildspace is a file or no packages were found in the source space
        or if the provided options are invalid
    """
    pre_start_time = time.time()

    # Assert that the limit_status_rate is valid
    if limit_status_rate < 0:
        sys.exit(
            "[build] @!@{rf}Error:@| The value of --status-rate must be greater than or equal to zero."
        )

    # Declare a buildspace marker describing the build config for error checking
    buildspace_marker_data = {
        'workspace': context.workspace,
        'profile': context.profile,
        'install': context.install,
        'install_space': context.install_space_abs,
        'devel_space': context.devel_space_abs,
        'source_space': context.source_space_abs
    }

    # Check build config
    if os.path.exists(
            os.path.join(context.build_space_abs, BUILDSPACE_MARKER_FILE)):
        with open(os.path.join(
                context.build_space_abs,
                BUILDSPACE_MARKER_FILE)) as buildspace_marker_file:
            existing_buildspace_marker_data = yaml.safe_load(
                buildspace_marker_file)
            misconfig_lines = ''
            for (k, v) in existing_buildspace_marker_data.items():
                new_v = buildspace_marker_data.get(k, None)
                if new_v != v:
                    misconfig_lines += (
                        '\n - %s: %s (stored) is not %s (commanded)' %
                        (k, v, new_v))
            if len(misconfig_lines) > 0:
                sys.exit(
                    clr("\n@{rf}Error:@| Attempting to build a catkin workspace using build space: "
                        "\"%s\" but that build space's most recent configuration "
                        "differs from the commanded one in ways which will cause "
                        "problems. Fix the following options or use @{yf}`catkin "
                        "clean -b`@| to remove the build space: %s" %
                        (context.build_space_abs, misconfig_lines)))

    # Summarize the context
    summary_notes = []
    if force_cmake:
        summary_notes += [
            clr("@!@{cf}NOTE:@| Forcing CMake to run for each package.")
        ]
    log(context.summary(summary_notes))

    # Make sure there is a build folder and it is not a file
    if os.path.exists(context.build_space_abs):
        if os.path.isfile(context.build_space_abs):
            sys.exit(
                clr("[build] @{rf}Error:@| Build space '{0}' exists but is a file and not a folder."
                    .format(context.build_space_abs)))
    # If it doesn't exist, create it
    else:
        log("[build] Creating build space: '{0}'".format(
            context.build_space_abs))
        os.makedirs(context.build_space_abs)

    # Write the current build config for config error checking
    with open(os.path.join(context.build_space_abs, BUILDSPACE_MARKER_FILE),
              'w') as buildspace_marker_file:
        buildspace_marker_file.write(
            yaml.dump(buildspace_marker_data, default_flow_style=False))

    # Get all the packages in the context source space
    # Suppress warnings since this is a utility function
    workspace_packages = find_packages(context.source_space_abs,
                                       exclude_subspaces=True,
                                       warnings=[])

    # Get packages which have not been built yet
    built_packages, unbuilt_pkgs = get_built_unbuilt_packages(
        context, workspace_packages)

    # Handle unbuilt packages
    if unbuilt:
        # Check if there are any unbuilt
        if len(unbuilt_pkgs) > 0:
            # Add the unbuilt packages
            packages.extend(list(unbuilt_pkgs))
        else:
            log("[build] No unbuilt packages to be built.")
            return

    # If no_deps is given, ensure packages to build are provided
    if no_deps and packages is None:
        log(
            clr("[build] @!@{rf}Error:@| With no_deps, you must specify packages to build."
                ))
        return

    # Find list of packages in the workspace
    packages_to_be_built, packages_to_be_built_deps, all_packages = determine_packages_to_be_built(
        packages, context, workspace_packages)

    if not no_deps:
        # Extend packages to be built to include their deps
        packages_to_be_built.extend(packages_to_be_built_deps)

    # Also re-sort
    try:
        packages_to_be_built = topological_order_packages(
            dict(packages_to_be_built))
    except AttributeError:
        log(
            clr("[build] @!@{rf}Error:@| The workspace packages have a circular "
                "dependency, and cannot be built. Please run `catkin list "
                "--deps` to determine the problematic package(s)."))
        return

    # Check the number of packages to be built
    if len(packages_to_be_built) == 0:
        log(clr('[build] No packages to be built.'))

    # Assert start_with package is in the workspace
    verify_start_with_option(start_with, packages, all_packages,
                             packages_to_be_built)

    # Populate .catkin file if we're not installing
    # NOTE: This is done to prevent the Catkin CMake code from doing it,
    # which isn't parallel-safe. Catkin CMake only modifies this file if
    # its package source path isn't found.
    if not context.install:
        dot_catkin_file_path = os.path.join(context.devel_space_abs, '.catkin')
        # If the file exists, get the current paths
        if os.path.exists(dot_catkin_file_path):
            dot_catkin_paths = open(dot_catkin_file_path,
                                    'r').read().split(';')
        else:
            dot_catkin_paths = []

        # Update the list with the new packages (in topological order)
        packages_to_be_built_paths = [
            os.path.join(context.source_space_abs, path)
            for path, pkg in packages_to_be_built
        ]

        new_dot_catkin_paths = [
            os.path.join(context.source_space_abs, path) for path in [
                os.path.join(context.source_space_abs, path)
                for path, pkg in all_packages
            ] if path in dot_catkin_paths or path in packages_to_be_built_paths
        ]

        # Write the new file if it's different, otherwise, leave it alone
        if dot_catkin_paths == new_dot_catkin_paths:
            wide_log("[build] Package table is up to date.")
        else:
            wide_log("[build] Updating package table.")
            open(dot_catkin_file_path,
                 'w').write(';'.join(new_dot_catkin_paths))

    # Remove packages before start_with
    if start_with is not None:
        for path, pkg in list(packages_to_be_built):
            if pkg.name != start_with:
                wide_log(
                    clr("@!@{pf}Skipping@|  @{gf}---@| @{cf}{}@|").format(
                        pkg.name))
                packages_to_be_built.pop(0)
            else:
                break

    # Get the names of all packages to be built
    packages_to_be_built_names = [p.name for _, p in packages_to_be_built]
    packages_to_be_built_deps_names = [
        p.name for _, p in packages_to_be_built_deps
    ]

    # Generate prebuild and prebuild clean jobs, if necessary
    prebuild_jobs = {}
    setup_util_present = os.path.exists(
        os.path.join(context.devel_space_abs, '_setup_util.py'))
    catkin_present = 'catkin' in (packages_to_be_built_names +
                                  packages_to_be_built_deps_names)
    catkin_built = 'catkin' in built_packages
    prebuild_built = 'catkin_tools_prebuild' in built_packages

    # Handle the prebuild jobs if the develspace is linked
    prebuild_pkg_deps = []
    if context.link_devel:
        prebuild_pkg = None

        # Construct a dictionary to lookup catkin package by name
        pkg_dict = dict([(pkg.name, (pth, pkg)) for pth, pkg in all_packages])

        if setup_util_present:
            # Setup util is already there, determine if it needs to be
            # regenerated
            if catkin_built:
                if catkin_present:
                    prebuild_pkg_path, prebuild_pkg = pkg_dict['catkin']
            elif prebuild_built:
                if catkin_present:
                    # TODO: Clean prebuild package
                    ct_prebuild_pkg_path = get_prebuild_package(
                        context.build_space_abs, context.devel_space_abs,
                        force_cmake)
                    ct_prebuild_pkg = parse_package(ct_prebuild_pkg_path)

                    prebuild_jobs[
                        'catkin_tools_prebuild'] = create_catkin_clean_job(
                            context,
                            ct_prebuild_pkg,
                            ct_prebuild_pkg_path,
                            dependencies=[],
                            dry_run=False,
                            clean_build=True,
                            clean_devel=True,
                            clean_install=True)

                    # TODO: Build catkin package
                    prebuild_pkg_path, prebuild_pkg = pkg_dict['catkin']
                    prebuild_pkg_deps.append('catkin_tools_prebuild')
            else:
                # How did these get here??
                log("Warning: devel space setup files have an unknown origin.")
        else:
            # Setup util needs to be generated
            if catkin_built or prebuild_built:
                log("Warning: generated devel space setup files have been deleted."
                    )

            if catkin_present:
                # Build catkin package
                prebuild_pkg_path, prebuild_pkg = pkg_dict['catkin']
            else:
                # Generate and build an explicit prebuild package
                prebuild_pkg_path = get_prebuild_package(
                    context.build_space_abs, context.devel_space_abs,
                    force_cmake)
                prebuild_pkg = parse_package(prebuild_pkg_path)

        if prebuild_pkg is not None:
            # Create the prebuild job
            prebuild_job = create_catkin_build_job(
                context,
                prebuild_pkg,
                prebuild_pkg_path,
                build_dependencies=prebuild_pkg_deps,
                run_dependencies=[],
                force_cmake=force_cmake,
                pre_clean=pre_clean,
                prebuild=True)

            # Add the prebuild job
            prebuild_jobs[prebuild_job.jid] = prebuild_job

    # Remove prebuild jobs from normal job list
    for prebuild_jid, prebuild_job in prebuild_jobs.items():
        if prebuild_jid in packages_to_be_built_names:
            packages_to_be_built_names.remove(prebuild_jid)

    # Initial jobs list is just the prebuild jobs
    jobs = [] + list(prebuild_jobs.values())

    # Get all build type plugins
    build_job_creators = {
        ep.name: ep.load()['create_build_job']
        for ep in pkg_resources.iter_entry_points(group='catkin_tools.jobs')
    }

    # It's a problem if there aren't any build types available
    if len(build_job_creators) == 0:
        sys.exit(
            'Error: No build types available. Please check your catkin_tools installation.'
        )

    # Construct jobs
    for pkg_path, pkg in all_packages:
        if pkg.name not in packages_to_be_built_names:
            continue

        # Ignore metapackages
        if 'metapackage' in [e.tagname for e in pkg.exports]:
            continue

        # Get actual execution deps
        build_deps = [
            p.name for _, p in get_cached_recursive_build_depends_in_workspace(
                pkg, packages_to_be_built) if p.name not in prebuild_jobs
        ]
        build_for_run_deps = [
            p.name for _, p in get_cached_recursive_run_depends_in_workspace(
                pkg, packages_to_be_built) if p.name not in prebuild_jobs
        ]

        # All jobs depend on the prebuild jobs if they're defined
        if not no_deps:
            if relaxed_constraints:
                build_for_run_deps = [
                    p.name for _, p in
                    get_recursive_build_depends_for_run_depends_in_workspace(
                        [pkg], packages_to_be_built)
                    if p.name not in prebuild_jobs
                ]
            else:
                # revert to interpreting all dependencies as build dependencies
                build_deps = list(set(build_deps + build_for_run_deps))
                build_for_run_deps = []

            for j in prebuild_jobs.values():
                build_deps.append(j.jid)

        # Determine the job parameters
        build_job_kwargs = dict(context=context,
                                package=pkg,
                                package_path=pkg_path,
                                build_dependencies=build_deps,
                                run_dependencies=build_for_run_deps,
                                force_cmake=force_cmake,
                                pre_clean=pre_clean)

        # Create the job based on the build type
        build_type = get_build_type(pkg)

        if build_type in build_job_creators:
            jobs.append(build_job_creators[build_type](**build_job_kwargs))
        else:
            wide_log(
                clr("[build] @!@{yf}Warning:@| Skipping package `{}` because it "
                    "has an unsupported package build type: `{}`").format(
                        pkg.name, build_type))

            wide_log(clr("[build] Note: Available build types:"))
            for bt_name in build_job_creators.keys():
                wide_log(clr("[build]  - `{}`".format(bt_name)))

    # Queue for communicating status
    event_queue = Queue()

    status_queue = Queue()
    monitoring_queue = Queue()

    class ForwardingQueue(threading.Thread):
        def __init__(self, queues):
            super(ForwardingQueue, self).__init__()
            self.keep_running = True
            self.queues = queues

        def run(self):
            while self.keep_running:
                event = event_queue.get(True)
                for queue in self.queues:
                    queue.put(event)
                if event is None:
                    break

    queue_thread = ForwardingQueue([status_queue, monitoring_queue])

    threads = [queue_thread]

    try:
        # Spin up status output thread
        status_thread = ConsoleStatusController(
            'build', ['package', 'packages'],
            jobs,
            n_jobs, [pkg.name for _, pkg in context.packages],
            [p for p in context.whitelist], [p for p in context.blacklist],
            status_queue,
            show_notifications=not no_notify,
            show_active_status=not no_status,
            show_buffered_stdout=not quiet and not interleave_output,
            show_buffered_stderr=not interleave_output,
            show_live_stdout=interleave_output,
            show_live_stderr=interleave_output,
            show_stage_events=not quiet,
            show_full_summary=(summarize_build is True),
            pre_start_time=pre_start_time,
            active_status_rate=limit_status_rate)
        threads.append(status_thread)

        if influx_db is not None:
            if not have_influx_db:
                sys.exit(
                    clr("[build] @!@{rf}Error:@| InfluxDB monitoring is not "
                        "possible, cannot import influxdb"))

            match = re.match('^(.+):(.+)@(.+):(.+)$', influx_url)
            if not match:
                sys.exit(
                    clr("[build] @!@{rf}Error:@| The value of --influx has to "
                        "be of the form username:password@host:port"))
            username, password, host, port = match.groups()

            influxdb_thread = InfluxDBStatusController(monitoring_queue,
                                                       influx_db, host, port,
                                                       username, password)

            threads.append(influxdb_thread)

        for thread in threads:
            thread.start()

        # Initialize locks
        locks = {
            'installspace': asyncio.Lock() if lock_install else FakeLock()
        }

        # Block while running N jobs asynchronously
        try:
            all_succeeded = run_until_complete(
                execute_jobs('build',
                             jobs,
                             locks,
                             event_queue,
                             context.log_space_abs,
                             max_toplevel_jobs=n_jobs,
                             continue_on_failure=continue_on_failure,
                             continue_without_deps=False,
                             relaxed_constraints=relaxed_constraints))
        except Exception:
            all_succeeded = False
            for thread in threads:
                thread.keep_running = False
            for thread in threads:
                thread.join(1.0)
            wide_log(str(traceback.format_exc()))

        event_queue.put(None)

        for thread in threads:
            thread.join(1.0)

        # Warn user about new packages
        now_built_packages, now_unbuilt_pkgs = get_built_unbuilt_packages(
            context, workspace_packages)
        new_pkgs = [p for p in unbuilt_pkgs if p not in now_unbuilt_pkgs]
        if len(new_pkgs) > 0:
            log(
                clr("[build] @/@!Note:@| @/Workspace packages have changed, "
                    "please re-source setup files to use them.@|"))

        if all_succeeded:
            # Create isolated devel setup if necessary
            if context.isolate_devel:
                if not context.install:
                    _create_unmerged_devel_setup(context, now_unbuilt_pkgs)
                else:
                    _create_unmerged_devel_setup_for_install(context)
            return 0
        else:
            return 1

    except KeyboardInterrupt:
        wide_log("[build] Interrupted by user!")
        event_queue.put(None)

        return 130  # 128 + SIGINT(2): the conventional exit code for Ctrl-C
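
The ForwardingQueue helper above fans one event stream out to several consumer queues and uses None as a shutdown sentinel. Below is a minimal, self-contained sketch of that pattern; all names are illustrative and not part of catkin_tools.

import threading
from queue import Queue

def forward(source, sinks):
    # Drain the source queue and replicate every event to each sink;
    # a None event is forwarded (so consumers also see it) and stops the loop.
    while True:
        event = source.get()
        for sink in sinks:
            sink.put(event)
        if event is None:
            break

source = Queue()
status, monitoring = Queue(), Queue()
worker = threading.Thread(target=forward, args=(source, [status, monitoring]))
worker.start()
source.put({'stage': 'build', 'ok': True})
source.put(None)  # request shutdown
worker.join()
assert status.get() == {'stage': 'build', 'ok': True}
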
Example #20
def document_workspace(
    context,
    packages=None,
    start_with=None,
    no_deps=False,
    n_jobs=None,
    force_color=False,
    quiet=False,
    interleave_output=False,
    no_status=False,
    limit_status_rate=10.0,
    no_notify=False,
    continue_on_failure=False,
    summarize_build=None
):
    pre_start_time = time.time()

    # Get all the packages in the context source space
    # Suppress warnings since this is a utility function
    workspace_packages = find_packages(context.source_space_abs, exclude_subspaces=True, warnings=[])

    # If no_deps is given, ensure packages to build are provided
    if no_deps and packages is None:
        log(fmt("[document] @!@{rf}Error:@| With no_deps, you must specify packages to build."))
        return

    # Find list of packages in the workspace
    packages_to_be_documented, packages_to_be_documented_deps, all_packages = determine_packages_to_be_built(
        packages, context, workspace_packages)

    if not no_deps:
        # Extend packages to be documented to include their deps
        packages_to_be_documented.extend(packages_to_be_documented_deps)

    # Also re-sort
    try:
        packages_to_be_documented = topological_order_packages(dict(packages_to_be_documented))
    except AttributeError:
        log(fmt("[document] @!@{rf}Error:@| The workspace packages have a circular "
                "dependency, and cannot be documented. Please run `catkin list "
                "--deps` to determine the problematic package(s)."))
        return

    # Check the number of packages to be documented
    if len(packages_to_be_documented) == 0:
        log(fmt('[document] No packages to be documented.'))

    # Assert start_with package is in the workspace
    verify_start_with_option(
        start_with,
        packages,
        all_packages,
        packages_to_be_documented + packages_to_be_documented_deps)

    # Remove packages before start_with
    if start_with is not None:
        for path, pkg in list(packages_to_be_documented):
            if pkg.name != start_with:
                wide_log(fmt("@!@{pf}Skipping@|  @{gf}---@| @{cf}{}@|").format(pkg.name))
                packages_to_be_documented.pop(0)
            else:
                break

    # Get the names of all packages to be documented
    packages_to_be_documented_names = [p.name for _, p in packages_to_be_documented]
    packages_to_be_documented_deps_names = [p.name for _, p in packages_to_be_documented_deps]

    jobs = []

    # Construct jobs
    for pkg_path, pkg in all_packages:
        if pkg.name not in packages_to_be_documented_names:
            continue

        # Get actual execution deps
        deps = [
            p.name for _, p
            in get_cached_recursive_build_depends_in_workspace(pkg, packages_to_be_documented)
        ]

        jobs.append(create_package_job(context, pkg, pkg_path, deps))

    # Special job for post-job summary sphinx step.
    jobs.append(create_summary_job(context, package_names=packages_to_be_documented_names))

    # Queue for communicating status
    event_queue = Queue()

    try:
        # Spin up status output thread
        status_thread = ConsoleStatusController(
            'document',
            ['package', 'packages'],
            jobs,
            n_jobs,
            [pkg.name for _, pkg in context.packages],
            [p for p in context.whitelist],
            [p for p in context.blacklist],
            event_queue,
            show_notifications=not no_notify,
            show_active_status=not no_status,
            show_buffered_stdout=not quiet and not interleave_output,
            show_buffered_stderr=not interleave_output,
            show_live_stdout=interleave_output,
            show_live_stderr=interleave_output,
            show_stage_events=not quiet,
            show_full_summary=(summarize_build is True),
            pre_start_time=pre_start_time,
            active_status_rate=limit_status_rate)
        status_thread.start()

        # Block while running N jobs asynchronously
        try:
            all_succeeded = run_until_complete(execute_jobs(
                'document',
                jobs,
                None,
                event_queue,
                context.log_space_abs,
                max_toplevel_jobs=n_jobs,
                continue_on_failure=continue_on_failure,
                continue_without_deps=False))

        except Exception:
            status_thread.keep_running = False
            all_succeeded = False
            status_thread.join(1.0)
            wide_log(str(traceback.format_exc()))
        status_thread.join(1.0)

        return 0 if all_succeeded else 1

    except KeyboardInterrupt:
        wide_log("[document] Interrupted by user!")
        event_queue.put(None)

        return 130  # 128 + SIGINT(2): the conventional exit code for Ctrl-C
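
The start_with handling above walks the topologically ordered list and pops packages until the requested one is reached. A stand-alone sketch of that trimming step, using made-up (path, name) tuples:

def skip_until(ordered, start_with):
    # Keep only the entries from the requested package onward;
    # if the package is not found, keep everything.
    for index, (_, name) in enumerate(ordered):
        if name == start_with:
            return ordered[index:]
    return ordered

ordered = [('pa', 'a'), ('pc', 'c'), ('pb', 'b')]
assert skip_until(ordered, 'c') == [('pc', 'c'), ('pb', 'b')]
assert skip_until(ordered, 'missing') == ordered
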
Example #21
def determine_packages_to_be_built(packages, context, workspace_packages):
    """Returns list of packages which should be built, and those package's deps.

    :param packages: list of packages to be built, if None all packages are built
    :type packages: list
    :param context: Workspace context
    :type context: :py:class:`catkin_tools.verbs.catkin_build.context.Context`
    :param workspace_packages: dict of packages found in the workspace
    :type workspace_packages: dict
    :returns: tuple of (packages to be built, those packages' recursive deps,
        all packages in topological order)
    :rtype: tuple
    """
    start = time.time()

    # If there are no packages raise
    if not workspace_packages:
        log("[build] No packages were found in the source space '{0}'".format(
            context.source_space_abs))
    else:
        wide_log("[build] Found '{0}' packages in {1}.".format(
            len(workspace_packages), format_time_delta(time.time() - start)))

    # Order the packages by topology
    ordered_packages = topological_order_packages(workspace_packages)
    # Set the packages in the workspace for the context
    context.packages = ordered_packages
    # Determine the packages which should be built
    packages_to_be_built = []
    packages_to_be_built_deps = []

    # Check if topological_order_packages determined any circular dependencies, if so print an error and fail.
    # If this is the case, the last entry of ordered packages is a tuple that starts with None.
    if ordered_packages and ordered_packages[-1][0] is None:
        guilty_packages = ", ".join(ordered_packages[-1][1:])
        sys.exit(
            "[build] Circular dependency detected in the following packages: {}"
            .format(guilty_packages))

    workspace_package_names = dict([(pkg.name, (path, pkg))
                                    for path, pkg in ordered_packages])
    # Determine the packages to be built
    if packages:
        # First assert all of the packages given are in the workspace; the
        # list is intentionally extended during iteration so that names added
        # by glob expansion or metapackage run depends are validated as well
        for package in packages:
            if package not in workspace_package_names:
                # Check whether the name is a glob pattern matching packages
                glob_packages = expand_glob_package(package,
                                                    workspace_package_names)
                if len(glob_packages) > 0:
                    packages.extend(glob_packages)
                    continue
                else:
                    sys.exit(
                        "[build] Given package '{0}' is not in the workspace "
                        "and pattern does not match any package".format(
                            package))
            # If metapackage, include run depends which are in the workspace
            package_obj = workspace_package_names[package][1]
            if 'metapackage' in [e.tagname for e in package_obj.exports]:
                for rdep in package_obj.run_depends:
                    if rdep.name in workspace_package_names:
                        packages.append(rdep.name)
        # Limit the packages to be built to just the provided packages
        for pkg_path, package in ordered_packages:
            if package.name in packages:
                packages_to_be_built.append((pkg_path, package))
                # Get the recursive dependencies for each of these packages
                pkg_deps = get_cached_recursive_build_depends_in_workspace(
                    package, ordered_packages)
                packages_to_be_built_deps.extend(pkg_deps)
    else:
        # Only use whitelist when no other packages are specified
        if len(context.whitelist) > 0:
            # Expand glob patterns in whitelist
            whitelist = []
            for whitelisted_package in context.whitelist:
                whitelist.extend(
                    expand_glob_package(whitelisted_package,
                                        workspace_package_names))
            packages_to_be_built = [
                p for p in ordered_packages if (p[1].name in whitelist)
            ]
        else:
            packages_to_be_built = ordered_packages

    # Filter packages with blacklist
    if len(context.blacklist) > 0:
        # Expand glob patterns in blacklist
        blacklist = []
        for blacklisted_package in context.blacklist:
            blacklist.extend(
                expand_glob_package(blacklisted_package,
                                    workspace_package_names))
        # Apply blacklist to packages and dependencies
        packages_to_be_built = [
            (path, pkg) for path, pkg in packages_to_be_built
            if (pkg.name not in blacklist or pkg.name in packages)
        ]
        packages_to_be_built_deps = [
            (path, pkg) for path, pkg in packages_to_be_built_deps
            if (pkg.name not in blacklist or pkg.name in packages)
        ]

    return packages_to_be_built, packages_to_be_built_deps, ordered_packages
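
expand_glob_package is internal to catkin_tools; a plausible stand-in built on fnmatch shows how whitelist and blacklist entries can be expanded against the names of workspace packages. This is assumed behavior for illustration, not the actual implementation.

from fnmatch import fnmatch

def expand_glob(pattern, package_names):
    # Return every package name the shell-style pattern matches.
    return [name for name in package_names if fnmatch(name, pattern)]

names = ['roscpp', 'rospy', 'rosmsg']
assert expand_glob('ros*', names) == ['roscpp', 'rospy', 'rosmsg']
assert expand_glob('rosp?', names) == ['rospy']
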
Example #22
def build_workspace_isolated(workspace='.',
                             sourcespace=None,
                             buildspace=None,
                             develspace=None,
                             installspace=None,
                             merge=False,
                             install=False,
                             force_cmake=False,
                             colorize=True,
                             build_packages=None,
                             quiet=False,
                             cmake_args=None,
                             make_args=None,
                             catkin_make_args=None):
    '''
    Runs ``cmake``, ``make`` and optionally ``make install`` for all
    catkin packages in sourcespace_dir.  It creates several folders
    in the current working directory. For non-catkin packages it runs
    ``cmake``, ``make`` and ``make install`` for each, installing it to
    the devel space or install space if the ``install`` option is specified.

    :param workspace: path to the current workspace, ``str``
    :param sourcespace: workspace folder containing catkin packages, ``str``
    :param buildspace: path to build space location, ``str``
    :param develspace: path to devel space location, ``str``
    :param installspace: path to install space (CMAKE_INSTALL_PREFIX), ``str``
    :param merge: if True, build each catkin package into the same
        devel space. does not work with non-catkin packages, ``bool``
    :param install: if True, install all packages to the install space,
        ``bool``
    :param force_cmake: (optional), if True calls cmake explicitly for each
        package, ``bool``
    :param colorize: if True, colorize cmake output and other messages,
        ``bool``
    :param build_packages: specific packages to build (all parent packages
        in the topological order must have been built before), ``str``
    :param quiet: if True, hides some build output, ``bool``
    :param cmake_args: additional arguments for cmake, ``[str]``
    :param make_args: additional arguments for make, ``[str]``
    :param catkin_make_args: additional arguments for make but only for catkin
        packages, ``[str]``
    '''
    if not colorize:
        disable_ANSI_colors()

    # Check workspace existence
    if not os.path.exists(workspace):
        sys.exit("Workspace path '{0}' does not exist.".format(workspace))
    workspace = os.path.abspath(workspace)

    # Check source space existence
    if sourcespace is None:
        ws_sourcespace = os.path.join(workspace, 'src')
        if not os.path.exists(ws_sourcespace):
            sys.exit("Could not find source space: {0}".format(sourcespace))
        sourcespace = ws_sourcespace
    sourcespace = os.path.abspath(sourcespace)
    print('Base path: ' + str(workspace))
    print('Source space: ' + str(sourcespace))

    # Check build space
    if buildspace is None:
        buildspace = os.path.join(workspace, 'build_isolated')
    buildspace = os.path.abspath(buildspace)
    if not os.path.exists(buildspace):
        os.mkdir(buildspace)
    print('Build space: ' + str(buildspace))

    # Check devel space
    if develspace is None:
        develspace = os.path.join(workspace, 'devel_isolated')
    develspace = os.path.abspath(develspace)
    print('Devel space: ' + str(develspace))

    # Check install space
    if installspace is None:
        installspace = os.path.join(workspace, 'install_isolated')
    installspace = os.path.abspath(installspace)
    print('Install space: ' + str(installspace))

    if cmake_args:
        print("Additional CMake Arguments: " + " ".join(cmake_args))
    else:
        cmake_args = []

    if make_args:
        print("Additional make Arguments: " + " ".join(make_args))
    else:
        make_args = []

    if catkin_make_args:
        print("Additional make Arguments for catkin packages: " +
              " ".join(catkin_make_args))
    else:
        catkin_make_args = []

    # Find packages
    packages = find_packages(sourcespace, exclude_subspaces=True)
    if not packages:
        print(fmt("@{yf}No packages found in source space: %s@|" %
                  sourcespace))

    # verify that specified package exists in workspace
    if build_packages:
        packages_by_name = {p.name: path for path, p in packages.items()}
        unknown_packages = [
            p for p in build_packages if p not in packages_by_name
        ]
        if unknown_packages:
            sys.exit('Packages not found in the workspace: %s' %
                     ', '.join(unknown_packages))

    # Report topological ordering
    ordered_packages = topological_order_packages(packages)
    unknown_build_types = []
    msg = []
    msg.append('@{pf}~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~' +
               ('~' * len(str(len(ordered_packages)))))
    msg.append('@{pf}~~@|  traversing %d packages in topological order:' %
               len(ordered_packages))
    for path, package in ordered_packages:
        export_tags = [e.tagname for e in package.exports]
        if 'build_type' in export_tags:
            build_type_tag = [
                e.content for e in package.exports if e.tagname == 'build_type'
            ][0]
        else:
            build_type_tag = 'catkin'
        if build_type_tag == 'catkin':
            msg.append('@{pf}~~@|  - @!@{bf}' + package.name + '@|')
        elif build_type_tag == 'cmake':
            msg.append('@{pf}~~@|  - @!@{bf}' + package.name + '@|' +
                       ' (@!@{cf}plain cmake@|)')
        else:
            msg.append('@{pf}~~@|  - @!@{bf}' + package.name + '@|' +
                       ' (@{rf}unknown@|)')
            unknown_build_types.append(package)
    msg.append('@{pf}~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~' +
               ('~' * len(str(len(ordered_packages)))))
    for index in range(len(msg)):
        msg[index] = fmt(msg[index])
    print('\n'.join(msg))

    # Error if there are packages with unknown build_types
    if unknown_build_types:
        print(colorize_line('Error: Packages with unknown build types exist'))
        sys.exit('Can not build workspace with packages of unknown build_type')

    # Check to see if the workspace has changed
    install_toggled = False
    if not force_cmake:
        force_cmake, install_toggled = cmake_input_changed(
            packages,
            buildspace,
            install=install,
            cmake_args=cmake_args,
            filename='catkin_make_isolated')
        if force_cmake:
            print(
                'The packages or cmake arguments have changed, forcing cmake invocation'
            )
        elif install_toggled:
            print(
                'The install argument has been toggled, forcing cmake invocation on plain cmake package'
            )

    # Build packages
    pkg_develspace = None
    last_env = None
    for index, path_package in enumerate(ordered_packages):
        path, package = path_package
        if merge:
            pkg_develspace = develspace
        else:
            pkg_develspace = os.path.join(develspace, package.name)
        if not build_packages or package.name in build_packages:
            try:
                export_tags = [e.tagname for e in package.exports]
                is_cmake_package = 'cmake' in [
                    e.content for e in package.exports
                    if e.tagname == 'build_type'
                ]
                last_env = build_package(
                    path,
                    package,
                    workspace,
                    buildspace,
                    pkg_develspace,
                    installspace,
                    install,
                    force_cmake or (install_toggled and is_cmake_package),
                    quiet,
                    last_env,
                    cmake_args,
                    make_args,
                    catkin_make_args,
                    number=index + 1,
                    of=len(ordered_packages))
            except Exception as e:
                import traceback
                traceback.print_exc()
                cprint('@{rf}@!<==@| ' +
                       'Failed to process package \'@!@{bf}' + package.name +
                       '@|\': \n  ' + ('KeyboardInterrupt' if isinstance(
                           e, KeyboardInterrupt) else str(e)))
                if isinstance(e, subprocess.CalledProcessError):
                    cmd = ' '.join(e.cmd) if isinstance(e.cmd, list) else e.cmd
                    print(fmt("\n@{rf}Reproduce this error by running:"))
                    print(fmt("@{gf}@!==> @|") + cmd + "\n")
                sys.exit('Command failed, exiting.')
        else:
            cprint("Skipping package: '@!@{bf}" + package.name + "@|'")
            last_env = get_new_env(package, pkg_develspace, installspace,
                                   install, last_env)

    # Provide a top level devel space environment setup script
    if not os.path.exists(develspace):
        os.makedirs(develspace)
    if not build_packages:
        generated_env_sh = os.path.join(develspace, 'env.sh')
        generated_setup_sh = os.path.join(develspace, 'setup.sh')
        generated_setup_util_py = os.path.join(develspace, '_setup_util.py')
        if not merge and pkg_develspace:
            # generate env.sh and setup.sh which relay to last devel space
            with open(generated_env_sh, 'w') as f:
                f.write("""\
#!/usr/bin/env sh
# generated from catkin.builder module

{0} "$@"
""".format(os.path.join(pkg_develspace, 'env.sh')))
            os.chmod(generated_env_sh,
                     stat.S_IXUSR | stat.S_IWUSR | stat.S_IRUSR)
            with open(generated_setup_sh, 'w') as f:
                f.write("""\
#!/usr/bin/env sh
# generated from catkin.builder module

. "{0}/setup.sh"
""".format(pkg_develspace))

        elif not pkg_develspace:
            # generate env.sh and setup.sh for an empty devel space
            if 'CMAKE_PREFIX_PATH' in os.environ:
                variables = {
                    'CATKIN_GLOBAL_BIN_DESTINATION':
                    'bin',
                    'CATKIN_GLOBAL_LIB_DESTINATION':
                    'lib',
                    'CMAKE_PREFIX_PATH_AS_IS':
                    ';'.join(os.environ['CMAKE_PREFIX_PATH'].split(
                        os.pathsep)),
                    'PYTHON_INSTALL_DIR':
                    get_python_install_dir(),
                    'SETUP_DIR':
                    '',
                }
                with open(generated_setup_util_py, 'w') as f:
                    f.write(
                        configure_file(
                            os.path.join(get_cmake_path(), 'templates',
                                         '_setup_util.py.in'), variables))
                os.chmod(generated_setup_util_py,
                         stat.S_IXUSR | stat.S_IWUSR | stat.S_IRUSR)
            else:
                sys.exit(
                    "Unable to process CMAKE_PREFIX_PATH from environment. Cannot generate environment files."
                )

            variables = {'SETUP_DIR': develspace, 'SETUP_FILENAME': 'setup'}
            with open(generated_env_sh, 'w') as f:
                f.write(
                    configure_file(
                        os.path.join(get_cmake_path(), 'templates',
                                     'env.sh.in'), variables))
            os.chmod(generated_env_sh,
                     stat.S_IXUSR | stat.S_IWUSR | stat.S_IRUSR)
            variables = {'SETUP_DIR': develspace}
            with open(generated_setup_sh, 'w') as f:
                f.write(
                    configure_file(
                        os.path.join(get_cmake_path(), 'templates',
                                     'setup.sh.in'), variables))

        if not merge and pkg_develspace:
            # remove _setup_util.py file which might have been generated for an empty devel space before
            if os.path.exists(generated_setup_util_py):
                os.remove(generated_setup_util_py)

        if not merge or not pkg_develspace:
            # generate setup.bash and setup.zsh for convenience
            variables = {'SETUP_DIR': develspace}
            with open(os.path.join(develspace, 'setup.bash'), 'w') as f:
                f.write(
                    configure_file(
                        os.path.join(get_cmake_path(), 'templates',
                                     'setup.bash.in'), variables))
            with open(os.path.join(develspace, 'setup.zsh'), 'w') as f:
                f.write(
                    configure_file(
                        os.path.join(get_cmake_path(), 'templates',
                                     'setup.zsh.in'), variables))
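
The build_type lookup above (defaulting to 'catkin' when no build_type export is present) recurs in several of these examples. A small extraction of that logic, with an Export stand-in that mimics the tagname/content attributes of catkin_pkg export objects:

class Export:
    # Hypothetical stand-in for catkin_pkg's export entries.
    def __init__(self, tagname, content=None):
        self.tagname = tagname
        self.content = content

def get_build_type_tag(exports, default='catkin'):
    # Return the content of the first build_type export, or the default.
    for export in exports:
        if export.tagname == 'build_type':
            return export.content
    return default

assert get_build_type_tag([Export('metapackage')]) == 'catkin'
assert get_build_type_tag([Export('build_type', 'cmake')]) == 'cmake'
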
Example #23
def build_workspace_isolated(workspace='.',
                             sourcespace=None,
                             buildspace=None,
                             develspace=None,
                             installspace=None,
                             merge=False,
                             install=False,
                             force_cmake=False,
                             colorize=True,
                             build_packages=None,
                             quiet=False,
                             cmake_args=None,
                             make_args=None,
                             catkin_make_args=None,
                             continue_from_pkg=False,
                             only_pkg_with_deps=None,
                             destdir=None,
                             use_ninja=False):
    '''
    Runs ``cmake``, ``make`` and optionally ``make install`` for all
    catkin packages in sourcespace_dir.  It creates several folders
    in the current working directory. For non-catkin packages it runs
    ``cmake``, ``make`` and ``make install`` for each, installing it to
    the devel space or install space if the ``install`` option is specified.

    :param workspace: path to the current workspace, ``str``
    :param sourcespace: workspace folder containing catkin packages, ``str``
    :param buildspace: path to build space location, ``str``
    :param develspace: path to devel space location, ``str``
    :param installspace: path to install space (CMAKE_INSTALL_PREFIX), ``str``
    :param merge: if True, build each catkin package into the same
        devel space (not affecting plain cmake packages), ``bool``
    :param install: if True, install all packages to the install space,
        ``bool``
    :param force_cmake: (optional), if True calls cmake explicitly for each
        package, ``bool``
    :param colorize: if True, colorize cmake output and other messages,
        ``bool``
    :param build_packages: specific packages to build (all parent packages
        in the topological order must have been built before), ``str``
    :param quiet: if True, hides some build output, ``bool``
    :param cmake_args: additional arguments for cmake, ``[str]``
    :param make_args: additional arguments for make, ``[str]``
    :param catkin_make_args: additional arguments for make but only for catkin
        packages, ``[str]``
    :param continue_from_pkg: if True, the build resumes once one of the
        packages in build_packages is reached and then continues with every
        package after it in the topological ordering, ``bool``
    :param only_pkg_with_deps: only consider the specific packages and their
        recursive dependencies and ignore all other packages in the workspace,
        ``[str]``
    :param destdir: define DESTDIR for the install invocation, ``str``
    :param use_ninja: if True, use ninja instead of make, ``bool``
    '''
    if not colorize:
        disable_ANSI_colors()

    # Check workspace existence
    if not os.path.exists(workspace):
        sys.exit("Workspace path '{0}' does not exist.".format(workspace))
    workspace = os.path.abspath(workspace)

    # Check source space existence
    if sourcespace is None:
        sourcespace = os.path.join(workspace, 'src')
    if not os.path.exists(sourcespace):
        sys.exit('Could not find source space: {0}'.format(sourcespace))
    print('Base path: ' + str(workspace))
    print('Source space: ' + str(sourcespace))

    # Check build space
    if buildspace is None:
        buildspace = os.path.join(workspace, 'build_isolated')
    if not os.path.exists(buildspace):
        os.mkdir(buildspace)
    print('Build space: ' + str(buildspace))

    # Check devel space
    if develspace is None:
        develspace = os.path.join(workspace, 'devel_isolated')
    print('Devel space: ' + str(develspace))

    # Check install space
    if installspace is None:
        installspace = os.path.join(workspace, 'install_isolated')
    print('Install space: ' + str(installspace))

    if cmake_args:
        print("Additional CMake Arguments: " + " ".join(cmake_args))
    else:
        cmake_args = []

    if not [arg for arg in cmake_args if arg.startswith('-G')]:
        if not use_ninja:
            cmake_args += ['-G', 'Unix Makefiles']
        else:
            cmake_args += ['-G', 'Ninja']
    elif use_ninja:
        print(
            colorize_line(
                "Error: either specify a generator using '-G...' or '--use-ninja' but not both"
            ))
        sys.exit(1)

    if make_args:
        print("Additional make Arguments: " + " ".join(make_args))
    else:
        make_args = []

    if catkin_make_args:
        print("Additional make Arguments for catkin packages: " +
              " ".join(catkin_make_args))
    else:
        catkin_make_args = []

    # Find packages
    packages = find_packages(sourcespace, exclude_subspaces=True)
    if not packages:
        print(fmt("@{yf}No packages found in source space: %s@|" %
                  sourcespace))

    # whitelist packages and their dependencies in workspace
    if only_pkg_with_deps:
        package_names = [p.name for p in packages.values()]
        unknown_packages = [
            name for name in only_pkg_with_deps if name not in package_names
        ]
        if unknown_packages:
            sys.exit('Packages not found in the workspace: %s' %
                     ', '.join(unknown_packages))

        whitelist_pkg_names = get_package_names_with_recursive_dependencies(
            packages, only_pkg_with_deps)
        print('Whitelisted packages: %s' %
              ', '.join(sorted(whitelist_pkg_names)))
        packages = {
            path: p
            for path, p in packages.items() if p.name in whitelist_pkg_names
        }

    # verify that specified package exists in workspace
    if build_packages:
        packages_by_name = {p.name: path for path, p in packages.items()}
        unknown_packages = [
            p for p in build_packages if p not in packages_by_name
        ]
        if unknown_packages:
            sys.exit('Packages not found in the workspace: %s' %
                     ', '.join(unknown_packages))

    # Report topological ordering
    ordered_packages = topological_order_packages(packages)
    unknown_build_types = []
    msg = []
    msg.append('@{pf}~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~' +
               ('~' * len(str(len(ordered_packages)))))
    msg.append('@{pf}~~@|  traversing %d packages in topological order:' %
               len(ordered_packages))
    for path, package in ordered_packages:
        if path is None:
            print(
                fmt('@{rf}Error: Circular dependency in subset of packages: @!%s@|'
                    % package))
            sys.exit('Can not build workspace with circular dependency')

        export_tags = [e.tagname for e in package.exports]
        if 'build_type' in export_tags:
            build_type_tag = [
                e.content for e in package.exports if e.tagname == 'build_type'
            ][0]
        else:
            build_type_tag = 'catkin'
        if build_type_tag == 'catkin':
            msg.append('@{pf}~~@|  - @!@{bf}' + package.name + '@|')
        elif build_type_tag == 'cmake':
            msg.append('@{pf}~~@|  - @!@{bf}' + package.name + '@|' +
                       ' (@!@{cf}plain cmake@|)')
        else:
            msg.append('@{pf}~~@|  - @!@{bf}' + package.name + '@|' +
                       ' (@{rf}unknown@|)')
            unknown_build_types.append(package)
    msg.append('@{pf}~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~' +
               ('~' * len(str(len(ordered_packages)))))
    for index in range(len(msg)):
        msg[index] = fmt(msg[index])
    print('\n'.join(msg))

    # Error if there are packages with unknown build_types
    if unknown_build_types:
        print(colorize_line('Error: Packages with unknown build types exist'))
        sys.exit('Can not build workspace with packages of unknown build_type')

    # Check to see if the workspace has changed
    cmake_args_with_spaces = list(cmake_args)
    if develspace:
        cmake_args_with_spaces.append('-DCATKIN_DEVEL_PREFIX=' + develspace)
    if installspace:
        cmake_args_with_spaces.append('-DCMAKE_INSTALL_PREFIX=' + installspace)
    if not force_cmake and cmake_input_changed(
            packages,
            buildspace,
            cmake_args=cmake_args_with_spaces,
            filename='catkin_make_isolated'):
        print(
            'The packages or cmake arguments have changed, forcing cmake invocation'
        )
        force_cmake = True

    ensure_workspace_marker(workspace)

    # Build packages
    pkg_develspace = None
    last_env = None
    for index, path_package in enumerate(ordered_packages):
        path, package = path_package
        if merge:
            pkg_develspace = develspace
        else:
            pkg_develspace = os.path.join(develspace, package.name)
        if not build_packages or package.name in build_packages:
            if continue_from_pkg and build_packages and package.name in build_packages:
                build_packages = None
            try:
                print()
                last_env = build_package(path,
                                         package,
                                         workspace,
                                         buildspace,
                                         pkg_develspace,
                                         installspace,
                                         install,
                                         force_cmake,
                                         quiet,
                                         last_env,
                                         cmake_args,
                                         make_args,
                                         catkin_make_args,
                                         destdir=destdir,
                                         use_ninja=use_ninja,
                                         number=index + 1,
                                         of=len(ordered_packages))
            except subprocess.CalledProcessError as e:
                _print_build_error(package, e)
                # Let users know how to reproduce
                # First add the cd to the build folder of the package
                cmd = 'cd ' + quote(os.path.join(buildspace,
                                                 package.name)) + ' && '
                # Then reproduce the command called
                if isinstance(e.cmd, list):
                    # quote arguments to allow copy-n-paste of command
                    cmd += ' '.join([quote(arg) for arg in e.cmd])
                else:
                    cmd += e.cmd
                print(fmt("\n@{rf}Reproduce this error by running:"))
                print(fmt("@{gf}@!==> @|") + cmd + "\n")
                sys.exit('Command failed, exiting.')
            except Exception as e:
                print("Unhandled exception of type '{0}':".format(
                    type(e).__name__))
                import traceback
                traceback.print_exc()
                _print_build_error(package, e)
                sys.exit('Command failed, exiting.')
        else:
            cprint("Skipping package: '@!@{bf}" + package.name + "@|'")
            last_env = get_new_env(package, pkg_develspace, installspace,
                                   install, last_env, destdir)

    # Provide a top level devel space environment setup script
    if not os.path.exists(develspace):
        os.makedirs(develspace)
    if not build_packages:
        generated_env_sh = os.path.join(develspace, 'env.sh')
        generated_setup_util_py = os.path.join(develspace, '_setup_util.py')
        if not merge and pkg_develspace:
            # generate env.sh and setup.sh|bash|zsh which relay to last devel space
            with open(generated_env_sh, 'w') as f:
                f.write("""\
#!/usr/bin/env sh
# generated from catkin.builder module

{0} "$@"
""".format(os.path.join(pkg_develspace, 'env.sh')))
            os.chmod(generated_env_sh,
                     stat.S_IXUSR | stat.S_IWUSR | stat.S_IRUSR)

            for shell in ['sh', 'bash', 'zsh']:
                with open(os.path.join(develspace, 'setup.%s' % shell),
                          'w') as f:
                    f.write("""\
#!/usr/bin/env {1}
# generated from catkin.builder module

. "{0}/setup.{1}"
""".format(pkg_develspace, shell))

            # remove _setup_util.py file which might have been generated for an empty devel space before
            if os.path.exists(generated_setup_util_py):
                os.remove(generated_setup_util_py)

        elif not pkg_develspace:
            # generate env.sh and setup.sh|bash|zsh for an empty devel space
            if 'CMAKE_PREFIX_PATH' in os.environ:
                variables = {
                    'CATKIN_GLOBAL_BIN_DESTINATION':
                    'bin',
                    'CATKIN_LIB_ENVIRONMENT_PATHS':
                    "'lib'",
                    'CATKIN_PKGCONFIG_ENVIRONMENT_PATHS':
                    "os.path.join('lib', 'pkgconfig')",
                    'CMAKE_PREFIX_PATH_AS_IS':
                    ';'.join(os.environ['CMAKE_PREFIX_PATH'].split(
                        os.pathsep)),
                    'PYTHON_EXECUTABLE':
                    sys.executable,
                    'PYTHON_INSTALL_DIR':
                    get_python_install_dir(),
                }
                with open(generated_setup_util_py, 'w') as f:
                    f.write(
                        configure_file(
                            os.path.join(get_cmake_path(), 'templates',
                                         '_setup_util.py.in'), variables))
                os.chmod(generated_setup_util_py,
                         stat.S_IXUSR | stat.S_IWUSR | stat.S_IRUSR)
            else:
                sys.exit(
                    "Unable to process CMAKE_PREFIX_PATH from environment. Cannot generate environment files."
                )

            variables = {'SETUP_FILENAME': 'setup'}
            with open(generated_env_sh, 'w') as f:
                f.write(
                    configure_file(
                        os.path.join(get_cmake_path(), 'templates',
                                     'env.sh.in'), variables))
            os.chmod(generated_env_sh,
                     stat.S_IXUSR | stat.S_IWUSR | stat.S_IRUSR)

            variables = {'SETUP_DIR': develspace}
            for shell in ['sh', 'bash', 'zsh']:
                with open(os.path.join(develspace, 'setup.%s' % shell),
                          'w') as f:
                    f.write(
                        configure_file(
                            os.path.join(get_cmake_path(), 'templates',
                                         'setup.%s.in' % shell), variables))
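
The generator handling above injects a -G argument only when the caller has not already passed one, and rejects --use-ninja combined with an explicit generator. The same rule as a small pure function; semantics are assumed for illustration.

def select_generator(cmake_args, use_ninja):
    # Respect an explicit -G... argument; otherwise pick the generator.
    if any(arg.startswith('-G') for arg in cmake_args):
        if use_ninja:
            raise ValueError(
                "either specify a generator using '-G...' or '--use-ninja' but not both")
        return list(cmake_args)
    return list(cmake_args) + ['-G', 'Ninja' if use_ninja else 'Unix Makefiles']

assert select_generator([], True) == ['-G', 'Ninja']
assert select_generator([], False) == ['-G', 'Unix Makefiles']
assert select_generator(['-GNinja'], False) == ['-GNinja']
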
Example #24
def build_workspace_isolated(
    workspace='.',
    sourcespace=None,
    buildspace=None,
    develspace=None,
    installspace=None,
    merge=False,
    install=False,
    jobs=None,
    force_cmake=False,
    build_packages=None,
    quiet=False,
    cmake_args=[],
    make_args=[],
    catkin_cmake_path=None,
    catkin_python_path=None
):
    '''
    Runs ``cmake``, ``make`` and optionally ``make install`` for all
    catkin packages in sourcespace_dir.  It creates several folders
    in the current working directory. For non-catkin packages it runs
    ``cmake``, ``make`` and ``make install`` for each, installing it to
    the devel space or install space if the ``install`` option is specified.

    :param workspace: path to the current workspace, ``str``
    :param sourcespace: workspace folder containing catkin packages, ``str``
    :param buildspace: path to build space location, ``str``
    :param develspace: path to devel space location, ``str``
    :param installspace: path to install space (CMAKE_INSTALL_PREFIX), ``str``
    :param merge: if True, build each catkin package into the same
        devel space. does not work with non-catkin packages, ``bool``
    :param install: if True, install all packages to the install space,
        ``bool``
    :param jobs: number of parallel build jobs to run (make -jN -lN), ``int``
    :param force_cmake: (optional), if True calls cmake explicitly for each
        package, ``bool``
    :param build_packages: specific packages to build (all parent packages
        in the topological order must have been built before), ``str``
    :param quiet: if True, hides some build output, ``bool``
    :param cmake_args: additional arguments for cmake, ``[str]``
    :param make_args: additional arguments for make, ``[str]``
    '''
    # Should actually have a lot of argument checks here, rather than
    # before feeding the function (makes for safe functions)

    console.pretty_print("Base Path: ", console.cyan)
    console.pretty_println("%s" % workspace, console.yellow)
    console.pretty_print("Build Path: ", console.cyan)
    console.pretty_println("%s" % buildspace, console.yellow)
    console.pretty_print("Source Path: ", console.cyan)
    console.pretty_println("%s" % sourcespace, console.yellow)
    console.pretty_print("Devel Path: ", console.cyan)
    console.pretty_println("%s" % develspace, console.yellow)
    console.pretty_print("Install Path: ", console.cyan)
    console.pretty_println("%s" % installspace, console.yellow)
    console.pretty_print("Catkin CMake Path: ", console.cyan)
    console.pretty_println("%s" % catkin_cmake_path, console.yellow)
    console.pretty_print("Catkin Python Path: ", console.cyan)
    console.pretty_println("%s" % catkin_python_path, console.yellow)
    # Find packages
    packages = find_packages(sourcespace, exclude_subspaces=True)
    if not packages:
        raise RuntimeError("No packages found in source space: %s" % sourcespace)

    # verify that specified package exists in workspace
    if build_packages:
        packages_by_name = {p.name: path for path, p in packages.items()}
        unknown_packages = [p for p in build_packages if p not in packages_by_name]
        if unknown_packages:
            raise RuntimeError('Packages not found in the workspace: %s' % ', '.join(unknown_packages))

    # Report topological ordering
    ordered_packages = topological_order_packages(packages)
    unknown_build_types = []
    msg = []
    msg.append('@{pf}~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~' + ('~' * len(str(len(ordered_packages)))))
    msg.append('@{pf}~~@|  traversing %d packages in topological order:' % len(ordered_packages))
    for path, package in ordered_packages:
        export_tags = [e.tagname for e in package.exports]
        if 'build_type' in export_tags:
            build_type_tag = [e.content for e in package.exports if e.tagname == 'build_type'][0]
        else:
            build_type_tag = 'catkin'
        if build_type_tag == 'catkin':
            msg.append('@{pf}~~@|  - @!@{bf}' + package.name + '@|')
        elif build_type_tag == 'cmake':
            msg.append(
                '@{pf}~~@|  - @!@{bf}' + package.name + '@|' +
                ' (@!@{cf}plain cmake@|)'
            )
        else:
            msg.append(
                '@{pf}~~@|  - @!@{bf}' + package.name + '@|' +
                ' (@{rf}unknown@|)'
            )
            unknown_build_types.append(package)
    msg.append('@{pf}~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~' + ('~' * len(str(len(ordered_packages)))))
    for index in range(len(msg)):
        msg[index] = fmt(msg[index])
    print('\n'.join(msg))

    # Error if there are packages with unknown build_types
    if unknown_build_types:
        raise RuntimeError('Can not build workspace with packages of unknown build_type.')

    # Check to see if the workspace has changed
    install_toggled = False
    if not force_cmake:
        force_cmake, install_toggled = builder.cmake_input_changed(
            packages,
            buildspace,
            install=install,
            cmake_args=cmake_args,
            filename='catkin_make_isolated'
        )
        if force_cmake:
            print('The packages or cmake arguments have changed, forcing cmake invocation')
        elif install_toggled:
            print('The install argument has been toggled, forcing cmake invocation on plain cmake package')

    # Build packages
    original_develspace = develspace  # str is immutable; no deepcopy needed
    for index, path_package in enumerate(ordered_packages):
        path, package = path_package
        if not merge:
            develspace = os.path.join(original_develspace, package.name)
        if not build_packages or package.name in build_packages:
            try:
                export_tags = [e.tagname for e in package.exports]
                is_cmake_package = 'cmake' in [e.content for e in package.exports if e.tagname == 'build_type']
                builder.build_package(
                    path, package,
                    workspace, buildspace, develspace, installspace,
                    install, jobs, force_cmake or (install_toggled and is_cmake_package),
                    quiet, cmake_args, make_args,
                    number=index + 1, of=len(ordered_packages),
                    catkin_cmake_path=catkin_cmake_path,
                    catkin_python_path=catkin_python_path
                )
            except Exception as e:
                import traceback
                traceback.print_exc()
                builder.cprint(
                    '@{rf}@!<==@| ' +
                    'Failed to process package \'@!@{bf}' +
                    package.name + '@|\': \n  ' +
                    ('KeyboardInterrupt' if isinstance(e, KeyboardInterrupt)
                        else str(e))
                )
                if isinstance(e, subprocess.CalledProcessError):
                    cmd = ' '.join(e.cmd) if isinstance(e.cmd, list) else e.cmd
                    print(fmt("\n@{rf}Reproduce this error by running:"))
                    print(fmt("@{gf}@!==> @|") + cmd + "\n")
                sys.exit('Command failed, exiting.')
        else:
            builder.cprint("Skipping package: '@!@{bf}" + package.name + "@|'")

    # Provide a top level devel space environment setup script
    if not merge and not build_packages:
        # generate env.sh and setup.sh which relay to last devel space
        generated_env = os.path.join(original_develspace, 'env.sh')
        with open(generated_env, 'w') as f:
            f.write("""\
#!/usr/bin/env sh
# generated from catkin.builder module

{0} "$@"
""".format(os.path.join(develspace, 'env.sh')))
        os.chmod(generated_env, stat.S_IXUSR | stat.S_IWUSR | stat.S_IRUSR)
        with open(os.path.join(original_develspace, 'setup.sh'), 'w') as f:
            f.write("""\
#!/usr/bin/env sh
# generated from catkin.builder module

. "{0}/setup.sh"
""".format(develspace))
        # generate setup.bash and setup.zsh for convenience
        variables = {'SETUP_DIR': original_develspace}
        with open(os.path.join(original_develspace, 'setup.bash'), 'w') as f:
            f.write(configure_file(os.path.join(catkin_cmake_path, 'templates', 'setup.bash.in'), variables))
        with open(os.path.join(original_develspace, 'setup.zsh'), 'w') as f:
            f.write(configure_file(os.path.join(catkin_cmake_path, 'templates', 'setup.zsh.in'), variables))
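
The top-level env.sh written above is a relay that delegates to the env.sh of the last package's devel space and is then marked executable. A minimal, self-contained sketch of that generation step; the paths are made up and a temporary directory stands in for the devel space.

import os
import stat
import tempfile

RELAY_TEMPLATE = """\
#!/usr/bin/env sh
# generated from catkin.builder module

{0} "$@"
"""

with tempfile.TemporaryDirectory() as develspace:
    target_env_sh = os.path.join(develspace, 'last_pkg', 'env.sh')
    relay_path = os.path.join(develspace, 'env.sh')
    with open(relay_path, 'w') as f:
        f.write(RELAY_TEMPLATE.format(target_env_sh))
    # Owner read/write/execute, mirroring the chmod calls above
    os.chmod(relay_path, stat.S_IXUSR | stat.S_IWUSR | stat.S_IRUSR)
    assert os.access(relay_path, os.X_OK)
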
Example #25
def main(argv=sys.argv[1:]):
    parser = argparse.ArgumentParser(
        description="Generate a 'Dockerfile' for the doc job")
    add_argument_config_url(parser)
    parser.add_argument(
        '--rosdistro-name',
        required=True,
        help='The name of the ROS distro to identify the setup file to be '
             'sourced')
    add_argument_build_name(parser, 'doc')
    parser.add_argument(
        '--workspace-root',
        required=True,
        help='The root path of the workspace to compile')
    parser.add_argument(
        '--rosdoc-lite-dir',
        required=True,
        help='The root path of the rosdoc_lite repository')
    parser.add_argument(
        '--catkin-sphinx-dir',
        required=True,
        help='The root path of the catkin-sphinx repository')
    parser.add_argument(
        '--rosdoc-index-dir',
        required=True,
        help='The root path of the rosdoc_index folder')
    add_argument_repository_name(parser)
    parser.add_argument(
        '--os-name',
        required=True,
        help="The OS name (e.g. 'ubuntu')")
    parser.add_argument(
        '--os-code-name',
        required=True,
        help="The OS code name (e.g. 'trusty')")
    parser.add_argument(
        '--arch',
        required=True,
        help="The architecture (e.g. 'amd64')")
    add_argument_vcs_information(parser)
    add_argument_distribution_repository_urls(parser)
    add_argument_distribution_repository_key_files(parser)
    add_argument_force(parser)
    add_argument_output_dir(parser, required=True)
    add_argument_dockerfile_dir(parser)
    args = parser.parse_args(argv)

    config = get_config_index(args.config_url)

    with Scope('SUBSECTION', 'packages'):
        # find packages in workspace
        source_space = os.path.join(args.workspace_root, 'src')
        print("Crawling for packages in workspace '%s'" % source_space)
        pkgs = find_packages(source_space)

        pkg_names = [pkg.name for pkg in pkgs.values()]
        print('Found the following packages:')
        for pkg_name in sorted(pkg_names):
            print('  -', pkg_name)

        maintainer_emails = set()
        for pkg in pkgs.values():
            for m in pkg.maintainers:
                maintainer_emails.add(m.email)
        if maintainer_emails:
            print('Package maintainer emails: %s' %
                  ' '.join(sorted(maintainer_emails)))

    rosdoc_index = RosdocIndex(
        [os.path.join(args.rosdoc_index_dir, args.rosdistro_name)])

    with Scope('SUBSECTION', 'determine need to run documentation generation'):
        # compare hashes to determine if documentation needs to be regenerated
        current_hashes = {}
        current_hashes['ros_buildfarm'] = 1  # increase to retrigger doc jobs
        current_hashes['rosdoc_lite'] = get_git_hash(args.rosdoc_lite_dir)
        current_hashes['catkin-sphinx'] = get_git_hash(args.catkin_sphinx_dir)
        repo_dir = os.path.join(
            args.workspace_root, 'src', args.repository_name)
        current_hashes[args.repository_name] = get_hash(repo_dir)
        print('Current repository hashes: %s' % current_hashes)
        tag_index_hashes = rosdoc_index.hashes.get(args.repository_name, {})
        print('Stored repository hashes: %s' % tag_index_hashes)
        skip_doc_generation = current_hashes == tag_index_hashes

    if skip_doc_generation:
        print('No changes to the source repository or any tooling repository')

        if not args.force:
            print('Skipping generation of documentation')

            # create stamp files
            print('Creating marker files to identify that documentation is ' +
                  'up-to-date')
            create_stamp_files(pkg_names, os.path.join(args.output_dir, 'api'))

            return 0

        print("But job was started with the 'force' parameter set")

    else:
        print('The source repository and/or a tooling repository has changed')

    print('Running generation of documentation')
    rosdoc_index.hashes[args.repository_name] = current_hashes
    rosdoc_index.write_modified_data(args.output_dir, ['hashes'])

    # create stamp files
    print('Creating marker files to identify that documentation is ' +
          'up-to-date')
    create_stamp_files(pkg_names, os.path.join(args.output_dir, 'api_rosdoc'))

    index = get_index(config.rosdistro_index_url)
    dist_file = get_distribution_file(index, args.rosdistro_name)
    assert args.repository_name in dist_file.repositories
    valid_package_names = \
        set(pkg_names) | set(dist_file.release_packages.keys())

    # update package deps and metapackage deps
    with Scope('SUBSECTION', 'updated rosdoc_index information'):
        for pkg in pkgs.values():
            print("Updating dependendencies for package '%s'" % pkg.name)
            depends = _get_build_run_doc_dependencies(pkg)
            ros_dependency_names = sorted(set([
                d.name for d in depends if d.name in valid_package_names]))
            rosdoc_index.set_forward_deps(pkg.name, ros_dependency_names)

            if pkg.is_metapackage():
                print("Updating dependendencies for metapackage '%s'" %
                      pkg.name)
                depends = _get_run_dependencies(pkg)
                ros_dependency_names = sorted(set([
                    d.name for d in depends if d.name in valid_package_names]))
            else:
                ros_dependency_names = None
            rosdoc_index.set_metapackage_deps(
                pkg.name, ros_dependency_names)
        rosdoc_index.write_modified_data(
            args.output_dir, ['deps', 'metapackage_deps'])

    # generate changelog html from rst
    package_names_with_changelogs = set()
    with Scope('SUBSECTION', 'generate changelog html from rst'):
        for pkg_path, pkg in pkgs.items():
            abs_pkg_path = os.path.join(source_space, pkg_path)
            assert os.path.exists(os.path.join(abs_pkg_path, 'package.xml'))
            changelog_file = os.path.join(abs_pkg_path, 'CHANGELOG.rst')
            if os.path.exists(changelog_file):
                print(("Package '%s' contains a CHANGELOG.rst, generating " +
                       "html") % pkg.name)
                package_names_with_changelogs.add(pkg.name)

                with open(changelog_file, 'r') as h:
                    rst_code = h.read()
                from docutils.core import publish_string
                html_code = publish_string(rst_code, writer_name='html')
                html_code = html_code.decode()

                # strip system message from html output
                open_tag = re.escape('<div class="first system-message">')
                close_tag = re.escape('</div>')
                pattern = '(' + open_tag + '.+?' + close_tag + ')'
                html_code = re.sub(pattern, '', html_code, flags=re.DOTALL)

                pkg_changelog_doc_path = os.path.join(
                    args.output_dir, 'changelogs', pkg.name)
                os.makedirs(pkg_changelog_doc_path)
                with open(os.path.join(
                        pkg_changelog_doc_path, 'changelog.html'), 'w') as h:
                    h.write(html_code)

    ordered_pkg_tuples = topological_order_packages(pkgs)

    # create rosdoc tag list and location files
    with Scope('SUBSECTION', 'create rosdoc tag list and location files'):
        for _, pkg in ordered_pkg_tuples:
            dst = os.path.join(
                args.output_dir, 'rosdoc_tags', '%s.yaml' % pkg.name)
            print("Generating rosdoc tag list file for package '%s'" %
                  pkg.name)

            dep_names = rosdoc_index.get_recursive_dependencies(pkg.name)
            # make sure that we don't pass our own tagfile to ourselves;
            # bad things happen when we do
            assert pkg.name not in dep_names
            locations = []
            for dep_name in sorted(dep_names):
                if dep_name not in rosdoc_index.locations:
                    print("- skipping not existing location file of " +
                          "dependency '%s'" % dep_name)
                    continue
                print("- including location files of dependency '%s'" %
                      dep_name)
                dep_locations = rosdoc_index.locations[dep_name]
                if dep_locations:
                    for dep_location in dep_locations:
                        assert dep_location['package'] == dep_name
                        # update tag information to point to local location
                        location = copy.deepcopy(dep_location)
                        if not location['location'].startswith('file://'):
                            location['location'] = 'file://%s' % os.path.join(
                                args.rosdoc_index_dir, location['location'])
                        locations.append(location)

            dst_dir = os.path.dirname(dst)
            if not os.path.exists(dst_dir):
                os.makedirs(dst_dir)
            with open(dst, 'w') as h:
                yaml.dump(locations, h)

            print("Creating location file for package '%s'" % pkg.name)
            data = {
                'docs_url': '../../../api/%s/html' % pkg.name,
                'location': 'file://%s' % os.path.join(
                    args.output_dir, 'symbols', '%s.tag' % pkg.name),
                'package': pkg.name,
            }
            rosdoc_index.locations[pkg.name] = [data]
            # do not write these local locations

    # used to determine all source and release jobs
    source_build_files = get_source_build_files(config, args.rosdistro_name)
    release_build_files = get_release_build_files(config, args.rosdistro_name)

    # TODO this should reuse the logic from the job generation
    used_source_build_names = []
    for source_build_name, build_file in source_build_files.items():
        repo_names = build_file.filter_repositories([args.repository_name])
        if not repo_names:
            continue
        source_dist_file = get_distribution_file_matching_build_file(
            index, args.rosdistro_name, build_file)
        repo = source_dist_file.repositories[args.repository_name]
        if not repo.source_repository:
            continue
        if not repo.source_repository.version:
            continue
        if build_file.test_commits_force is False:
            continue
        elif repo.source_repository.test_commits is False:
            continue
        elif repo.source_repository.test_commits is None and \
                not build_file.test_commits_default:
            continue
        used_source_build_names.append(source_build_name)

    # TODO this should reuse the logic from the job generation
    used_release_build_names = release_build_files.keys()

    vcs_type, vcs_version, vcs_url = args.vcs_info.split(' ', 2)

    # create manifest.yaml files from repository / package meta information
    # will be merged with the manifest.yaml file generated by rosdoc_lite later
    repository = dist_file.repositories[args.repository_name]
    with Scope('SUBSECTION', 'create manifest.yaml files'):
        for pkg in pkgs.values():

            data = {}

            data['vcs'] = vcs_type
            data['vcs_uri'] = vcs_url
            data['vcs_version'] = vcs_version

            data['repo_name'] = args.repository_name
            data['timestamp'] = time.time()

            data['depends'] = rosdoc_index.forward_deps.get(pkg.name, [])
            data['depends_on'] = rosdoc_index.reverse_deps.get(pkg.name, [])

            if pkg.name in rosdoc_index.metapackage_index:
                data['metapackages'] = rosdoc_index.metapackage_index[pkg.name]

            if pkg.name in package_names_with_changelogs:
                data['has_changelog_rst'] = True

            data['api_documentation'] = 'http://docs.ros.org/%s/api/%s' % \
                (args.rosdistro_name, pkg.name)

            pkg_status = None
            pkg_status_description = None
            # package level status information
            if pkg.name in repository.status_per_package:
                pkg_status_data = repository.status_per_package[pkg.name]
                pkg_status = pkg_status_data.get('status', None)
                pkg_status_description = pkg_status_data.get(
                    'status_description', None)
            # repository level status information
            if pkg_status is None:
                pkg_status = repository.status
            if pkg_status_description is None:
                pkg_status_description = repository.status_description
            if pkg_status is not None:
                data['maintainer_status'] = pkg_status
            if pkg_status_description is not None:
                data['maintainer_status_description'] = pkg_status_description

            # add doc job url
            data['doc_job'] = get_doc_job_url(
                config.jenkins_url, args.rosdistro_name, args.doc_build_name,
                args.repository_name, args.os_name, args.os_code_name,
                args.arch)

            # add devel job urls
            build_files = {}
            for build_name in used_source_build_names:
                build_files[build_name] = source_build_files[build_name]
            devel_job_urls = get_devel_job_urls(
                config.jenkins_url, build_files, args.rosdistro_name,
                args.repository_name)
            if devel_job_urls:
                data['devel_jobs'] = list(devel_job_urls)

            # add release job urls
            build_files = {}
            for build_name in used_release_build_names:
                build_files[build_name] = release_build_files[build_name]
            release_job_urls = get_release_job_urls(
                config.jenkins_url, build_files, args.rosdistro_name, pkg.name)
            if release_job_urls:
                data['release_jobs'] = list(release_job_urls)

            # write manifest.yaml
            dst = os.path.join(
                args.output_dir, 'manifests', pkg.name, 'manifest.yaml')
            dst_dir = os.path.dirname(dst)
            if not os.path.exists(dst_dir):
                os.makedirs(dst_dir)
            with open(dst, 'w') as h:
                yaml.dump(data, h)

    # overwrite CMakeLists.txt files of each package
    with Scope(
        'SUBSECTION',
        'overwrite CMakeLists.txt files to only generate messages'
    ):
        for pkg_path, pkg in pkgs.items():
            abs_pkg_path = os.path.join(source_space, pkg_path)

            build_types = [
                e.content for e in pkg.exports if e.tagname == 'build_type']
            build_type_cmake = build_types and build_types[0] == 'cmake'

            data = {
                'package_name': pkg.name,
                'build_type_cmake': build_type_cmake,
            }
            content = expand_template('doc/CMakeLists.txt.em', data)
            print("Generating 'CMakeLists.txt' for package '%s'" %
                  pkg.name)
            cmakelist_file = os.path.join(abs_pkg_path, 'CMakeLists.txt')
            with open(cmakelist_file, 'w') as h:
                h.write(content)

    # initialize rosdep view
    context = initialize_resolver(
        args.rosdistro_name, args.os_name, args.os_code_name)

    apt_cache = Cache()

    debian_pkg_names = [
        'build-essential',
        'openssh-client',
        'python3',
        'python3-yaml',
        'rsync',
        # the following are required by rosdoc_lite
        'doxygen',
        'python-catkin-pkg',
        'python-epydoc',
        'python-kitchen',
        'python-rospkg',
        'python-sphinx',
        'python-yaml',
        # since catkin is not a run dependency but provides the setup files
        get_debian_package_name(args.rosdistro_name, 'catkin'),
        # rosdoc_lite does not work without genmsg being importable
        get_debian_package_name(args.rosdistro_name, 'genmsg'),
    ]
    if 'actionlib_msgs' in pkg_names:
        # to document actions in other packages in the same repository
        debian_pkg_names.append(
            get_debian_package_name(args.rosdistro_name, 'actionlib_msgs'))
    print('Always install the following generic dependencies:')
    for debian_pkg_name in sorted(debian_pkg_names):
        print('  -', debian_pkg_name)

    debian_pkg_versions = {}

    # get build, run and doc dependencies and map them to binary packages
    depends = get_dependencies(
        pkgs.values(), 'build, run and doc', _get_build_run_doc_dependencies)
    debian_pkg_names_depends = resolve_names(depends, **context)
    debian_pkg_names_depends -= set(debian_pkg_names)
    debian_pkg_names += order_dependencies(debian_pkg_names_depends)
    unknown_versions = []
    for debian_pkg_name in debian_pkg_names:
        try:
            debian_pkg_versions.update(
                get_binary_package_versions(apt_cache, [debian_pkg_name]))
        except KeyError:
            print("Could not find apt package '%s', skipping dependency" %
                  debian_pkg_name)
            unknown_versions.append(debian_pkg_name)
    for unknown_version in unknown_versions:
        debian_pkg_names.remove(unknown_version)

    build_files = get_doc_build_files(config, args.rosdistro_name)
    build_file = build_files[args.doc_build_name]

    rosdoc_config_files = {}
    for pkg_path, pkg in pkgs.items():
        abs_pkg_path = os.path.join(source_space, pkg_path)

        rosdoc_exports = [
            e.attributes['content'] for e in pkg.exports
            if e.tagname == 'rosdoc' and 'content' in e.attributes]
        prefix = '${prefix}'
        rosdoc_config_file = rosdoc_exports[-1] \
            if rosdoc_exports else '%s/rosdoc.yaml' % prefix
        rosdoc_config_file = rosdoc_config_file.replace(prefix, abs_pkg_path)
        if os.path.isfile(rosdoc_config_file):
            rosdoc_config_files[pkg.name] = rosdoc_config_file

    # generate Dockerfile
    data = {
        'os_name': args.os_name,
        'os_code_name': args.os_code_name,
        'arch': args.arch,

        'distribution_repository_urls': args.distribution_repository_urls,
        'distribution_repository_keys': get_distribution_repository_keys(
            args.distribution_repository_urls,
            args.distribution_repository_key_files),

        'rosdistro_name': args.rosdistro_name,

        'uid': get_user_id(),

        'dependencies': debian_pkg_names,
        'dependency_versions': debian_pkg_versions,

        'canonical_base_url': build_file.canonical_base_url,

        'ordered_pkg_tuples': ordered_pkg_tuples,
        'rosdoc_config_files': rosdoc_config_files,
    }
    create_dockerfile(
        'doc/doc_task.Dockerfile.em', data, args.dockerfile_dir)
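
The changelog conversion above can be exercised on its own. A minimal sketch, assuming docutils is installed and a CHANGELOG.rst sits in the working directory (the file names are placeholders):

import re

from docutils.core import publish_string

# render reStructuredText to a standalone HTML document
with open('CHANGELOG.rst', 'r') as h:
    rst_code = h.read()
html_code = publish_string(rst_code, writer_name='html').decode()

# strip docutils "system message" blocks, mirroring the cleanup above
open_tag = re.escape('<div class="first system-message">')
close_tag = re.escape('</div>')
html_code = re.sub(
    '(' + open_tag + '.+?' + close_tag + ')', '', html_code, flags=re.DOTALL)

with open('changelog.html', 'w') as h:
    h.write(html_code)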
Example #26
0
def build_workspace_isolated(
    workspace=".",
    sourcespace=None,
    buildspace=None,
    develspace=None,
    installspace=None,
    merge=False,
    install=False,
    force_cmake=False,
    colorize=True,
    build_packages=None,
    quiet=False,
    cmake_args=None,
    make_args=None,
    catkin_make_args=None,
    continue_from_pkg=False,
    only_pkg_with_deps=None,
    destdir=None,
):
    """
    Runs ``cmake``, ``make`` and optionally ``make install`` for all
    catkin packages in sourcespace_dir.  It creates several folders
    in the current working directory. For non-catkin packages it runs
    ``cmake``, ``make`` and ``make install`` for each, installing each
    package to the devel space or, if the ``install`` option is set,
    to the install space.

    :param workspace: path to the current workspace, ``str``
    :param sourcespace: workspace folder containing catkin packages, ``str``
    :param buildspace: path to build space location, ``str``
    :param develspace: path to devel space location, ``str``
    :param installspace: path to install space (CMAKE_INSTALL_PREFIX), ``str``
    :param merge: if True, build each catkin package into the same
        devel space (not affecting plain cmake packages), ``bool``
    :param install: if True, install all packages to the install space,
        ``bool``
    :param force_cmake: (optional), if True calls cmake explicitly for each
        package, ``bool``
    :param colorize: if True, colorize cmake output and other messages,
        ``bool``
    :param build_packages: specific packages to build (all parent packages
        in the topological order must have been built before), ``str``
    :param quiet: if True, hides some build output, ``bool``
    :param cmake_args: additional arguments for cmake, ``[str]``
    :param make_args: additional arguments for make, ``[str]``
    :param catkin_make_args: additional arguments for make but only for catkin
        packages, ``[str]``
    :param continue_from_pkg: if True, skip packages preceding those listed
        in ``build_packages`` and continue building from there on, ``bool``
    :param only_pkg_with_deps: only consider the specific packages and their
        recursive dependencies and ignore all other packages in the workspace,
        ``[str]``
    :param destdir: define DESTDIR for the cmake invocation, ``str``
    """
    if not colorize:
        disable_ANSI_colors()

    # Check workspace existence
    if not os.path.exists(workspace):
        sys.exit("Workspace path '{0}' does not exist.".format(workspace))
    workspace = os.path.abspath(workspace)

    # Check source space existence
    if sourcespace is None:
        sourcespace = os.path.join(workspace, "src")
    if not os.path.exists(sourcespace):
        sys.exit("Could not find source space: {0}".format(sourcespace))
    print("Base path: " + str(workspace))
    print("Source space: " + str(sourcespace))

    # Check build space
    if buildspace is None:
        buildspace = os.path.join(workspace, "build_isolated")
    if not os.path.exists(buildspace):
        os.mkdir(buildspace)
    print("Build space: " + str(buildspace))

    # Check devel space
    if develspace is None:
        develspace = os.path.join(workspace, "devel_isolated")
    print("Devel space: " + str(develspace))

    # Check install space
    if installspace is None:
        installspace = os.path.join(workspace, "install_isolated")
    print("Install space: " + str(installspace))

    if cmake_args:
        print("Additional CMake Arguments: " + " ".join(cmake_args))
    else:
        cmake_args = []

    if make_args:
        print("Additional make Arguments: " + " ".join(make_args))
    else:
        make_args = []

    if catkin_make_args:
        print("Additional make Arguments for catkin packages: " + " ".join(catkin_make_args))
    else:
        catkin_make_args = []

    # Find packages
    packages = find_packages(sourcespace, exclude_subspaces=True)
    if not packages:
        print(fmt("@{yf}No packages found in source space: %s@|" % sourcespace))

    # whitelist packages and their dependencies in workspace
    if only_pkg_with_deps:
        package_names = [p.name for p in packages.values()]
        unknown_packages = [name for name in only_pkg_with_deps if name not in package_names]
        if unknown_packages:
            sys.exit("Packages not found in the workspace: %s" % ", ".join(unknown_packages))

        whitelist_pkg_names = get_package_names_with_recursive_dependencies(packages, only_pkg_with_deps)
        print("Whitelisted packages: %s" % ", ".join(sorted(whitelist_pkg_names)))
        packages = {path: p for path, p in packages.items() if p.name in whitelist_pkg_names}

    # verify that the specified packages exist in the workspace
    if build_packages:
        packages_by_name = {p.name: path for path, p in packages.items()}
        unknown_packages = [p for p in build_packages if p not in packages_by_name]
        if unknown_packages:
            sys.exit("Packages not found in the workspace: %s" % ", ".join(unknown_packages))

    # Report topological ordering
    ordered_packages = topological_order_packages(packages)
    unknown_build_types = []
    msg = []
    msg.append("@{pf}~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~" + ("~" * len(str(len(ordered_packages)))))
    msg.append("@{pf}~~@|  traversing %d packages in topological order:" % len(ordered_packages))
    for path, package in ordered_packages:
        if path is None:
            print(fmt("@{rf}Error: Circular dependency in subset of packages: @!%s@|" % package))
            sys.exit("Can not build workspace with circular dependency")

        export_tags = [e.tagname for e in package.exports]
        if "build_type" in export_tags:
            build_type_tag = [e.content for e in package.exports if e.tagname == "build_type"][0]
        else:
            build_type_tag = "catkin"
        if build_type_tag == "catkin":
            msg.append("@{pf}~~@|  - @!@{bf}" + package.name + "@|")
        elif build_type_tag == "cmake":
            msg.append("@{pf}~~@|  - @!@{bf}" + package.name + "@|" + " (@!@{cf}plain cmake@|)")
        else:
            msg.append("@{pf}~~@|  - @!@{bf}" + package.name + "@|" + " (@{rf}unknown@|)")
            unknown_build_types.append(package)
    msg.append("@{pf}~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~" + ("~" * len(str(len(ordered_packages)))))
    for index in range(len(msg)):
        msg[index] = fmt(msg[index])
    print("\n".join(msg))

    # Error if there are packages with unknown build_types
    if unknown_build_types:
        print(colorize_line("Error: Packages with unknown build types exist"))
        sys.exit("Can not build workspace with packages of unknown build_type")

    # Check to see if the workspace has changed
    cmake_args_with_spaces = list(cmake_args)
    if develspace:
        cmake_args_with_spaces.append("-DCATKIN_DEVEL_PREFIX=" + develspace)
    if installspace:
        cmake_args_with_spaces.append("-DCMAKE_INSTALL_PREFIX=" + installspace)
    if not force_cmake and cmake_input_changed(
        packages, buildspace, cmake_args=cmake_args_with_spaces, filename="catkin_make_isolated"
    ):
        print("The packages or cmake arguments have changed, forcing cmake invocation")
        force_cmake = True

    ensure_workspace_marker(workspace)

    # Build packages
    pkg_develspace = None
    last_env = None
    for index, path_package in enumerate(ordered_packages):
        path, package = path_package
        if merge:
            pkg_develspace = develspace
        else:
            pkg_develspace = os.path.join(develspace, package.name)
        if not build_packages or package.name in build_packages:
            if continue_from_pkg and build_packages and package.name in build_packages:
                build_packages = None
            try:
                print()
                last_env = build_package(
                    path,
                    package,
                    workspace,
                    buildspace,
                    pkg_develspace,
                    installspace,
                    install,
                    force_cmake,
                    quiet,
                    last_env,
                    cmake_args,
                    make_args,
                    catkin_make_args,
                    destdir=destdir,
                    number=index + 1,
                    of=len(ordered_packages),
                )
            except subprocess.CalledProcessError as e:
                _print_build_error(package, e)
                # Let users know how to reproduce
                # First add the cd to the build folder of the package
                cmd = "cd " + os.path.join(buildspace, package.name) + " && "
                # Then reproduce the command called
                cmd += " ".join(e.cmd) if isinstance(e.cmd, list) else e.cmd
                print(fmt("\n@{rf}Reproduce this error by running:"))
                print(fmt("@{gf}@!==> @|") + cmd + "\n")
                sys.exit("Command failed, exiting.")
            except Exception as e:
                print("Unhandled exception of type '{0}':".format(type(e).__name__))
                import traceback

                traceback.print_exc()
                _print_build_error(package, e)
                sys.exit("Command failed, exiting.")
        else:
            cprint("Skipping package: '@!@{bf}" + package.name + "@|'")
            last_env = get_new_env(package, pkg_develspace, installspace, install, last_env, destdir)

    # Provide a top level devel space environment setup script
    if not os.path.exists(develspace):
        os.makedirs(develspace)
    if not build_packages:
        generated_env_sh = os.path.join(develspace, "env.sh")
        generated_setup_util_py = os.path.join(develspace, "_setup_util.py")
        if not merge and pkg_develspace:
            # generate env.sh and setup.sh|bash|zsh which relay to last devel space
            with open(generated_env_sh, "w") as f:
                f.write(
                    """\
#!/usr/bin/env sh
# generated from catkin.builder module

{0} "$@"
""".format(
                        os.path.join(pkg_develspace, "env.sh")
                    )
                )
            os.chmod(generated_env_sh, stat.S_IXUSR | stat.S_IWUSR | stat.S_IRUSR)

            for shell in ["sh", "bash", "zsh"]:
                with open(os.path.join(develspace, "setup.%s" % shell), "w") as f:
                    f.write(
                        """\
#!/usr/bin/env {1}
# generated from catkin.builder module

. "{0}/setup.{1}"
""".format(
                            pkg_develspace, shell
                        )
                    )

            # remove _setup_util.py file which might have been generated for an empty devel space before
            if os.path.exists(generated_setup_util_py):
                os.remove(generated_setup_util_py)

        elif not pkg_develspace:
            # generate env.sh and setup.sh|bash|zsh for an empty devel space
            if "CMAKE_PREFIX_PATH" in os.environ.keys():
                variables = {
                    "CATKIN_GLOBAL_BIN_DESTINATION": "bin",
                    "CATKIN_GLOBAL_LIB_DESTINATION": "lib",
                    "CMAKE_PREFIX_PATH_AS_IS": ";".join(os.environ["CMAKE_PREFIX_PATH"].split(os.pathsep)),
                    "PYTHON_INSTALL_DIR": get_python_install_dir(),
                }
                with open(generated_setup_util_py, "w") as f:
                    f.write(configure_file(os.path.join(get_cmake_path(), "templates", "_setup_util.py.in"), variables))
                os.chmod(generated_setup_util_py, stat.S_IXUSR | stat.S_IWUSR | stat.S_IRUSR)
            else:
                sys.exit("Unable to process CMAKE_PREFIX_PATH from environment. Cannot generate environment files.")

            variables = {"SETUP_FILENAME": "setup"}
            with open(generated_env_sh, "w") as f:
                f.write(configure_file(os.path.join(get_cmake_path(), "templates", "env.sh.in"), variables))
            os.chmod(generated_env_sh, stat.S_IXUSR | stat.S_IWUSR | stat.S_IRUSR)

            variables = {"SETUP_DIR": develspace}
            for shell in ["sh", "bash", "zsh"]:
                with open(os.path.join(develspace, "setup.%s" % shell), "w") as f:
                    f.write(
                        configure_file(os.path.join(get_cmake_path(), "templates", "setup.%s.in" % shell), variables)
                    )
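
A minimal usage sketch for the function above; the workspace path and package name are placeholders, and all space paths fall back to the defaults shown in the body (src, build_isolated, devel_isolated, install_isolated):

from catkin.builder import build_workspace_isolated

# build one specific package with an extra CMake flag; its dependencies
# must already have been built (see the build_packages docstring)
build_workspace_isolated(
    workspace='/home/user/catkin_ws',           # hypothetical workspace
    build_packages=['my_pkg'],                  # hypothetical package name
    cmake_args=['-DCMAKE_BUILD_TYPE=Release'],
)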
Example #27
0
def make_isolated_main():
    args = _parse_args()

    if args.no_color:
        terminal_color.disable_ANSI_colors()

    (base_path, build_path, devel_path, source_path) = common.get_default_paths(isolated=args.suffixes)
    unused_catkin_toplevel, catkin_python_path, unused_catkin_cmake_path = common.find_catkin(base_path)
    install_path = config_cache.get_install_prefix_from_config_cmake(isolated=args.suffixes)

    sys.path.insert(0, catkin_python_path)

    from catkin.builder import build_workspace_isolated

    # Clear out previous temporaries if requested
    if args.pre_clean:
        console.pretty_print("Pre-cleaning before building.", console.cyan)
        shutil.rmtree(devel_path, ignore_errors=True)
        shutil.rmtree(build_path, ignore_errors=True)
        shutil.rmtree(install_path, ignore_errors=True)

    if not os.path.exists(build_path):
        os.mkdir(build_path)

    # Validate package argument
    packages = find_packages(source_path, exclude_subspaces=True)
    packages_by_name = {p.name: path for path, p in packages.items()}
    if args.packages:
        for package in args.packages:
            if package not in packages_by_name:
                raise RuntimeError('Package %s not found in the workspace' % package)

    make.validate_build_space(base_path)  # raises a RuntimeError if there is a problem
    make.check_and_update_source_repo_paths(source_path)
    build_workspace_isolated(
        workspace=base_path,
        sourcespace=source_path,
        buildspace=build_path,
        develspace=devel_path,
        installspace=install_path,
        merge=args.merge,
        install=args.install,
        force_cmake=args.force_cmake,
        build_packages=args.packages,
        quiet=args.quiet,
        cmake_args=args.cmake_args,
        make_args=args.make_args
    )
    # this is a rather crude way of building a specific target after everything
    # else has been built (and rebuilt); usually this is fast enough since the
    # check of already built packages is quick
    if args.target:
        env = os.environ.copy()
        cmd = ['make', args.target]
        make_paths = []
        if args.packages:
            for package in args.packages:
                # It's an isolated build, so packages are listed under the
                # build path as a flat list (not fully traceable dirs like
                # in catkin_make)
                # make_path = os.path.join(make_path, packages_by_name[package])
                make_paths.append(os.path.join(build_path, package))
        else:
            for (unused_path, package) in topological_order_packages(packages):
                # 3rd-party builds put make targets under an install directory
                # while catkin package builds put them under the package name;
                # detect which layout is present here
                third_party_build_path = os.path.join(build_path, package.name, 'install')
                catkin_build_path = os.path.join(build_path, package.name)
                package_build_path = third_party_build_path if os.path.exists(third_party_build_path) else catkin_build_path
                make_paths.append(package_build_path)
        for make_path in make_paths:
            builder.print_command_banner(cmd, make_path, color=not args.no_color)
            if args.no_color:
                builder.run_command(cmd, make_path, env=env)
            else:
                builder.run_command_colorized(cmd, make_path, env=env)
Example #28
0
def test_workspace(
    context,
    packages=None,
    n_jobs=None,
    quiet=False,
    interleave_output=False,
    no_status=False,
    limit_status_rate=10.0,
    no_notify=False,
    continue_on_failure=False,
    summarize_build=False,
    catkin_test_target='run_tests',
    cmake_test_target='test',
):
    """Tests a catkin workspace

    :param context: context in which to test the catkin workspace
    :type context: :py:class:`catkin_tools.context.Context`
    :param packages: list of packages to test
    :type packages: list
    :param n_jobs: number of parallel package test jobs
    :type n_jobs: int
    :param quiet: suppresses verbose build or test information
    :type quiet: bool
    :param interleave_output: prints the output of commands as they are received
    :type interleave_output: bool
    :param no_status: suppresses the bottom status line
    :type no_status: bool
    :param limit_status_rate: maximum rate in Hz at which status updates are
        drawn; a value of 0 places no limit (the default is 10.0)
    :type limit_status_rate: float
    :param no_notify: suppresses system notifications
    :type no_notify: bool
    :param continue_on_failure: do not stop testing other packages on error
    :type continue_on_failure: bool
    :param summarize_build: summarizes the build at the end
    :type summarize_build: bool
    :param catkin_test_target: make target for tests in catkin packages
    :type catkin_test_target: str
    :param cmake_test_target: make target for tests in cmake packages
    :type cmake_test_target: str
    """
    pre_start_time = time.time()

    # Assert that the limit_status_rate is valid
    if limit_status_rate < 0:
        sys.exit(
            "[test] @!@{rf}Error:@| The value of --limit-status-rate must be greater than or equal to zero."
        )

    # Get all the packages in the context source space
    # Suppress warnings since this is a utility function
    try:
        workspace_packages = find_packages(context.source_space_abs,
                                           exclude_subspaces=True,
                                           warnings=[])
    except InvalidPackage as ex:
        sys.exit(
            clr("@{rf}Error:@| The file {} is an invalid package.xml file."
                " See below for details:\n\n{}").format(
                    ex.package_path, ex.msg))

    # Get all build type plugins
    test_job_creators = {
        ep.name: ep.load()['create_test_job']
        for ep in pkg_resources.iter_entry_points(group='catkin_tools.jobs')
    }

    # It's a problem if there aren't any build types available
    if len(test_job_creators) == 0:
        sys.exit(
            'Error: No build types available. Please check your catkin_tools installation.'
        )

    # Get list of packages to test
    ordered_packages = topological_order_packages(workspace_packages)

    # Check whether topological_order_packages detected a circular dependency;
    # if so, print an error and fail. In that case the last entry of the
    # ordered packages is a tuple whose first element is None.
    if ordered_packages and ordered_packages[-1][0] is None:
        guilty_packages = ", ".join(ordered_packages[-1][1:])
        sys.exit(
            "[test] Circular dependency detected in the following packages: {}"
            .format(guilty_packages))

    workspace_packages = dict([(pkg.name, (path, pkg))
                               for path, pkg in ordered_packages])
    packages_to_test = []
    if packages:
        for package in packages:
            if package not in workspace_packages:
                # Try whether package is a pattern and matches
                glob_packages = expand_glob_package(package,
                                                    workspace_packages)
                if len(glob_packages) > 0:
                    packages.extend(glob_packages)
                else:
                    sys.exit(
                        "[test] Given package '{}' is not in the workspace "
                        "and the pattern does not match any package".format(
                            package))
        for pkg_path, package in ordered_packages:
            if package.name in packages:
                packages_to_test.append((pkg_path, package))
    else:
        # Only use buildlist when no other packages are specified
        if len(context.buildlist) > 0:
            # Expand glob patterns in buildlist
            buildlist = []
            for buildlisted_package in context.buildlist:
                buildlist.extend(
                    expand_glob_package(buildlisted_package,
                                        workspace_packages))
            packages_to_test = [
                p for p in ordered_packages if (p[1].name in buildlist)
            ]
        else:
            packages_to_test = ordered_packages

    # Filter packages on skiplist
    if len(context.skiplist) > 0:
        # Expand glob patterns in skiplist
        skiplist = []
        for skiplisted_package in context.skiplist:
            skiplist.extend(
                expand_glob_package(skiplisted_package, workspace_packages))
        # Apply skiplist to packages and dependencies
        packages_to_test = [
            (path, pkg) for path, pkg in packages_to_test
            if (pkg.name not in skiplist or pkg.name in packages)
        ]

    # Check if all packages to test are already built
    built_packages = set([
        pkg.name
        for (path, pkg) in find_packages(context.package_metadata_path(),
                                         warnings=[]).items()
    ])

    packages_to_test_names = set(pkg.name for path, pkg in packages_to_test)
    if not built_packages.issuperset(packages_to_test_names):
        wide_log(
            clr("@{rf}Error: Packages have to be built before they can be tested.@|"
                ))
        wide_log(clr("The following requested packages are not built yet:"))
        for package_name in packages_to_test_names.difference(built_packages):
            wide_log(' - ' + package_name)
        sys.exit(1)

    # Construct jobs
    jobs = []
    for pkg_path, pkg in packages_to_test:
        # Determine the job parameters
        test_job_kwargs = dict(context=context,
                               package=pkg,
                               package_path=pkg_path,
                               verbose=not quiet)

        # Create the job based on the build type
        build_type = pkg.get_build_type()

        if build_type == 'catkin':
            test_job_kwargs['test_target'] = catkin_test_target
        elif build_type == 'cmake':
            test_job_kwargs['test_target'] = cmake_test_target

        if build_type in test_job_creators:
            jobs.append(test_job_creators[build_type](**test_job_kwargs))

    # Queue for communicating status
    event_queue = Queue()

    # Initialize job server
    job_server.initialize(
        max_jobs=n_jobs,
        max_load=None,
        gnu_make_enabled=context.use_internal_make_jobserver,
    )

    try:
        # Spin up status output thread
        status_thread = ConsoleStatusController(
            'test',
            ['package', 'packages'],
            jobs,
            n_jobs,
            [pkg.name for path, pkg in packages_to_test],
            [p for p in context.buildlist],
            [p for p in context.skiplist],
            event_queue,
            show_notifications=not no_notify,
            show_active_status=not no_status,
            show_buffered_stdout=not interleave_output,
            show_buffered_stderr=not interleave_output,
            show_live_stdout=interleave_output,
            show_live_stderr=interleave_output,
            show_full_summary=summarize_build,
            show_stage_events=not quiet,
            pre_start_time=pre_start_time,
            active_status_rate=limit_status_rate,
        )

        status_thread.start()

        locks = {}

        # Block while running N jobs asynchronously
        try:
            all_succeeded = run_until_complete(
                execute_jobs('test',
                             jobs,
                             locks,
                             event_queue,
                             context.log_space_abs,
                             max_toplevel_jobs=n_jobs,
                             continue_on_failure=continue_on_failure,
                             continue_without_deps=False))
        except Exception:
            status_thread.keep_running = False
            all_succeeded = False
            status_thread.join(1.0)
            wide_log(str(traceback.format_exc()))

        status_thread.join(1.0)

        if all_succeeded:
            return 0
        else:
            return 1

    except KeyboardInterrupt:
        wide_log("[test] Interrupted by user!")
        event_queue.put(None)

        return 130
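
The circular-dependency guard above relies on the convention that, when a cycle exists, the last entry returned by topological_order_packages has None as its first element. A minimal standalone sketch (the source-space path is a placeholder):

from catkin_pkg.packages import find_packages
from catkin_pkg.topological_order import topological_order_packages

packages = find_packages('/path/to/ws/src')      # hypothetical source space
ordered = topological_order_packages(packages)
if ordered and ordered[-1][0] is None:
    raise SystemExit('circular dependency among: %s' % ordered[-1][1])
for path, pkg in ordered:
    print(path, pkg.name)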
Example #29
0
import argparse
import time

# minimal argument parsing; the PARSER definition was omitted from the original
# snippet and is reconstructed here from the use of ARGS.source below
PARSER = argparse.ArgumentParser(
    description='Benchmark find_packages and topological_order_packages')
PARSER.add_argument('source', help='path to the source space to scan')
ARGS = PARSER.parse_args()


# -----------------------------------------------------------------------------
# Import catkin_pkg
# -----------------------------------------------------------------------------
try:
    from catkin_pkg.packages import find_packages
    from catkin_pkg.topological_order import topological_order_packages
except ImportError as exception:
    raise ImportError('Please adjust your PYTHONPATH before running this test: %s' % str(exception))


# -----------------------------------------------------------------------------
# Perform benchmark
# -----------------------------------------------------------------------------
for run in range(1, 3):
    START_TIME = time.time()
    WORKSPACE_PACKAGES = find_packages(ARGS.source, exclude_subspaces=True, warnings=[])
    print("Run %d: Found %d packages in %.1f s" % (run, len(WORKSPACE_PACKAGES),
                                                   time.time() - START_TIME))

    START_TIME = time.time()
    ORDERED_PACKAGES = topological_order_packages(WORKSPACE_PACKAGES)
    print("Run %d: Topological order took %.1f s" % (run, time.time() - START_TIME))


# -----------------------------------------------------------------------------
# EOF
# -----------------------------------------------------------------------------
Example #30
0
def _create_unmerged_devel_setup(context):
    # Find all of the leaf packages in the workspace
    # where leaf means that nothing in the workspace depends on it
    workspace_packages = find_packages(context.source_space, exclude_subspaces=True)
    ordered_packages = topological_order_packages(workspace_packages)
    workspace_packages = dict([(p.name, p) for pth, p in workspace_packages.items()])
    dependencies = set([])
    for name, pkg in workspace_packages.items():
        dependencies.update([d.name for d in pkg.buildtool_depends + pkg.build_depends + pkg.run_depends])
    leaf_packages = []
    for name, pkg in workspace_packages.items():
        if pkg.name not in dependencies:
            leaf_packages.append(pkg.name)
    assert leaf_packages, leaf_packages  # Defensive, there should always be at least one leaf
    leaf_sources = []
    for pkg_name in leaf_packages:
        source_path = os.path.join(context.devel_space, pkg_name, "setup.sh")
        if os.path.isfile(source_path):
            leaf_sources.append(". {0}".format(source_path))
    # In addition to the leaf packages, we need to source the recursive run depends of the leaf packages
    run_depends = get_recursive_run_depends_in_workspace(
        [workspace_packages[p] for p in leaf_packages], ordered_packages
    )
    run_depends_sources = []
    for run_dep_name in [p.name for pth, p in run_depends]:
        source_path = os.path.join(context.devel_space, run_dep_name, "setup.sh")
        if os.path.isfile(source_path):
            run_depends_sources.append(". {0}".format(source_path))
    # Create the setup.sh file
    setup_sh_path = os.path.join(context.devel_space, "setup.sh")
    env_file = """\
#!/usr/bin/env sh
# generated from within catkin_tools/verbs/catkin_build/build.py

# This file aggregates the many setup.sh files in the various
# unmerged devel spaces in this folder.
# This is accomplished by sourcing each leaf package and all the
# recursive run dependencies of those leaf packages

# Source the first package's setup.sh without the --extend option
{first_source}

# remove all passed in args, resetting $@, $*, $#, $n
shift $#
# set the --extend arg for the rest of the packages' setup.sh files
set -- $@ "--extend"
# source setup.sh for each of the leaf packages in the workspace
{leaf_sources}

# And now the setup.sh for each of their recursive run dependencies
{run_depends_sources}
""".format(
        first_source=leaf_sources[0],
        leaf_sources="\n".join(leaf_sources[1:]),
        run_depends_sources="\n".join(run_depends_sources),
    )
    with open(setup_sh_path, "w") as f:
        f.write(env_file)
    # Make this file executable
    os.chmod(setup_sh_path, stat.S_IXUSR | stat.S_IWUSR | stat.S_IRUSR)
    # Create the setup.bash file
    setup_bash_path = os.path.join(context.devel_space, "setup.bash")
    with open(setup_bash_path, "w") as f:
        f.write(
            """\
#!/usr/bin/env bash
# generated from within catkin_tools/verbs/catkin_build/build.py

CATKIN_SHELL=bash

# source setup.sh from same directory as this file
_BUILD_SETUP_DIR=$(builtin cd "`dirname "${BASH_SOURCE[0]}"`" && pwd)
. "$_BUILD_SETUP_DIR/setup.sh"
"""
        )
    # Make this file executable
    os.chmod(setup_bash_path, stat.S_IXUSR | stat.S_IWUSR | stat.S_IRUSR)
    setup_zsh_path = os.path.join(context.devel_space, "setup.zsh")
    with open(setup_zsh_path, "w") as f:
        f.write(
            """\
#!/usr/bin/env zsh
# generated from within catkin_tools/verbs/catkin_build/build.py

CATKIN_SHELL=zsh

# source setup.sh from same directory as this file
_BUILD_SETUP_DIR=$(builtin cd -q "`dirname "$0"`" && pwd)
emulate sh # emulate POSIX
. "$_BUILD_SETUP_DIR/setup.sh"
emulate zsh # back to zsh mode
"""
        )
    # Make this file executable
    os.chmod(setup_zsh_path, stat.S_IXUSR | stat.S_IWUSR | stat.S_IRUSR)
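
The leaf-package detection at the top of the function generalizes to any package dict. A minimal sketch (the source-space path is a placeholder):

from catkin_pkg.packages import find_packages

# hypothetical source space
packages = find_packages('/path/to/ws/src', exclude_subspaces=True)

# collect every name that some package in the workspace depends on
depended_on = set()
for pkg in packages.values():
    depended_on.update(
        d.name for d in pkg.buildtool_depends + pkg.build_depends + pkg.run_depends)

# leaves are the packages nothing else in the workspace depends on
leaves = sorted(p.name for p in packages.values() if p.name not in depended_on)
print('leaf packages: ' + ', '.join(leaves))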
Example #31
0
def main(opts):

    # Load the context
    ctx = Context.load(opts.workspace, opts.profile, load_env=False)

    if not ctx:
        print(clr("@{rf}ERROR: Could not determine workspace.@|"),
              file=sys.stderr)
        sys.exit(1)

    if opts.directory:
        folders = opts.directory
    else:
        folders = [ctx.source_space_abs]

    list_entry_format = '@{pf}-@| @{cf}%s@|' if not opts.unformatted else '%s'

    opts.depends_on = set(opts.depends_on) if opts.depends_on else set()
    warnings = []
    for folder in folders:
        try:
            packages = find_packages(folder, warnings=warnings)
            ordered_packages = topological_order_packages(packages)
            if ordered_packages and ordered_packages[-1][0] is None:
                print(clr("@{rf}ERROR: Circular dependency within packages:@| " +
                          ordered_packages[-1][1]),
                      file=sys.stderr)
                sys.exit(1)
            packages_by_name = {
                pkg.name: (pth, pkg)
                for pth, pkg in ordered_packages
            }

            if opts.depends_on or opts.rdepends_on:

                dependents = set()

                for pth, pkg in ordered_packages:
                    is_dep = opts.depends_on.intersection(
                        [p.name for p in pkg.build_depends + pkg.run_depends])
                    if is_dep:
                        dependents.add(pkg.name)

                for pth, pkg in [
                        packages_by_name.get(n) for n in opts.rdepends_on
                ]:
                    if pkg is None:
                        continue
                    rbd = get_recursive_build_dependents_in_workspace(
                        pkg.name, ordered_packages)
                    rrd = get_recursive_run_dependents_in_workspace(
                        pkg.name, ordered_packages)
                    dependents.update([p.name for _, p in rbd])
                    dependents.update([p.name for _, p in rrd])

                filtered_packages = [(pth, pkg)
                                     for pth, pkg in ordered_packages
                                     if pkg.name in dependents]
            elif opts.this:
                this_package = find_enclosing_package(
                    search_start_path=getcwd(),
                    ws_path=ctx.workspace,
                    warnings=[])
                if this_package is None:
                    sys.exit(1)
                if this_package in packages_by_name:
                    filtered_packages = [packages_by_name[this_package]]
                else:
                    filtered_packages = []
            else:
                filtered_packages = ordered_packages

            for pkg_pth, pkg in filtered_packages:
                print(clr(list_entry_format % pkg.name))
                if opts.rdeps:
                    build_deps = [
                        p
                        for dp, p in get_recursive_build_depends_in_workspace(
                            pkg, ordered_packages)
                    ]
                    run_deps = [
                        p for dp, p in get_recursive_run_depends_in_workspace(
                            [pkg], ordered_packages)
                    ]
                else:
                    build_deps = [
                        dep for dep in pkg.build_depends
                        if dep.evaluated_condition
                    ]
                    run_deps = [
                        dep for dep in pkg.run_depends
                        if dep.evaluated_condition
                    ]

                if opts.deps or opts.rdeps:
                    if len(build_deps) > 0:
                        print(clr('  @{yf}build_depend:@|'))
                        for dep in build_deps:
                            print(clr('  @{pf}-@| %s' % dep.name))
                    if len(run_deps) > 0:
                        print(clr('  @{yf}run_depend:@|'))
                        for dep in run_deps:
                            print(clr('  @{pf}-@| %s' % dep.name))
        except InvalidPackage as ex:
            sys.exit(
                clr("@{rf}Error:@| The file %s is an invalid package.xml file."
                    " See below for details:\n\n%s" %
                    (ex.package_path, ex.msg)))

    # Print out warnings
    if not opts.quiet:
        for warning in warnings:
            print(clr("@{yf}Warning:@| %s" % warning), file=sys.stderr)
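
The reverse-dependency filtering behind --rdepends-on can also be run standalone. A minimal sketch; the import path is assumed to be catkin_tools.common, and the path and package name are placeholders:

from catkin_pkg.packages import find_packages
from catkin_pkg.topological_order import topological_order_packages
from catkin_tools.common import get_recursive_build_dependents_in_workspace

packages = find_packages('/path/to/ws/src')      # hypothetical source space
ordered = topological_order_packages(packages)

# every workspace package whose build recursively depends on 'my_pkg'
dependents = get_recursive_build_dependents_in_workspace('my_pkg', ordered)
print([pkg.name for _, pkg in dependents])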
Example #32
0
def main(opts):

    # Load the context
    ctx = Context.load(opts.workspace, opts.profile, load_env=False)

    if not ctx:
        print(clr("@{rf}ERROR: Could not determine workspace.@|"), file=sys.stderr)
        sys.exit(1)

    folders = [ctx.source_space_abs]

    list_entry_format = '@{pf}-@| @{cf}%s@|' if not opts.unformatted else '%s'

    opts.depends_on = set(opts.depends_on) if opts.depends_on else set()
    warnings = []
    for folder in folders:
        try:
            packages = find_packages(folder, warnings=warnings)
            ordered_packages = topological_order_packages(packages)
            packages_by_name = {pkg.name: (pth, pkg) for pth, pkg in ordered_packages}

            if opts.depends_on or opts.rdepends_on:

                dependents = set()

                for pth, pkg in ordered_packages:
                    is_dep = opts.depends_on.intersection([
                        p.name for p in pkg.build_depends + pkg.run_depends])
                    if is_dep:
                        dependents.add(pkg.name)

                for pth, pkg in [packages_by_name.get(n) for n in opts.rdepends_on]:
                    if pkg is None:
                        continue
                    rbd = get_recursive_build_dependents_in_workspace(pkg.name, ordered_packages)
                    rrd = get_recursive_run_dependents_in_workspace(pkg.name, ordered_packages)
                    dependents.update([p.name for _, p in rbd])
                    dependents.update([p.name for _, p in rrd])

                filtered_packages = [
                    (pth, pkg)
                    for pth, pkg in ordered_packages
                    if pkg.name in dependents]
            elif opts.this:
                this_package = find_enclosing_package(
                    search_start_path=getcwd(),
                    ws_path=ctx.workspace,
                    warnings=[])
                if this_package is None:
                    sys.exit(1)
                if this_package in packages_by_name:
                    filtered_packages = [packages_by_name[this_package]]
                else:
                    filtered_packages = []
            else:
                filtered_packages = ordered_packages

            for pkg_pth, pkg in filtered_packages:
                print(clr(list_entry_format % pkg.name))
                if opts.rdeps:
                    build_deps = [p for dp, p in get_recursive_build_depends_in_workspace(pkg, ordered_packages)]
                    run_deps = [p for dp, p in get_recursive_run_depends_in_workspace([pkg], ordered_packages)]
                else:
                    build_deps = pkg.build_depends
                    run_deps = pkg.run_depends

                if opts.deps or opts.rdeps:
                    if len(build_deps) > 0:
                        print(clr('  @{yf}build_depend:@|'))
                        for dep in build_deps:
                            print(clr('  @{pf}-@| %s' % dep.name))
                    if len(run_deps) > 0:
                        print(clr('  @{yf}run_depend:@|'))
                        for dep in run_deps:
                            print(clr('  @{pf}-@| %s' % dep.name))
        except InvalidPackage as ex:
            message = '\n'.join(ex.args)
            print(clr("@{rf}Error:@| The directory %s contains an invalid package."
                      " See below for details:\n\n%s" % (folder, message)))

    # Print out warnings
    if not opts.quiet:
        for warning in warnings:
            print(clr("@{yf}Warning:@| %s" % warning), file=sys.stderr)
Example #33
0
def _create_unmerged_devel_setup(context):
    # Find all of the leaf packages in the workspace
    # where leaf means that nothing in the workspace depends on it
    workspace_packages = find_packages(context.source_space_abs,
                                       exclude_subspaces=True)
    ordered_packages = topological_order_packages(workspace_packages)
    workspace_packages = dict([(p.name, p)
                               for pth, p in workspace_packages.items()])
    dependencies = set([])
    for name, pkg in workspace_packages.items():
        dependencies.update([
            d.name for d in pkg.buildtool_depends + pkg.build_depends +
            pkg.run_depends
        ])
    leaf_packages = []
    for name, pkg in workspace_packages.items():
        if pkg.name not in dependencies:
            leaf_packages.append(pkg.name)
    assert leaf_packages, leaf_packages  # Defensive, there should always be at least one leaf
    leaf_sources = []
    for pkg_name in leaf_packages:
        source_path = os.path.join(context.devel_space_abs, pkg_name,
                                   'setup.sh')
        if os.path.isfile(source_path):
            leaf_sources.append('. {0}'.format(source_path))
    # In addition to the leaf packages, we need to source the recursive run depends of the leaf packages
    run_depends = get_recursive_run_depends_in_workspace(
        [workspace_packages[p] for p in leaf_packages], ordered_packages)
    run_depends_sources = []
    for run_dep_name in [p.name for pth, p in run_depends]:
        source_path = os.path.join(context.devel_space_abs, run_dep_name,
                                   'setup.sh')
        if os.path.isfile(source_path):
            run_depends_sources.append('. {0}'.format(source_path))
    # Create the setup.sh file
    setup_sh_path = os.path.join(context.devel_space_abs, 'setup.sh')
    env_file = """\
#!/usr/bin/env sh
# generated from within catkin_tools/verbs/catkin_build/build.py

# This file aggregates the many setup.sh files in the various
# unmerged devel spaces in this folder.
# This is accomplished by sourcing each leaf package and all the
# recursive run dependencies of those leaf packages

# Source the first package's setup.sh without the --extend option
{first_source}

# remove all passed in args, resetting $@, $*, $#, $n
shift $#
# set the --extend arg for the rest of the packages' setup.sh files
set -- $@ "--extend"
# source setup.sh for each of the leaf packages in the workspace
{leaf_sources}

# And now the setup.sh for each of their recursive run dependencies
{run_depends_sources}
""".format(first_source=leaf_sources[0],
           leaf_sources='\n'.join(leaf_sources[1:]),
           run_depends_sources='\n'.join(run_depends_sources))
    with open(setup_sh_path, 'w') as f:
        f.write(env_file)
    # Make this file executable
    os.chmod(setup_sh_path, stat.S_IXUSR | stat.S_IWUSR | stat.S_IRUSR)
    # Create the setup.bash file
    setup_bash_path = os.path.join(context.devel_space_abs, 'setup.bash')
    with open(setup_bash_path, 'w') as f:
        f.write("""\
#!/usr/bin/env bash
# generated from within catkin_tools/verbs/catkin_build/build.py

CATKIN_SHELL=bash

# source setup.sh from same directory as this file
_BUILD_SETUP_DIR=$(builtin cd "`dirname "${BASH_SOURCE[0]}"`" && pwd)
. "$_BUILD_SETUP_DIR/setup.sh"
""")
    # Make this file executable
    os.chmod(setup_bash_path, stat.S_IXUSR | stat.S_IWUSR | stat.S_IRUSR)
    setup_zsh_path = os.path.join(context.devel_space_abs, 'setup.zsh')
    with open(setup_zsh_path, 'w') as f:
        f.write("""\
#!/usr/bin/env zsh
# generated from within catkin_tools/verbs/catkin_build/build.py

CATKIN_SHELL=zsh

# source setup.sh from same directory as this file
_BUILD_SETUP_DIR=$(builtin cd -q "`dirname "$0"`" && pwd)
emulate sh # emulate POSIX
. "$_BUILD_SETUP_DIR/setup.sh"
emulate zsh # back to zsh mode
""")
    # Make this file executable
    os.chmod(setup_zsh_path, stat.S_IXUSR | stat.S_IWUSR | stat.S_IRUSR)
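
The recursive run-dependency query used above can likewise be exercised on its own. A minimal sketch; the import path is assumed to be catkin_tools.common, and the path and package name are placeholders:

from catkin_pkg.packages import find_packages
from catkin_pkg.topological_order import topological_order_packages
from catkin_tools.common import get_recursive_run_depends_in_workspace

packages = find_packages('/path/to/ws/src')      # hypothetical source space
ordered = topological_order_packages(packages)
pkgs_by_name = {pkg.name: pkg for _, pkg in ordered}

# recursive run dependencies of one package, restricted to the workspace
run_deps = get_recursive_run_depends_in_workspace(
    [pkgs_by_name['my_pkg']], ordered)           # hypothetical package name
print([pkg.name for _, pkg in run_deps])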
Example #34
0
def build_isolated_workspace(
    context,
    packages=None,
    start_with=None,
    no_deps=False,
    unbuilt=False,
    n_jobs=None,
    force_cmake=False,
    pre_clean=False,
    force_color=False,
    quiet=False,
    interleave_output=False,
    no_status=False,
    limit_status_rate=10.0,
    lock_install=False,
    no_notify=False,
    continue_on_failure=False,
    summarize_build=None,
):
    """Builds a catkin workspace in isolation

    This function finds all of the packages in the source space, starts some
    executors, feeds them packages to build based on dependencies and
    topological ordering, and then monitors the executors' output, handling
    the logging, start, failure, and completion of package builds as well as
    the shutdown of the executors when appropriate.

    :param context: context in which to build the catkin workspace
    :type context: :py:class:`catkin_tools.verbs.catkin_build.context.Context`
    :param packages: list of packages to build, by default their dependencies will also be built
    :type packages: list
    :param start_with: package to start with, skipping all packages which precede it in the topological order
    :type start_with: str
    :param no_deps: If True, the dependencies of packages will not be built first
    :type no_deps: bool
    :param n_jobs: number of parallel package build jobs
    :type n_jobs: int
    :param force_cmake: forces invocation of CMake if True, default is False
    :type force_cmake: bool
    :param force_color: forces colored output even if terminal does not support it
    :type force_color: bool
    :param quiet: suppresses the output of commands unless there is an error
    :type quiet: bool
    :param interleave_output: prints the output of commands as they are received
    :type interleave_output: bool
    :param no_status: disables status bar
    :type no_status: bool
    :param limit_status_rate: maximum rate in Hz at which status updates are
        drawn; a value of 0 places no limit (the default is 10.0)
    :type limit_status_rate: float
    :param lock_install: causes executors to synchronize on access of install commands
    :type lock_install: bool
    :param no_notify: suppresses system notifications
    :type no_notify: bool
    :param continue_on_failure: do not stop building other jobs on error
    :type continue_on_failure: bool
    :param summarize_build: if True, summarize the build at the end; if None,
        summarize only when continue_on_failure is True and the build fails;
        if False, never summarize
    :type summarize_build: bool

    :raises: SystemExit if buildspace is a file or no packages were found in the source space
        or if the provided options are invalid
    """
    pre_start_time = time.time()

    # Assert that the limit_status_rate is valid
    if limit_status_rate < 0:
        sys.exit("[build] @!@{rf}Error:@| The value of --status-rate must be greater than or equal to zero.")

    # Declare a buildspace marker describing the build config for error checking
    buildspace_marker_data = {
        'workspace': context.workspace,
        'profile': context.profile,
        'install': context.install,
        'install_space': context.install_space_abs,
        'devel_space': context.devel_space_abs,
        'source_space': context.source_space_abs}

    # Check build config
    if os.path.exists(os.path.join(context.build_space_abs, BUILDSPACE_MARKER_FILE)):
        with open(os.path.join(context.build_space_abs, BUILDSPACE_MARKER_FILE)) as buildspace_marker_file:
            existing_buildspace_marker_data = yaml.safe_load(buildspace_marker_file)
            misconfig_lines = ''
            for (k, v) in existing_buildspace_marker_data.items():
                new_v = buildspace_marker_data.get(k, None)
                if new_v != v:
                    misconfig_lines += (
                        '\n - %s: %s (stored) is not %s (commanded)' %
                        (k, v, new_v))
            if len(misconfig_lines) > 0:
                sys.exit(clr(
                    "\n@{rf}Error:@| Attempting to build a catkin workspace using build space: "
                    "\"%s\" but that build space's most recent configuration "
                    "differs from the commanded one in ways which will cause "
                    "problems. Fix the following options or use @{yf}`catkin "
                    "clean -b`@| to remove the build space: %s" %
                    (context.build_space_abs, misconfig_lines)))

    # Summarize the context
    summary_notes = []
    if force_cmake:
        summary_notes += [clr("@!@{cf}NOTE:@| Forcing CMake to run for each package.")]
    log(context.summary(summary_notes))

    # Make sure there is a build folder and it is not a file
    if os.path.exists(context.build_space_abs):
        if os.path.isfile(context.build_space_abs):
            sys.exit(clr(
                "[build] @{rf}Error:@| Build space '{0}' exists but is a file and not a folder."
                .format(context.build_space_abs)))
    # If it doesn't exist, create it
    else:
        log("[build] Creating build space: '{0}'".format(context.build_space_abs))
        os.makedirs(context.build_space_abs)

    # Write the current build config for config error checking
    with open(os.path.join(context.build_space_abs, BUILDSPACE_MARKER_FILE), 'w') as buildspace_marker_file:
        buildspace_marker_file.write(yaml.dump(buildspace_marker_data, default_flow_style=False))

    # Get all the packages in the context source space
    # Suppress warnings since this is a utility function
    workspace_packages = find_packages(context.source_space_abs, exclude_subspaces=True, warnings=[])

    # Get packages which have not been built yet
    unbuilt_pkgs = get_unbuilt_packages(context, workspace_packages)

    # Handle unbuilt packages
    if unbuilt:
        # Check if there are any unbuilt
        if len(unbuilt_pkgs) > 0:
            # Add the unbuilt packages
            packages.extend(list(unbuilt_pkgs))
        else:
            log("[build] No unbuilt packages to be built.")
            return

    # If no_deps is given, ensure packages to build are provided
    if no_deps and packages is None:
        log(clr("[build] @!@{rf}Error:@| With no_deps, you must specify packages to build."))
        return

    # Find list of packages in the workspace
    packages_to_be_built, packages_to_be_built_deps, all_packages = determine_packages_to_be_built(
        packages, context, workspace_packages)

    if not no_deps:
        # Extend packages to be built to include their deps
        packages_to_be_built.extend(packages_to_be_built_deps)

    # Also re-sort
    try:
        packages_to_be_built = topological_order_packages(dict(packages_to_be_built))
    except AttributeError:
        log(clr("[build] @!@{rf}Error:@| The workspace packages have a circular "
                "dependency, and cannot be built. Please run `catkin list "
                "--deps` to determine the problematic package(s)."))
        return

    # Check the number of packages to be built
    if len(packages_to_be_built) == 0:
        log(clr('[build] No packages to be built.'))
        return

    # Assert start_with package is in the workspace
    verify_start_with_option(
        start_with,
        packages,
        all_packages,
        packages_to_be_built + packages_to_be_built_deps)

    # Populate .catkin file if we're not installing
    # NOTE: This is done to prevent the Catkin CMake code from doing it,
    # which isn't parallel-safe. Catkin CMake only modifies this file if
    # its package source path isn't found.
    if not context.install:
        dot_catkin_file_path = os.path.join(context.devel_space_abs, '.catkin')
        # If the file exists, get the current paths
        if os.path.exists(dot_catkin_file_path):
            with open(dot_catkin_file_path, 'r') as dot_catkin_file:
                dot_catkin_paths = dot_catkin_file.read().split(';')
        else:
            dot_catkin_paths = []

        # Update the list with the new packages (in topological order)
        packages_to_be_built_paths = [
            os.path.join(context.source_space_abs, path)
            for path, pkg in packages_to_be_built
        ]

        # The inner comprehension already yields absolute paths, so no
        # further join is needed
        new_dot_catkin_paths = [
            path
            for path in [os.path.join(context.source_space_abs, path) for path, pkg in all_packages]
            if path in dot_catkin_paths or path in packages_to_be_built_paths
        ]

        # Write the new file if it's different, otherwise, leave it alone
        if dot_catkin_paths == new_dot_catkin_paths:
            wide_log("[build] Package table is up to date.")
        else:
            wide_log("[build] Updating package table.")
            with open(dot_catkin_file_path, 'w') as dot_catkin_file:
                dot_catkin_file.write(';'.join(new_dot_catkin_paths))

    # Remove packages before start_with
    if start_with is not None:
        for path, pkg in list(packages_to_be_built):
            if pkg.name != start_with:
                wide_log(clr("@!@{pf}Skipping@| @{gf}---@| @{cf}{}@|").format(pkg.name))
                packages_to_be_built.pop(0)
            else:
                break

    # Get the names of all packages to be built
    packages_to_be_built_names = [p.name for _, p in packages_to_be_built]
    packages_to_be_built_deps_names = [p.name for _, p in packages_to_be_built_deps]

    # Generate prebuild jobs, if necessary
    prebuild_jobs = {}
    setup_util_exists = os.path.exists(os.path.join(context.devel_space_abs, '_setup_util.py'))
    if context.link_devel and (not setup_util_exists or (force_cmake and len(packages) == 0)):
        wide_log('[build] Preparing linked develspace...')

        pkg_dict = dict([(pkg.name, (pth, pkg)) for pth, pkg in all_packages])

        if 'catkin' in packages_to_be_built_names + packages_to_be_built_deps_names:
            # Use catkin as the prebuild package
            prebuild_pkg_path, prebuild_pkg = pkg_dict['catkin']
        else:
            # Generate explicit prebuild package
            prebuild_pkg_path = generate_prebuild_package(context.build_space_abs, context.devel_space_abs, force_cmake)
            prebuild_pkg = parse_package(prebuild_pkg_path)

        # Create the prebuild job
        prebuild_job = create_catkin_build_job(
            context,
            prebuild_pkg,
            prebuild_pkg_path,
            dependencies=[],
            force_cmake=force_cmake,
            pre_clean=pre_clean,
            prebuild=True)

        # Add the prebuild job
        prebuild_jobs[prebuild_job.jid] = prebuild_job

    # Remove prebuild jobs from normal job list
    for prebuild_jid, prebuild_job in prebuild_jobs.items():
        if prebuild_jid in packages_to_be_built_names:
            packages_to_be_built_names.remove(prebuild_jid)

    # Initial jobs list is just the prebuild jobs
    jobs = list(prebuild_jobs.values())

    # Get all build type plugins
    build_job_creators = {
        ep.name: ep.load()['create_build_job']
        for ep in pkg_resources.iter_entry_points(group='catkin_tools.jobs')
    }

    # It's a problem if there aren't any build types available
    if len(build_job_creators) == 0:
        sys.exit('Error: No build types available. Please check your catkin_tools installation.')

    # Construct jobs
    for pkg_path, pkg in all_packages:
        if pkg.name not in packages_to_be_built_names:
            continue

        # Ignore metapackages
        if 'metapackage' in [e.tagname for e in pkg.exports]:
            continue

        # Get actual execution deps
        deps = [
            p.name for _, p
            in get_cached_recursive_build_depends_in_workspace(pkg, packages_to_be_built)
            if p.name not in prebuild_jobs
        ]

        # All jobs depend on the prebuild job if it's defined
        for j in prebuild_jobs.values():
            deps.append(j.jid)

        # Determine the job parameters
        build_job_kwargs = dict(
            context=context,
            package=pkg,
            package_path=pkg_path,
            dependencies=deps,
            force_cmake=force_cmake,
            pre_clean=pre_clean)

        # Create the job based on the build type
        build_type = get_build_type(pkg)

        if build_type in build_job_creators:
            jobs.append(build_job_creators[build_type](**build_job_kwargs))
        else:
            wide_log(clr(
                "[build] @!@{yf}Warning:@| Skipping package `{}` because it "
                "has an unsupported package build type: `{}`"
            ).format(pkg.name, build_type))

            wide_log(clr("[build] Note: Available build types:"))
            for bt_name in build_job_creators.keys():
                wide_log(clr("[build]  - `{}`".format(bt_name)))

    # Queue for communicating status
    event_queue = Queue()

    try:
        # Spin up status output thread
        status_thread = ConsoleStatusController(
            'build',
            ['package', 'packages'],
            jobs,
            n_jobs,
            [pkg.name for _, pkg in context.packages],
            [p for p in context.whitelist],
            [p for p in context.blacklist],
            event_queue,
            show_notifications=not no_notify,
            show_active_status=not no_status,
            show_buffered_stdout=not quiet and not interleave_output,
            show_buffered_stderr=not interleave_output,
            show_live_stdout=interleave_output,
            show_live_stderr=interleave_output,
            show_stage_events=not quiet,
            show_full_summary=(summarize_build is True),
            pre_start_time=pre_start_time,
            active_status_rate=limit_status_rate)
        status_thread.start()

        # Initialize locks
        locks = {
            'installspace': asyncio.Lock() if lock_install else FakeLock()
        }

        # Block while running N jobs asynchronously
        try:
            all_succeeded = run_until_complete(execute_jobs(
                'build',
                jobs,
                locks,
                event_queue,
                os.path.join(context.build_space_abs, '_logs'),
                max_toplevel_jobs=n_jobs,
                continue_on_failure=continue_on_failure,
                continue_without_deps=False))
        except Exception:
            status_thread.keep_running = False
            all_succeeded = False
            status_thread.join(1.0)
            wide_log(str(traceback.format_exc()))

        status_thread.join(1.0)

        # Warn user about new packages
        now_unbuilt_pkgs = get_unbuilt_packages(context, workspace_packages)
        new_pkgs = [p for p in unbuilt_pkgs if p not in now_unbuilt_pkgs]
        if len(new_pkgs) > 0:
            log(clr("[build] @/@!Note:@| @/Workspace packages have changed, "
                    "please re-source setup files to use them.@|"))

        if all_succeeded:
            # Create isolated devel setup if necessary
            if context.isolate_devel:
                if not context.install:
                    _create_unmerged_devel_setup(context, now_unbuilt_pkgs)
                else:
                    _create_unmerged_devel_setup_for_install(context)
            return 0
        else:
            return 1

    except KeyboardInterrupt:
        wide_log("[build] Interrupted by user!")
        event_queue.put(None)
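
In the example above, build types are discovered through setuptools entry points: pkg_resources.iter_entry_points(group='catkin_tools.jobs') yields plugins whose loaded object is a dict exposing a 'create_build_job' callable. A minimal sketch of how such a plugin could be registered; the package and module names here are hypothetical, not part of catkin_tools:

from setuptools import setup

setup(
    name='catkin_tools_mybuild',          # hypothetical plugin distribution
    version='0.1.0',
    packages=['catkin_tools_mybuild'],
    entry_points={
        'catkin_tools.jobs': [
            # The entry point name is the package build type; the loaded
            # object must be a dict with a 'create_build_job' key, matching
            # ep.load()['create_build_job'] in the example above.
            'mybuild = catkin_tools_mybuild:description',
        ],
    },
)

Here catkin_tools_mybuild/__init__.py would define description = {'create_build_job': <factory>}, with the factory accepting the keyword arguments assembled in build_job_kwargs above.
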
Example #35
def build_isolated_workspace(context,
                             packages=None,
                             start_with=None,
                             no_deps=False,
                             jobs=None,
                             force_cmake=False,
                             force_color=False,
                             quiet=False,
                             interleave_output=False,
                             no_status=False,
                             lock_install=False,
                             no_notify=False):
    """Builds a catkin workspace in isolation

    This function will find all of the packages in the source space, start some
    executors, feed them packages to build based on dependencies and topological
    ordering, and then monitor the output of the executors, handling logging of
    the builds, starting builds, failing builds, and finishing builds of
    packages, and handling the shutdown of the executors when appropriate.

    :param context: context in which to build the catkin workspace
    :type context: :py:class:`catkin_tools.verbs.catkin_build.context.Context`
    :param packages: list of packages to build, by default their dependencies will also be built
    :type packages: list
    :param start_with: package to start with, skipping all packages which precede it in the topological order
    :type start_with: str
    :param no_deps: If True, the dependencies of packages will not be built first
    :type no_deps: bool
    :param jobs: number of parallel package build jobs
    :type jobs: int
    :param force_cmake: forces invocation of CMake if True, default is False
    :type force_cmake: bool
    :param force_color: forces colored output even if terminal does not support it
    :type force_color: bool
    :param quiet: suppresses the output of commands unless there is an error
    :type quiet: bool
    :param interleave_output: prints the output of commands as they are received
    :type interleave_output: bool
    :param no_status: disables status bar
    :type no_status: bool
    :param lock_install: causes executors to synchronize on access of install commands
    :type lock_install: bool
    :param no_notify: suppresses system notifications
    :type no_notify: bool

    :raises: SystemExit if buildspace is a file or no packages were found in the source space
        or if the provided options are invalid
    """
    # If no_deps is given, ensure packages to build are provided
    if no_deps and packages is None:
        sys.exit("With --no-deps, you must specify packages to build.")
    # Make sure there is a build folder and it is not a file
    if os.path.exists(context.build_space_abs):
        if os.path.isfile(context.build_space_abs):
            sys.exit(
                clr("@{rf}Error:@| Build space '{0}' exists but is a file and not a folder."
                    .format(context.build_space_abs)))
    # If it doesn't exist, create it
    else:
        log("Creating build space directory, '{0}'".format(
            context.build_space_abs))
        os.makedirs(context.build_space_abs)

    # Check for catkin_make droppings
    if context.corrupted_by_catkin_make():
        sys.exit(
            clr("@{rf}Error:@| Build space `{0}` exists but appears to have previously been "
                "created by the `catkin_make` or `catkin_make_isolated` tool. "
                "Please choose a different directory to use with `catkin build` "
                "or clean the build space.".format(context.build_space_abs)))

    # Declare a buildspace marker describing the build config for error checking
    buildspace_marker_data = {
        'workspace': context.workspace,
        'profile': context.profile,
        'install': context.install,
        'install_space': context.install_space_abs,
        'devel_space': context.devel_space_abs,
        'source_space': context.source_space_abs
    }

    # Check build config
    if os.path.exists(
            os.path.join(context.build_space_abs, BUILDSPACE_MARKER_FILE)):
        with open(os.path.join(
                context.build_space_abs,
                BUILDSPACE_MARKER_FILE)) as buildspace_marker_file:
            existing_buildspace_marker_data = yaml.safe_load(buildspace_marker_file)
            misconfig_lines = ''
            for (k, v) in existing_buildspace_marker_data.items():
                new_v = buildspace_marker_data.get(k, None)
                if new_v != v:
                    misconfig_lines += (
                        '\n - %s: %s (stored) is not %s (commanded)' %
                        (k, v, new_v))
            if len(misconfig_lines) > 0:
                sys.exit(
                    clr("\n@{rf}Error:@| Attempting to build a catkin workspace using build space: "
                        "\"%s\" but that build space's most recent configuration "
                        "differs from the commanded one in ways which will cause "
                        "problems. Fix the following options or use @{yf}`catkin "
                        "clean -b`@| to remove the build space: %s" %
                        (context.build_space_abs, misconfig_lines)))

    # Write the current build config for config error checking
    with open(os.path.join(context.build_space_abs, BUILDSPACE_MARKER_FILE),
              'w') as buildspace_marker_file:
        buildspace_marker_file.write(
            yaml.dump(buildspace_marker_data, default_flow_style=False))

    # Summarize the context
    summary_notes = []
    if force_cmake:
        summary_notes += [
            clr("@!@{cf}NOTE:@| Forcing CMake to run for each package.")
        ]
    log(context.summary(summary_notes))

    # Find list of packages in the workspace
    packages_to_be_built, packages_to_be_built_deps, all_packages = determine_packages_to_be_built(
        packages, context)
    completed_packages = []
    if no_deps:
        # Consider deps as "completed" (completed_packages holds package names)
        completed_packages.extend([pkg.name for _, pkg in packages_to_be_built_deps])
    else:
        # Extend packages to be built to include their deps
        packages_to_be_built.extend(packages_to_be_built_deps)
    # Also re-sort
    packages_to_be_built = topological_order_packages(
        dict(packages_to_be_built))
    max_package_name_length = max(
        [len(pkg.name) for pth, pkg in packages_to_be_built])
    # Assert start_with package is in the workspace
    verify_start_with_option(start_with, packages, all_packages,
                             packages_to_be_built + packages_to_be_built_deps)

    # Setup pool of executors
    executors = {}
    # The communication queue can have ExecutorEvent's or str's passed into it from the executors
    comm_queue = Queue()
    # The job queue has Jobs put into it
    job_queue = Queue()
    # Lock for install space
    install_lock = Lock() if lock_install else FakeLock()
    # Determine the number of executors
    try:
        if jobs:
            jobs = int(jobs)
            if jobs < 1:
                sys.exit(
                    "Specified number of jobs '{0}' is not positive.".format(
                        jobs))
    except ValueError:
        sys.exit("Specified number of jobs '{0}' is no integer.".format(jobs))
    try:
        jobs = cpu_count() if jobs is None else jobs
    except NotImplementedError:
        log('Failed to determine the cpu_count, falling back to 1 job as the default.')
        jobs = 1 if jobs is None else jobs
    # If only one set of jobs, turn on interleaving to get more responsive feedback
    if jobs == 1:
        # TODO: make the system more intelligent so that it can automatically switch to streaming output
        #       when only one job is building, even if multiple jobs could be building
        quiet = False
        interleave_output = True
    # Start the executors
    for x in range(jobs):
        e = Executor(x, context, comm_queue, job_queue, install_lock)
        executors[x] = e
        e.start()

    try:  # Finally close out now running executors
        # Variables for tracking running jobs and built/building packages
        start = time.time()
        total_packages = len(packages_to_be_built)
        package_count = 0
        running_jobs = {}
        log_dir = os.path.join(context.build_space_abs, 'build_logs')
        color = True
        if not force_color and not is_tty(sys.stdout):
            color = False
        out = OutputController(log_dir,
                               quiet,
                               interleave_output,
                               color,
                               max_package_name_length,
                               prefix_output=(jobs > 1))
        if no_status:
            disable_wide_log()

        # Prime the job_queue
        ready_packages = []
        if start_with is None:
            ready_packages = get_ready_packages(packages_to_be_built,
                                                running_jobs,
                                                completed_packages)
        while start_with is not None:
            ready_packages.extend(
                get_ready_packages(packages_to_be_built, running_jobs,
                                   completed_packages))
            while ready_packages:
                pth, pkg = ready_packages.pop(0)
                if pkg.name != start_with:
                    completed_packages.append(pkg.name)
                    package_count += 1
                    wide_log("[build] Skipping package '{0}'".format(pkg.name))
                else:
                    ready_packages.insert(0, (pth, pkg))
                    start_with = None
                    break
        running_jobs = queue_ready_packages(ready_packages, running_jobs,
                                            job_queue, context, force_cmake)
        assert running_jobs

        # error_state is shared with the nested function below, so store it
        # in a mutable container; rebinding a plain local inside
        # set_error_state would not propagate to this scope
        error_state = [False]
        errors = []

        def set_error_state():
            if error_state[0]:
                return
            # Set the error state to prevent new jobs
            error_state[0] = True
            # Empty the job queue
            while not job_queue.empty():
                job_queue.get()
            # Kill the executors by sending a None to the job queue for each of them
            for x in range(jobs):
                job_queue.put(None)

        # While any executors are running, process executor events
        while executors:
            try:
                # Try to get an event from the communications queue
                try:
                    event = comm_queue.get(True, 0.1)
                except Empty:
                    # timeout occurred, create null event to pass through checks
                    event = ExecutorEvent(None, None, None, None)

                if event.event_type == 'job_started':
                    package_count += 1
                    running_jobs[
                        event.package]['package_number'] = package_count
                    running_jobs[event.package]['start_time'] = time.time()
                    out.job_started(event.package)

                if event.event_type == 'command_started':
                    out.command_started(event.package, event.data['cmd'],
                                        event.data['location'])

                if event.event_type == 'command_log':
                    out.command_log(event.package, event.data['message'])

                if event.event_type == 'command_failed':
                    out.command_failed(event.package, event.data['cmd'],
                                       event.data['location'],
                                       event.data['retcode'])
                    # Add to list of errors
                    errors.append(event)
                    # Remove the command from the running jobs
                    del running_jobs[event.package]
                    # If it hasn't already been done, stop the executors
                    set_error_state()

                if event.event_type == 'command_finished':
                    out.command_finished(event.package, event.data['cmd'],
                                         event.data['location'],
                                         event.data['retcode'])

                if event.event_type == 'job_finished':
                    completed_packages.append(event.package)
                    run_time = format_time_delta(
                        time.time() -
                        running_jobs[event.package]['start_time'])
                    out.job_finished(event.package, run_time)
                    del running_jobs[event.package]
                    # If shutting down, do not add new packages
                    if error_state[0]:
                        continue
                    # Calculate new packages
                    if not no_status:
                        wide_log('[build] Calculating new jobs...', end='\r')
                        sys.stdout.flush()
                    ready_packages = get_ready_packages(
                        packages_to_be_built, running_jobs, completed_packages)
                    running_jobs = queue_ready_packages(
                        ready_packages, running_jobs, job_queue, context,
                        force_cmake)
                    # Make sure there are jobs to be/being processed, otherwise kill the executors
                    if not running_jobs:
                        # Kill the executors by sending a None to the job queue for each of them
                        for x in range(jobs):
                            job_queue.put(None)

                # If an executor exit event, join it and remove it from the executors list
                if event.event_type == 'exit':
                    # If an executor has an exception, set the error state
                    if event.data['reason'] == 'exception':
                        set_error_state()
                        errors.append(event)
                    # Join and remove it
                    executors[event.executor_id].join()
                    del executors[event.executor_id]

                if not no_status:
                    # Update the status bar on the screen
                    executing_jobs = []
                    for name, value in running_jobs.items():
                        number, job, start_time = value[
                            'package_number'], value['job'], value[
                                'start_time']
                        if number is None or start_time is None:
                            continue
                        executing_jobs.append({
                            'number':
                            number,
                            'name':
                            name,
                            'run_time':
                            format_time_delta_short(time.time() - start_time)
                        })
                    msg = clr("[build - {run_time}] ").format(
                        run_time=format_time_delta_short(time.time() - start))
                    # If errors post those
                    if errors:
                        for error in errors:
                            msg += clr("[!{package}] ").format(
                                package=error.package)
                    # Print them in order of started number
                    for job_msg_args in sorted(
                            executing_jobs, key=lambda args: args['number']):
                        msg += clr("[{name} - {run_time}] ").format(
                            **job_msg_args)
                    msg_rhs = clr(
                        "[{0}/{1} Active | {2}/{3} Completed]").format(
                            len(executing_jobs), len(executors),
                            len(packages) if no_deps else
                            len(completed_packages), total_packages)
                    # Update title bar
                    sys.stdout.write("\x1b]2;[build] {0}/{1}\x07".format(
                        len(packages) if no_deps else len(completed_packages),
                        total_packages))
                    # Update status bar
                    wide_log(msg, rhs=msg_rhs, end='\r')
                    sys.stdout.flush()
            except KeyboardInterrupt:
                wide_log("[build] User interrupted, stopping.")
                set_error_state()
        # All executors have shutdown
        sys.stdout.write("\x1b]2;\x07")
        if not errors:
            if context.isolate_devel:
                if not context.install:
                    _create_unmerged_devel_setup(context)
                else:
                    _create_unmerged_devel_setup_for_install(context)
            wide_log("[build] Finished.")
            if not no_notify:
                notify("Build Finished",
                       "{0} packages built".format(total_packages))
            return 0
        else:
            wide_log(clr("[build] There were @!@{rf}errors@|:"))
            if not no_notify:
                notify("Build Failed",
                       "there were {0} errors".format(len(errors)))
            for error in errors:
                if error.event_type == 'exit':
                    wide_log(
                        """Executor '{exec_id}' had an unhandle exception while processing package '{package}':

    {data[exc]}
    """.format(exec_id=error.executor_id + 1, **error.__dict__))
                else:
                    wide_log(
                        clr("""
    @{rf}Failed@| to build package '@{cf}{package}@|' because the following command:

        @!@{kf}# Command run in directory: @|{location}
        {cmd.cmd_str}

    @{rf}Exited@| with return code: @!{retcode}@|""").format(
                            package=error.package, **error.data))
            sys.exit(1)
    finally:
        # Ensure executors go down
        for x in range(jobs):
            job_queue.put(None)
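
FakeLock is imported from elsewhere in catkin_tools and is not shown in these examples. A minimal sketch, assuming executors use it like a threading.Lock (directly or as a context manager) when lock_install is disabled:

class FakeLock(object):
    """No-op drop-in for threading.Lock used when install locking is off."""

    def acquire(self, blocking=True):
        # Always succeeds immediately; nothing is actually locked
        return True

    def release(self):
        pass

    def __enter__(self):
        self.acquire()
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        self.release()
        return False
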
Example #36
def build_isolated_workspace(
    context,
    packages=None,
    start_with=None,
    no_deps=False,
    jobs=None,
    force_cmake=False,
    force_color=False,
    quiet=False,
    interleave_output=False,
    no_status=False,
    lock_install=False,
):
    """Builds a catkin workspace in isolation

    This function will find all of the packages in the source space, start some
    executors, feed them packages to build based on dependencies and topological
    ordering, and then monitor the output of the executors, handling logging of
    the builds, starting builds, failing builds, and finishing builds of
    packages, and handling the shutdown of the executors when appropriate.

    :param context: context in which to build the catkin workspace
    :type context: :py:class:`catkin_tools.verbs.catkin_build.context.Context`
    :param packages: list of packages to build, by default their dependencies will also be built
    :type packages: list
    :param start_with: package to start with, skipping all packages which precede it in the topological order
    :type start_with: str
    :param no_deps: If True, the dependencies of packages will not be built first
    :type no_deps: bool
    :param jobs: number of parallel package build jobs
    :type jobs: int
    :param force_cmake: forces invocation of CMake if True, default is False
    :type force_cmake: bool
    :param force_color: forces colored output even if terminal does not support it
    :type force_color: bool
    :param quiet: suppresses the output of commands unless there is an error
    :type quiet: bool
    :param interleave_output: prints the output of commands as they are received
    :type interleave_output: bool
    :param no_status: disables status bar
    :type no_status: bool
    :param lock_install: causes executors to synchronize on access of install commands
    :type lock_install: bool

    :raises: SystemExit if buildspace is a file or no packages were found in the source space
        or if the provided options are invalid
    """
    # If no_deps is given, ensure packages to build are provided
    if no_deps and packages is None:
        sys.exit("With --no-deps, you must specify packages to build.")
    # Make sure there is a build folder and it is not a file
    if os.path.exists(context.build_space):
        if os.path.isfile(context.build_space):
            sys.exit("Build space '{0}' exists but is a file and not a folder.".format(context.build_space))
    # If it doesn't exist, create it
    else:
        log("Creating buildspace directory, '{0}'".format(context.build_space))
        os.makedirs(context.build_space)

    # Summarize the context
    log(context.summary())

    # Find list of packages in the workspace
    packages_to_be_built, packages_to_be_built_deps = determine_packages_to_be_built(packages, context)
    completed_packages = []
    if no_deps:
        # Consider deps as "completed" (completed_packages holds package names)
        completed_packages.extend([pkg.name for _, pkg in packages_to_be_built_deps])
    else:
        # Extend packages to be built to include their deps
        packages_to_be_built.extend(packages_to_be_built_deps)
    # Also re-sort
    packages_to_be_built = topological_order_packages(dict(packages_to_be_built))
    max_package_name_length = max([len(pkg.name) for pth, pkg in packages_to_be_built])

    # Setup pool of executors
    executors = {}
    # The communication queue can have ExecutorEvent's or str's passed into it from the executors
    comm_queue = Queue()
    # The job queue has Jobs put into it
    job_queue = Queue()
    # Lock for install space
    install_lock = Lock() if lock_install else FakeLock()
    # Determine the number of executors
    try:
        if jobs:
            jobs = int(jobs)
            if jobs < 1:
                sys.exit("Specified number of jobs '{0}' is not positive.".format(jobs))
    except ValueError:
        sys.exit("Specified number of jobs '{0}' is no integer.".format(jobs))
    try:
        jobs = cpu_count() if jobs is None else jobs
    except NotImplementedError:
        log("Failed to determine the cpu_count, falling back to 1 jobs as the default.")
        jobs = 1 if jobs is None else jobs
    # If only one set of jobs, turn on interleaving to get more responsive feedback
    if jobs == 1:
        # TODO: make the system more intelligent so that it can automatically switch to streaming output
        #       when only one job is building, even if multiple jobs could be building
        quiet = False
        interleave_output = True
    # Start the executors
    for x in range(jobs):
        e = Executor(x, context, comm_queue, job_queue, install_lock)
        executors[x] = e
        e.start()

    # Variables for tracking running jobs and built/building packages
    start = time.time()
    total_packages = len(packages_to_be_built)
    package_count = 0
    running_jobs = {}
    log_dir = os.path.join(context.build_space, "build_logs")
    color = True
    if not force_color and not is_tty(sys.stdout):
        color = False
    out = OutputController(log_dir, quiet, interleave_output, color, max_package_name_length, prefix_output=(jobs > 1))
    if no_status:
        disable_wide_log()

    # Prime the job_queue
    ready_packages = []
    if start_with is None:
        ready_packages = get_ready_packages(packages_to_be_built, running_jobs, completed_packages)
    while start_with is not None:
        ready_packages.extend(get_ready_packages(packages_to_be_built, running_jobs, completed_packages))
        while ready_packages:
            pth, pkg = ready_packages.pop(0)
            if pkg.name != start_with:
                completed_packages.append(pkg.name)
                package_count += 1
                wide_log("[build] Skipping package '{0}'".format(pkg.name))
            else:
                ready_packages.insert(0, (pth, pkg))
                start_with = None
                break
    running_jobs = queue_ready_packages(ready_packages, running_jobs, job_queue, context, force_cmake)
    assert running_jobs

    # error_state is shared with the nested function below, so store it in a
    # mutable container; rebinding a plain local inside set_error_state would
    # not propagate to this scope
    error_state = [False]
    errors = []

    def set_error_state():
        if error_state[0]:
            return
        # Set the error state to prevent new jobs
        error_state[0] = True
        # Empty the job queue
        while not job_queue.empty():
            job_queue.get()
        # Kill the executors by sending a None to the job queue for each of them
        for x in range(jobs):
            job_queue.put(None)

    # While any executors are running, process executor events
    while executors:
        try:
            # Try to get an event from the communications queue
            try:
                event = comm_queue.get(True, 0.1)
            except Empty:
                # timeout occurred, create null event to pass through checks
                event = ExecutorEvent(None, None, None, None)

            if event.event_type == "job_started":
                package_count += 1
                running_jobs[event.package]["package_number"] = package_count
                running_jobs[event.package]["start_time"] = time.time()
                out.job_started(event.package)

            if event.event_type == "command_started":
                out.command_started(event.package, event.data["cmd"], event.data["location"])

            if event.event_type == "command_log":
                out.command_log(event.package, event.data["message"])

            if event.event_type == "command_failed":
                out.command_failed(event.package, event.data["cmd"], event.data["location"], event.data["retcode"])
                # Add to list of errors
                errors.append(event)
                # Remove the command from the running jobs
                del running_jobs[event.package]
                # If it hasn't already been done, stop the executors
                set_error_state()

            if event.event_type == "command_finished":
                out.command_finished(event.package, event.data["cmd"], event.data["location"], event.data["retcode"])

            if event.event_type == "job_finished":
                completed_packages.append(event.package)
                run_time = format_time_delta(time.time() - running_jobs[event.package]["start_time"])
                out.job_finished(event.package, run_time)
                del running_jobs[event.package]
                # If shutting down, do not add new packages
                if error_state[0]:
                    continue
                # Calculate new packages
                if not no_status:
                    wide_log("[build] Calculating new jobs...", end="\r")
                    sys.stdout.flush()
                ready_packages = get_ready_packages(packages_to_be_built, running_jobs, completed_packages)
                running_jobs = queue_ready_packages(ready_packages, running_jobs, job_queue, context, force_cmake)
                # Make sure there are jobs to be/being processed, otherwise kill the executors
                if not running_jobs:
                    # Kill the executors by sending a None to the job queue for each of them
                    for x in range(jobs):
                        job_queue.put(None)

            # If an executor exit event, join it and remove it from the executors list
            if event.event_type == "exit":
                # If an executor has an exception, set the error state
                if event.data["reason"] == "exception":
                    set_error_state()
                    errors.append(event)
                # Join and remove it
                executors[event.executor_id].join()
                del executors[event.executor_id]

            if not no_status:
                # Update the status bar on the screen
                executing_jobs = []
                for name, value in running_jobs.items():
                    number, job, start_time = value["package_number"], value["job"], value["start_time"]
                    if number is None or start_time is None:
                        continue
                    executing_jobs.append(
                        {"number": number, "name": name, "run_time": format_time_delta_short(time.time() - start_time)}
                    )
                msg = clr("[build - {run_time}] ").format(run_time=format_time_delta_short(time.time() - start))
                # If errors post those
                if errors:
                    for error in errors:
                        msg += clr("[!{package}] ").format(package=error.package)
                # Print them in order of started number
                for job_msg_args in sorted(executing_jobs, key=lambda args: args["number"]):
                    msg += clr("[{name} - {run_time}] ").format(**job_msg_args)
                msg_rhs = clr("[{0}/{1} Active | {2}/{3} Completed]").format(
                    len(executing_jobs),
                    len(executors),
                    len(packages) if no_deps else len(completed_packages),
                    total_packages,
                )
                # Update title bar
                sys.stdout.write(
                    "\x1b]2;[build] {0}/{1}\x07".format(
                        len(packages) if no_deps else len(completed_packages), total_packages
                    )
                )
                # Update status bar
                wide_log(msg, rhs=msg_rhs, end="\r")
                sys.stdout.flush()
        except KeyboardInterrupt:
            wide_log("[build] User interrupted, stopping.")
            set_error_state()
    # All executors have shutdown
    sys.stdout.write("\x1b]2;\x07")
    if not errors:
        if context.isolate_devel:
            if not context.install:
                _create_unmerged_devel_setup(context)
            else:
                _create_unmerged_devel_setup_for_install(context)
        wide_log("[build] Finished.")
        notify("Build Finished", "{0} packages built".format(total_packages))
        return 0
    else:
        wide_log(clr("[build] There were @!@{rf}errors@|:"))
        notify("Build Failed", "there were {0} errors".format(len(errors)))
        for error in errors:
            if error.event_type == "exit":
                wide_log(
                    """Executor '{exec_id}' had an unhandle exception while processing package '{package}':

{data[exc]}
""".format(
                        exec_id=error.executor_id + 1, **error.__dict__
                    )
                )
            else:
                wide_log(
                    clr(
                        """
@{rf}Failed@| to build package '@{cf}{package}@|' because the following command:

    @!@{kf}# Command run in directory: @|{location}
    {cmd.cmd_str}

@{rf}Exited@| with return code: @!{retcode}@|"""
                    ).format(package=error.package, **error.data)
                )
        sys.exit(1)
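
get_ready_packages is likewise defined elsewhere; the scheduling loops above only imply its contract. A sketch under that assumption, with the signature inferred from the call sites: a package is ready once it is neither running nor completed and every in-workspace build dependency has completed.

def get_ready_packages(packages_to_be_built, running_jobs, completed_packages):
    # packages_to_be_built: topologically ordered list of (path, package)
    # running_jobs: dict keyed by package name
    # completed_packages: list of completed package names
    workspace_names = set(pkg.name for _, pkg in packages_to_be_built)
    ready = []
    for path, pkg in packages_to_be_built:
        if pkg.name in running_jobs or pkg.name in completed_packages:
            continue
        dep_names = [d.name for d in (pkg.build_depends + pkg.buildtool_depends)]
        # Dependencies outside the workspace are assumed to be satisfied
        if all(d not in workspace_names or d in completed_packages
               for d in dep_names):
            ready.append((path, pkg))
    return ready
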
Example #37
def build_isolated_workspace(
    context,
    packages=None,
    start_with=None,
    no_deps=False,
    jobs=None,
    force_cmake=False,
    force_color=False,
    quiet=False,
    interleave_output=False,
    no_status=False,
    lock_install=False,
    no_notify=False
):
    """Builds a catkin workspace in isolation

    This function will find all of the packages in the source space, start some
    executors, feed them packages to build based on dependencies and topological
    ordering, and then monitor the output of the executors, handling logging of
    the builds, starting builds, failing builds, and finishing builds of
    packages, and handling the shutdown of the executors when appropriate.

    :param context: context in which to build the catkin workspace
    :type context: :py:class:`catkin_tools.verbs.catkin_build.context.Context`
    :param packages: list of packages to build, by default their dependencies will also be built
    :type packages: list
    :param start_with: package to start with, skipping all packages which precede it in the topological order
    :type start_with: str
    :param no_deps: If True, the dependencies of packages will not be built first
    :type no_deps: bool
    :param jobs: number of parallel package build jobs
    :type jobs: int
    :param force_cmake: forces invocation of CMake if True, default is False
    :type force_cmake: bool
    :param force_color: forces colored output even if terminal does not support it
    :type force_color: bool
    :param quiet: suppresses the output of commands unless there is an error
    :type quiet: bool
    :param interleave_output: prints the output of commands as they are received
    :type interleave_output: bool
    :param no_status: disables status bar
    :type no_status: bool
    :param lock_install: causes executors to synchronize on access of install commands
    :type lock_install: bool
    :param no_notify: suppresses system notifications
    :type no_notify: bool

    :raises: SystemExit if buildspace is a file or no packages were found in the source space
        or if the provided options are invalid
    """
    # If no_deps is given, ensure packages to build are provided
    if no_deps and packages is None:
        sys.exit("With --no-deps, you must specify packages to build.")
    # Make sure there is a build folder and it is not a file
    if os.path.exists(context.build_space_abs):
        if os.path.isfile(context.build_space_abs):
            sys.exit(clr(
                "@{rf}Error:@| Build space '{0}' exists but is a file and not a folder."
                .format(context.build_space_abs)))
    # If it doesn't exist, create it
    else:
        log("Creating build space directory, '{0}'".format(context.build_space_abs))
        os.makedirs(context.build_space_abs)

    # Check for catkin_make droppings
    if context.corrupted_by_catkin_make():
        sys.exit(
            clr("@{rf}Error:@| Build space `{0}` exists but appears to have previously been "
                "created by the `catkin_make` or `catkin_make_isolated` tool. "
                "Please choose a different directory to use with `catkin build` "
                "or clean the build space.".format(context.build_space_abs)))

    # Declare a buildspace marker describing the build config for error checking
    buildspace_marker_data = {
        'workspace': context.workspace,
        'profile': context.profile,
        'install': context.install,
        'install_space': context.install_space_abs,
        'devel_space': context.devel_space_abs,
        'source_space': context.source_space_abs}

    # Check build config
    if os.path.exists(os.path.join(context.build_space_abs, BUILDSPACE_MARKER_FILE)):
        with open(os.path.join(context.build_space_abs, BUILDSPACE_MARKER_FILE)) as buildspace_marker_file:
            existing_buildspace_marker_data = yaml.safe_load(buildspace_marker_file)
            misconfig_lines = ''
            for (k, v) in existing_buildspace_marker_data.items():
                new_v = buildspace_marker_data.get(k, None)
                if new_v != v:
                    misconfig_lines += (
                        '\n - %s: %s (stored) is not %s (commanded)' %
                        (k, v, new_v))
            if len(misconfig_lines) > 0:
                sys.exit(clr(
                    "\n@{rf}Error:@| Attempting to build a catkin workspace using build space: "
                    "\"%s\" but that build space's most recent configuration "
                    "differs from the commanded one in ways which will cause "
                    "problems. Fix the following options or use @{yf}`catkin "
                    "clean -b`@| to remove the build space: %s" %
                    (context.build_space_abs, misconfig_lines)))

    # Write the current build config for config error checking
    with open(os.path.join(context.build_space_abs, BUILDSPACE_MARKER_FILE), 'w') as buildspace_marker_file:
        buildspace_marker_file.write(yaml.dump(buildspace_marker_data, default_flow_style=False))

    # Summarize the context
    summary_notes = []
    if force_cmake:
        summary_notes += [clr("@!@{cf}NOTE:@| Forcing CMake to run for each package.")]
    log(context.summary(summary_notes))

    # Find list of packages in the workspace
    packages_to_be_built, packages_to_be_built_deps, all_packages = determine_packages_to_be_built(packages, context)
    completed_packages = []
    if no_deps:
        # Consider deps as "completed" (completed_packages holds package names)
        completed_packages.extend([pkg.name for _, pkg in packages_to_be_built_deps])
    else:
        # Extend packages to be built to include their deps
        packages_to_be_built.extend(packages_to_be_built_deps)
    # Also re-sort
    packages_to_be_built = topological_order_packages(dict(packages_to_be_built))
    max_package_name_length = max([len(pkg.name) for pth, pkg in packages_to_be_built])
    # Assert start_with package is in the workspace
    verify_start_with_option(start_with, packages, all_packages, packages_to_be_built + packages_to_be_built_deps)

    # Setup pool of executors
    executors = {}
    # The communication queue can have ExecutorEvent's or str's passed into it from the executors
    comm_queue = Queue()
    # The job queue has Jobs put into it
    job_queue = Queue()
    # Lock for install space
    install_lock = Lock() if lock_install else FakeLock()
    # Determine the number of executors
    try:
        if jobs:
            jobs = int(jobs)
            if jobs < 1:
                sys.exit("Specified number of jobs '{0}' is not positive.".format(jobs))
    except ValueError:
        sys.exit("Specified number of jobs '{0}' is no integer.".format(jobs))
    try:
        jobs = cpu_count() if jobs is None else jobs
    except NotImplementedError:
        log('Failed to determine the cpu_count, falling back to 1 job as the default.')
        jobs = 1 if jobs is None else jobs
    # If only one set of jobs, turn on interleaving to get more responsive feedback
    if jobs == 1:
        # TODO: make the system more intelligent so that it can automatically switch to streaming output
        #       when only one job is building, even if multiple jobs could be building
        quiet = False
        interleave_output = True
    # Start the executors
    for x in range(jobs):
        e = Executor(x, context, comm_queue, job_queue, install_lock)
        executors[x] = e
        e.start()

    # Variables for tracking running jobs and built/building packages
    start = time.time()
    total_packages = len(packages_to_be_built)
    package_count = 0
    running_jobs = {}
    log_dir = os.path.join(context.build_space_abs, 'build_logs')
    color = True
    if not force_color and not is_tty(sys.stdout):
        color = False
    out = OutputController(log_dir, quiet, interleave_output, color, max_package_name_length, prefix_output=(jobs > 1))
    if no_status:
        disable_wide_log()

    # Prime the job_queue
    ready_packages = []
    if start_with is None:
        ready_packages = get_ready_packages(packages_to_be_built, running_jobs, completed_packages)
    while start_with is not None:
        ready_packages.extend(get_ready_packages(packages_to_be_built, running_jobs, completed_packages))
        while ready_packages:
            pth, pkg = ready_packages.pop(0)
            if pkg.name != start_with:
                completed_packages.append(pkg.name)
                package_count += 1
                wide_log("[build] Skipping package '{0}'".format(pkg.name))
            else:
                ready_packages.insert(0, (pth, pkg))
                start_with = None
                break
    running_jobs = queue_ready_packages(ready_packages, running_jobs, job_queue, context, force_cmake)
    assert running_jobs

    # error_state is shared with the nested function below, so store it in a
    # mutable container; rebinding a plain local inside set_error_state would
    # not propagate to this scope
    error_state = [False]
    errors = []

    def set_error_state():
        if error_state[0]:
            return
        # Set the error state to prevent new jobs
        error_state[0] = True
        # Empty the job queue
        while not job_queue.empty():
            job_queue.get()
        # Kill the executors by sending a None to the job queue for each of them
        for x in range(jobs):
            job_queue.put(None)

    # While any executors are running, process executor events
    while executors:
        try:
            # Try to get an event from the communications queue
            try:
                event = comm_queue.get(True, 0.1)
            except Empty:
                # timeout occurred, create null event to pass through checks
                event = ExecutorEvent(None, None, None, None)

            if event.event_type == 'job_started':
                package_count += 1
                running_jobs[event.package]['package_number'] = package_count
                running_jobs[event.package]['start_time'] = time.time()
                out.job_started(event.package)

            if event.event_type == 'command_started':
                out.command_started(event.package, event.data['cmd'], event.data['location'])

            if event.event_type == 'command_log':
                out.command_log(event.package, event.data['message'])

            if event.event_type == 'command_failed':
                out.command_failed(event.package, event.data['cmd'], event.data['location'], event.data['retcode'])
                # Add to list of errors
                errors.append(event)
                # Remove the command from the running jobs
                del running_jobs[event.package]
                # If it hasn't already been done, stop the executors
                set_error_state()

            if event.event_type == 'command_finished':
                out.command_finished(event.package, event.data['cmd'], event.data['location'], event.data['retcode'])

            if event.event_type == 'job_finished':
                completed_packages.append(event.package)
                run_time = format_time_delta(time.time() - running_jobs[event.package]['start_time'])
                out.job_finished(event.package, run_time)
                del running_jobs[event.package]
                # If shutting down, do not add new packages
                if error_state[0]:
                    continue
                # Calculate new packages
                if not no_status:
                    wide_log('[build] Calculating new jobs...', end='\r')
                    sys.stdout.flush()
                ready_packages = get_ready_packages(packages_to_be_built, running_jobs, completed_packages)
                running_jobs = queue_ready_packages(ready_packages, running_jobs, job_queue, context, force_cmake)
                # Make sure there are jobs to be/being processed, otherwise kill the executors
                if not running_jobs:
                    # Kill the executors by sending a None to the job queue for each of them
                    for x in range(jobs):
                        job_queue.put(None)

            # If an executor exit event, join it and remove it from the executors list
            if event.event_type == 'exit':
                # If an executor has an exception, set the error state
                if event.data['reason'] == 'exception':
                    set_error_state()
                    errors.append(event)
                # Join and remove it
                executors[event.executor_id].join()
                del executors[event.executor_id]

            if not no_status:
                # Update the status bar on the screen
                executing_jobs = []
                for name, value in running_jobs.items():
                    number, start_time = value['package_number'], value['start_time']
                    if number is None or start_time is None:
                        continue
                    executing_jobs.append({
                        'number': number,
                        'name': name,
                        'run_time': format_time_delta_short(time.time() - start_time)
                    })
                msg = clr("[build - {run_time}] ").format(run_time=format_time_delta_short(time.time() - start))
                # If errors post those
                if errors:
                    for error in errors:
                        msg += clr("[!{package}] ").format(package=error.package)
                # Print them in order of started number
                for job_msg_args in sorted(executing_jobs, key=lambda args: args['number']):
                    msg += clr("[{name} - {run_time}] ").format(**job_msg_args)
                msg_rhs = clr("[{0}/{1} Active | {2}/{3} Completed]").format(
                    len(executing_jobs),
                    len(executors),
                    len(packages) if no_deps else len(completed_packages),
                    total_packages
                )
                # Update title bar
                sys.stdout.write("\x1b]2;[build] {0}/{1}\x07".format(
                    len(packages) if no_deps else len(completed_packages),
                    total_packages
                ))
                # Update status bar
                wide_log(msg, rhs=msg_rhs, end='\r')
                sys.stdout.flush()
        except KeyboardInterrupt:
            wide_log("[build] User interrupted, stopping.")
            set_error_state()
    # All executors have shutdown
    sys.stdout.write("\x1b]2;\x07")
    if not errors:
        if context.isolate_devel:
            if not context.install:
                _create_unmerged_devel_setup(context)
            else:
                _create_unmerged_devel_setup_for_install(context)
        wide_log("[build] Finished.")
        if not no_notify:
            notify("Build Finished", "{0} packages built".format(total_packages))
        return 0
    else:
        wide_log(clr("[build] There were @!@{rf}errors@|:"))
        if not no_notify:
            notify("Build Failed", "there were {0} errors".format(len(errors)))
        for error in errors:
            if error.event_type == 'exit':
                wide_log("""Executor '{exec_id}' had an unhandle exception while processing package '{package}':

{data[exc]}
""".format(exec_id=error.executor_id + 1, **error.__dict__))
            else:
                wide_log(clr("""
@{rf}Failed@| to build package '@{cf}{package}@|' because the following command:

    @!@{kf}# Command run in directory: @|{location}
    {cmd.cmd_str}

@{rf}Exited@| with return code: @!{retcode}@|""").format(package=error.package, **error.data))
        sys.exit(1)
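
The event loops above construct a null event as ExecutorEvent(None, None, None, None) and read event.event_type, event.package, event.data, and event.executor_id. A minimal sketch of such a record; the field order is an assumption inferred from those reads, not the actual catkin_tools class:

class ExecutorEvent(object):
    # Message record that executors put on comm_queue; a null event
    # (all fields None) falls through every event_type check above.
    def __init__(self, executor_id, event_type, data, package):
        self.executor_id = executor_id  # index of the originating executor
        self.event_type = event_type    # e.g. 'job_started', 'command_failed', 'exit'
        self.data = data                # dict payload, e.g. {'cmd': ..., 'retcode': ...}
        self.package = package          # name of the package the event concerns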