Example #1
def get_ready_packages(packages, running_jobs, completed):
    """Returns packages which have no pending depends and are ready to be built

    Iterates through the packages, checking whether any package that is
    not currently in running_jobs and not yet in completed has all of its
    build and buildtool depends met, and is therefore ready to be queued
    up and built.

    :param packages: topologically ordered packages in the workspace
    :type packages: dict
    :param running_jobs: currently running jobs which are building packages
    :type running_jobs: dict
    :param completed: list of packages in the workspace which have been built
    :type completed: list
    :returns: list of package_path, package tuples which should be built
    :rtype: list
    """
    ready_packages = []
    workspace_packages = [(path, pkg) for path, pkg in packages]
    for path, package in packages:
        if package.name in (list(running_jobs.keys()) + completed):
            continue
        # Collect build and buildtool depends, plus recursive build, buildtool, and run depends,
        # excluding depends which are not in the workspace or which are already completed
        uncompleted_depends = []
        depends = get_cached_recursive_build_depends_in_workspace(package, workspace_packages)
        for dep_pth, dep in depends:
            if dep.name not in completed:
                uncompleted_depends.append(dep)
        # If there are no uncompleted dependencies, add this package to the queue
        if not uncompleted_depends:
            ready_packages.append((path, package))
    # Return the new ready_packages
    return ready_packages
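
As a rough sketch of how this might be driven from a scheduling loop (ordered_packages, start_build_job, and the bookkeeping variables below are illustrative assumptions, not part of the example above):

# Hypothetical scheduler loop around get_ready_packages. Assumes
# ordered_packages is the topologically ordered list of (path, package)
# tuples and start_build_job launches a build job for a package.
running_jobs = {}   # package name -> job handle
completed = []      # names of packages whose builds have finished

for path, package in get_ready_packages(ordered_packages, running_jobs, completed):
    running_jobs[package.name] = start_build_job(path, package)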
Example #2
def get_env_loaders(package, context):
    """Get a list of env loaders required to build this package."""

    sources = []
    # If installing to isolated install spaces, or not installing but using isolated devel spaces
    if (context.install
            and context.isolate_install) or (not context.install
                                             and context.isolate_devel):
        # Source each package's install or devel space
        space = context.install_space_abs if context.install else context.devel_space_abs
        # Get the recursive dependencies
        depends = get_cached_recursive_build_depends_in_workspace(
            package, context.packages)
        # For each dep add a line to source its setup file
        for dep_pth, dep in depends:
            source_path = os.path.join(space, dep.name, 'env.sh')
            sources.append(source_path)
    else:
        # Get the actual destination of this package
        if context.link_devel and not context.install:
            source_path = os.path.join(context.package_final_path(package),
                                       'env.sh')
        else:
            source_path = os.path.join(context.package_dest_path(package),
                                       'env.sh')
        sources = [source_path]

    return sources
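
A minimal usage sketch, assuming context is a configured catkin_tools Context and my_package is a parsed catkin package object (both names are assumptions here):

# Turn the env loader paths into shell 'source' lines, e.g. for a
# generated build script (sketch only):
for source_path in get_env_loaders(my_package, context):
    print('. "{0}"'.format(source_path))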
Example #3
def get_ready_packages(packages, running_jobs, completed):
    """Returns packages which have no pending depends and are ready to be built

    Iterates through the packages, checking whether any package that is
    not currently in running_jobs and not yet in completed has all of its
    build and buildtool depends met, and is therefore ready to be queued
    up and built.

    :param packages: topologically ordered packages in the workspace
    :type packages: dict
    :param running_jobs: currently running jobs which are building packages
    :type running_jobs: dict
    :param completed: list of packages in the workspace which have been built
    :type completed: list
    :returns: list of package_path, package tuples which should be built
    :rtype: list
    """
    ready_packages = []
    workspace_packages = [(path, pkg) for path, pkg in packages]
    for path, package in packages:
        if package.name in (list(running_jobs.keys()) + completed):
            continue
        # Collect build and buildtool depends, plus recursive build, buildtool, and run depends,
        # excluding depends which are not in the workspace or which are already completed
        uncompleted_depends = []
        depends = get_cached_recursive_build_depends_in_workspace(
            package, workspace_packages)
        for dep_pth, dep in depends:
            if dep.name not in completed:
                uncompleted_depends.append(dep)
        # If there are no uncompleted dependencies, add this package to the queue
        if not uncompleted_depends:
            ready_packages.append((path, package))
    # Return the new ready_packages
    return ready_packages
Example #4
def create_env_file(package, context):
    """Creates a shell environment file for building a package and returns its path."""
    sources = []
    source_snippet = '. "{source_path}"'
    # If installing to isolated install spaces, or not installing but using isolated devel spaces
    if (context.install
            and context.isolate_install) or (not context.install
                                             and context.isolate_devel):
        # Source each package's install or devel space
        space = context.install_space_abs if context.install else context.devel_space_abs
        # Get the recursive dependencies
        depends = get_cached_recursive_build_depends_in_workspace(
            package, context.packages)
        # For each dep add a line to source its setup file
        for dep_pth, dep in depends:
            source_path = os.path.join(space, dep.name, 'setup.sh')
            sources.append(source_snippet.format(source_path=source_path))
    else:
        # Just source common install or devel space
        source_path = os.path.join(
            context.install_space_abs
            if context.install else context.devel_space_abs, 'setup.sh')
        sources = [source_snippet.format(
            source_path=source_path)] if os.path.exists(source_path) else []
    # Build the env_file
    env_file_path = os.path.abspath(
        os.path.join(context.build_space_abs, package.name, 'build_env.sh'))
    generate_env_file(sources, env_file_path)
    return env_file_path
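
A usage sketch under the same assumptions (a parsed package object and a configured Context). The function returns the path of the generated script:

# The generated build_env.sh is typically used to wrap the actual build
# command so it runs with the correct environment (sketch only):
env_file = create_env_file(package, context)
print("Build environment script written to", env_file)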
Example #5
def determine_packages_to_be_built(packages, context):
    """Returns list of packages which should be built, and those package's deps.

    :param packages: list of packages to be built, if None all packages are built
    :type packages: list
    :param context: Workspace context
    :type context: :py:class:`catkin_tools.verbs.catkin_build.context.Context`
    :returns: tuple of packages to be built and those packages' deps
    :rtype: tuple
    """
    start = time.time()
    workspace_packages = find_packages(context.source_space_abs,
                                       exclude_subspaces=True)
    # If there are no packages raise
    if not workspace_packages:
        sys.exit("No packages were found in the source space '{0}'".format(
            context.source_space_abs))
    log("Found '{0}' packages in {1}.".format(
        len(workspace_packages), format_time_delta(time.time() - start)))

    # Order the packages by topology
    ordered_packages = topological_order_packages(workspace_packages)
    # Set the packages in the workspace for the context
    context.packages = ordered_packages
    # Determine the packages which should be built
    packages_to_be_built = []
    packages_to_be_built_deps = []
    if packages:
        # First assert all of the packages given are in the workspace
        workspace_package_names = dict([(pkg.name, (path, pkg))
                                        for path, pkg in ordered_packages])
        for package in packages:
            if package not in workspace_package_names:
                sys.exit("Given package '{0}' is not in the workspace".format(
                    package))
            # If metapackage, include run depends which are in the workspace
            package_obj = workspace_package_names[package][1]
            if 'metapackage' in [e.tagname for e in package_obj.exports]:
                for rdep in package_obj.run_depends:
                    if rdep.name in workspace_package_names:
                        packages.append(rdep.name)
        # Limit the packages to be built to just the provided packages
        for pkg_path, package in ordered_packages:
            if package.name in packages:
                packages_to_be_built.append((pkg_path, package))
                # Get the recursive dependencies for each of these packages
                pkg_deps = get_cached_recursive_build_depends_in_workspace(
                    package, ordered_packages)
                packages_to_be_built_deps.extend(pkg_deps)
    else:
        packages_to_be_built = ordered_packages
    return packages_to_be_built, packages_to_be_built_deps, ordered_packages
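
A sketch of the intended call pattern, mirroring how build_isolated_workspace (Example #9) consumes the result; 'my_pkg' is a placeholder package name:

# Determine what to build for one package, then fold in its deps and
# re-sort topologically, as the build verb does:
to_build, to_build_deps, all_ordered = determine_packages_to_be_built(['my_pkg'], context)
to_build.extend(to_build_deps)
to_build = topological_order_packages(dict(to_build))
for pkg_path, pkg in to_build:
    print('would build:', pkg.name)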
Example #6
def determine_packages_to_be_built(packages, context):
    """Returns list of packages which should be built, and those package's deps.

    :param packages: list of packages to be built, if None all packages are built
    :type packages: list
    :param context: Workspace context
    :type context: :py:class:`catkin_tools.verbs.catkin_build.context.Context`
    :returns: tuple of packages to be built and those packages' deps
    :rtype: tuple
    """
    start = time.time()
    workspace_packages = find_packages(context.source_space_abs, exclude_subspaces=True)
    # If there are no packages raise
    if not workspace_packages:
        sys.exit("No packages were found in the source space '{0}'".format(context.source_space_abs))
    log("Found '{0}' packages in {1}."
        .format(len(workspace_packages), format_time_delta(time.time() - start)))

    # Order the packages by topology
    ordered_packages = topological_order_packages(workspace_packages)
    # Set the packages in the workspace for the context
    context.packages = ordered_packages
    # Determine the packages which should be built
    packages_to_be_built = []
    packages_to_be_built_deps = []
    if packages:
        # First assert all of the packages given are in the workspace
        workspace_package_names = dict([(pkg.name, (path, pkg)) for path, pkg in ordered_packages])
        for package in packages:
            if package not in workspace_package_names:
                sys.exit("Given package '{0}' is not in the workspace".format(package))
            # If metapackage, include run depends which are in the workspace
            package_obj = workspace_package_names[package][1]
            if 'metapackage' in [e.tagname for e in package_obj.exports]:
                for rdep in package_obj.run_depends:
                    if rdep.name in workspace_package_names:
                        packages.append(rdep.name)
        # Limit the packages to be built to just the provided packages
        for pkg_path, package in ordered_packages:
            if package.name in packages:
                packages_to_be_built.append((pkg_path, package))
                # Get the recursive dependencies for each of these packages
                pkg_deps = get_cached_recursive_build_depends_in_workspace(package, ordered_packages)
                packages_to_be_built_deps.extend(pkg_deps)
    else:
        packages_to_be_built = ordered_packages
    return packages_to_be_built, packages_to_be_built_deps, ordered_packages
Example #7
def get_env_loaders(package, context):
    """Get a list of env loaders required to build this package."""

    sources = []
    # If installing to isolated install spaces, or not installing but using isolated devel spaces
    if (context.install and context.isolate_install) or (not context.install and context.isolate_devel):
        # Source each package's install or devel space
        space = context.install_space_abs if context.install else context.devel_space_abs
        # Get the recursive dependencies
        depends = get_cached_recursive_build_depends_in_workspace(package, context.packages)
        # For each dep add a line to source its setup file
        for dep_pth, dep in depends:
            source_path = os.path.join(space, dep.name, 'env.sh')
            sources.append(source_path)
    else:
        # Get the actual destination of this package
        source_path = os.path.join(context.package_dest_path(package), 'env.sh')
        sources = [source_path]

    return sources
Example #8
def create_env_file(package, context):
    """Creates a shell environment file for building a package and returns its path."""
    sources = []
    source_snippet = '. "{source_path}"'
    # If installing to isolated install spaces, or not installing but using isolated devel spaces
    if (context.install and context.isolate_install) or (not context.install and context.isolate_devel):
        # Source each package's install or devel space
        space = context.install_space_abs if context.install else context.devel_space_abs
        # Get the recursive dependencies
        depends = get_cached_recursive_build_depends_in_workspace(package, context.packages)
        # For each dep add a line to source its setup file
        for dep_pth, dep in depends:
            source_path = os.path.join(space, dep.name, 'setup.sh')
            sources.append(source_snippet.format(source_path=source_path))
    else:
        # Just source common install or devel space
        source_path = os.path.join(
            context.install_space_abs if context.install else context.devel_space_abs,
            'setup.sh')
        sources = [source_snippet.format(source_path=source_path)] if os.path.exists(source_path) else []
    # Build the env_file
    env_file_path = os.path.abspath(os.path.join(context.build_space_abs, package.name, 'build_env.sh'))
    generate_env_file(sources, env_file_path)
    return env_file_path
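
generate_env_file itself is not shown in these examples; a plausible minimal version (an assumption, not the actual catkin_tools implementation) would write a shell wrapper that loads the collected setup files and then execs the wrapped command:

def generate_env_file(sources, env_file_path):
    # Hypothetical sketch: write an sh wrapper that sources each collected
    # setup file and then runs whatever command it is given.
    with open(env_file_path, 'w') as f:
        f.write('#!/usr/bin/env sh\n')
        for line in sources:
            f.write(line + '\n')
        f.write('exec "$@"\n')
    os.chmod(env_file_path, 0o755)  # assumes os is already imported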
Example #9
def build_isolated_workspace(
    context,
    packages=None,
    start_with=None,
    no_deps=False,
    unbuilt=False,
    n_jobs=None,
    force_cmake=False,
    pre_clean=False,
    force_color=False,
    quiet=False,
    interleave_output=False,
    no_status=False,
    limit_status_rate=10.0,
    lock_install=False,
    no_notify=False,
    continue_on_failure=False,
    summarize_build=None,
):
    """Builds a catkin workspace in isolation

    This function will find all of the packages in the source space, start some
    executors, feed them packages to build based on dependencies and topological
    ordering, and then monitor the output of the executors, handling logging
    of the builds, starting builds, failing builds, and finishing builds of
    packages, and handling the shutdown of the executors when appropriate.

    :param context: context in which to build the catkin workspace
    :type context: :py:class:`catkin_tools.verbs.catkin_build.context.Context`
    :param packages: list of packages to build, by default their dependencies will also be built
    :type packages: list
    :param start_with: package to start with, skipping all packages which precede it in the topological order
    :type start_with: str
    :param no_deps: If True, the dependencies of packages will not be built first
    :type no_deps: bool
    :param n_jobs: number of parallel package build n_jobs
    :type n_jobs: int
    :param force_cmake: forces invocation of CMake if True, default is False
    :type force_cmake: bool
    :param force_color: forces colored output even if terminal does not support it
    :type force_color: bool
    :param quiet: suppresses the output of commands unless there is an error
    :type quiet: bool
    :param interleave_output: prints the output of commands as they are received
    :type interleave_output: bool
    :param no_status: disables status bar
    :type no_status: bool
    :param limit_status_rate: rate at which status updates are limited; a value of 0 places no limit
    :type limit_status_rate: float
    :param lock_install: causes executors to synchronize on access of install commands
    :type lock_install: bool
    :param no_notify: suppresses system notifications
    :type no_notify: bool
    :param continue_on_failure: do not stop building other jobs on error
    :type continue_on_failure: bool
    :param summarize_build: if True summarizes the build at the end; if None and continue_on_failure is True and
        the build fails, then the build will be summarized; if False it will never be summarized.
    :type summarize_build: bool

    :raises: SystemExit if buildspace is a file or no packages were found in the source space
        or if the provided options are invalid
    """
    pre_start_time = time.time()

    # Assert that the limit_status_rate is valid
    if limit_status_rate < 0:
        sys.exit("[build] @!@{rf}Error:@| The value of --status-rate must be greater than or equal to zero.")

    # Declare a buildspace marker describing the build config for error checking
    buildspace_marker_data = {
        'workspace': context.workspace,
        'profile': context.profile,
        'install': context.install,
        'install_space': context.install_space_abs,
        'devel_space': context.devel_space_abs,
        'source_space': context.source_space_abs}

    # Check build config
    if os.path.exists(os.path.join(context.build_space_abs, BUILDSPACE_MARKER_FILE)):
        with open(os.path.join(context.build_space_abs, BUILDSPACE_MARKER_FILE)) as buildspace_marker_file:
            existing_buildspace_marker_data = yaml.safe_load(buildspace_marker_file)
            misconfig_lines = ''
            for (k, v) in existing_buildspace_marker_data.items():
                new_v = buildspace_marker_data.get(k, None)
                if new_v != v:
                    misconfig_lines += (
                        '\n - %s: %s (stored) is not %s (commanded)' %
                        (k, v, new_v))
            if len(misconfig_lines) > 0:
                sys.exit(clr(
                    "\n@{rf}Error:@| Attempting to build a catkin workspace using build space: "
                    "\"%s\" but that build space's most recent configuration "
                    "differs from the commanded one in ways which will cause "
                    "problems. Fix the following options or use @{yf}`catkin "
                    "clean -b`@| to remove the build space: %s" %
                    (context.build_space_abs, misconfig_lines)))

    # Summarize the context
    summary_notes = []
    if force_cmake:
        summary_notes += [clr("@!@{cf}NOTE:@| Forcing CMake to run for each package.")]
    log(context.summary(summary_notes))

    # Make sure there is a build folder and it is not a file
    if os.path.exists(context.build_space_abs):
        if os.path.isfile(context.build_space_abs):
            sys.exit(clr(
                "[build] @{rf}Error:@| Build space '{0}' exists but is a file and not a folder."
                .format(context.build_space_abs)))
    # If it doesn't exist, create it
    else:
        log("[build] Creating build space: '{0}'".format(context.build_space_abs))
        os.makedirs(context.build_space_abs)

    # Write the current build config for config error checking
    with open(os.path.join(context.build_space_abs, BUILDSPACE_MARKER_FILE), 'w') as buildspace_marker_file:
        buildspace_marker_file.write(yaml.dump(buildspace_marker_data, default_flow_style=False))

    # Get all the packages in the context source space
    # Suppress warnings since this is a utility function
    workspace_packages = find_packages(context.source_space_abs, exclude_subspaces=True, warnings=[])

    # Get packages which have not been built yet
    unbuilt_pkgs = get_unbuilt_packages(context, workspace_packages)

    # Handle unbuilt packages
    if unbuilt:
        # Check if there are any unbuilt
        if len(unbuilt_pkgs) > 0:
            # Add the unbuilt packages
            packages.extend(list(unbuilt_pkgs))
        else:
            log("[build] No unbuilt packages to be built.")
            return

    # If no_deps is given, ensure packages to build are provided
    if no_deps and packages is None:
        log(clr("[build] @!@{rf}Error:@| With no_deps, you must specify packages to build."))
        return

    # Find list of packages in the workspace
    packages_to_be_built, packages_to_be_built_deps, all_packages = determine_packages_to_be_built(
        packages, context, workspace_packages)

    if not no_deps:
        # Extend packages to be built to include their deps
        packages_to_be_built.extend(packages_to_be_built_deps)

    # Also re-sort
    try:
        packages_to_be_built = topological_order_packages(dict(packages_to_be_built))
    except AttributeError:
        log(clr("[build] @!@{rf}Error:@| The workspace packages have a circular "
                "dependency, and cannot be built. Please run `catkin list "
                "--deps` to determine the problematic package(s)."))
        return

    # Check the number of packages to be built
    if len(packages_to_be_built) == 0:
        log(clr('[build] No packages to be built.'))
        return

    # Assert start_with package is in the workspace
    verify_start_with_option(
        start_with,
        packages,
        all_packages,
        packages_to_be_built + packages_to_be_built_deps)

    # Populate .catkin file if we're not installing
    # NOTE: This is done to prevent the Catkin CMake code from doing it,
    # which isn't parallel-safe. Catkin CMake only modifies this file if
    # its package source path isn't found.
    if not context.install:
        dot_catkin_file_path = os.path.join(context.devel_space_abs, '.catkin')
        # If the file exists, get the current paths
        if os.path.exists(dot_catkin_file_path):
            with open(dot_catkin_file_path, 'r') as dot_catkin_file:
                dot_catkin_paths = dot_catkin_file.read().split(';')
        else:
            dot_catkin_paths = []

        # Update the list with the new packages (in topological order)
        packages_to_be_built_paths = [
            os.path.join(context.source_space_abs, path)
            for path, pkg in packages_to_be_built
        ]

        new_dot_catkin_paths = [
            path
            for path in [os.path.join(context.source_space_abs, path) for path, pkg in all_packages]
            if path in dot_catkin_paths or path in packages_to_be_built_paths
        ]

        # Write the new file if it's different, otherwise, leave it alone
        if dot_catkin_paths == new_dot_catkin_paths:
            wide_log("[build] Package table is up to date.")
        else:
            wide_log("[build] Updating package table.")
            with open(dot_catkin_file_path, 'w') as dot_catkin_file:
                dot_catkin_file.write(';'.join(new_dot_catkin_paths))

    # Remove packages before start_with
    if start_with is not None:
        for path, pkg in list(packages_to_be_built):
            if pkg.name != start_with:
                wide_log(clr("@!@{pf}Skipping@| @{gf}---@| @{cf}{}@|").format(pkg.name))
                packages_to_be_built.pop(0)
            else:
                break

    # Get the names of all packages to be built
    packages_to_be_built_names = [p.name for _, p in packages_to_be_built]
    packages_to_be_built_deps_names = [p.name for _, p in packages_to_be_built_deps]

    # Generate prebuild jobs, if necessary
    prebuild_jobs = {}
    setup_util_exists = os.path.exists(os.path.join(context.devel_space_abs, '_setup_util.py'))
    if context.link_devel and (not setup_util_exists or (force_cmake and len(packages or []) == 0)):
        wide_log('[build] Preparing linked develspace...')

        pkg_dict = dict([(pkg.name, (pth, pkg)) for pth, pkg in all_packages])

        if 'catkin' in packages_to_be_built_names + packages_to_be_built_deps_names:
            # Use catkin as the prebuild package
            prebuild_pkg_path, prebuild_pkg = pkg_dict['catkin']
        else:
            # Generate explicit prebuild package
            prebuild_pkg_path = generate_prebuild_package(context.build_space_abs, context.devel_space_abs, force_cmake)
            prebuild_pkg = parse_package(prebuild_pkg_path)

        # Create the prebuild job
        prebuild_job = create_catkin_build_job(
            context,
            prebuild_pkg,
            prebuild_pkg_path,
            dependencies=[],
            force_cmake=force_cmake,
            pre_clean=pre_clean,
            prebuild=True)

        # Add the prebuild job
        prebuild_jobs[prebuild_job.jid] = prebuild_job

    # Remove prebuild jobs from normal job list
    for prebuild_jid, prebuild_job in prebuild_jobs.items():
        if prebuild_jid in packages_to_be_built_names:
            packages_to_be_built_names.remove(prebuild_jid)

    # Initial jobs list is just the prebuild jobs
    jobs = [] + list(prebuild_jobs.values())

    # Get all build type plugins
    build_job_creators = {
        ep.name: ep.load()['create_build_job']
        for ep in pkg_resources.iter_entry_points(group='catkin_tools.jobs')
    }

    # It's a problem if there aren't any build types available
    if len(build_job_creators) == 0:
        sys.exit('Error: No build types available. Please check your catkin_tools installation.')

    # Construct jobs
    for pkg_path, pkg in all_packages:
        if pkg.name not in packages_to_be_built_names:
            continue

        # Ignore metapackages
        if 'metapackage' in [e.tagname for e in pkg.exports]:
            continue

        # Get actual execution deps
        deps = [
            p.name for _, p
            in get_cached_recursive_build_depends_in_workspace(pkg, packages_to_be_built)
            if p.name not in prebuild_jobs
        ]

        # All jobs depend on the prebuild job if it's defined
        for j in prebuild_jobs.values():
            deps.append(j.jid)

        # Determine the job parameters
        build_job_kwargs = dict(
            context=context,
            package=pkg,
            package_path=pkg_path,
            dependencies=deps,
            force_cmake=force_cmake,
            pre_clean=pre_clean)

        # Create the job based on the build type
        build_type = get_build_type(pkg)

        if build_type in build_job_creators:
            jobs.append(build_job_creators[build_type](**build_job_kwargs))
        else:
            wide_log(clr(
                "[build] @!@{yf}Warning:@| Skipping package `{}` because it "
                "has an unsupported package build type: `{}`"
            ).format(pkg.name, build_type))

            wide_log(clr("[build] Note: Available build types:"))
            for bt_name in build_job_creators.keys():
                wide_log(clr("[build]  - `{}`".format(bt_name)))

    # Queue for communicating status
    event_queue = Queue()

    try:
        # Spin up status output thread
        status_thread = ConsoleStatusController(
            'build',
            ['package', 'packages'],
            jobs,
            n_jobs,
            [pkg.name for _, pkg in context.packages],
            [p for p in context.whitelist],
            [p for p in context.blacklist],
            event_queue,
            show_notifications=not no_notify,
            show_active_status=not no_status,
            show_buffered_stdout=not quiet and not interleave_output,
            show_buffered_stderr=not interleave_output,
            show_live_stdout=interleave_output,
            show_live_stderr=interleave_output,
            show_stage_events=not quiet,
            show_full_summary=(summarize_build is True),
            pre_start_time=pre_start_time,
            active_status_rate=limit_status_rate)
        status_thread.start()

        # Initialize locks
        locks = {
            'installspace': asyncio.Lock() if lock_install else FakeLock()
        }

        # Block while running N jobs asynchronously
        try:
            all_succeeded = run_until_complete(execute_jobs(
                'build',
                jobs,
                locks,
                event_queue,
                os.path.join(context.build_space_abs, '_logs'),
                max_toplevel_jobs=n_jobs,
                continue_on_failure=continue_on_failure,
                continue_without_deps=False))
        except Exception:
            status_thread.keep_running = False
            all_succeeded = False
            status_thread.join(1.0)
            wide_log(str(traceback.format_exc()))

        status_thread.join(1.0)

        # Warn user about new packages
        now_unbuilt_pkgs = get_unbuilt_packages(context, workspace_packages)
        new_pkgs = [p for p in unbuilt_pkgs if p not in now_unbuilt_pkgs]
        if len(new_pkgs) > 0:
            log(clr("[build] @/@!Note:@| @/Workspace packages have changed, "
                    "please re-source setup files to use them.@|"))

        if all_succeeded:
            # Create isolated devel setup if necessary
            if context.isolate_devel:
                if not context.install:
                    _create_unmerged_devel_setup(context, now_unbuilt_pkgs)
                else:
                    _create_unmerged_devel_setup_for_install(context)
            return 0
        else:
            return 1

    except KeyboardInterrupt:
        wide_log("[build] Interrupted by user!")
        event_queue.put(None)
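
A call sketch, assuming an already-configured catkin_tools Context. Since some early-exit paths above return None rather than an exit code, the caller coerces None to 0:

retcode = build_isolated_workspace(
    context,
    packages=['my_pkg'],        # placeholder: build my_pkg plus its dependencies
    continue_on_failure=True,
    summarize_build=True)
sys.exit(retcode or 0)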
Example #10
def build_isolated_workspace(context,
                             packages=None,
                             start_with=None,
                             no_deps=False,
                             unbuilt=False,
                             n_jobs=None,
                             force_cmake=False,
                             pre_clean=False,
                             force_color=False,
                             quiet=False,
                             interleave_output=False,
                             no_status=False,
                             limit_status_rate=10.0,
                             lock_install=False,
                             no_notify=False,
                             continue_on_failure=False,
                             summarize_build=None,
                             relaxed_constraints=False,
                             influx_url=None,
                             influx_db=None):
    """Builds a catkin workspace in isolation

    This function will find all of the packages in the source space, start some
    executors, feed them packages to build based on dependencies and topological
    ordering, and then monitor the output of the executors, handling logging
    of the builds, starting builds, failing builds, and finishing builds of
    packages, and handling the shutdown of the executors when appropriate.

    :param context: context in which to build the catkin workspace
    :type context: :py:class:`catkin_tools.verbs.catkin_build.context.Context`
    :param packages: list of packages to build, by default their dependencies will also be built
    :type packages: list
    :param start_with: package to start with, skipping all packages which precede it in the topological order
    :type start_with: str
    :param no_deps: If True, the dependencies of packages will not be built first
    :type no_deps: bool
    :param n_jobs: number of parallel package build n_jobs
    :type n_jobs: int
    :param force_cmake: forces invocation of CMake if True, default is False
    :type force_cmake: bool
    :param force_color: forces colored output even if terminal does not support it
    :type force_color: bool
    :param quiet: suppresses the output of commands unless there is an error
    :type quiet: bool
    :param interleave_output: prints the output of commands as they are received
    :type interleave_output: bool
    :param no_status: disables status bar
    :type no_status: bool
    :param limit_status_rate: rate at which status updates are limited; a value of 0 places no limit
    :type limit_status_rate: float
    :param lock_install: causes executors to synchronize on access of install commands
    :type lock_install: bool
    :param no_notify: suppresses system notifications
    :type no_notify: bool
    :param continue_on_failure: do not stop building other jobs on error
    :type continue_on_failure: bool
    :param summarize_build: if True summarizes the build at the end; if None and continue_on_failure is True and
        the build fails, then the build will be summarized; if False it will never be summarized.
    :type summarize_build: bool
    :param relaxed_constraints: if True, do not use exec_deps for topological ordering
    :type relaxed_constraints: bool
    :param influx_url: URL used to access an InfluxDB instance, of the form user:password@host:port
    :type influx_url: str
    :param influx_db: database name in InfluxDB
    :type influx_db: str

    :raises: SystemExit if buildspace is a file or no packages were found in the source space
        or if the provided options are invalid
    """
    pre_start_time = time.time()

    # Assert that the limit_status_rate is valid
    if limit_status_rate < 0:
        sys.exit(
            "[build] @!@{rf}Error:@| The value of --status-rate must be greater than or equal to zero."
        )

    # Declare a buildspace marker describing the build config for error checking
    buildspace_marker_data = {
        'workspace': context.workspace,
        'profile': context.profile,
        'install': context.install,
        'install_space': context.install_space_abs,
        'devel_space': context.devel_space_abs,
        'source_space': context.source_space_abs
    }

    # Check build config
    if os.path.exists(
            os.path.join(context.build_space_abs, BUILDSPACE_MARKER_FILE)):
        with open(os.path.join(
                context.build_space_abs,
                BUILDSPACE_MARKER_FILE)) as buildspace_marker_file:
            existing_buildspace_marker_data = yaml.safe_load(
                buildspace_marker_file)
            misconfig_lines = ''
            for (k, v) in existing_buildspace_marker_data.items():
                new_v = buildspace_marker_data.get(k, None)
                if new_v != v:
                    misconfig_lines += (
                        '\n - %s: %s (stored) is not %s (commanded)' %
                        (k, v, new_v))
            if len(misconfig_lines) > 0:
                sys.exit(
                    clr("\n@{rf}Error:@| Attempting to build a catkin workspace using build space: "
                        "\"%s\" but that build space's most recent configuration "
                        "differs from the commanded one in ways which will cause "
                        "problems. Fix the following options or use @{yf}`catkin "
                        "clean -b`@| to remove the build space: %s" %
                        (context.build_space_abs, misconfig_lines)))

    # Summarize the context
    summary_notes = []
    if force_cmake:
        summary_notes += [
            clr("@!@{cf}NOTE:@| Forcing CMake to run for each package.")
        ]
    log(context.summary(summary_notes))

    # Make sure there is a build folder and it is not a file
    if os.path.exists(context.build_space_abs):
        if os.path.isfile(context.build_space_abs):
            sys.exit(
                clr("[build] @{rf}Error:@| Build space '{0}' exists but is a file and not a folder."
                    .format(context.build_space_abs)))
    # If it doesn't exist, create it
    else:
        log("[build] Creating build space: '{0}'".format(
            context.build_space_abs))
        os.makedirs(context.build_space_abs)

    # Write the current build config for config error checking
    with open(os.path.join(context.build_space_abs, BUILDSPACE_MARKER_FILE),
              'w') as buildspace_marker_file:
        buildspace_marker_file.write(
            yaml.dump(buildspace_marker_data, default_flow_style=False))

    # Get all the packages in the context source space
    # Suppress warnings since this is a utility function
    workspace_packages = find_packages(context.source_space_abs,
                                       exclude_subspaces=True,
                                       warnings=[])

    # Get packages which have not been built yet
    built_packages, unbuilt_pkgs = get_built_unbuilt_packages(
        context, workspace_packages)

    # Handle unbuilt packages
    if unbuilt:
        # Check if there are any unbuilt
        if len(unbuilt_pkgs) > 0:
            # Add the unbuilt packages
            packages.extend(list(unbuilt_pkgs))
        else:
            log("[build] No unbuilt packages to be built.")
            return

    # If no_deps is given, ensure packages to build are provided
    if no_deps and packages is None:
        log(
            clr("[build] @!@{rf}Error:@| With no_deps, you must specify packages to build."
                ))
        return

    # Find list of packages in the workspace
    packages_to_be_built, packages_to_be_built_deps, all_packages = determine_packages_to_be_built(
        packages, context, workspace_packages)

    if not no_deps:
        # Extend packages to be built to include their deps
        packages_to_be_built.extend(packages_to_be_built_deps)

    # Also re-sort
    try:
        packages_to_be_built = topological_order_packages(
            dict(packages_to_be_built))
    except AttributeError:
        log(
            clr("[build] @!@{rf}Error:@| The workspace packages have a circular "
                "dependency, and cannot be built. Please run `catkin list "
                "--deps` to determine the problematic package(s)."))
        return

    # Check the number of packages to be built
    if len(packages_to_be_built) == 0:
        log(clr('[build] No packages to be built.'))

    # Assert start_with package is in the workspace
    verify_start_with_option(start_with, packages, all_packages,
                             packages_to_be_built)

    # Populate .catkin file if we're not installing
    # NOTE: This is done to prevent the Catkin CMake code from doing it,
    # which isn't parallel-safe. Catkin CMake only modifies this file if
    # its package source path isn't found.
    if not context.install:
        dot_catkin_file_path = os.path.join(context.devel_space_abs, '.catkin')
        # If the file exists, get the current paths
        if os.path.exists(dot_catkin_file_path):
            with open(dot_catkin_file_path, 'r') as dot_catkin_file:
                dot_catkin_paths = dot_catkin_file.read().split(';')
        else:
            dot_catkin_paths = []

        # Update the list with the new packages (in topological order)
        packages_to_be_built_paths = [
            os.path.join(context.source_space_abs, path)
            for path, pkg in packages_to_be_built
        ]

        new_dot_catkin_paths = [
            path for path in [
                os.path.join(context.source_space_abs, path)
                for path, pkg in all_packages
            ] if path in dot_catkin_paths or path in packages_to_be_built_paths
        ]

        # Write the new file if it's different, otherwise, leave it alone
        if dot_catkin_paths == new_dot_catkin_paths:
            wide_log("[build] Package table is up to date.")
        else:
            wide_log("[build] Updating package table.")
            with open(dot_catkin_file_path, 'w') as dot_catkin_file:
                dot_catkin_file.write(';'.join(new_dot_catkin_paths))

    # Remove packages before start_with
    if start_with is not None:
        for path, pkg in list(packages_to_be_built):
            if pkg.name != start_with:
                wide_log(
                    clr("@!@{pf}Skipping@|  @{gf}---@| @{cf}{}@|").format(
                        pkg.name))
                packages_to_be_built.pop(0)
            else:
                break

    # Get the names of all packages to be built
    packages_to_be_built_names = [p.name for _, p in packages_to_be_built]
    packages_to_be_built_deps_names = [
        p.name for _, p in packages_to_be_built_deps
    ]

    # Generate prebuild and prebuild clean jobs, if necessary
    prebuild_jobs = {}
    setup_util_present = os.path.exists(
        os.path.join(context.devel_space_abs, '_setup_util.py'))
    catkin_present = 'catkin' in (packages_to_be_built_names +
                                  packages_to_be_built_deps_names)
    catkin_built = 'catkin' in built_packages
    prebuild_built = 'catkin_tools_prebuild' in built_packages

    # Handle the prebuild jobs if the develspace is linked
    prebuild_pkg_deps = []
    if context.link_devel:
        prebuild_pkg = None

        # Construct a dictionary to lookup catkin package by name
        pkg_dict = dict([(pkg.name, (pth, pkg)) for pth, pkg in all_packages])

        if setup_util_present:
            # Setup util is already there, determine if it needs to be
            # regenerated
            if catkin_built:
                if catkin_present:
                    prebuild_pkg_path, prebuild_pkg = pkg_dict['catkin']
            elif prebuild_built:
                if catkin_present:
                    # TODO: Clean prebuild package
                    ct_prebuild_pkg_path = get_prebuild_package(
                        context.build_space_abs, context.devel_space_abs,
                        force_cmake)
                    ct_prebuild_pkg = parse_package(ct_prebuild_pkg_path)

                    prebuild_jobs[
                        'catkin_tools_prebuild'] = create_catkin_clean_job(
                            context,
                            ct_prebuild_pkg,
                            ct_prebuild_pkg_path,
                            dependencies=[],
                            dry_run=False,
                            clean_build=True,
                            clean_devel=True,
                            clean_install=True)

                    # TODO: Build catkin package
                    prebuild_pkg_path, prebuild_pkg = pkg_dict['catkin']
                    prebuild_pkg_deps.append('catkin_tools_prebuild')
            else:
                # How did these get here??
                log("Warning: devel space setup files have an unknown origin.")
        else:
            # Setup util needs to be generated
            if catkin_built or prebuild_built:
                log("Warning: generated devel space setup files have been deleted."
                    )

            if catkin_present:
                # Build catkin package
                prebuild_pkg_path, prebuild_pkg = pkg_dict['catkin']
            else:
                # Generate and build explicit prebuild package
                prebuild_pkg_path = get_prebuild_package(
                    context.build_space_abs, context.devel_space_abs,
                    force_cmake)
                prebuild_pkg = parse_package(prebuild_pkg_path)

        if prebuild_pkg is not None:
            # Create the prebuild job
            prebuild_job = create_catkin_build_job(
                context,
                prebuild_pkg,
                prebuild_pkg_path,
                build_dependencies=prebuild_pkg_deps,
                run_dependencies=[],
                force_cmake=force_cmake,
                pre_clean=pre_clean,
                prebuild=True)

            # Add the prebuild job
            prebuild_jobs[prebuild_job.jid] = prebuild_job

    # Remove prebuild jobs from normal job list
    for prebuild_jid, prebuild_job in prebuild_jobs.items():
        if prebuild_jid in packages_to_be_built_names:
            packages_to_be_built_names.remove(prebuild_jid)

    # Initial jobs list is just the prebuild jobs
    jobs = [] + list(prebuild_jobs.values())

    # Get all build type plugins
    build_job_creators = {
        ep.name: ep.load()['create_build_job']
        for ep in pkg_resources.iter_entry_points(group='catkin_tools.jobs')
    }

    # It's a problem if there aren't any build types available
    if len(build_job_creators) == 0:
        sys.exit(
            'Error: No build types available. Please check your catkin_tools installation.'
        )

    # Construct jobs
    for pkg_path, pkg in all_packages:
        if pkg.name not in packages_to_be_built_names:
            continue

        # Ignore metapackages
        if 'metapackage' in [e.tagname for e in pkg.exports]:
            continue

        # Get actual execution deps
        build_deps = [
            p.name for _, p in get_cached_recursive_build_depends_in_workspace(
                pkg, packages_to_be_built) if p.name not in prebuild_jobs
        ]
        build_for_run_deps = [
            p.name for _, p in get_cached_recursive_run_depends_in_workspace(
                pkg, packages_to_be_built) if p.name not in prebuild_jobs
        ]

        # All jobs depend on the prebuild jobs if they're defined
        if not no_deps:
            if relaxed_constraints:
                build_for_run_deps = [
                    p.name for _, p in
                    get_recursive_build_depends_for_run_depends_in_workspace(
                        [pkg], packages_to_be_built)
                    if p.name not in prebuild_jobs
                ]
            else:
                # revert to interpreting all dependencies as build dependencies
                build_deps = list(set(build_deps + build_for_run_deps))
                build_for_run_deps = []

            for j in prebuild_jobs.values():
                build_deps.append(j.jid)

        # Determine the job parameters
        build_job_kwargs = dict(context=context,
                                package=pkg,
                                package_path=pkg_path,
                                build_dependencies=build_deps,
                                run_dependencies=build_for_run_deps,
                                force_cmake=force_cmake,
                                pre_clean=pre_clean)

        # Create the job based on the build type
        build_type = get_build_type(pkg)

        if build_type in build_job_creators:
            jobs.append(build_job_creators[build_type](**build_job_kwargs))
        else:
            wide_log(
                clr("[build] @!@{yf}Warning:@| Skipping package `{}` because it "
                    "has an unsupported package build type: `{}`").format(
                        pkg.name, build_type))

            wide_log(clr("[build] Note: Available build types:"))
            for bt_name in build_job_creators.keys():
                wide_log(clr("[build]  - `{}`".format(bt_name)))

    # Queue for communicating status
    event_queue = Queue()

    status_queue = Queue()
    monitoring_queue = Queue()

    class ForwardingQueue(threading.Thread):
        def __init__(self, queues):
            super(ForwardingQueue, self).__init__()
            self.keep_running = True
            self.queues = queues

        def run(self):
            while self.keep_running:
                event = event_queue.get(True)
                for queue in self.queues:
                    queue.put(event)
                if event is None:
                    break

    queue_thread = ForwardingQueue([status_queue, monitoring_queue])

    threads = [queue_thread]

    try:
        # Spin up status output thread
        status_thread = ConsoleStatusController(
            'build', ['package', 'packages'],
            jobs,
            n_jobs, [pkg.name for _, pkg in context.packages],
            [p for p in context.whitelist], [p for p in context.blacklist],
            status_queue,
            show_notifications=not no_notify,
            show_active_status=not no_status,
            show_buffered_stdout=not quiet and not interleave_output,
            show_buffered_stderr=not interleave_output,
            show_live_stdout=interleave_output,
            show_live_stderr=interleave_output,
            show_stage_events=not quiet,
            show_full_summary=(summarize_build is True),
            pre_start_time=pre_start_time,
            active_status_rate=limit_status_rate)
        threads.append(status_thread)

        if influx_db is not None:
            if not have_influx_db:
                sys.exit(
                    "[build] @!@{rf}Error:@| InfluxDB monitoring is not possible, cannot import influxdb"
                )

            match = re.match('^(.+):(.+)@(.+):(.+)$', influx_url)
            if not match:
                sys.exit(
                    "[build] @!@{rf}Error:@| The value of --influx has to be of the form username:password@host:port"
                )
            username, password, host, port = match.groups()

            influxdb_thread = InfluxDBStatusController(monitoring_queue,
                                                       influx_db, host, port,
                                                       username, password)

            threads.append(influxdb_thread)

        for thread in threads:
            thread.start()

        # Initialize locks
        locks = {
            'installspace': asyncio.Lock() if lock_install else FakeLock()
        }

        # Block while running N jobs asynchronously
        try:
            all_succeeded = run_until_complete(
                execute_jobs('build',
                             jobs,
                             locks,
                             event_queue,
                             context.log_space_abs,
                             max_toplevel_jobs=n_jobs,
                             continue_on_failure=continue_on_failure,
                             continue_without_deps=False,
                             relaxed_constraints=relaxed_constraints))
        except Exception:
            all_succeeded = False
            for thread in threads:
                thread.keep_running = False
            for thread in threads:
                thread.join(1.0)
            wide_log(str(traceback.format_exc()))

        event_queue.put(None)

        for thread in threads:
            thread.join(1.0)

        # Warn user about new packages
        now_built_packages, now_unbuilt_pkgs = get_built_unbuilt_packages(
            context, workspace_packages)
        new_pkgs = [p for p in unbuilt_pkgs if p not in now_unbuilt_pkgs]
        if len(new_pkgs) > 0:
            log(
                clr("[build] @/@!Note:@| @/Workspace packages have changed, "
                    "please re-source setup files to use them.@|"))

        if all_succeeded:
            # Create isolated devel setup if necessary
            if context.isolate_devel:
                if not context.install:
                    _create_unmerged_devel_setup(context, now_unbuilt_pkgs)
                else:
                    _create_unmerged_devel_setup_for_install(context)
            return 0
        else:
            return 1

    except KeyboardInterrupt:
        wide_log("[build] Interrupted by user!")
        event_queue.put(None)

        return 130  # EOWNERDEAD return code is not part of the errno module.
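
The ForwardingQueue above implements a simple fan-out: every event from event_queue is copied to each consumer queue, with None as the shutdown sentinel. A self-contained sketch of the same pattern:

import queue
import threading

def forward(src, sinks):
    # Copy every event from src to all sinks; a None event is the
    # sentinel that ends forwarding (mirroring ForwardingQueue.run).
    while True:
        event = src.get(True)
        for sink in sinks:
            sink.put(event)
        if event is None:
            break

src, a, b = queue.Queue(), queue.Queue(), queue.Queue()
t = threading.Thread(target=forward, args=(src, [a, b]))
t.start()
src.put('job-started')
src.put(None)
t.join()
print(a.get(), a.get())   # job-started None
print(b.get(), b.get())   # job-started None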
Example #11
def document_workspace(
    context,
    packages=None,
    start_with=None,
    no_deps=False,
    n_jobs=None,
    force_color=False,
    quiet=False,
    interleave_output=False,
    no_status=False,
    limit_status_rate=10.0,
    no_notify=False,
    continue_on_failure=False,
    summarize_build=None
):
    """Documents the packages in a catkin workspace, running documentation jobs in topological order."""
    pre_start_time = time.time()

    # Get all the packages in the context source space
    # Suppress warnings since this is a utility function
    workspace_packages = find_packages(context.source_space_abs, exclude_subspaces=True, warnings=[])

    # If no_deps is given, ensure packages to build are provided
    if no_deps and packages is None:
        log(fmt("[document] @!@{rf}Error:@| With no_deps, you must specify packages to build."))
        return

    # Find list of packages in the workspace
    packages_to_be_documented, packages_to_be_documented_deps, all_packages = determine_packages_to_be_built(
        packages, context, workspace_packages)

    if not no_deps:
        # Extend packages to be documented to include their deps
        packages_to_be_documented.extend(packages_to_be_documented_deps)

    # Also re-sort
    try:
        packages_to_be_documented = topological_order_packages(dict(packages_to_be_documented))
    except AttributeError:
        log(fmt("[document] @!@{rf}Error:@| The workspace packages have a circular "
                "dependency, and cannot be documented. Please run `catkin list "
                "--deps` to determine the problematic package(s)."))
        return

    # Check the number of packages to be documented
    if len(packages_to_be_documented) == 0:
        log(fmt('[document] No packages to be documented.'))

    # Assert start_with package is in the workspace
    verify_start_with_option(
        start_with,
        packages,
        all_packages,
        packages_to_be_documented + packages_to_be_documented_deps)

    # Remove packages before start_with
    if start_with is not None:
        for path, pkg in list(packages_to_be_documented):
            if pkg.name != start_with:
                wide_log(fmt("@!@{pf}Skipping@|  @{gf}---@| @{cf}{}@|").format(pkg.name))
                packages_to_be_documented.pop(0)
            else:
                break

    # Get the names of all packages to be built
    packages_to_be_documented_names = [p.name for _, p in packages_to_be_documented]
    packages_to_be_documented_deps_names = [p.name for _, p in packages_to_be_documented_deps]

    jobs = []

    # Construct jobs
    for pkg_path, pkg in all_packages:
        if pkg.name not in packages_to_be_documented_names:
            continue

        # Get actual execution deps
        deps = [
            p.name for _, p
            in get_cached_recursive_build_depends_in_workspace(pkg, packages_to_be_documented)
        ]

        jobs.append(create_package_job(context, pkg, pkg_path, deps))

    # Special job for post-job summary sphinx step.
    jobs.append(create_summary_job(context, package_names=packages_to_be_documented_names))

    # Queue for communicating status
    event_queue = Queue()

    try:
        # Spin up status output thread
        status_thread = ConsoleStatusController(
            'document',
            ['package', 'packages'],
            jobs,
            n_jobs,
            [pkg.name for _, pkg in context.packages],
            [p for p in context.whitelist],
            [p for p in context.blacklist],
            event_queue,
            show_notifications=not no_notify,
            show_active_status=not no_status,
            show_buffered_stdout=not quiet and not interleave_output,
            show_buffered_stderr=not interleave_output,
            show_live_stdout=interleave_output,
            show_live_stderr=interleave_output,
            show_stage_events=not quiet,
            show_full_summary=(summarize_build is True),
            pre_start_time=pre_start_time,
            active_status_rate=limit_status_rate)
        status_thread.start()

        # Block while running N jobs asynchronously
        try:
            all_succeeded = run_until_complete(execute_jobs(
                'document',
                jobs,
                None,
                event_queue,
                context.log_space_abs,
                max_toplevel_jobs=n_jobs,
                continue_on_failure=continue_on_failure,
                continue_without_deps=False))

        except Exception:
            status_thread.keep_running = False
            all_succeeded = False
            status_thread.join(1.0)
            wide_log(str(traceback.format_exc()))
        status_thread.join(1.0)

        return 0 if all_succeeded else 1

    except KeyboardInterrupt:
        wide_log("[document] Interrupted by user!")
        event_queue.put(None)

        return 130  # EOWNERDEAD
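
A call sketch for the documentation verb, again assuming a configured Context; the return value follows the same 0/1/130 convention as the build verb:

# Document the whole workspace with four parallel jobs (sketch only):
retcode = document_workspace(context, packages=None, n_jobs=4)
sys.exit(retcode or 0)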
Example #12
def determine_packages_to_be_built(packages, context, workspace_packages):
    """Returns list of packages which should be built, and those package's deps.

    :param packages: list of packages to be built, if None all packages are built
    :type packages: list
    :param context: Workspace context
    :type context: :py:class:`catkin_tools.verbs.catkin_build.context.Context`
    :param workspace_packages: packages found in the workspace, as returned by find_packages
    :type workspace_packages: dict
    :returns: tuple of packages to be built and those packages' deps
    :rtype: tuple
    """
    start = time.time()

    # If there are no packages raise
    if not workspace_packages:
        log("[build] No packages were found in the source space '{0}'".format(
            context.source_space_abs))
    else:
        wide_log("[build] Found '{0}' packages in {1}.".format(
            len(workspace_packages), format_time_delta(time.time() - start)))

    # Order the packages by topology
    ordered_packages = topological_order_packages(workspace_packages)
    # Set the packages in the workspace for the context
    context.packages = ordered_packages
    # Determine the packages which should be built
    packages_to_be_built = []
    packages_to_be_built_deps = []

    # Check if topological_order_packages detected any circular dependencies; if so, print an error and fail.
    # In that case, the last entry of ordered_packages is a tuple whose first element is None.
    if ordered_packages and ordered_packages[-1][0] is None:
        guilty_packages = ", ".join(ordered_packages[-1][1:])
        sys.exit(
            "[build] Circular dependency detected in the following packages: {}"
            .format(guilty_packages))

    workspace_package_names = dict([(pkg.name, (path, pkg))
                                    for path, pkg in ordered_packages])
    # Determine the packages to be built
    if packages:
        # First assert all of the packages given are in the workspace
        for package in packages:
            if package not in workspace_package_names:
                # Check whether the package name is a glob pattern matching workspace packages
                glob_packages = expand_glob_package(package,
                                                    workspace_package_names)
                if len(glob_packages) > 0:
                    packages.extend(glob_packages)
                    continue
                else:
                    sys.exit(
                        "[build] Given package '{0}' is not in the workspace "
                        "and pattern does not match any package".format(
                            package))
            # If metapackage, include run depends which are in the workspace
            package_obj = workspace_package_names[package][1]
            if 'metapackage' in [e.tagname for e in package_obj.exports]:
                for rdep in package_obj.run_depends:
                    if rdep.name in workspace_package_names:
                        packages.append(rdep.name)
        # Limit the packages to be built to just the provided packages
        for pkg_path, package in ordered_packages:
            if package.name in packages:
                packages_to_be_built.append((pkg_path, package))
                # Get the recursive dependencies for each of these packages
                pkg_deps = get_cached_recursive_build_depends_in_workspace(
                    package, ordered_packages)
                packages_to_be_built_deps.extend(pkg_deps)
    else:
        # Only use whitelist when no other packages are specified
        if len(context.whitelist) > 0:
            # Expand glob patterns in whitelist
            whitelist = []
            for whitelisted_package in context.whitelist:
                whitelist.extend(
                    expand_glob_package(whitelisted_package,
                                        workspace_package_names))
            packages_to_be_built = [
                p for p in ordered_packages if (p[1].name in whitelist)
            ]
        else:
            packages_to_be_built = ordered_packages

    # Filter packages with blacklist
    if len(context.blacklist) > 0:
        # Expand glob patterns in blacklist
        blacklist = []
        for blacklisted_package in context.blacklist:
            blacklist.extend(
                expand_glob_package(blacklisted_package,
                                    workspace_package_names))
        # Apply blacklist to packages and dependencies
        packages_to_be_built = [
            (path, pkg) for path, pkg in packages_to_be_built
            if (pkg.name not in blacklist or pkg.name in packages)
        ]
        packages_to_be_built_deps = [
            (path, pkg) for path, pkg in packages_to_be_built_deps
            if (pkg.name not in blacklist or pkg.name in packages)
        ]

    return packages_to_be_built, packages_to_be_built_deps, ordered_packages
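
expand_glob_package is not defined in these examples; a plausible minimal version (an assumption, not the real catkin_tools implementation) based on fnmatch could look like this:

import fnmatch

def expand_glob_package(pattern, workspace_package_names):
    # Hypothetical sketch: return the workspace package names that
    # match the given glob pattern.
    return [name for name in workspace_package_names
            if fnmatch.fnmatch(name, pattern)]

# e.g. expand_glob_package('ros*', workspace_package_names) would match
# every workspace package whose name starts with 'ros'.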