Example #1
def build_isolated_workspace(
    context,
    packages=None,
    start_with=None,
    no_deps=False,
    unbuilt=False,
    n_jobs=None,
    force_cmake=False,
    pre_clean=False,
    force_color=False,
    quiet=False,
    interleave_output=False,
    no_status=False,
    limit_status_rate=10.0,
    lock_install=False,
    no_notify=False,
    continue_on_failure=False,
    summarize_build=None,
):
    """Builds a catkin workspace in isolation

    This function will find all of the packages in the source space, start some
    executors, feed them packages to build based on dependencies and topological
    ordering, and then monitor the executors' output, handling the logging,
    starting, failing, and finishing of package builds, and shutting the
    executors down when appropriate.

    :param context: context in which to build the catkin workspace
    :type context: :py:class:`catkin_tools.verbs.catkin_build.context.Context`
    :param packages: list of packages to build; by default their dependencies will also be built
    :type packages: list
    :param start_with: package to start with, skipping all packages which precede it in the topological order
    :type start_with: str
    :param no_deps: If True, the dependencies of packages will not be built first
    :type no_deps: bool
    :param n_jobs: number of parallel package build jobs
    :type n_jobs: int
    :param force_cmake: forces invocation of CMake if True, default is False
    :type force_cmake: bool
    :param force_color: forces colored output even if terminal does not support it
    :type force_color: bool
    :param quiet: suppresses the output of commands unless there is an error
    :type quiet: bool
    :param interleave_output: prints the output of commands as they are received
    :type interleave_output: bool
    :param no_status: disables status bar
    :type no_status: bool
    :param limit_status_rate: rate to which status updates are limited; 0 places no limit
    :type limit_status_rate: float
    :param lock_install: causes executors to synchronize on access of install commands
    :type lock_install: bool
    :param no_notify: suppresses system notifications
    :type no_notify: bool
    :param continue_on_failure: do not stop building other jobs on error
    :type continue_on_failure: bool
    :param summarize_build: if True, summarizes the build at the end; if None, the build is summarized only when
        continue_on_failure is True and the build fails; if False, it is never summarized.
    :type summarize_build: bool

    :raises: SystemExit if buildspace is a file or no packages were found in the source space
        or if the provided options are invalid
    """
    pre_start_time = time.time()

    # Assert that the limit_status_rate is valid
    if limit_status_rate < 0:
        sys.exit("[build] @!@{rf}Error:@| The value of --status-rate must be greater than or equal to zero.")

    # Declare a buildspace marker describing the build config for error checking
    buildspace_marker_data = {
        'workspace': context.workspace,
        'profile': context.profile,
        'install': context.install,
        'install_space': context.install_space_abs,
        'devel_space': context.devel_space_abs,
        'source_space': context.source_space_abs}

    # Check build config
    if os.path.exists(os.path.join(context.build_space_abs, BUILDSPACE_MARKER_FILE)):
        with open(os.path.join(context.build_space_abs, BUILDSPACE_MARKER_FILE)) as buildspace_marker_file:
            existing_buildspace_marker_data = yaml.safe_load(buildspace_marker_file)
            misconfig_lines = ''
            for (k, v) in existing_buildspace_marker_data.items():
                new_v = buildspace_marker_data.get(k, None)
                if new_v != v:
                    misconfig_lines += (
                        '\n - %s: %s (stored) is not %s (commanded)' %
                        (k, v, new_v))
            if len(misconfig_lines) > 0:
                sys.exit(clr(
                    "\n@{rf}Error:@| Attempting to build a catkin workspace using build space: "
                    "\"%s\" but that build space's most recent configuration "
                    "differs from the commanded one in ways which will cause "
                    "problems. Fix the following options or use @{yf}`catkin "
                    "clean -b`@| to remove the build space: %s" %
                    (context.build_space_abs, misconfig_lines)))

    # Summarize the context
    summary_notes = []
    if force_cmake:
        summary_notes += [clr("@!@{cf}NOTE:@| Forcing CMake to run for each package.")]
    log(context.summary(summary_notes))

    # Make sure there is a build folder and it is not a file
    if os.path.exists(context.build_space_abs):
        if os.path.isfile(context.build_space_abs):
            sys.exit(clr(
                "[build] @{rf}Error:@| Build space '{0}' exists but is a file and not a folder."
                .format(context.build_space_abs)))
    # If it doesn't exist, create it
    else:
        log("[build] Creating build space: '{0}'".format(context.build_space_abs))
        os.makedirs(context.build_space_abs)

    # Write the current build config for config error checking
    with open(os.path.join(context.build_space_abs, BUILDSPACE_MARKER_FILE), 'w') as buildspace_marker_file:
        buildspace_marker_file.write(yaml.dump(buildspace_marker_data, default_flow_style=False))

    # Get all the packages in the context source space
    # Suppress warnings since this is a utility function
    workspace_packages = find_packages(context.source_space_abs, exclude_subspaces=True, warnings=[])

    # Get packages which have not been built yet
    unbuilt_pkgs = get_unbuilt_packages(context, workspace_packages)

    # Handle unbuilt packages
    if unbuilt:
        # Check if there are any unbuilt
        if len(unbuilt_pkgs) > 0:
            # Add the unbuilt packages
            packages.extend(list(unbuilt_pkgs))
        else:
            log("[build] No unbuilt packages to be built.")
            return

    # If no_deps is given, ensure packages to build are provided
    if no_deps and packages is None:
        log(clr("[build] @!@{rf}Error:@| With no_deps, you must specify packages to build."))
        return

    # Find list of packages in the workspace
    packages_to_be_built, packages_to_be_built_deps, all_packages = determine_packages_to_be_built(
        packages, context, workspace_packages)

    if not no_deps:
        # Extend packages to be built to include their deps
        packages_to_be_built.extend(packages_to_be_built_deps)

    # Also re-sort
    try:
        packages_to_be_built = topological_order_packages(dict(packages_to_be_built))
    except AttributeError:
        log(clr("[build] @!@{rf}Error:@| The workspace packages have a circular "
                "dependency, and cannot be built. Please run `catkin list "
                "--deps` to determine the problematic package(s)."))
        return

    # Check the number of packages to be built
    if len(packages_to_be_built) == 0:
        log(clr('[build] No packages to be built.'))
        return

    # Assert start_with package is in the workspace
    verify_start_with_option(
        start_with,
        packages,
        all_packages,
        packages_to_be_built + packages_to_be_built_deps)

    # Populate .catkin file if we're not installing
    # NOTE: This is done to prevent the Catkin CMake code from doing it,
    # which isn't parallel-safe. Catkin CMake only modifies this file if
    # its package source path isn't found.
    if not context.install:
        dot_catkin_file_path = os.path.join(context.devel_space_abs, '.catkin')
        # If the file exists, get the current paths
        if os.path.exists(dot_catkin_file_path):
            dot_catkin_paths = open(dot_catkin_file_path, 'r').read().split(';')
        else:
            dot_catkin_paths = []

        # Update the list with the new packages (in topological order)
        packages_to_be_built_paths = [
            os.path.join(context.source_space_abs, path)
            for path, pkg in packages_to_be_built
        ]

        new_dot_catkin_paths = [
            os.path.join(context.source_space_abs, path)
            for path in [os.path.join(context.source_space_abs, path) for path, pkg in all_packages]
            if path in dot_catkin_paths or path in packages_to_be_built_paths
        ]

        # Write the new file if it's different, otherwise, leave it alone
        if dot_catkin_paths == new_dot_catkin_paths:
            wide_log("[build] Package table is up to date.")
        else:
            wide_log("[build] Updating package table.")
            open(dot_catkin_file_path, 'w').write(';'.join(new_dot_catkin_paths))

    # Remove packages before start_with
    if start_with is not None:
        for path, pkg in list(packages_to_be_built):
            if pkg.name != start_with:
                wide_log(clr("@!@{pf}Skipping@| @{gf}---@| @{cf}{}@|").format(pkg.name))
                packages_to_be_built.pop(0)
            else:
                break

    # Get the names of all packages to be built
    packages_to_be_built_names = [p.name for _, p in packages_to_be_built]
    packages_to_be_built_deps_names = [p.name for _, p in packages_to_be_built_deps]

    # Generate prebuild jobs, if necessary
    prebuild_jobs = {}
    setup_util_exists = os.path.exists(os.path.join(context.devel_space_abs, '_setup_util.py'))
    if context.link_devel and (not setup_util_exists or (force_cmake and not packages)):
        wide_log('[build] Preparing linked develspace...')

        pkg_dict = dict([(pkg.name, (pth, pkg)) for pth, pkg in all_packages])

        if 'catkin' in packages_to_be_built_names + packages_to_be_built_deps_names:
            # Use catkin as the prebuild package
            prebuild_pkg_path, prebuild_pkg = pkg_dict['catkin']
        else:
            # Generate explicit prebuild package
            prebuild_pkg_path = generate_prebuild_package(context.build_space_abs, context.devel_space_abs, force_cmake)
            prebuild_pkg = parse_package(prebuild_pkg_path)

        # Create the prebuild job
        prebuild_job = create_catkin_build_job(
            context,
            prebuild_pkg,
            prebuild_pkg_path,
            dependencies=[],
            force_cmake=force_cmake,
            pre_clean=pre_clean,
            prebuild=True)

        # Add the prebuild job
        prebuild_jobs[prebuild_job.jid] = prebuild_job

    # Remove prebuild jobs from normal job list
    for prebuild_jid, prebuild_job in prebuild_jobs.items():
        if prebuild_jid in packages_to_be_built_names:
            packages_to_be_built_names.remove(prebuild_jid)

    # Initial jobs list is just the prebuild jobs
    jobs = [] + list(prebuild_jobs.values())

    # Get all build type plugins
    build_job_creators = {
        ep.name: ep.load()['create_build_job']
        for ep in pkg_resources.iter_entry_points(group='catkin_tools.jobs')
    }

    # It's a problem if there aren't any build types available
    if len(build_job_creators) == 0:
        sys.exit('Error: No build types available. Please check your catkin_tools installation.')

    # Construct jobs
    for pkg_path, pkg in all_packages:
        if pkg.name not in packages_to_be_built_names:
            continue

        # Ignore metapackages
        if 'metapackage' in [e.tagname for e in pkg.exports]:
            continue

        # Get actual execution deps
        deps = [
            p.name for _, p
            in get_cached_recursive_build_depends_in_workspace(pkg, packages_to_be_built)
            if p.name not in prebuild_jobs
        ]

        # All jobs depend on the prebuild job if it's defined
        for j in prebuild_jobs.values():
            deps.append(j.jid)

        # Determine the job parameters
        build_job_kwargs = dict(
            context=context,
            package=pkg,
            package_path=pkg_path,
            dependencies=deps,
            force_cmake=force_cmake,
            pre_clean=pre_clean)

        # Create the job based on the build type
        build_type = get_build_type(pkg)

        if build_type in build_job_creators:
            jobs.append(build_job_creators[build_type](**build_job_kwargs))
        else:
            wide_log(clr(
                "[build] @!@{yf}Warning:@| Skipping package `{}` because it "
                "has an unsupported package build type: `{}`"
            ).format(pkg.name, build_type))

            wide_log(clr("[build] Note: Available build types:"))
            for bt_name in build_job_creators.keys():
                wide_log(clr("[build]  - `{}`".format(bt_name)))

    # Queue for communicating status
    event_queue = Queue()

    try:
        # Spin up status output thread
        status_thread = ConsoleStatusController(
            'build',
            ['package', 'packages'],
            jobs,
            n_jobs,
            [pkg.name for _, pkg in context.packages],
            [p for p in context.whitelist],
            [p for p in context.blacklist],
            event_queue,
            show_notifications=not no_notify,
            show_active_status=not no_status,
            show_buffered_stdout=not quiet and not interleave_output,
            show_buffered_stderr=not interleave_output,
            show_live_stdout=interleave_output,
            show_live_stderr=interleave_output,
            show_stage_events=not quiet,
            show_full_summary=(summarize_build is True),
            pre_start_time=pre_start_time,
            active_status_rate=limit_status_rate)
        status_thread.start()

        # Initialize locks
        locks = {
            'installspace': asyncio.Lock() if lock_install else FakeLock()
        }

        # Block while running N jobs asynchronously
        try:
            all_succeeded = run_until_complete(execute_jobs(
                'build',
                jobs,
                locks,
                event_queue,
                os.path.join(context.build_space_abs, '_logs'),
                max_toplevel_jobs=n_jobs,
                continue_on_failure=continue_on_failure,
                continue_without_deps=False))
        except Exception:
            status_thread.keep_running = False
            all_succeeded = False
            status_thread.join(1.0)
            wide_log(str(traceback.format_exc()))

        status_thread.join(1.0)

        # Warn user about new packages
        now_unbuilt_pkgs = get_unbuilt_packages(context, workspace_packages)
        new_pkgs = [p for p in unbuilt_pkgs if p not in now_unbuilt_pkgs]
        if len(new_pkgs) > 0:
            log(clr("[build] @/@!Note:@| @/Workspace packages have changed, "
                    "please re-source setup files to use them.@|"))

        if all_succeeded:
            # Create isolated devel setup if necessary
            if context.isolate_devel:
                if not context.install:
                    _create_unmerged_devel_setup(context, now_unbuilt_pkgs)
                else:
                    _create_unmerged_devel_setup_for_install(context)
            return 0
        else:
            return 1

    except KeyboardInterrupt:
        wide_log("[build] Interrupted by user!")
        event_queue.put(None)
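
Example #1 guards install-space access with either a real asyncio.Lock or a FakeLock, which catkin_tools imports from elsewhere. A minimal sketch of such a no-op lock, assuming only the acquire/release and async context-manager surface the executor exercises (an illustration, not the actual implementation):

class FakeLock:
    """No-op stand-in for asyncio.Lock when install locking is disabled.

    Hypothetical sketch: it only mirrors the lock protocol used by the
    job executor; the real FakeLock lives inside catkin_tools.
    """

    async def acquire(self):
        return True  # nothing to contend on, "acquired" immediately

    def release(self):
        pass  # nothing was held, so nothing to release

    async def __aenter__(self):
        await self.acquire()
        return self

    async def __aexit__(self, exc_type, exc, tb):
        self.release()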
Example #2
def clean_packages(context, names_of_packages_to_be_cleaned, clean_dependents,
                   verbose, dry_run):

    pre_start_time = time.time()

    # Update the names of packages to be cleaned with dependents
    packages_to_be_cleaned = determine_packages_to_be_cleaned(
        context, clean_dependents, names_of_packages_to_be_cleaned)

    # print(packages_to_be_cleaned)
    # for path, pkg in packages_to_be_cleaned:
    # if os.path.exists(os.path.join(context.build_space_abs, pkg.name)):
    # print("[clean] Cleaning package: %s" % pkg.name)

    # Construct jobs
    jobs = []
    for path, pkg in packages_to_be_cleaned:

        # Get all build type plugins
        clean_job_creators = {
            ep.name: ep.load()['create_clean_job']
            for ep in pkg_resources.iter_entry_points(
                group='catkin_tools.jobs')
        }

        # It's a problem if there aren't any build types available
        if len(clean_job_creators) == 0:
            sys.exit(
                'Error: No build types available. Please check your catkin_tools installation.'
            )

        # Determine the job parameters
        clean_job_kwargs = dict(
            context=context,
            package=pkg,
            package_path=path,
            dependencies=[],  # Unused because clean jobs are not parallelized
            dry_run=dry_run,
            clean_build=True,
            clean_devel=True,
            clean_install=True)

        # Create the job based on the build type
        build_type = get_build_type(pkg)

        if build_type in clean_job_creators:
            jobs.append(clean_job_creators[build_type](**clean_job_kwargs))

    if len(jobs) == 0:
        print(
            "[clean] There are no products from the given packages to clean.")
        return False

    # Queue for communicating status
    event_queue = Queue()

    # Spin up status output thread
    status_thread = ConsoleStatusController(
        'clean', ['package', 'packages'],
        jobs,
        1, [pkg.name for _, pkg in context.packages],
        [p for p in context.whitelist], [p for p in context.blacklist],
        event_queue,
        show_notifications=False,
        show_active_status=False,
        show_buffered_stdout=verbose or False,
        show_buffered_stderr=True,
        show_live_stdout=False,
        show_live_stderr=False,
        show_stage_events=False,
        show_full_summary=False,
        pre_start_time=pre_start_time,
        active_status_rate=10.0)
    status_thread.start()

    # Initialize locks (none need to be configured here)
    locks = {}

    # Block while running N jobs asynchronously
    try:
        ej = execute_jobs('clean',
                          jobs,
                          locks,
                          event_queue,
                          context.log_space_abs,
                          max_toplevel_jobs=1,
                          continue_on_failure=True,
                          continue_without_deps=False)
        all_succeeded = run_until_complete(ej)
    except Exception:
        status_thread.keep_running = False
        all_succeeded = False
        status_thread.join(1.0)
        wide_log(str(traceback.format_exc()))

    status_thread.join(1.0)

    return all_succeeded
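
Examples #1 and #2 both discover their job factories through pkg_resources entry points in the catkin_tools.jobs group, where each entry point loads to a dict of creator callables such as create_build_job or create_clean_job. A hypothetical third-party plugin might register itself in its setup.py roughly like this (catkin_mybuild and its description dict are invented names for illustration):

from setuptools import setup

setup(
    name='catkin_mybuild',  # hypothetical plugin distribution
    version='0.1.0',
    packages=['catkin_mybuild'],
    entry_points={
        # ep.load() in the examples above returns this 'description' object,
        # which must be subscriptable with keys like 'create_build_job',
        # 'create_clean_job', or 'create_test_job'.
        'catkin_tools.jobs': [
            'mybuild = catkin_mybuild:description',
        ],
    },
)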
Example #3
def document_workspace(
    context,
    packages=None,
    start_with=None,
    no_deps=False,
    n_jobs=None,
    force_color=False,
    quiet=False,
    interleave_output=False,
    no_status=False,
    limit_status_rate=10.0,
    no_notify=False,
    continue_on_failure=False,
    summarize_build=None
):
    pre_start_time = time.time()

    # Get all the packages in the context source space
    # Suppress warnings since this is a utility function
    workspace_packages = find_packages(context.source_space_abs, exclude_subspaces=True, warnings=[])

    # If no_deps is given, ensure packages to build are provided
    if no_deps and packages is None:
        log(fmt("[document] @!@{rf}Error:@| With no_deps, you must specify packages to build."))
        return

    # Find list of packages in the workspace
    packages_to_be_documented, packages_to_be_documented_deps, all_packages = determine_packages_to_be_built(
        packages, context, workspace_packages)

    if not no_deps:
        # Extend packages to be documented to include their deps
        packages_to_be_documented.extend(packages_to_be_documented_deps)

    # Also re-sort
    try:
        packages_to_be_documented = topological_order_packages(dict(packages_to_be_documented))
    except AttributeError:
        log(fmt("[document] @!@{rf}Error:@| The workspace packages have a circular "
                "dependency, and cannot be documented. Please run `catkin list "
                "--deps` to determine the problematic package(s)."))
        return

    # Check the number of packages to be documented
    if len(packages_to_be_documented) == 0:
        log(fmt('[document] No packages to be documented.'))

    # Assert start_with package is in the workspace
    verify_start_with_option(
        start_with,
        packages,
        all_packages,
        packages_to_be_documented + packages_to_be_documented_deps)

    # Remove packages before start_with
    if start_with is not None:
        for path, pkg in list(packages_to_be_documented):
            if pkg.name != start_with:
                wide_log(fmt("@!@{pf}Skipping@|  @{gf}---@| @{cf}{}@|").format(pkg.name))
                packages_to_be_documented.pop(0)
            else:
                break

    # Get the names of all packages to be built
    packages_to_be_documented_names = [p.name for _, p in packages_to_be_documented]
    packages_to_be_documented_deps_names = [p.name for _, p in packages_to_be_documented_deps]

    jobs = []

    # Construct jobs
    for pkg_path, pkg in all_packages:
        if pkg.name not in packages_to_be_documented_names:
            continue

        # Get actual execution deps
        deps = [
            p.name for _, p
            in get_cached_recursive_build_depends_in_workspace(pkg, packages_to_be_documented)
        ]

        jobs.append(create_package_job(context, pkg, pkg_path, deps))

    # Special job for post-job summary sphinx step.
    jobs.append(create_summary_job(context, package_names=packages_to_be_documented_names))

    # Queue for communicating status
    event_queue = Queue()

    try:
        # Spin up status output thread
        status_thread = ConsoleStatusController(
            'document',
            ['package', 'packages'],
            jobs,
            n_jobs,
            [pkg.name for _, pkg in context.packages],
            [p for p in context.whitelist],
            [p for p in context.blacklist],
            event_queue,
            show_notifications=not no_notify,
            show_active_status=not no_status,
            show_buffered_stdout=not quiet and not interleave_output,
            show_buffered_stderr=not interleave_output,
            show_live_stdout=interleave_output,
            show_live_stderr=interleave_output,
            show_stage_events=not quiet,
            show_full_summary=(summarize_build is True),
            pre_start_time=pre_start_time,
            active_status_rate=limit_status_rate)
        status_thread.start()

        # Block while running N jobs asynchronously
        try:
            all_succeeded = run_until_complete(execute_jobs(
                'document',
                jobs,
                None,
                event_queue,
                context.log_space_abs,
                max_toplevel_jobs=n_jobs,
                continue_on_failure=continue_on_failure,
                continue_without_deps=False))

        except Exception:
            status_thread.keep_running = False
            all_succeeded = False
            status_thread.join(1.0)
            wide_log(str(traceback.format_exc()))
        status_thread.join(1.0)

        return 0 if all_succeeded else 1

    except KeyboardInterrupt:
        wide_log("[document] Interrupted by user!")
        event_queue.put(None)

        return 130  # 128 + SIGINT, the conventional exit code for Ctrl-C
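
document_workspace treats the AttributeError raised while re-sorting as the symptom of a dependency cycle, whereas Example #5 below inspects the ordering result directly. A short sketch of the latter convention, assuming catkin_pkg's find_packages and topological_order_packages:

from catkin_pkg.packages import find_packages
from catkin_pkg.topological_order import topological_order_packages

# Order all packages under ./src by their build dependencies.
ordered_packages = topological_order_packages(find_packages('src'))

# Convention used in Example #5: a circular dependency appears as a final
# entry whose path element is None, rather than as an exception.
if ordered_packages and ordered_packages[-1][0] is None:
    raise SystemExit("Circular dependency detected in: {}".format(
        ", ".join(str(n) for n in ordered_packages[-1][1:])))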
Example #4
def build_isolated_workspace(
    context,
    packages=None,
    start_with=None,
    no_deps=False,
    unbuilt=False,
    n_jobs=None,
    force_cmake=False,
    pre_clean=False,
    force_color=False,
    quiet=False,
    interleave_output=False,
    no_status=False,
    limit_status_rate=10.0,
    lock_install=False,
    no_notify=False,
    continue_on_failure=False,
    summarize_build=None,
):
    """Builds a catkin workspace in isolation

    This function will find all of the packages in the source space, start some
    executors, feed them packages to build based on dependencies and topological
    ordering, and then monitor the executors' output, handling the logging,
    starting, failing, and finishing of package builds, and shutting the
    executors down when appropriate.

    :param context: context in which to build the catkin workspace
    :type context: :py:class:`catkin_tools.verbs.catkin_build.context.Context`
    :param packages: list of packages to build; by default their dependencies will also be built
    :type packages: list
    :param start_with: package to start with, skipping all packages which precede it in the topological order
    :type start_with: str
    :param no_deps: If True, the dependencies of packages will not be built first
    :type no_deps: bool
    :param n_jobs: number of parallel package build jobs
    :type n_jobs: int
    :param force_cmake: forces invocation of CMake if True, default is False
    :type force_cmake: bool
    :param force_color: forces colored output even if terminal does not support it
    :type force_color: bool
    :param quiet: suppresses the output of commands unless there is an error
    :type quiet: bool
    :param interleave_output: prints the output of commands as they are received
    :type interleave_output: bool
    :param no_status: disables status bar
    :type no_status: bool
    :param limit_status_rate: rate to which status updates are limited; 0 places no limit
    :type limit_status_rate: float
    :param lock_install: causes executors to synchronize on access of install commands
    :type lock_install: bool
    :param no_notify: suppresses system notifications
    :type no_notify: bool
    :param continue_on_failure: do not stop building other jobs on error
    :type continue_on_failure: bool
    :param summarize_build: if True, summarizes the build at the end; if None, the build is summarized only when
        continue_on_failure is True and the build fails; if False, it is never summarized.
    :type summarize_build: bool

    :raises: SystemExit if buildspace is a file or no packages were found in the source space
        or if the provided options are invalid
    """
    pre_start_time = time.time()

    # Assert that the limit_status_rate is valid
    if limit_status_rate < 0:
        sys.exit(
            "[build] @!@{rf}Error:@| The value of --status-rate must be greater than or equal to zero."
        )

    # Declare a buildspace marker describing the build config for error checking
    buildspace_marker_data = {
        'workspace': context.workspace,
        'profile': context.profile,
        'install': context.install,
        'install_space': context.install_space_abs,
        'devel_space': context.devel_space_abs,
        'source_space': context.source_space_abs
    }

    # Check build config
    if os.path.exists(
            os.path.join(context.build_space_abs, BUILDSPACE_MARKER_FILE)):
        with open(os.path.join(
                context.build_space_abs,
                BUILDSPACE_MARKER_FILE)) as buildspace_marker_file:
            existing_buildspace_marker_data = yaml.safe_load(
                buildspace_marker_file)
            misconfig_lines = ''
            for (k, v) in existing_buildspace_marker_data.items():
                new_v = buildspace_marker_data.get(k, None)
                if new_v != v:
                    misconfig_lines += (
                        '\n - %s: %s (stored) is not %s (commanded)' %
                        (k, v, new_v))
            if len(misconfig_lines) > 0:
                sys.exit(
                    clr("\n@{rf}Error:@| Attempting to build a catkin workspace using build space: "
                        "\"%s\" but that build space's most recent configuration "
                        "differs from the commanded one in ways which will cause "
                        "problems. Fix the following options or use @{yf}`catkin "
                        "clean -b`@| to remove the build space: %s" %
                        (context.build_space_abs, misconfig_lines)))

    # Summarize the context
    summary_notes = []
    if force_cmake:
        summary_notes += [
            clr("@!@{cf}NOTE:@| Forcing CMake to run for each package.")
        ]
    log(context.summary(summary_notes))

    # Make sure there is a build folder and it is not a file
    if os.path.exists(context.build_space_abs):
        if os.path.isfile(context.build_space_abs):
            sys.exit(
                clr("[build] @{rf}Error:@| " +
                    "Build space '{0}' exists but is a file and not a folder.".
                    format(context.build_space_abs)))
    # If it doesn't exist, create it
    else:
        log("[build] Creating build space: '{0}'".format(
            context.build_space_abs))
        os.makedirs(context.build_space_abs)

    # Write the current build config for config error checking
    with open(os.path.join(context.build_space_abs, BUILDSPACE_MARKER_FILE),
              'w') as buildspace_marker_file:
        buildspace_marker_file.write(
            yaml.dump(buildspace_marker_data, default_flow_style=False))

    # Get all the packages in the context source space
    # Suppress warnings since this is a utility function
    try:
        workspace_packages = find_packages(context.source_space_abs,
                                           exclude_subspaces=True,
                                           warnings=[])
    except InvalidPackage as ex:
        sys.exit(
            clr("@{rf}Error:@| The file %s is an invalid package.xml file."
                " See below for details:\n\n%s" % (ex.package_path, ex.msg)))

    # Get packages which have not been built yet
    built_packages, unbuilt_pkgs = get_built_unbuilt_packages(
        context, workspace_packages)

    # Handle unbuilt packages
    if unbuilt:
        # Check if there are any unbuilt
        if len(unbuilt_pkgs) > 0:
            # Add the unbuilt packages
            packages.extend(list(unbuilt_pkgs))
        else:
            log("[build] No unbuilt packages to be built.")
            return

    # If no_deps is given, ensure packages to build are provided
    if no_deps and packages is None:
        log(
            clr("[build] @!@{rf}Error:@| With no_deps, you must specify packages to build."
                ))
        return

    # Find list of packages in the workspace
    packages_to_be_built, packages_to_be_built_deps, all_packages = determine_packages_to_be_built(
        packages, context, workspace_packages)

    if not no_deps:
        # Extend packages to be built to include their deps
        packages_to_be_built.extend(packages_to_be_built_deps)

    # Also re-sort
    try:
        packages_to_be_built = topological_order_packages(
            dict(packages_to_be_built))
    except AttributeError:
        log(
            clr("[build] @!@{rf}Error:@| The workspace packages have a circular "
                "dependency, and cannot be built. Please run `catkin list "
                "--deps` to determine the problematic package(s)."))
        return

    # Check the number of packages to be built
    if len(packages_to_be_built) == 0:
        log(clr('[build] No packages to be built.'))

    # Assert start_with package is in the workspace
    verify_start_with_option(start_with, packages, all_packages,
                             packages_to_be_built + packages_to_be_built_deps)

    # Populate .catkin file if we're not installing
    # NOTE: This is done to prevent the Catkin CMake code from doing it,
    # which isn't parallel-safe. Catkin CMake only modifies this file if
    # its package source path isn't found.
    if not context.install:
        dot_catkin_file_path = os.path.join(context.devel_space_abs, '.catkin')
        # If the file exists, get the current paths
        if os.path.exists(dot_catkin_file_path):
            dot_catkin_paths = open(dot_catkin_file_path,
                                    'r').read().split(';')
        else:
            dot_catkin_paths = []

        # Update the list with the new packages (in topological order)
        packages_to_be_built_paths = [
            os.path.join(context.source_space_abs, path)
            for path, pkg in packages_to_be_built
        ]

        new_dot_catkin_paths = [
            os.path.join(context.source_space_abs, path) for path in [
                os.path.join(context.source_space_abs, path)
                for path, pkg in all_packages
            ] if path in dot_catkin_paths or path in packages_to_be_built_paths
        ]

        # Write the new file if it's different, otherwise, leave it alone
        if dot_catkin_paths == new_dot_catkin_paths:
            wide_log("[build] Package table is up to date.")
        else:
            wide_log("[build] Updating package table.")
            open(dot_catkin_file_path,
                 'w').write(';'.join(new_dot_catkin_paths))

    # Remove packages before start_with
    if start_with is not None:
        for path, pkg in list(packages_to_be_built):
            if pkg.name != start_with:
                wide_log(
                    clr("@!@{pf}Skipping@|  @{gf}---@| @{cf}{}@|").format(
                        pkg.name))
                packages_to_be_built.pop(0)
            else:
                break

    # Get the names of all packages to be built
    packages_to_be_built_names = [p.name for _, p in packages_to_be_built]
    packages_to_be_built_deps_names = [
        p.name for _, p in packages_to_be_built_deps
    ]

    # Generate prebuild and prebuild clean jobs, if necessary
    prebuild_jobs = {}
    setup_util_present = os.path.exists(
        os.path.join(context.devel_space_abs, '_setup_util.py'))
    catkin_present = 'catkin' in (packages_to_be_built_names +
                                  packages_to_be_built_deps_names)
    catkin_built = 'catkin' in built_packages
    prebuild_built = 'catkin_tools_prebuild' in built_packages

    # Handle the prebuild jobs if the develspace is linked
    prebuild_pkg_deps = []
    if context.link_devel:
        prebuild_pkg = None

        # Construct a dictionary to lookup catkin package by name
        pkg_dict = dict([(pkg.name, (pth, pkg)) for pth, pkg in all_packages])

        if setup_util_present:
            # Setup util is already there, determine if it needs to be
            # regenerated
            if catkin_built:
                if catkin_present:
                    prebuild_pkg_path, prebuild_pkg = pkg_dict['catkin']
            elif prebuild_built:
                if catkin_present:
                    # TODO: Clean prebuild package
                    ct_prebuild_pkg_path = get_prebuild_package(
                        context.build_space_abs, context.devel_space_abs,
                        force_cmake)
                    ct_prebuild_pkg = parse_package(ct_prebuild_pkg_path)

                    prebuild_jobs[
                        'catkin_tools_prebuild'] = create_catkin_clean_job(
                            context,
                            ct_prebuild_pkg,
                            ct_prebuild_pkg_path,
                            dependencies=[],
                            dry_run=False,
                            clean_build=True,
                            clean_devel=True,
                            clean_install=True)

                    # TODO: Build catkin package
                    prebuild_pkg_path, prebuild_pkg = pkg_dict['catkin']
                    prebuild_pkg_deps.append('catkin_tools_prebuild')
            else:
                # How did these get here??
                log("Warning: devel space setup files have an unknown origin.")
        else:
            # Setup util needs to be generated
            if catkin_built or prebuild_built:
                log("Warning: generated devel space setup files have been deleted."
                    )

            if catkin_present:
                # Build catkin package
                prebuild_pkg_path, prebuild_pkg = pkg_dict['catkin']
            else:
                # Generate and build an explicit prebuild package
                prebuild_pkg_path = get_prebuild_package(
                    context.build_space_abs, context.devel_space_abs,
                    force_cmake)
                prebuild_pkg = parse_package(prebuild_pkg_path)

        if prebuild_pkg is not None:
            # Create the prebuild job
            prebuild_job = create_catkin_build_job(
                context,
                prebuild_pkg,
                prebuild_pkg_path,
                dependencies=prebuild_pkg_deps,
                force_cmake=force_cmake,
                pre_clean=pre_clean,
                prebuild=True)

            # Add the prebuild job
            prebuild_jobs[prebuild_job.jid] = prebuild_job

    # Remove prebuild jobs from normal job list
    for prebuild_jid, prebuild_job in prebuild_jobs.items():
        if prebuild_jid in packages_to_be_built_names:
            packages_to_be_built_names.remove(prebuild_jid)

    # Initial jobs list is just the prebuild jobs
    jobs = [] + list(prebuild_jobs.values())

    # Get all build type plugins
    build_job_creators = {
        ep.name: ep.load()['create_build_job']
        for ep in pkg_resources.iter_entry_points(group='catkin_tools.jobs')
    }

    # It's a problem if there aren't any build types available
    if len(build_job_creators) == 0:
        sys.exit(
            'Error: No build types available. Please check your catkin_tools installation.'
        )

    # Construct jobs
    for pkg_path, pkg in all_packages:
        if pkg.name not in packages_to_be_built_names:
            continue

        # Get actual build deps
        deps = [
            p.name for _, p in get_cached_recursive_build_depends_in_workspace(
                pkg, packages_to_be_built) if p.name not in prebuild_jobs
        ]
        # All jobs depend on the prebuild jobs if they're defined
        if not no_deps:
            for j in prebuild_jobs.values():
                deps.append(j.jid)

        # Determine the job parameters
        build_job_kwargs = dict(context=context,
                                package=pkg,
                                package_path=pkg_path,
                                dependencies=deps,
                                force_cmake=force_cmake,
                                pre_clean=pre_clean)

        # Create the job based on the build type
        build_type = pkg.get_build_type()

        if build_type in build_job_creators:
            jobs.append(build_job_creators[build_type](**build_job_kwargs))
        else:
            wide_log(
                clr("[build] @!@{yf}Warning:@| Skipping package `{}` because it "
                    "has an unsupported package build type: `{}`").format(
                        pkg.name, build_type))

            wide_log(clr("[build] Note: Available build types:"))
            for bt_name in build_job_creators.keys():
                wide_log(clr("[build]  - `{}`".format(bt_name)))

    # Queue for communicating status
    event_queue = Queue()

    try:
        # Spin up status output thread
        status_thread = ConsoleStatusController(
            'build', ['package', 'packages'],
            jobs,
            n_jobs, [pkg.name for _, pkg in context.packages],
            [p for p in context.whitelist], [p for p in context.blacklist],
            event_queue,
            show_notifications=not no_notify,
            show_active_status=not no_status,
            show_buffered_stdout=not quiet and not interleave_output,
            show_buffered_stderr=not interleave_output,
            show_live_stdout=interleave_output,
            show_live_stderr=interleave_output,
            show_stage_events=not quiet,
            show_full_summary=(summarize_build is True),
            pre_start_time=pre_start_time,
            active_status_rate=limit_status_rate)
        status_thread.start()

        # Initialize locks
        locks = {
            'installspace': asyncio.Lock() if lock_install else FakeLock()
        }

        # Block while running N jobs asynchronously
        try:
            all_succeeded = run_until_complete(
                execute_jobs('build',
                             jobs,
                             locks,
                             event_queue,
                             context.log_space_abs,
                             max_toplevel_jobs=n_jobs,
                             continue_on_failure=continue_on_failure,
                             continue_without_deps=False))
        except Exception:
            status_thread.keep_running = False
            all_succeeded = False
            status_thread.join(1.0)
            wide_log(str(traceback.format_exc()))

        status_thread.join(1.0)

        # Warn user about new packages
        now_built_packages, now_unbuilt_pkgs = get_built_unbuilt_packages(
            context, workspace_packages)
        new_pkgs = [p for p in unbuilt_pkgs if p not in now_unbuilt_pkgs]
        if len(new_pkgs) > 0:
            log(
                clr("[build] @/@!Note:@| @/Workspace packages have changed, "
                    "please re-source setup files to use them.@|"))

        if all_succeeded:
            # Create isolated devel setup if necessary
            if context.isolate_devel:
                if not context.install:
                    _create_unmerged_devel_setup(context, now_unbuilt_pkgs)
                else:
                    _create_unmerged_devel_setup_for_install(context)
            return 0
        else:
            return 1

    except KeyboardInterrupt:
        wide_log("[build] Interrupted by user!")
        event_queue.put(None)

        return 130  # 128 + SIGINT, the conventional exit code for Ctrl-C (not an errno value)
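
Both build examples maintain the devel space's .catkin package table by hand because the Catkin CMake code that would otherwise update it is not parallel-safe. The file is a single semicolon-separated list of package source directories; a minimal sketch of reading and extending it (all paths here are illustrative):

import os

devel_space = '/tmp/ws/devel'  # illustrative devel space
os.makedirs(devel_space, exist_ok=True)
dot_catkin_file = os.path.join(devel_space, '.catkin')

# Read the current table, if any: one line of ';'-separated source dirs.
if os.path.exists(dot_catkin_file):
    with open(dot_catkin_file) as f:
        paths = f.read().split(';')
else:
    paths = []

# Record a newly built package's source directory, keeping entries unique.
new_path = '/tmp/ws/src/my_pkg'  # illustrative package source path
if new_path not in paths:
    paths.append(new_path)
    with open(dot_catkin_file, 'w') as f:
        f.write(';'.join(paths))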
Example #5
def test_workspace(
    context,
    packages=None,
    n_jobs=None,
    quiet=False,
    interleave_output=False,
    no_status=False,
    limit_status_rate=10.0,
    no_notify=False,
    continue_on_failure=False,
    summarize_build=False,
    catkin_test_target='run_tests',
    cmake_test_target='test',
):
    """Tests a catkin workspace

    :param context: context in which to test the catkin workspace
    :type context: :py:class:`catkin_tools.context.Context`
    :param packages: list of packages to test
    :type packages: list
    :param n_jobs: number of parallel package test jobs
    :type n_jobs: int
    :param quiet: suppresses verbose build or test information
    :type quiet: bool
    :param interleave_output: prints the output of commands as they are received
    :type interleave_output: bool
    :param no_status: suppresses the bottom status line
    :type no_status: bool
    :param limit_status_rate: rate to which status updates are limited; 0 places no limit
    :type limit_status_rate: float
    :param no_notify: suppresses system notifications
    :type no_notify: bool
    :param continue_on_failure: do not stop testing other packages on error
    :type continue_on_failure: bool
    :param summarize_build: summarizes the build at the end
    :type summarize_build: bool
    :param catkin_test_target: make target for tests in catkin packages
    :type catkin_test_target: str
    :param cmake_test_target: make target for tests in cmake packages
    :type cmake_test_target: str
    """
    pre_start_time = time.time()

    # Assert that the limit_status_rate is valid
    if limit_status_rate < 0:
        sys.exit(
            "[test] @!@{rf}Error:@| The value of --limit-status-rate must be greater than or equal to zero."
        )

    # Get all the packages in the context source space
    # Suppress warnings since this is a utility function
    try:
        workspace_packages = find_packages(context.source_space_abs,
                                           exclude_subspaces=True,
                                           warnings=[])
    except InvalidPackage as ex:
        sys.exit(
            clr("@{rf}Error:@| The file {} is an invalid package.xml file."
                " See below for details:\n\n{}").format(
                    ex.package_path, ex.msg))

    # Get all build type plugins
    test_job_creators = {
        ep.name: ep.load()['create_test_job']
        for ep in pkg_resources.iter_entry_points(group='catkin_tools.jobs')
    }

    # It's a problem if there aren't any build types available
    if len(test_job_creators) == 0:
        sys.exit(
            'Error: No build types available. Please check your catkin_tools installation.'
        )

    # Get list of packages to test
    ordered_packages = topological_order_packages(workspace_packages)

    # Check if topological_order_packages determined any circular dependencies; if so, print an error and fail.
    # If this is the case, the last entry of ordered_packages is a tuple that starts with None.
    if ordered_packages and ordered_packages[-1][0] is None:
        guilty_packages = ", ".join(ordered_packages[-1][1:])
        sys.exit(
            "[test] Circular dependency detected in the following packages: {}"
            .format(guilty_packages))

    workspace_packages = dict([(pkg.name, (path, pkg))
                               for path, pkg in ordered_packages])
    packages_to_test = []
    if packages:
        for package in packages:
            if package not in workspace_packages:
                # Try whether package is a pattern and matches
                glob_packages = expand_glob_package(package,
                                                    workspace_packages)
                if len(glob_packages) > 0:
                    packages.extend(glob_packages)
                else:
                    sys.exit(
                        "[test] Given packages '{}' is not in the workspace "
                        "and pattern does not match any package".format(
                            package))
        for pkg_path, package in ordered_packages:
            if package.name in packages:
                packages_to_test.append((pkg_path, package))
    else:
        # Only use buildlist when no other packages are specified
        if len(context.buildlist) > 0:
            # Expand glob patterns in buildlist
            buildlist = []
            for buildlisted_package in context.buildlist:
                buildlist.extend(
                    expand_glob_package(buildlisted_package,
                                        workspace_packages))
            packages_to_test = [
                p for p in ordered_packages if (p[1].name in buildlist)
            ]
        else:
            packages_to_test = ordered_packages

    # Filter packages on skiplist
    if len(context.skiplist) > 0:
        # Expand glob patterns in skiplist
        skiplist = []
        for skiplisted_package in context.skiplist:
            skiplist.extend(
                expand_glob_package(skiplisted_package, workspace_packages))
        # Apply skiplist to packages and dependencies
        packages_to_test = [
            (path, pkg) for path, pkg in packages_to_test
            if (pkg.name not in skiplist or pkg.name in packages)
        ]

    # Check if all packages to test are already built
    built_packages = set([
        pkg.name
        for (path, pkg) in find_packages(context.package_metadata_path(),
                                         warnings=[]).items()
    ])

    packages_to_test_names = set(pkg.name for path, pkg in packages_to_test)
    if not built_packages.issuperset(packages_to_test_names):
        wide_log(
            clr("@{rf}Error: Packages have to be built before they can be tested.@|"
                ))
        wide_log(clr("The following requested packages are not built yet:"))
        for package_name in packages_to_test_names.difference(built_packages):
            wide_log(' - ' + package_name)
        sys.exit(1)

    # Construct jobs
    jobs = []
    for pkg_path, pkg in packages_to_test:
        # Determine the job parameters
        test_job_kwargs = dict(context=context,
                               package=pkg,
                               package_path=pkg_path,
                               verbose=not quiet)

        # Create the job based on the build type
        build_type = pkg.get_build_type()

        if build_type == 'catkin':
            test_job_kwargs['test_target'] = catkin_test_target
        elif build_type == 'cmake':
            test_job_kwargs['test_target'] = cmake_test_target

        if build_type in test_job_creators:
            jobs.append(test_job_creators[build_type](**test_job_kwargs))

    # Queue for communicating status
    event_queue = Queue()

    # Initialize job server
    job_server.initialize(
        max_jobs=n_jobs,
        max_load=None,
        gnu_make_enabled=context.use_internal_make_jobserver,
    )

    try:
        # Spin up status output thread
        status_thread = ConsoleStatusController(
            'test',
            ['package', 'packages'],
            jobs,
            n_jobs,
            [pkg.name for path, pkg in packages_to_test],
            [p for p in context.buildlist],
            [p for p in context.skiplist],
            event_queue,
            show_notifications=not no_notify,
            show_active_status=not no_status,
            show_buffered_stdout=not interleave_output,
            show_buffered_stderr=not interleave_output,
            show_live_stdout=interleave_output,
            show_live_stderr=interleave_output,
            show_full_summary=summarize_build,
            show_stage_events=not quiet,
            pre_start_time=pre_start_time,
            active_status_rate=limit_status_rate,
        )

        status_thread.start()

        locks = {}

        # Block while running N jobs asynchronously
        try:
            all_succeeded = run_until_complete(
                execute_jobs('test',
                             jobs,
                             locks,
                             event_queue,
                             context.log_space_abs,
                             max_toplevel_jobs=n_jobs,
                             continue_on_failure=continue_on_failure,
                             continue_without_deps=False))
        except Exception:
            status_thread.keep_running = False
            all_succeeded = False
            status_thread.join(1.0)
            wide_log(str(traceback.format_exc()))

        status_thread.join(1.0)

        if all_succeeded:
            return 0
        else:
            return 1

    except KeyboardInterrupt:
        wide_log("[test] Interrupted by user!")
        event_queue.put(None)

        return 130
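
expand_glob_package lets the package arguments and skiplist/buildlist entries above be shell-style patterns. Its implementation is not shown in these examples; a plausible fnmatch-based sketch, assuming it takes the workspace package dict and returns the matching names (hypothetical helper):

import fnmatch


def expand_glob_package(pattern, workspace_packages):
    """Hypothetical sketch: expand a shell-style pattern such as
    'my_robot_*' into the names of matching workspace packages.

    :param workspace_packages: dict mapping package name -> (path, package)
    """
    return [name for name in workspace_packages
            if fnmatch.fnmatch(name, pattern)]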
Example #6
def test_workspace(context,
                   packages=None,
                   tests=None,
                   list_tests=False,
                   start_with=None,
                   n_jobs=None,
                   force_color=False,
                   quiet=False,
                   interleave_output=False,
                   no_status=False,
                   limit_status_rate=10.0,
                   no_notify=False,
                   summarize_build=None):
    pre_start_time = time.time()

    # Get our list of packages based on what's in the source space and our
    # command line switches.
    packages_to_test = get_packages_to_test(context, packages)
    if len(packages_to_test) == 0:
        log(fmt('[test] No tests in the available packages.'))

    # Get the full list of tests available in those packages, as configured.
    packages_tests = get_packages_tests(context, packages_to_test)
    print(packages_tests)

    if list_tests:
        # Don't build or run, just list available targets.
        log(fmt('[test] Tests available in workspace packages:'))
        for package, tests in sorted(packages_tests):
            log(fmt('[test] * %s' % package.name))
            for test in sorted(tests):
                log(fmt('[test]   - %s' % test))
        return 0

    else:
        jobs = []

        # Construct jobs for running tests.
        for package, package_tests in packages_tests:
            jobs.append(create_package_job(context, package, package_tests))
        package_names = [p[0].name for p in packages_tests]
        jobs.append(create_results_check_job(context, package_names))

        # Queue for communicating status.
        event_queue = Queue()

        try:
            # Spin up status output thread.
            status_thread = ConsoleStatusController(
                'test', ['package', 'packages'],
                jobs,
                n_jobs, [pkg.name for _, pkg in context.packages],
                [p for p in context.whitelist], [p for p in context.blacklist],
                event_queue,
                show_notifications=not no_notify,
                show_active_status=not no_status,
                show_buffered_stdout=not quiet and not interleave_output,
                show_buffered_stderr=not interleave_output,
                show_live_stdout=interleave_output,
                show_live_stderr=interleave_output,
                show_stage_events=not quiet,
                show_full_summary=(summarize_build is True),
                pre_start_time=pre_start_time,
                active_status_rate=limit_status_rate)
            status_thread.start()

            # Block while running N jobs asynchronously
            try:
                all_succeeded = run_until_complete(
                    execute_jobs('test',
                                 jobs,
                                 None,
                                 event_queue,
                                 context.log_space_abs,
                                 max_toplevel_jobs=n_jobs))

            except Exception:
                status_thread.keep_running = False
                all_succeeded = False
                status_thread.join(1.0)
                wide_log(str(traceback.format_exc()))
            status_thread.join(1.0)

            return 0 if all_succeeded else 1

        except KeyboardInterrupt:
            wide_log("[test] Interrupted by user!")
            event_queue.put(None)

            return 130  # 128 + SIGINT, the conventional exit code for Ctrl-C
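
Every example drives execute_jobs through the same run_until_complete helper. In spirit it is a thin bridge onto the asyncio event loop; a minimal sketch under that assumption (the real helper in catkin_tools may do more, e.g. interrupt handling):

import asyncio


def run_until_complete(coroutine):
    # Drive a coroutine such as execute_jobs(...) to completion on the
    # event loop and return its result (True when all jobs succeeded).
    loop = asyncio.get_event_loop()
    return loop.run_until_complete(coroutine)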
Example #7
File: clean.py  Project: DLu/catkin_tools
def clean_packages(
        context,
        names_of_packages_to_be_cleaned,
        clean_dependents,
        verbose,
        dry_run):

    pre_start_time = time.time()

    # Update the names of packages to be cleaned with dependents
    packages_to_be_cleaned = determine_packages_to_be_cleaned(
        context,
        clean_dependents,
        names_of_packages_to_be_cleaned)

    # print(packages_to_be_cleaned)
    # for path, pkg in packages_to_be_cleaned:
    # if os.path.exists(os.path.join(context.build_space_abs, pkg.name)):
    # print("[clean] Cleaning package: %s" % pkg.name)

    # Construct jobs
    jobs = []
    for path, pkg in packages_to_be_cleaned:

        # Get all build type plugins
        clean_job_creators = {
            ep.name: ep.load()['create_clean_job']
            for ep in pkg_resources.iter_entry_points(group='catkin_tools.jobs')
        }

        # It's a problem if there aren't any build types available
        if len(clean_job_creators) == 0:
            sys.exit('Error: No build types available. Please check your catkin_tools installation.')

        # Determine the job parameters
        clean_job_kwargs = dict(
            context=context,
            package=pkg,
            package_path=path,
            dependencies=[],  # Unused because clean jobs are not parallelized
            dry_run=dry_run,
            clean_build=True,
            clean_devel=True,
            clean_install=True)

        # Create the job based on the build type
        build_type = get_build_type(pkg)

        if build_type in clean_job_creators:
            jobs.append(clean_job_creators[build_type](**clean_job_kwargs))

    if len(jobs) == 0:
        print("[clean] There are no products from the given packages to clean.")
        return False

    # Queue for communicating status
    event_queue = Queue()

    # Spin up status output thread
    status_thread = ConsoleStatusController(
        'clean',
        ['package', 'packages'],
        jobs,
        1,
        [pkg.name for _, pkg in context.packages],
        [p for p in context.whitelist],
        [p for p in context.blacklist],
        event_queue,
        show_notifications=False,
        show_active_status=False,
        show_buffered_stdout=verbose or False,
        show_buffered_stderr=True,
        show_live_stdout=False,
        show_live_stderr=False,
        show_stage_events=False,
        show_full_summary=False,
        pre_start_time=pre_start_time,
        active_status_rate=10.0)
    status_thread.start()

    # Initialize locks (none need to be configured here)
    locks = {
    }

    # Block while running N jobs asynchronously
    try:
        ej = execute_jobs(
            'clean',
            jobs,
            locks,
            event_queue,
            context.log_space_abs,
            max_toplevel_jobs=1,
            continue_on_failure=True,
            continue_without_deps=False)
        all_succeeded = run_until_complete(ej)
    except Exception:
        status_thread.keep_running = False
        all_succeeded = False
        status_thread.join(1.0)
        wide_log(str(traceback.format_exc()))

    status_thread.join(1.0)

    return all_succeeded
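
A dry run is the safe way to exercise clean_packages; a hypothetical invocation, assuming a workspace Context obtained through catkin_tools' Context.load (argument values are illustrative):

from catkin_tools.context import Context

# Load the context for the enclosing workspace (normally driven by argparse).
context = Context.load()

# Report what would be removed for one package, without deleting anything.
clean_packages(
    context,
    names_of_packages_to_be_cleaned=['my_pkg'],  # illustrative package name
    clean_dependents=True,   # also clean packages that depend on my_pkg
    verbose=True,            # show buffered stdout from the clean jobs
    dry_run=True)            # report only; do not remove any files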