Example no. 1
def main(opts):
    # Context-aware args
    if opts.build_this or opts.start_with_this:
        # Determine the enclosing package
        try:
            this_package = find_enclosing_package()
        except InvalidPackage:
            this_package = None

        # Handle context-based package building
        if opts.build_this:
            if this_package:
                opts.packages += [this_package]
            else:
                sys.exit(
                    "catkin build: --this was specified, but this directory is not in a catkin package."
                )

        # If --start-with-this was specified, start the build with this package
        if opts.start_with_this:
            if this_package:
                opts.start_with = this_package
            else:
                sys.exit(
                    "catkin build: --this was specified, but this directory is not in a catkin package."
                )

    if opts.no_deps and not opts.packages:
        sys.exit("With --no-deps, you must specify packages to build.")

    if not opts.force_color and not is_tty(sys.stdout):
        set_color(False)

    # Load the context
    ctx = Context.Load(opts.workspace, opts.profile, opts)

    # Load the environment of the workspace to extend
    if ctx.extend_path is not None:
        try:
            load_resultspace_environment(ctx.extend_path)
        except IOError as exc:
            log(
                clr("@!@{rf}Error:@| Unable to extend workspace from \"%s\": %s"
                    % (ctx.extend_path, str(exc))))
            return 1

    # Display list and leave the filesystem untouched
    if opts.dry_run:
        dry_run(ctx, opts.packages, opts.no_deps, opts.start_with)
        return

    # Check if the context is valid before writing any metadata
    if not ctx.source_space_exists():
        print("catkin build: error: Unable to find source space `%s`" %
              ctx.source_space_abs)
        return 1

    # Always save the last context under the build verb
    update_metadata(ctx.workspace, ctx.profile, 'build', ctx.get_stored_dict())

    build_metadata = get_metadata(ctx.workspace, ctx.profile, 'build')
    if build_metadata.get('needs_force', False):
        opts.force_cmake = True
        update_metadata(ctx.workspace, ctx.profile, 'build',
                        {'needs_force': False})

    # Save the context as the configuration
    if opts.save_config:
        Context.Save(ctx)

    start = time.time()
    try:
        return build_isolated_workspace(
            ctx,
            packages=opts.packages,
            start_with=opts.start_with,
            no_deps=opts.no_deps,
            jobs=opts.parallel_jobs,
            force_cmake=opts.force_cmake,
            force_color=opts.force_color,
            quiet=not opts.verbose,
            interleave_output=opts.interleave_output,
            no_status=opts.no_status,
            limit_status_rate=opts.limit_status_rate,
            lock_install=not opts.no_install_lock,
            no_notify=opts.no_notify)
    finally:
        log("[build] Runtime: {0}".format(
            format_time_delta(time.time() - start)))
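A note on the `needs_force` handshake above: some other verb can flag the workspace so that the next build forces CMake to run, and the build verb clears the flag once it has consumed it. The sketch below illustrates one plausible backing for get_metadata/update_metadata as per-(workspace, profile, verb) dict persistence; the JSON-file layout is an assumption for illustration only, not catkin_tools' actual storage format.

import json
import os

def _metadata_path(workspace, profile, verb):
    # Hypothetical on-disk layout; the real storage may differ.
    return os.path.join(workspace, '.catkin_tools', profile, verb + '.json')

def get_metadata(workspace, profile, verb):
    path = _metadata_path(workspace, profile, verb)
    if not os.path.exists(path):
        return {}
    with open(path) as f:
        return json.load(f)

def update_metadata(workspace, profile, verb, new_data):
    # Merge the new keys into whatever was stored before.
    metadata = get_metadata(workspace, profile, verb)
    metadata.update(new_data)
    path = _metadata_path(workspace, profile, verb)
    os.makedirs(os.path.dirname(path), exist_ok=True)
    with open(path, 'w') as f:
        json.dump(metadata, f)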
Example no. 2
def catkin_main(sysargs):
    # Initialize config
    try:
        initialize_config()
    except RuntimeError as exc:
        sys.exit("Failed to initialize config: {0}".format(exc))

    # Create a top level parser
    parser = argparse.ArgumentParser(
        description="catkin command", formatter_class=argparse.RawDescriptionHelpFormatter)
    add = parser.add_argument
    add('-a', '--list-aliases', action="store_true", default=False,
        help="Lists the current verb aliases and then quits; all other arguments are ignored")
    add('--test-colors', action='store_true', default=False,
        help="Prints a color test pattern to the screen and then quits; all other arguments are ignored")
    add('--version', action='store_true', default=False,
        help="Prints the catkin_tools version.")
    color_control_group = parser.add_mutually_exclusive_group()
    add = color_control_group.add_argument
    add('--force-color', action='store_true', default=False,
        help='Forces catkin to output in color, even when the terminal does not appear to support it.')
    add('--no-color', action='store_true', default=False,
        help='Forces catkin to not use color in the output, regardless of the detected terminal type.')

    # Deprecated, moved to `catkin locate --shell-verbs`
    add('--locate-extra-shell-verbs', action='store_true', help=argparse.SUPPRESS)

    # Generate a list of verbs available
    verbs = list_verbs()

    # Create the subparsers for each verb and collect the argument preprocessors
    argument_preprocessors = create_subparsers(parser, verbs)

    # Get verb aliases
    verb_aliases = get_verb_aliases()

    # Setup sysargs
    sysargs = sys.argv[1:] if sysargs is None else sysargs

    # Get colors config
    no_color = False
    force_color = os.environ.get('CATKIN_TOOLS_FORCE_COLOR', False)
    for arg in sysargs:
        if arg == '--no-color':
            no_color = True
        if arg == '--force-color':
            force_color = True

    if no_color or (not force_color and not is_tty(sys.stdout)):
        set_color(False)

    # Check for version
    if '--version' in sysargs:
        print('catkin_tools {} (C) 2014-{} Open Source Robotics Foundation'.format(
            pkg_resources.get_distribution('catkin_tools').version,
            date.today().year)
        )
        print('catkin_tools is released under the Apache License,'
              ' Version 2.0 (http://www.apache.org/licenses/LICENSE-2.0)')
        print('---')
        print('Using Python {}'.format(''.join(sys.version.split('\n'))))
        sys.exit(0)

    # Deprecated option
    if '--locate-extra-shell-verbs' in sysargs:
        print('Please use `catkin locate --shell-verbs` instead of `catkin --locate-extra-shell-verbs`',
              file=sys.stderr)
        sys.exit(0)

    # Check for --test-colors
    for arg in sysargs:
        if arg == '--test-colors':
            test_colors()
            sys.exit(0)
        if not arg.startswith('-'):
            break

    # Check for --list-aliases
    for arg in sysargs:
        if arg == '--list-aliases' or arg == '-a':
            for alias in sorted(list(verb_aliases.keys())):
                print("{0}: {1}".format(alias, ' '.join([cmd_quote(aarg) for aarg in verb_aliases[alias]])))
            sys.exit(0)
        if not arg.startswith('-'):
            break

    # Do verb alias expansion
    sysargs = expand_verb_aliases(sysargs, verb_aliases)

    # Determine the verb, splitting arguments into pre and post verb
    verb = None
    pre_verb_args = []
    post_verb_args = []
    for index, arg in enumerate(sysargs):
        # If the arg does not start with a `-` then it is a positional argument
        # The first positional argument must be the verb
        if not arg.startswith('-'):
            verb = arg
            post_verb_args = sysargs[index + 1:]
            break
        # If `-h` or `--help` comes before the verb, let argparse print help and exit
        if arg in ['-h', '--help']:
            parser.parse_args(sysargs)
        # Otherwise it is a pre-verb option
        pre_verb_args.append(arg)

    # Error on no verb provided
    if verb is None:
        print(parser.format_usage())
        sys.exit("Error: No verb provided.")
    # Error on unknown verb provided
    if verb not in verbs:
        print(parser.format_usage())
        sys.exit("Error: Unknown verb '{0}' provided.".format(verb))

    # First allow the verb's argument preprocessor to strip any args
    # and return any "extra" information it wants as a dict
    processed_post_verb_args, extras = argument_preprocessors[verb](post_verb_args)
    # Then allow argparse to process the left over post-verb arguments along
    # with the pre-verb arguments and the verb itself
    args = parser.parse_args(pre_verb_args + [verb] + processed_post_verb_args)
    # Extend the argparse result with the extras from the preprocessor
    for key, value in extras.items():
        setattr(args, key, value)

    # Finally call the subparser's main function with the processed args
    # and the extras which the preprocessor may have returned
    sys.exit(args.main(args) or 0)
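The pre-/post-verb split above is worth isolating: the first token that does not start with '-' is taken as the verb, everything before it belongs to the top-level parser, and everything after it belongs to the verb's subparser. A self-contained sketch (the function name is mine, not catkin_tools'):

def split_on_verb(sysargs):
    """Return (pre_verb_args, verb, post_verb_args); verb is None if absent."""
    for index, arg in enumerate(sysargs):
        if not arg.startswith('-'):
            # The first positional argument is taken as the verb.
            return sysargs[:index], arg, sysargs[index + 1:]
    return sysargs, None, []

# Example: split_on_verb(['--no-color', 'build', '--this'])
# -> (['--no-color'], 'build', ['--this'])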
Example no. 3
def main(opts):

    # Context-aware args
    if opts.build_this or opts.start_with_this:
        # Determine the enclosing package
        try:
            this_package = find_enclosing_package()
        except InvalidPackage:
            this_package = None

        # Handle context-based package building
        if opts.build_this:
            if this_package:
                opts.packages += [this_package]
            else:
                sys.exit("catkin build: --this was specified, but this directory is not contained by a catkin package.")

        # If --start-with-this was specified, start the build with this package
        if opts.start_with_this:
            if this_package:
                opts.start_with = this_package
            else:
                sys.exit("catkin build: --this was specified, but this directory is not contained by a catkin package.")

    if opts.no_deps and not opts.packages:
        sys.exit("With --no-deps, you must specify packages to build.")

    if not opts.force_color and not is_tty(sys.stdout):
        set_color(False)

    # Load the context
    ctx = Context.Load(opts.workspace, opts.profile, opts)

    # Load the environment of the workspace to extend
    if ctx.extend_path is not None:
        try:
            load_resultspace_environment(ctx.extend_path)
        except IOError as exc:
            log(clr("@!@{rf}Error:@| Unable to extend workspace from \"%s\": %s" %
                    (ctx.extend_path, str(exc))))
            return 1

    # Display list and leave the filesystem untouched
    if opts.dry_run:
        dry_run(ctx, opts.packages, opts.no_deps, opts.start_with)
        return

    # Check if the context is valid before writing any metadata
    if not ctx.source_space_exists():
        print("catkin build: error: Unable to find source space `%s`" % ctx.source_space_abs)
        return 1

    # Always save the last context under the build verb
    update_metadata(ctx.workspace, ctx.profile, 'build', ctx.get_stored_dict())

    build_metadata = get_metadata(ctx.workspace, ctx.profile, 'build')
    if build_metadata.get('needs_force', False):
        opts.force_cmake = True
        update_metadata(ctx.workspace, ctx.profile, 'build', {'needs_force': False})

    # Save the context as the configuration
    if opts.save_config:
        Context.Save(ctx)

    start = time.time()
    try:
        return build_isolated_workspace(
            ctx,
            packages=opts.packages,
            start_with=opts.start_with,
            no_deps=opts.no_deps,
            jobs=opts.parallel_jobs,
            force_cmake=opts.force_cmake,
            force_color=opts.force_color,
            quiet=not opts.verbose,
            interleave_output=opts.interleave_output,
            no_status=opts.no_status,
            lock_install=not opts.no_install_lock,
            no_notify=opts.no_notify
        )
    finally:
        log("[build] Runtime: {0}".format(format_time_delta(time.time() - start)))
Example no. 4
def catkin_main(sysargs):
    # Initialize config
    try:
        initialize_config()
    except RuntimeError as exc:
        sys.exit("Failed to initialize config: {0}".format(exc))

    # Create a top level parser
    parser = argparse.ArgumentParser(
        description="catkin command", formatter_class=argparse.RawDescriptionHelpFormatter)
    add = parser.add_argument
    add('-a', '--list-aliases', action="store_true", default=False,
        help="Lists the current verb aliases and then quits; all other arguments are ignored")
    add('--test-colors', action='store_true', default=False,
        help="Prints a color test pattern to the screen and then quits; all other arguments are ignored")
    color_control_group = parser.add_mutually_exclusive_group()
    add = color_control_group.add_argument
    add('--force-color', action='store_true', default=False,
        help='Forces catkin to output in color, even when the terminal does not appear to support it.')
    add('--no-color', action='store_true', default=False,
        help='Forces catkin to not use color in the output, regardless of the detected terminal type.')
    add('--locate-extra-shell-verbs', action='store_true',
        help='Returns the full path of the file to source for extra shell verbs, then exits.')

    # Generate a list of verbs available
    verbs = list_verbs()

    # Create the subparsers for each verb and collect the argument preprocessors
    argument_preprocessors = create_subparsers(parser, verbs)

    # Get verb aliases
    verb_aliases = get_verb_aliases()

    # Setup sysargs
    sysargs = sys.argv[1:] if sysargs is None else sysargs

    # Get colors config
    no_color = False
    force_color = False
    for arg in sysargs:
        if arg == '--no-color':
            no_color = True
        if arg == '--force-color':
            force_color = True

    if no_color or (not force_color and not is_tty(sys.stdout)):
        set_color(False)

    # Check for --test-colors
    for arg in sysargs:
        if arg == '--test-colors':
            test_colors()
            sys.exit(0)
        if not arg.startswith('-'):
            break

    # Check for --list-aliases
    for arg in sysargs:
        if arg == '--list-aliases' or arg == '-a':
            for alias in sorted(list(verb_aliases.keys())):
                print("{0}: {1}".format(alias, verb_aliases[alias]))
            sys.exit(0)
        if not arg.startswith('-'):
            break

    # Do verb alias expansion
    sysargs = expand_verb_aliases(sysargs, verb_aliases)

    # Check for --locate-extra-shell-verbs
    for arg in sysargs:
        if arg == '--locate-extra-shell-verbs':
            this_dir = os.path.dirname(__file__)
            shell_verbs = os.path.join(this_dir, '..', 'verbs', 'catkin_shell_verbs.bash')
            print(os.path.normpath(shell_verbs))
            sys.exit(0)

    # Determine the verb, splitting arguments into pre and post verb
    verb = None
    pre_verb_args = []
    post_verb_args = []
    for index, arg in enumerate(sysargs):
        # If the arg does not start with a `-` then it is a positional argument
        # The first positional argument must be the verb
        if not arg.startswith('-'):
            verb = arg
            post_verb_args = sysargs[index + 1:]
            break
        # If `-h` or `--help` comes before the verb, let argparse print help and exit
        if arg in ['-h', '--help']:
            parser.parse_args(sysargs)
        # Otherwise it is a pre-verb option
        pre_verb_args.append(arg)

    # Error on no verb provided
    if verb is None:
        print(parser.format_usage())
        sys.exit("Error: No verb provided.")
    # Error on unknown verb provided
    if verb not in verbs:
        print(parser.format_usage())
        sys.exit("Error: Unknown verb '{0}' provided.".format(verb))

    # First allow the verb's argument preprocessor to strip any args
    # and return any "extra" information it wants as a dict
    processed_post_verb_args, extras = argument_preprocessors[verb](post_verb_args)
    # Then allow argparse to process the left over post-verb arguments along
    # with the pre-verb arguments and the verb itself
    args = parser.parse_args(pre_verb_args + [verb] + processed_post_verb_args)
    # Extend the argparse result with the extras from the preprocessor
    for key, value in extras.items():
        setattr(args, key, value)

    # Finally call the subparser's main function with the processed args
    # and the extras which the preprocessor may have returned
    sys.exit(args.main(args) or 0)
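expand_verb_aliases is used in these examples without its definition. A minimal sketch consistent with how it is called (aliases map a verb-like token to a list of replacement arguments, and expansion happens on the first positional token) might look like this; the single-pass behavior is an assumption, and the real implementation may, for example, expand recursively.

def expand_verb_aliases(sysargs, verb_aliases):
    for index, arg in enumerate(sysargs):
        if arg.startswith('-'):
            continue  # still in pre-verb options
        if arg in verb_aliases:
            # Replace the alias token with its expansion, keeping
            # the surrounding arguments untouched.
            return sysargs[:index] + list(verb_aliases[arg]) + sysargs[index + 1:]
        break  # the first positional token was a real verb
    return sysargs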
Example no. 5
def main(opts):

    # Check for develdebug mode
    if opts.develdebug is not None:
        os.environ['TROLLIUSDEBUG'] = opts.develdebug.lower()
        logging.basicConfig(level=opts.develdebug.upper())

    # Set color options
    if (opts.force_color or is_tty(sys.stdout)) and not opts.no_color:
        set_color(True)
    else:
        set_color(False)

    # Context-aware args
    if opts.build_this or opts.start_with_this:
        # Determine the enclosing package
        try:
            ws_path = find_enclosing_workspace(getcwd())
            # Suppress warnings since this won't necessarily find all packages
            # in the workspace (it stops when it finds one package), and
            # relying on it for warnings could mislead people.
            this_package = find_enclosing_package(
                search_start_path=getcwd(),
                ws_path=ws_path,
                warnings=[])
        except (InvalidPackage, RuntimeError):
            this_package = None

        # Handle context-based package building
        if opts.build_this:
            if this_package:
                opts.packages += [this_package]
            else:
                sys.exit(
                    "[build] Error: In order to use --this, the current directory must be part of a catkin package.")

        # If --start-with-this was specified, start the build with this package
        if opts.start_with_this:
            if this_package:
                opts.start_with = this_package
            else:
                sys.exit(
                    "[build] Error: In order to use --this, the current directory must be part of a catkin package.")

    if opts.no_deps and not opts.packages and not opts.unbuilt:
        sys.exit(clr("[build] @!@{rf}Error:@| With --no-deps, you must specify packages to build."))

    # Load the context
    ctx = Context.load(opts.workspace, opts.profile, opts, append=True)

    # Initialize the build configuration
    make_args, makeflags, cli_flags, jobserver = configure_make_args(
        ctx.make_args, ctx.jobs_args, ctx.use_internal_make_jobserver)

    # Set the jobserver memory limit
    if jobserver and opts.mem_limit:
        log(clr("@!@{pf}EXPERIMENTAL: limit memory to '%s'@|" % str(opts.mem_limit)))
        # At this point psutil is required; check for it and bail out if it's not installed
        try:
            import psutil  # noqa
        except ImportError as exc:
            log("Could not import psutil, but psutil is required when using --mem-limit.")
            log("Please either install psutil or avoid using --mem-limit.")
            sys.exit("Exception: {0}".format(exc))
        job_server.set_max_mem(opts.mem_limit)

    ctx.make_args = make_args

    # Load the environment of the workspace to extend
    if ctx.extend_path is not None:
        try:
            load_resultspace_environment(ctx.extend_path)
        except IOError as exc:
            sys.exit(clr("[build] @!@{rf}Error:@| Unable to extend workspace from \"%s\": %s" %
                         (ctx.extend_path, str(exc))))

    # Check if the context is valid before writing any metadata
    if not ctx.source_space_exists():
        sys.exit(clr("[build] @!@{rf}Error:@| Unable to find source space `%s`") % ctx.source_space_abs)

    # ensure the build space was previously built by catkin_tools
    previous_tool = get_previous_tool_used_on_the_space(ctx.build_space_abs)
    if previous_tool is not None and previous_tool != 'catkin build':
        if opts.override_build_tool_check:
            log(clr(
                "@{yf}Warning: build space at '%s' was previously built by '%s', "
                "but --override-build-tool-check was passed so continuing anyways."
                % (ctx.build_space_abs, previous_tool)))
        else:
            sys.exit(clr(
                "@{rf}The build space at '%s' was previously built by '%s'. "
                "Please remove the build space or pick a different build space."
                % (ctx.build_space_abs, previous_tool)))
    # the build space will be marked as catkin build's if dry run doesn't return

    # ensure the devel space was previously built by catkin_tools
    previous_tool = get_previous_tool_used_on_the_space(ctx.devel_space_abs)
    if previous_tool is not None and previous_tool != 'catkin build':
        if opts.override_build_tool_check:
            log(clr(
                "@{yf}Warning: devel space at '%s' was previously built by '%s', "
                "but --override-build-tool-check was passed so continuing anyways."
                % (ctx.devel_space_abs, previous_tool)))
        else:
            sys.exit(clr(
                "@{rf}The devel space at '%s' was previously built by '%s'. "
                "Please remove the devel space or pick a different devel space."
                % (ctx.devel_space_abs, previous_tool)))
    # the devel space will be marked as catkin build's if dry run doesn't return

    # Display list and leave the file system untouched
    if opts.dry_run:
        # TODO: Add unbuilt
        dry_run(ctx, opts.packages, opts.no_deps, opts.start_with)
        return

    # Print the build environment for a given package and leave the filesystem untouched
    if opts.get_env:
        return print_build_env(ctx, opts.get_env[0])

    # Now mark the build and devel spaces as catkin build's since dry run didn't return.
    mark_space_as_built_by(ctx.build_space_abs, 'catkin build')
    mark_space_as_built_by(ctx.devel_space_abs, 'catkin build')

    # Get the last build context
    build_metadata = get_metadata(ctx.workspace, ctx.profile, 'build')

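    # Force CMake to run if the CMake arguments have changed since the last build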
    if build_metadata.get('cmake_args') != ctx.cmake_args or build_metadata.get('cmake_args') != opts.cmake_args:
        opts.force_cmake = True

    if build_metadata.get('needs_force', False):
        opts.force_cmake = True
        update_metadata(ctx.workspace, ctx.profile, 'build', {'needs_force': False})

    # Always save the last context under the build verb
    update_metadata(ctx.workspace, ctx.profile, 'build', ctx.get_stored_dict())

    # Save the context as the configuration
    if opts.save_config:
        Context.save(ctx)

    # Get parallel toplevel jobs
    try:
        parallel_jobs = int(opts.parallel_jobs)
    except TypeError:
        parallel_jobs = None

    # Set VERBOSE environment variable
    if opts.verbose:
        os.environ['VERBOSE'] = '1'

    return build_isolated_workspace(
        ctx,
        packages=opts.packages,
        start_with=opts.start_with,
        no_deps=opts.no_deps,
        unbuilt=opts.unbuilt,
        n_jobs=parallel_jobs,
        force_cmake=opts.force_cmake,
        pre_clean=opts.pre_clean,
        force_color=opts.force_color,
        quiet=not opts.verbose,
        interleave_output=opts.interleave_output,
        no_status=opts.no_status,
        limit_status_rate=opts.limit_status_rate,
        lock_install=not opts.no_install_lock,
        no_notify=opts.no_notify,
        continue_on_failure=opts.continue_on_failure,
        summarize_build=opts.summarize  # Can be True, False, or None
    )
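The color-selection logic at the top of this example (and the CATKIN_TOOLS_FORCE_COLOR variant in the later examples) reduces to a small pure function, sketched here for clarity; the name is mine:

def should_use_color(force_color, no_color, stdout_is_tty):
    # --no-color always wins; otherwise color is on when forced
    # (flag or environment variable) or when stdout is a terminal.
    if no_color:
        return False
    return bool(force_color) or stdout_is_tty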
Example no. 6
def build_isolated_workspace(context,
                             packages=None,
                             start_with=None,
                             no_deps=False,
                             jobs=None,
                             force_cmake=False,
                             force_color=False,
                             quiet=False,
                             interleave_output=False,
                             no_status=False,
                             lock_install=False,
                             no_notify=False):
    """Builds a catkin workspace in isolation

    This function will find all of the packages in the source space, start some
    executors, feed them packages to build based on dependencies and topological
    ordering, and then monitor the output of the executors, handling the
    logging of builds as they start, fail, and finish, and shutting the
    executors down when appropriate.

    :param context: context in which to build the catkin workspace
    :type context: :py:class:`catkin_tools.verbs.catkin_build.context.Context`
    :param packages: list of packages to build, by default their dependencies will also be built
    :type packages: list
    :param start_with: package to start with, skipping all packages which precede it in the topological order
    :type start_with: str
    :param no_deps: If True, the dependencies of packages will not be built first
    :type no_deps: bool
    :param jobs: number of parallel package build jobs
    :type jobs: int
    :param force_cmake: forces invocation of CMake if True, default is False
    :type force_cmake: bool
    :param force_color: forces colored output even if terminal does not support it
    :type force_color: bool
    :param quiet: suppresses the output of commands unless there is an error
    :type quiet: bool
    :param interleave_output: prints the output of commands as they are received
    :type interleave_output: bool
    :param no_status: disables status bar
    :type no_status: bool
    :param lock_install: causes executors to synchronize on access of install commands
    :type lock_install: bool
    :param no_notify: suppresses system notifications
    :type no_notify: bool

    :raises: SystemExit if buildspace is a file or no packages were found in the source space
        or if the provided options are invalid
    """
    # If no_deps is given, ensure packages to build are provided
    if no_deps and packages is None:
        sys.exit("With --no-deps, you must specify packages to build.")
    # Make sure there is a build folder and it is not a file
    if os.path.exists(context.build_space_abs):
        if os.path.isfile(context.build_space_abs):
            sys.exit(
                clr("@{rf}Error:@| Build space '{0}' exists but is a file and not a folder."
                    .format(context.build_space_abs)))
    # If it doesn't exist, create it
    else:
        log("Creating build space directory, '{0}'".format(
            context.build_space_abs))
        os.makedirs(context.build_space_abs)

    # Check for catkin_make droppings
    if context.corrupted_by_catkin_make():
        sys.exit(
            clr("@{rf}Error:@| Build space `{0}` exists but appears to have previously been "
                "created by the `catkin_make` or `catkin_make_isolated` tool. "
                "Please choose a different directory to use with `catkin build` "
                "or clean the build space.".format(context.build_space_abs)))

    # Declare a buildspace marker describing the build config for error checking
    buildspace_marker_data = {
        'workspace': context.workspace,
        'profile': context.profile,
        'install': context.install,
        'install_space': context.install_space_abs,
        'devel_space': context.devel_space_abs,
        'source_space': context.source_space_abs
    }

    # Check build config
    if os.path.exists(
            os.path.join(context.build_space_abs, BUILDSPACE_MARKER_FILE)):
        with open(os.path.join(
                context.build_space_abs,
                BUILDSPACE_MARKER_FILE)) as buildspace_marker_file:
            existing_buildspace_marker_data = yaml.safe_load(buildspace_marker_file)
            misconfig_lines = ''
            for (k, v) in existing_buildspace_marker_data.items():
                new_v = buildspace_marker_data.get(k, None)
                if new_v != v:
                    misconfig_lines += (
                        '\n - %s: %s (stored) is not %s (commanded)' %
                        (k, v, new_v))
            if len(misconfig_lines) > 0:
                sys.exit(
                    clr("\n@{rf}Error:@| Attempting to build a catkin workspace using build space: "
                        "\"%s\" but that build space's most recent configuration "
                        "differs from the commanded one in ways which will cause "
                        "problems. Fix the following options or use @{yf}`catkin "
                        "clean -b`@| to remove the build space: %s" %
                        (context.build_space_abs, misconfig_lines)))

    # Write the current build config for config error checking
    with open(os.path.join(context.build_space_abs, BUILDSPACE_MARKER_FILE),
              'w') as buildspace_marker_file:
        buildspace_marker_file.write(
            yaml.dump(buildspace_marker_data, default_flow_style=False))

    # Summarize the context
    summary_notes = []
    if force_cmake:
        summary_notes += [
            clr("@!@{cf}NOTE:@| Forcing CMake to run for each package.")
        ]
    log(context.summary(summary_notes))

    # Find list of packages in the workspace
    packages_to_be_built, packages_to_be_built_deps, all_packages = determine_packages_to_be_built(
        packages, context)
    completed_packages = []
    if no_deps:
        # Consider deps as "completed"
        completed_packages.extend(packages_to_be_built_deps)
    else:
        # Extend packages to be built to include their deps
        packages_to_be_built.extend(packages_to_be_built_deps)
    # Also resort
    packages_to_be_built = topological_order_packages(
        dict(packages_to_be_built))
    max_package_name_length = max(
        [len(pkg.name) for pth, pkg in packages_to_be_built])
    # Assert start_with package is in the workspace
    verify_start_with_option(start_with, packages, all_packages,
                             packages_to_be_built + packages_to_be_built_deps)

    # Setup pool of executors
    executors = {}
    # The communication queue can have ExecutorEvent's or str's passed into it from the executors
    comm_queue = Queue()
    # The job queue has Jobs put into it
    job_queue = Queue()
    # Lock for install space
    install_lock = Lock() if lock_install else FakeLock()
    # Determine the number of executors
    try:
        if jobs:
            jobs = int(jobs)
            if jobs < 1:
                sys.exit(
                    "Specified number of jobs '{0}' is not positive.".format(
                        jobs))
    except ValueError:
        sys.exit("Specified number of jobs '{0}' is no integer.".format(jobs))
    try:
        jobs = cpu_count() if jobs is None else jobs
    except NotImplementedError:
        log('Failed to determine the cpu_count, falling back to 1 job as the default.')
        jobs = 1 if jobs is None else jobs
    # If only one set of jobs, turn on interleaving to get more responsive feedback
    if jobs == 1:
        # TODO: make the system more intelligent so that it can automatically switch to streaming output
        #       when only one job is building, even if multiple jobs could be building
        quiet = False
        interleave_output = True
    # Start the executors
    for x in range(jobs):
        e = Executor(x, context, comm_queue, job_queue, install_lock)
        executors[x] = e
        e.start()

    try:  # Finally close out now running executors
        # Variables for tracking running jobs and built/building packages
        start = time.time()
        total_packages = len(packages_to_be_built)
        package_count = 0
        running_jobs = {}
        log_dir = os.path.join(context.build_space_abs, 'build_logs')
        color = True
        if not force_color and not is_tty(sys.stdout):
            color = False
        out = OutputController(log_dir,
                               quiet,
                               interleave_output,
                               color,
                               max_package_name_length,
                               prefix_output=(jobs > 1))
        if no_status:
            disable_wide_log()

        # Prime the job_queue
        ready_packages = []
        if start_with is None:
            ready_packages = get_ready_packages(packages_to_be_built,
                                                running_jobs,
                                                completed_packages)
        while start_with is not None:
            ready_packages.extend(
                get_ready_packages(packages_to_be_built, running_jobs,
                                   completed_packages))
            while ready_packages:
                pth, pkg = ready_packages.pop(0)
                if pkg.name != start_with:
                    completed_packages.append(pkg.name)
                    package_count += 1
                    wide_log("[build] Skipping package '{0}'".format(pkg.name))
                else:
                    ready_packages.insert(0, (pth, pkg))
                    start_with = None
                    break
        running_jobs = queue_ready_packages(ready_packages, running_jobs,
                                            job_queue, context, force_cmake)
        assert running_jobs

        error_state = False
        errors = []

        def set_error_state():
            # Use the enclosing scope's flag so the change is visible
            # to the event loop below.
            nonlocal error_state
            if error_state:
                return
            # Set the error state to prevent new jobs
            error_state = True
            # Empty the job queue
            while not job_queue.empty():
                job_queue.get()
            # Kill the executors by sending a None to the job queue for each of them
            for x in range(jobs):
                job_queue.put(None)

        # While any executors are running, process executor events
        while executors:
            try:
                # Try to get an event from the communications queue
                try:
                    event = comm_queue.get(True, 0.1)
                except Empty:
                    # timeout occurred, create null event to pass through checks
                    event = ExecutorEvent(None, None, None, None)

                if event.event_type == 'job_started':
                    package_count += 1
                    running_jobs[
                        event.package]['package_number'] = package_count
                    running_jobs[event.package]['start_time'] = time.time()
                    out.job_started(event.package)

                if event.event_type == 'command_started':
                    out.command_started(event.package, event.data['cmd'],
                                        event.data['location'])

                if event.event_type == 'command_log':
                    out.command_log(event.package, event.data['message'])

                if event.event_type == 'command_failed':
                    out.command_failed(event.package, event.data['cmd'],
                                       event.data['location'],
                                       event.data['retcode'])
                    # Add to list of errors
                    errors.append(event)
                    # Remove the command from the running jobs
                    del running_jobs[event.package]
                    # If it hasn't already been done, stop the executors
                    set_error_state()

                if event.event_type == 'command_finished':
                    out.command_finished(event.package, event.data['cmd'],
                                         event.data['location'],
                                         event.data['retcode'])

                if event.event_type == 'job_finished':
                    completed_packages.append(event.package)
                    run_time = format_time_delta(
                        time.time() -
                        running_jobs[event.package]['start_time'])
                    out.job_finished(event.package, run_time)
                    del running_jobs[event.package]
                    # If shutting down, do not add new packages
                    if error_state:
                        continue
                    # Calculate new packages
                    if not no_status:
                        wide_log('[build] Calculating new jobs...', end='\r')
                        sys.stdout.flush()
                    ready_packages = get_ready_packages(
                        packages_to_be_built, running_jobs, completed_packages)
                    running_jobs = queue_ready_packages(
                        ready_packages, running_jobs, job_queue, context,
                        force_cmake)
                    # Make sure there are jobs to be/being processed, otherwise kill the executors
                    if not running_jobs:
                        # Kill the executors by sending a None to the job queue for each of them
                        for x in range(jobs):
                            job_queue.put(None)

                # If an executor exit event, join it and remove it from the executors list
                if event.event_type == 'exit':
                    # If an executor has an exception, set the error state
                    if event.data['reason'] == 'exception':
                        set_error_state()
                        errors.append(event)
                    # Join and remove it
                    executors[event.executor_id].join()
                    del executors[event.executor_id]

                if not no_status:
                    # Update the status bar on the screen
                    executing_jobs = []
                    for name, value in running_jobs.items():
                        number = value['package_number']
                        start_time = value['start_time']
                        if number is None or start_time is None:
                            continue
                        executing_jobs.append({
                            'number': number,
                            'name': name,
                            'run_time': format_time_delta_short(time.time() - start_time)
                        })
                    msg = clr("[build - {run_time}] ").format(
                        run_time=format_time_delta_short(time.time() - start))
                    # If there are errors, post those first
                    if errors:
                        for error in errors:
                            msg += clr("[!{package}] ").format(
                                package=error.package)
                    # Print them in order of started number
                    for job_msg_args in sorted(executing_jobs, key=lambda args: args['number']):
                        msg += clr("[{name} - {run_time}] ").format(**job_msg_args)
                    msg_rhs = clr("[{0}/{1} Active | {2}/{3} Completed]").format(
                        len(executing_jobs), len(executors),
                        len(packages) if no_deps else len(completed_packages),
                        total_packages)
                    # Update title bar
                    sys.stdout.write("\x1b]2;[build] {0}/{1}\x07".format(
                        len(packages) if no_deps else len(completed_packages),
                        total_packages))
                    # Update status bar
                    wide_log(msg, rhs=msg_rhs, end='\r')
                    sys.stdout.flush()
            except KeyboardInterrupt:
                wide_log("[build] User interrupted, stopping.")
                set_error_state()
        # All executors have shutdown
        sys.stdout.write("\x1b]2;\x07")
        if not errors:
            if context.isolate_devel:
                if not context.install:
                    _create_unmerged_devel_setup(context)
                else:
                    _create_unmerged_devel_setup_for_install(context)
            wide_log("[build] Finished.")
            if not no_notify:
                notify("Build Finished",
                       "{0} packages built".format(total_packages))
            return 0
        else:
            wide_log(clr("[build] There were @!@{rf}errors@|:"))
            if not no_notify:
                notify("Build Failed",
                       "there were {0} errors".format(len(errors)))
            for error in errors:
                if error.event_type == 'exit':
                    wide_log(
                        """Executor '{exec_id}' had an unhandle exception while processing package '{package}':

    {data[exc]}
    """.format(exec_id=error.executor_id + 1, **error.__dict__))
                else:
                    wide_log(
                        clr("""
    @{rf}Failed@| to build package '@{cf}{package}@|' because the following command:

        @!@{kf}# Command run in directory: @|{location}
        {cmd.cmd_str}

    @{rf}Exited@| with return code: @!{retcode}@|""").format(
                            package=error.package, **error.data))
            sys.exit(1)
    finally:
        # Ensure executors go down
        for x in range(jobs):
            job_queue.put(None)
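The scheduler above leans on get_ready_packages, which is not shown. A plausible sketch, assuming each package object carries `name` and `build_depends` attributes as in catkin_pkg (the real dependency resolution may differ):

def get_ready_packages(packages_to_be_built, running_jobs, completed_packages):
    """Packages whose in-workspace build dependencies are all completed."""
    ready = []
    workspace_names = {pkg.name for _, pkg in packages_to_be_built}
    for pth, pkg in packages_to_be_built:
        if pkg.name in running_jobs or pkg.name in completed_packages:
            continue
        pending_deps = [
            dep.name for dep in pkg.build_depends
            if dep.name in workspace_names and dep.name not in completed_packages
        ]
        if not pending_deps:
            ready.append((pth, pkg))
    return ready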
Example no. 7
def main(opts):

    # Check for develdebug mode
    if opts.develdebug is not None:
        os.environ['TROLLIUSDEBUG'] = opts.develdebug.lower()
        logging.basicConfig(level=opts.develdebug.upper())

    # Set color options
    opts.force_color = os.environ.get('CATKIN_TOOLS_FORCE_COLOR',
                                      opts.force_color)
    if (opts.force_color or is_tty(sys.stdout)) and not opts.no_color:
        set_color(True)
    else:
        set_color(False)

    # Context-aware args
    if opts.build_this or opts.start_with_this:
        # Determine the enclosing package
        try:
            ws_path = find_enclosing_workspace(getcwd())
            # Suppress warnings since this won't necessarily find all packages
            # in the workspace (it stops when it finds one package), and
            # relying on it for warnings could mislead people.
            this_package = find_enclosing_package(search_start_path=getcwd(),
                                                  ws_path=ws_path,
                                                  warnings=[])
        except (InvalidPackage, RuntimeError):
            this_package = None

        # Handle context-based package building
        if opts.build_this:
            if this_package:
                opts.packages += [this_package]
            else:
                sys.exit(
                    "[build] Error: In order to use --this, the current directory must be part of a catkin package."
                )

        # If --start-with-this was specified, start the build with this package
        if opts.start_with_this:
            if this_package:
                opts.start_with = this_package
            else:
                sys.exit(
                    "[build] Error: In order to use --this, the current directory must be part of a catkin package."
                )

    if opts.no_deps and not opts.packages and not opts.unbuilt:
        sys.exit(
            clr("[build] @!@{rf}Error:@| With --no-deps, you must specify packages to build."
                ))

    # Load the context
    ctx = Context.load(opts.workspace, opts.profile, opts, append=True)

    # Initialize the build configuration
    make_args, makeflags, cli_flags, jobserver = configure_make_args(
        ctx.make_args, ctx.jobs_args, ctx.use_internal_make_jobserver)

    # Set the jobserver memory limit
    if jobserver and opts.mem_limit:
        log(
            clr("@!@{pf}EXPERIMENTAL: limit memory to '%s'@|" %
                str(opts.mem_limit)))
        # At this point psutil is required; check for it and bail out if it's not installed
        try:
            import psutil  # noqa
        except ImportError as exc:
            log("Could not import psutil, but psutil is required when using --mem-limit."
                )
            log("Please either install psutil or avoid using --mem-limit.")
            sys.exit("Exception: {0}".format(exc))
        job_server.set_max_mem(opts.mem_limit)

    ctx.make_args = make_args

    # Load the environment of the workspace to extend
    if ctx.extend_path is not None:
        try:
            load_resultspace_environment(ctx.extend_path)
        except IOError as exc:
            sys.exit(
                clr("[build] @!@{rf}Error:@| Unable to extend workspace from \"%s\": %s"
                    % (ctx.extend_path, str(exc))))

    # Check if the context is valid before writing any metadata
    if not ctx.source_space_exists():
        sys.exit(
            clr("[build] @!@{rf}Error:@| Unable to find source space `%s`") %
            ctx.source_space_abs)

    # ensure the build space was previously built by catkin_tools
    previous_tool = get_previous_tool_used_on_the_space(ctx.build_space_abs)
    if previous_tool is not None and previous_tool != 'catkin build':
        if opts.override_build_tool_check:
            log(
                clr("@{yf}Warning: build space at '%s' was previously built by '%s', "
                    "but --override-build-tool-check was passed so continuing anyways."
                    % (ctx.build_space_abs, previous_tool)))
        else:
            sys.exit(
                clr("@{rf}The build space at '%s' was previously built by '%s'. "
                    "Please remove the build space or pick a different build space."
                    % (ctx.build_space_abs, previous_tool)))
    # the build space will be marked as catkin build's if dry run doesn't return

    # ensure the devel space was previously built by catkin_tools
    previous_tool = get_previous_tool_used_on_the_space(ctx.devel_space_abs)
    if previous_tool is not None and previous_tool != 'catkin build':
        if opts.override_build_tool_check:
            log(
                clr("@{yf}Warning: devel space at '%s' was previously built by '%s', "
                    "but --override-build-tool-check was passed so continuing anyways."
                    % (ctx.devel_space_abs, previous_tool)))
        else:
            sys.exit(
                clr("@{rf}The devel space at '%s' was previously built by '%s'. "
                    "Please remove the devel space or pick a different devel space."
                    % (ctx.devel_space_abs, previous_tool)))
    # the devel space will be marked as catkin build's if dry run doesn't return

    # Display list and leave the file system untouched
    if opts.dry_run:
        # TODO: Add unbuilt
        dry_run(ctx, opts.packages, opts.no_deps, opts.start_with)
        return

    # Print the build environment for a given package and leave the filesystem untouched
    if opts.get_env:
        return print_build_env(ctx, opts.get_env[0])

    # Now mark the build and devel spaces as catkin build's since dry run didn't return.
    mark_space_as_built_by(ctx.build_space_abs, 'catkin build')
    mark_space_as_built_by(ctx.devel_space_abs, 'catkin build')

    # Get the last build context
    build_metadata = get_metadata(ctx.workspace, ctx.profile, 'build')

    # Force cmake if the CMake arguments have changed
    if build_metadata.get('cmake_args') != ctx.cmake_args:
        opts.force_cmake = True

    # Check the devel layout compatibility
    last_devel_layout = build_metadata.get('devel_layout', ctx.devel_layout)
    if last_devel_layout != ctx.devel_layout:
        sys.exit(
            clr("@{rf}@!Error:@|@{rf} The current devel space layout, `{}`,"
                "is incompatible with the configured layout, `{}`.@|").format(
                    last_devel_layout, ctx.devel_layout))

    # Check if some other verb has changed the workspace in such a way that it needs to be forced
    if build_metadata.get('needs_force', False):
        opts.force_cmake = True
        update_metadata(ctx.workspace, ctx.profile, 'build',
                        {'needs_force': False})

    # Always save the last context under the build verb
    update_metadata(ctx.workspace, ctx.profile, 'build', ctx.get_stored_dict())

    # Save the context as the configuration
    if opts.save_config:
        Context.save(ctx)

    # Get parallel toplevel jobs
    try:
        parallel_jobs = int(opts.parallel_jobs)
    except TypeError:
        parallel_jobs = None

    # Set VERBOSE environment variable
    if opts.verbose:
        os.environ['VERBOSE'] = '1'

    return build_isolated_workspace(
        ctx,
        packages=opts.packages,
        start_with=opts.start_with,
        no_deps=opts.no_deps,
        unbuilt=opts.unbuilt,
        n_jobs=parallel_jobs,
        force_cmake=opts.force_cmake,
        pre_clean=opts.pre_clean,
        force_color=opts.force_color,
        quiet=not opts.verbose,
        interleave_output=opts.interleave_output,
        no_status=opts.no_status,
        limit_status_rate=opts.limit_status_rate,
        lock_install=not opts.no_install_lock,
        no_notify=opts.no_notify,
        continue_on_failure=opts.continue_on_failure,
        summarize_build=opts.summarize  # Can be True, False, or None
    )
Esempio n. 8
0
def main(opts):
    # Set color options
    opts.force_color = os.environ.get('CATKIN_TOOLS_FORCE_COLOR',
                                      opts.force_color)
    if (opts.force_color or is_tty(sys.stdout)) and not opts.no_color:
        set_color(True)
    else:
        set_color(False)

    # Context-aware args
    if opts.build_this:
        # Determine the enclosing package
        try:
            ws_path = find_enclosing_workspace(getcwd())
            # Suppress warnings since this won't necessarily find all packages
            # in the workspace (it stops when it finds one package), and
            # relying on it for warnings could mislead people.
            this_package = find_enclosing_package(search_start_path=getcwd(),
                                                  ws_path=ws_path,
                                                  warnings=[])
        except InvalidPackage as ex:
            sys.exit(
                clr("[test] @!@{rf}Error:@| The file {} is an invalid package.xml file."
                    " See below for details:\n\n{}").format(
                        ex.package_path, ex.msg))

        # Handle context-based package building
        if this_package:
            opts.packages += [this_package]
        else:
            sys.exit(
                clr("[test] @!@{rf}Error:@| In order to use --this, "
                    "the current directory must be part of a catkin package."))

    # Load the context
    ctx = Context.load(opts.workspace, opts.profile, opts, append=True)

    # Load the environment of the workspace to extend
    if ctx.extend_path is not None:
        try:
            load_resultspace_environment(ctx.extend_path)
        except IOError as exc:
            sys.exit(
                clr("[test] @!@{rf}Error:@| Unable to extend workspace from \"{}\": {}"
                    ).format(ctx.extend_path, str(exc)))

    # Check if the context is valid before writing any metadata
    if not ctx.source_space_exists():
        sys.exit(
            clr("[test] @!@{rf}Error:@| Unable to find source space `{}`")
            .format(ctx.source_space_abs))

    # Extract make arguments
    make_args, _, _, _ = configure_make_args(ctx.make_args, ctx.jobs_args,
                                             ctx.use_internal_make_jobserver)
    ctx.make_args = make_args

    # Get parallel toplevel jobs
    try:
        parallel_jobs = int(opts.parallel_jobs)
    except TypeError:
        parallel_jobs = None

    # Set VERBOSE environment variable
    if opts.verbose and 'VERBOSE' not in os.environ:
        os.environ['VERBOSE'] = '1'

    # Get test targets
    catkin_test_target = 'run_tests'
    cmake_test_target = 'test'
    if opts.test_target:
        catkin_test_target = opts.test_target
        cmake_test_target = opts.test_target
    if opts.catkin_test_target:
        catkin_test_target = opts.catkin_test_target

    return test_workspace(
        ctx,
        packages=opts.packages,
        n_jobs=parallel_jobs,
        quiet=not opts.verbose,
        interleave_output=opts.interleave_output,
        no_status=opts.no_status,
        limit_status_rate=opts.limit_status_rate,
        no_notify=opts.no_notify,
        continue_on_failure=opts.continue_on_failure,
        summarize_build=opts.summarize,
        catkin_test_target=catkin_test_target,
        cmake_test_target=cmake_test_target,
    )
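The test-target fallback in this example is easy to restate as a helper (the function name is mine): --test-target overrides both the catkin and CMake targets, while --catkin-test-target then overrides only the catkin one.

def resolve_test_targets(test_target=None, catkin_test_target=None):
    catkin_target, cmake_target = 'run_tests', 'test'
    if test_target:
        catkin_target = cmake_target = test_target
    if catkin_test_target:
        catkin_target = catkin_test_target
    return catkin_target, cmake_target

# resolve_test_targets(test_target='check') -> ('check', 'check')
# resolve_test_targets(catkin_test_target='run_tests_foo') -> ('run_tests_foo', 'test')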
Example no. 9
def build_isolated_workspace(
    context,
    packages=None,
    start_with=None,
    no_deps=False,
    jobs=None,
    force_cmake=False,
    force_color=False,
    quiet=False,
    interleave_output=False,
    no_status=False,
    lock_install=False,
    no_notify=False
):
    """Builds a catkin workspace in isolation

    This function will find all of the packages in the source space, start some
    executors, feed them packages to build based on dependencies and topological
    ordering, and then monitor the output of the executors, handling the
    logging of builds as they start, fail, and finish, and shutting the
    executors down when appropriate.

    :param context: context in which to build the catkin workspace
    :type context: :py:class:`catkin_tools.verbs.catkin_build.context.Context`
    :param packages: list of packages to build, by default their dependencies will also be built
    :type packages: list
    :param start_with: package to start with, skipping all packages which precede it in the topological order
    :type start_with: str
    :param no_deps: If True, the dependencies of packages will not be built first
    :type no_deps: bool
    :param jobs: number of parallel package build jobs
    :type jobs: int
    :param force_cmake: forces invocation of CMake if True, default is False
    :type force_cmake: bool
    :param force_color: forces colored output even if terminal does not support it
    :type force_color: bool
    :param quiet: suppresses the output of commands unless there is an error
    :type quiet: bool
    :param interleave_output: prints the output of commands as they are received
    :type interleave_output: bool
    :param no_status: disables status bar
    :type no_status: bool
    :param lock_install: causes executors to synchronize on access of install commands
    :type lock_install: bool
    :param no_notify: suppresses system notifications
    :type no_notify: bool

    :raises: SystemExit if buildspace is a file or no packages were found in the source space
        or if the provided options are invalid
    """
    # If no_deps is given, ensure packages to build are provided
    if no_deps and packages is None:
        sys.exit("With --no-deps, you must specify packages to build.")
    # Make sure there is a build folder and it is not a file
    if os.path.exists(context.build_space_abs):
        if os.path.isfile(context.build_space_abs):
            sys.exit(clr(
                "@{rf}Error:@| Build space '{0}' exists but is a file and not a folder."
                .format(context.build_space_abs)))
    # If it doesn't exist, create it
    else:
        log("Creating build space directory, '{0}'".format(context.build_space_abs))
        os.makedirs(context.build_space_abs)

    # Check for catkin_make droppings
    if context.corrupted_by_catkin_make():
        sys.exit(
            clr("@{rf}Error:@| Build space `{0}` exists but appears to have previously been "
                "created by the `catkin_make` or `catkin_make_isolated` tool. "
                "Please choose a different directory to use with `catkin build` "
                "or clean the build space.".format(context.build_space_abs)))

    # Declare a buildspace marker describing the build config for error checking
    buildspace_marker_data = {
        'workspace': context.workspace,
        'profile': context.profile,
        'install': context.install,
        'install_space': context.install_space_abs,
        'devel_space': context.devel_space_abs,
        'source_space': context.source_space_abs}

    # Check build config
    if os.path.exists(os.path.join(context.build_space_abs, BUILDSPACE_MARKER_FILE)):
        with open(os.path.join(context.build_space_abs, BUILDSPACE_MARKER_FILE)) as buildspace_marker_file:
            existing_buildspace_marker_data = yaml.safe_load(buildspace_marker_file)
            misconfig_lines = ''
            for (k, v) in existing_buildspace_marker_data.items():
                new_v = buildspace_marker_data.get(k, None)
                if new_v != v:
                    misconfig_lines += (
                        '\n - %s: %s (stored) is not %s (commanded)' %
                        (k, v, new_v))
            if len(misconfig_lines) > 0:
                sys.exit(clr(
                    "\n@{rf}Error:@| Attempting to build a catkin workspace using build space: "
                    "\"%s\" but that build space's most recent configuration "
                    "differs from the commanded one in ways which will cause "
                    "problems. Fix the following options or use @{yf}`catkin "
                    "clean -b`@| to remove the build space: %s" %
                    (context.build_space_abs, misconfig_lines)))

    # Write the current build config for config error checking
    with open(os.path.join(context.build_space_abs, BUILDSPACE_MARKER_FILE), 'w') as buildspace_marker_file:
        buildspace_marker_file.write(yaml.dump(buildspace_marker_data, default_flow_style=False))

    # Summarize the context
    summary_notes = []
    if force_cmake:
        summary_notes += [clr("@!@{cf}NOTE:@| Forcing CMake to run for each package.")]
    log(context.summary(summary_notes))

    # Find list of packages in the workspace
    packages_to_be_built, packages_to_be_built_deps, all_packages = determine_packages_to_be_built(packages, context)
    completed_packages = []
    if no_deps:
        # Consider deps as "completed"
        completed_packages.extend(packages_to_be_built_deps)
    else:
        # Extend packages to be built to include their deps
        packages_to_be_built.extend(packages_to_be_built_deps)
    # Re-sort the list into topological order
    packages_to_be_built = topological_order_packages(dict(packages_to_be_built))
    max_package_name_length = max([len(pkg.name) for pth, pkg in packages_to_be_built])
    # Assert start_with package is in the workspace
    verify_start_with_option(start_with, packages, all_packages, packages_to_be_built + packages_to_be_built_deps)

    # Setup pool of executors
    executors = {}
    # The communication queue can have ExecutorEvent or str objects passed into it from the executors
    comm_queue = Queue()
    # The job queue has Jobs put into it
    job_queue = Queue()
    # Lock for install space
    install_lock = Lock() if lock_install else FakeLock()
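    # FakeLock exposes the same context-manager interface as Lock but never
    # blocks, so the executors can acquire it unconditionally either way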
    # Determine the number of executors
    try:
        if jobs:
            jobs = int(jobs)
            if jobs < 1:
                sys.exit("Specified number of jobs '{0}' is not positive.".format(jobs))
    except ValueError:
        sys.exit("Specified number of jobs '{0}' is no integer.".format(jobs))
    try:
        jobs = cpu_count() if jobs is None else jobs
    except NotImplementedError:
        log('Failed to determine the cpu_count, falling back to 1 job as the default.')
        jobs = 1 if jobs is None else jobs
    # If only one job is allowed, turn on interleaving to get more responsive feedback
    if jobs == 1:
        # TODO: make the system more intelligent so that it can automatically switch to streaming output
        #       when only one job is building, even if multiple jobs could be building
        quiet = False
        interleave_output = True
    # Start the executors
    for x in range(jobs):
        e = Executor(x, context, comm_queue, job_queue, install_lock)
        executors[x] = e
        e.start()

    # Variables for tracking running jobs and built/building packages
    start = time.time()
    total_packages = len(packages_to_be_built)
    package_count = 0
    running_jobs = {}
    log_dir = os.path.join(context.build_space_abs, 'build_logs')
    color = True
    if not force_color and not is_tty(sys.stdout):
        color = False
    out = OutputController(log_dir, quiet, interleave_output, color, max_package_name_length, prefix_output=(jobs > 1))
    if no_status:
        disable_wide_log()

    # Prime the job_queue
    ready_packages = []
    if start_with is None:
        ready_packages = get_ready_packages(packages_to_be_built, running_jobs, completed_packages)
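    # When --start-with is given, walk the ready packages in topological order,
    # marking everything before the requested package as already completed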
    while start_with is not None:
        ready_packages.extend(get_ready_packages(packages_to_be_built, running_jobs, completed_packages))
        while ready_packages:
            pth, pkg = ready_packages.pop(0)
            if pkg.name != start_with:
                completed_packages.append(pkg.name)
                package_count += 1
                wide_log("[build] Skipping package '{0}'".format(pkg.name))
            else:
                ready_packages.insert(0, (pth, pkg))
                start_with = None
                break
    running_jobs = queue_ready_packages(ready_packages, running_jobs, job_queue, context, force_cmake)
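    # At least one package must have been queued here; otherwise no package in
    # the workspace is free of pending dependencies and no progress can be made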
    assert running_jobs

    error_state = False
    errors = []

    def set_error_state(error_state):
        if error_state:
            return error_state
        # Empty the job queue to prevent new jobs from starting
        while not job_queue.empty():
            job_queue.get()
        # Kill the executors by sending a None to the job queue for each of them
        for x in range(jobs):
            job_queue.put(None)
        # Return the new error state; rebinding the parameter inside this
        # function would not affect the caller's variable
        return True

    # While any executors are running, process executor events
    while executors:
        try:
            # Try to get an event from the communications queue
            try:
                event = comm_queue.get(True, 0.1)
            except Empty:
                # timeout occurred, create null event to pass through checks
                event = ExecutorEvent(None, None, None, None)

            if event.event_type == 'job_started':
                package_count += 1
                running_jobs[event.package]['package_number'] = package_count
                running_jobs[event.package]['start_time'] = time.time()
                out.job_started(event.package)

            if event.event_type == 'command_started':
                out.command_started(event.package, event.data['cmd'], event.data['location'])

            if event.event_type == 'command_log':
                out.command_log(event.package, event.data['message'])

            if event.event_type == 'command_failed':
                out.command_failed(event.package, event.data['cmd'], event.data['location'], event.data['retcode'])
                # Add to list of errors
                errors.append(event)
                # Remove the command from the running jobs
                del running_jobs[event.package]
                # If it hasn't already been done, stop the executors
                error_state = set_error_state(error_state)

            if event.event_type == 'command_finished':
                out.command_finished(event.package, event.data['cmd'], event.data['location'], event.data['retcode'])

            if event.event_type == 'job_finished':
                completed_packages.append(event.package)
                run_time = format_time_delta(time.time() - running_jobs[event.package]['start_time'])
                out.job_finished(event.package, run_time)
                del running_jobs[event.package]
                # If shutting down, do not add new packages
                if error_state:
                    continue
                # Calculate new packages
                if not no_status:
                    wide_log('[build] Calculating new jobs...', end='\r')
                    sys.stdout.flush()
                ready_packages = get_ready_packages(packages_to_be_built, running_jobs, completed_packages)
                running_jobs = queue_ready_packages(ready_packages, running_jobs, job_queue, context, force_cmake)
                # Make sure there are jobs to be/being processed, otherwise kill the executors
                if not running_jobs:
                    # Kill the executors by sending a None to the job queue for each of them
                    for x in range(jobs):
                        job_queue.put(None)

            # If an executor exit event, join it and remove it from the executors list
            if event.event_type == 'exit':
                # If an executor has an exception, set the error state
                if event.data['reason'] == 'exception':
                    error_state = set_error_state(error_state)
                    errors.append(event)
                # Join and remove it
                executors[event.executor_id].join()
                del executors[event.executor_id]

            if not no_status:
                # Update the status bar on the screen
                executing_jobs = []
                for name, value in running_jobs.items():
                    number, start_time = value['package_number'], value['start_time']
                    if number is None or start_time is None:
                        continue
                    executing_jobs.append({
                        'number': number,
                        'name': name,
                        'run_time': format_time_delta_short(time.time() - start_time)
                    })
                msg = clr("[build - {run_time}] ").format(run_time=format_time_delta_short(time.time() - start))
                # If there are errors, post them first
                if errors:
                    for error in errors:
                        msg += clr("[!{package}] ").format(package=error.package)
                # Print them in order of started number
                for job_msg_args in sorted(executing_jobs, key=lambda args: args['number']):
                    msg += clr("[{name} - {run_time}] ").format(**job_msg_args)
                msg_rhs = clr("[{0}/{1} Active | {2}/{3} Completed]").format(
                    len(executing_jobs),
                    len(executors),
                    len(packages) if no_deps else len(completed_packages),
                    total_packages
                )
                # Update title bar
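                # ("\x1b]2;<text>\x07" is the xterm escape sequence for setting the window title)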
                sys.stdout.write("\x1b]2;[build] {0}/{1}\x07".format(
                    len(packages) if no_deps else len(completed_packages),
                    total_packages
                ))
                # Update status bar
                wide_log(msg, rhs=msg_rhs, end='\r')
                sys.stdout.flush()
        except KeyboardInterrupt:
            wide_log("[build] User interrupted, stopping.")
            error_state = set_error_state(error_state)
    # All executors have shutdown
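    # Clear the terminal title bar that was updated during the build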
    sys.stdout.write("\x1b]2;\x07")
    if not errors:
        if context.isolate_devel:
            if not context.install:
                _create_unmerged_devel_setup(context)
            else:
                _create_unmerged_devel_setup_for_install(context)
        wide_log("[build] Finished.")
        if not no_notify:
            notify("Build Finished", "{0} packages built".format(total_packages))
        return 0
    else:
        wide_log(clr("[build] There were @!@{rf}errors@|:"))
        if not no_notify:
            notify("Build Failed", "there were {0} errors".format(len(errors)))
        for error in errors:
            if error.event_type == 'exit':
                wide_log("""Executor '{exec_id}' had an unhandle exception while processing package '{package}':

{data[exc]}
""".format(exec_id=error.executor_id + 1, **error.__dict__))
            else:
                wide_log(clr("""
@{rf}Failed@| to build package '@{cf}{package}@|' because the following command:

    @!@{kf}# Command run in directory: @|{location}
    {cmd.cmd_str}

@{rf}Exited@| with return code: @!{retcode}@|""").format(package=error.package, **error.data))
        sys.exit(1)
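
The executor loop above is a classic producer/consumer arrangement: worker threads report typed events on a shared communication queue, and the coordinator polls that queue with a timeout so it stays responsive to KeyboardInterrupt and can refresh the status line. Below is a minimal, self-contained Python 3 sketch of that pattern; Event and worker here are illustrative stand-ins, not catkin_tools APIs.

import threading
import time
from queue import Queue, Empty


class Event(object):
    """Tiny stand-in for ExecutorEvent: an event type tag plus a payload dict."""

    def __init__(self, event_type, data=None):
        self.event_type = event_type
        self.data = data or {}


def worker(worker_id, job_queue, comm_queue):
    """Consume jobs until a None sentinel arrives, reporting progress as events."""
    while True:
        job = job_queue.get()
        if job is None:  # sentinel: shut this worker down
            comm_queue.put(Event('exit', {'worker': worker_id}))
            return
        comm_queue.put(Event('job_started', {'job': job}))
        time.sleep(0.01)  # stand-in for the real build work
        comm_queue.put(Event('job_finished', {'job': job}))


job_queue, comm_queue = Queue(), Queue()
workers = [threading.Thread(target=worker, args=(i, job_queue, comm_queue))
           for i in range(2)]
for w in workers:
    w.start()
for job in ['pkg_a', 'pkg_b', 'pkg_c']:
    job_queue.put(job)
for _ in workers:
    job_queue.put(None)  # one sentinel per worker, as in the code above

alive = len(workers)
while alive:
    try:
        # Poll with a timeout so interrupts are still serviced promptly
        event = comm_queue.get(True, 0.1)
    except Empty:
        continue
    if event.event_type == 'exit':
        alive -= 1
    else:
        print(event.event_type, event.data)
for w in workers:
    w.join()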
Example No. 10
def main(sysargs=None):
    # Initialize config
    try:
        initialize_config()
    except RuntimeError as exc:
        sys.exit("Failed to initialize config: {0}".format(exc))

    # Create a top level parser
    parser = argparse.ArgumentParser(
        description="catkin command",
        formatter_class=argparse.RawDescriptionHelpFormatter)
    add = parser.add_argument
    add('-a', '--list-aliases', action="store_true", default=False,
        help="Lists the current verb aliases and then quits; all other arguments are ignored")
    add('--test-colors', action='store_true', default=False,
        help="Prints a color test pattern to the screen and then quits; all other arguments are ignored")
    color_control_group = parser.add_mutually_exclusive_group()
    add = color_control_group.add_argument
    add('--force-color', action='store_true', default=False,
        help='Forces catkin to output in color, even when the terminal does not appear to support it.')
    add('--no-color', action='store_true', default=False,
        help='Forces catkin to not use color in the output, regardless of the detected terminal type.')

    # Generate a list of verbs available
    verbs = list_verbs()

    # Create the subparsers for each verb and collect the argument preprocessors
    argument_preprocessors = create_subparsers(parser, verbs)

    # Get verb aliases
    verb_aliases = get_verb_aliases()

    # Setup sysargs
    sysargs = sys.argv[1:] if sysargs is None else sysargs
    cmd = os.path.basename(sys.argv[0])

    # Get colors config
    no_color = False
    force_color = False
    for arg in sysargs:
        if arg == '--no-color':
            no_color = True
        if arg == '--force-color':
            force_color = True

    if no_color or (not force_color and not is_tty(sys.stdout)):
        set_color(False)

    # Check for --test-colors
    for arg in sysargs:
        if arg == '--test-colors':
            test_colors()
            sys.exit(0)
        if not arg.startswith('-'):
            break

    # Check for --list-aliases
    for arg in sysargs:
        if arg == '--list-aliases' or arg == '-a':
            for alias in sorted(list(verb_aliases.keys())):
                print("{0}: {1}".format(alias, verb_aliases[alias]))
            sys.exit(0)
        if not arg.startswith('-'):
            break

    # Do alias expansion
    expanding_verb_aliases = True
    used_aliases = []
    while expanding_verb_aliases:
        expanding_verb_aliases = False
        for index, arg in enumerate(sysargs):
            if not arg.startswith('-'):
                if arg in used_aliases:
                    print(fmt(
                        "@!@{gf}==>@| Alias '@!@{yf}" + arg +
                        "@|' was previously expanded; ignoring it this time to prevent infinite recursion."))
                if arg in verb_aliases:
                    before = sysargs[:index]
                    after = sysargs[index + 1:]
                    sysargs = before + verb_aliases[arg].split() + after
                    print(fmt(
                        "@!@{gf}==>@| Expanding alias "
                        "'@!@{yf}{alias}@|' "
                        "from '@{yf}{before} @!{alias}@{boldoff}{after}@|' "
                        "to '@{yf}{before} @!{expansion}@{boldoff}{after}@|'"
                    ).format(
                        alias=arg,
                        expansion=verb_aliases[arg],
                        before=' '.join([cmd] + before),
                        after=(' '.join([''] + after) if after else '')
                    ))
                    expanding_verb_aliases = True
                    # Prevent the alias from being used again, to prevent infinite recursion
                    used_aliases.append(arg)
                    del verb_aliases[arg]
                break

    # Determine the verb, splitting arguments into pre and post verb
    verb = None
    pre_verb_args = []
    post_verb_args = []
    for index, arg in enumerate(sysargs):
        # If the arg does not start with a `-` then it is a positional argument
        # The first positional argument must be the verb
        if not arg.startswith('-'):
            verb = arg
            post_verb_args = sysargs[index + 1:]
            break
        # If the `-h` or `--help` option comes before the verb, parse_args
        if arg in ['-h', '--help']:
            parser.parse_args(sysargs)
        # Otherwise it is a pre-verb option
        pre_verb_args.append(arg)

    # Error on no verb provided
    if verb is None:
        print(parser.format_usage())
        sys.exit("Error: No verb provided.")
    # Error on unknown verb provided
    if verb not in verbs:
        print(parser.format_usage())
        sys.exit("Error: Unknown verb '{0}' provided.".format(verb))

    # First allow the verb's argument preprocessor to strip any args
    # and return any "extra" information it wants as a dict
    processed_post_verb_args, extras = argument_preprocessors[verb](post_verb_args)
    # Then allow argparse to process the left over post-verb arguments along
    # with the pre-verb arguments and the verb itself
    args = parser.parse_args(pre_verb_args + [verb] + processed_post_verb_args)
    # Extend the argparse result with the extras from the preprocessor
    for key, value in extras.items():
        setattr(args, key, value)

    # Finally call the subparser's main function with the processed args
    # and the extras which the preprocessor may have returned
    sys.exit(args.main(args) or 0)
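
The argument_preprocessors hook used above lets a verb strip options that argparse cannot parse and return them as an "extras" dict, which is then merged onto the parsed Namespace with setattr. Here is a small standalone sketch of that pattern; preprocess and the --extra flag are hypothetical, not part of catkin_tools.

import argparse


def preprocess(args):
    """Hypothetical preprocessor: pull '--extra KEY=VALUE' pairs out before argparse runs."""
    remaining, extras = [], {}
    it = iter(args)
    for arg in it:
        if arg == '--extra':
            key, _, value = next(it).partition('=')
            extras[key] = value
        else:
            remaining.append(arg)
    return remaining, extras


parser = argparse.ArgumentParser()
parser.add_argument('verb')
post_verb_args, extras = preprocess(['build', '--extra', 'jobs=4'])
args = parser.parse_args(post_verb_args)
for key, value in extras.items():
    setattr(args, key, value)
print(args.verb, args.jobs)  # -> build 4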
Example No. 11
def main(sysargs=None):
    # Initialize config
    try:
        initialize_config()
    except RuntimeError as exc:
        sys.exit("Failed to initialize config: {0}".format(exc))

    # Create a top level parser
    parser = argparse.ArgumentParser(description="catkin command", formatter_class=argparse.RawDescriptionHelpFormatter)
    add = parser.add_argument
    add('-a', '--list-aliases', action="store_true", default=False,
        help="Lists the current verb aliases and then quits; all other arguments are ignored")
    add('--test-colors', action='store_true', default=False,
        help="Prints a color test pattern to the screen and then quits; all other arguments are ignored")
    color_control_group = parser.add_mutually_exclusive_group()
    add = color_control_group.add_argument
    add('--force-color', action='store_true', default=False,
        help='Forces catkin to output in color, even when the terminal does not appear to support it.')
    add('--no-color', action='store_true', default=False,
        help='Forces catkin to not use color in the output, regardless of the detected terminal type.')

    # Generate a list of verbs available
    verbs = list_verbs()

    # Create the subparsers for each verb and collect the argument preprocessors
    argument_preprocessors = create_subparsers(parser, verbs)

    # Get verb aliases
    verb_aliases = get_verb_aliases()

    # Setup sysargs
    sysargs = sys.argv[1:] if sysargs is None else sysargs
    cmd = os.path.basename(sys.argv[0])

    # Get colors config
    no_color = False
    force_color = False
    for arg in sysargs:
        if arg == '--no-color':
            no_color = True
        if arg == '--force-color':
            force_color = True

    if no_color or (not force_color and not is_tty(sys.stdout)):
        set_color(False)

    # Check for --test-colors
    for arg in sysargs:
        if arg == '--test-colors':
            test_colors()
            sys.exit(0)
        if not arg.startswith('-'):
            break

    # Check for --list-aliases
    for arg in sysargs:
        if arg == '--list-aliases' or arg == '-a':
            for alias in sorted(list(verb_aliases.keys())):
                print("{0}: {1}".format(alias, verb_aliases[alias]))
            sys.exit(0)
        if not arg.startswith('-'):
            break

    # Do alias expansion
    expanding_verb_aliases = True
    used_aliases = []
    while expanding_verb_aliases:
        expanding_verb_aliases = False
        for index, arg in enumerate(sysargs):
            if not arg.startswith('-'):
                if arg in used_aliases:
                    print(fmt(
                        "@!@{gf}==>@| Alias '@!@{yf}" +
                        arg +
                        "@|' was previously expanded; ignoring it this time to prevent infinite recursion."
                    ))
                if arg in verb_aliases:
                    before = sysargs[:index]
                    after = sysargs[index + 1:]
                    sysargs = before + verb_aliases[arg].split() + after
                    print(fmt(
                        "@!@{gf}==>@| Expanding alias "
                        "'@!@{yf}{alias}@|' "
                        "from '@{yf}{before} @!{alias}@{boldoff}{after}@|' "
                        "to '@{yf}{before} @!{expansion}@{boldoff}{after}@|'"
                    ).format(
                        alias=arg,
                        expansion=verb_aliases[arg],
                        before=' '.join([cmd] + before),
                        after=(' '.join([''] + after) if after else '')
                    ))
                    expanding_verb_aliases = True
                    # Prevent the alias from being used again, to prevent infinite recursion
                    used_aliases.append(arg)
                    del verb_aliases[arg]
                break

    # Determine the verb, splitting arguments into pre and post verb
    verb = None
    pre_verb_args = []
    post_verb_args = []
    for index, arg in enumerate(sysargs):
        # If the arg does not start with a `-` then it is a positional argument
        # The first positional argument must be the verb
        if not arg.startswith('-'):
            verb = arg
            post_verb_args = sysargs[index + 1:]
            break
        # If the `-h` or `--help` option comes before the verb, parse_args
        if arg in ['-h', '--help']:
            parser.parse_args(sysargs)
        # Otherwise it is a pre-verb option
        pre_verb_args.append(arg)

    # Error on no verb provided
    if verb is None:
        print(parser.format_usage())
        sys.exit("Error: No verb provided.")
    # Error on unknown verb provided
    if verb not in verbs:
        print(parser.format_usage())
        sys.exit("Error: Unknown verb '{0}' provided.".format(verb))

    # First allow the verb's argument preprocessor to strip any args
    # and return any "extra" information it wants as a dict
    processed_post_verb_args, extras = argument_preprocessors[verb](post_verb_args)
    # Then allow argparse to process the left over post-verb arguments along
    # with the pre-verb arguments and the verb itself
    args = parser.parse_args(pre_verb_args + [verb] + processed_post_verb_args)
    # Extend the argparse result with the extras from the preprocessor
    for key, value in extras.items():
        setattr(args, key, value)

    # Finally call the subparser's main function with the processed args
    # and the extras which the preprocessor may have returned
    sys.exit(args.main(args) or 0)
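
The alias expansion loop is easiest to see in isolation. Below is a minimal sketch of the same strategy with a hard-coded alias table instead of get_verb_aliases(); expand_aliases is an illustrative name, not a catkin_tools function.

def expand_aliases(sysargs, verb_aliases):
    """Expand the first positional argument repeatedly, guarding against loops."""
    used = set()
    expanded = True
    while expanded:
        expanded = False
        for index, arg in enumerate(sysargs):
            if arg.startswith('-'):
                continue
            if arg in verb_aliases and arg not in used:
                sysargs = sysargs[:index] + verb_aliases[arg].split() + sysargs[index + 1:]
                used.add(arg)
                expanded = True
            # only the first positional argument can be the verb
            break
    return sysargs


# e.g. 'catkin b --no-deps' becomes 'catkin build --no-deps'
print(expand_aliases(['b', '--no-deps'], {'b': 'build'}))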