Example #1
    def run(self):
        queued_jobs = []
        active_jobs = []
        completed_jobs = {}
        failed_jobs = []
        warned_jobs = []

        cumulative_times = dict()
        start_times = dict()
        active_stages = dict()

        start_time = self.pre_start_time or time.time()
        last_update_time = time.time()

        # If the status rate is too low, just disable it
        if self.active_status_rate < 1E-3:
            self.show_active_status = False
        else:
            update_duration = 1.0 / self.active_status_rate

        # Disable the wide log padding if the status is disabled
        if not self.show_active_status:
            disable_wide_log()

        while True:
            # Check if we should stop
            if not self.keep_running:
                wide_log(
                    clr('[{}] An internal error occurred!').format(self.label))
                return

            # Write a continuously-updated status line
            if self.show_active_status:

                # Try to get an event from the queue (non-blocking)
                try:
                    event = self.event_queue.get(False)
                except Empty:
                    # Determine if the status should be shown based on the desired
                    # status rate
                    elapsed_time = time.time() - last_update_time
                    show_status_now = elapsed_time > update_duration

                    if show_status_now:
                        # Print live status (overwrites last line)
                        status_line = clr(
                            '[{} {} s] [{}/{} complete] [{}/{} jobs] [{} queued]'
                        ).format(
                            self.label,
                            format_time_delta_short(time.time() - start_time),
                            len(completed_jobs), len(self.jobs),
                            job_server.running_jobs(), job_server.max_jobs(),
                            len(queued_jobs) + len(active_jobs) -
                            len(active_stages))

                        # Show failed jobs
                        if len(failed_jobs) > 0:
                            status_line += clr(
                                ' [@!@{rf}{}@| @{rf}failed@|]').format(
                                    len(failed_jobs))

                        # Check load / mem
                        if not job_server.load_ok():
                            status_line += clr(' [@!@{rf}High Load@|]')
                        if not job_server.mem_ok():
                            status_line += clr(' [@!@{rf}Low Memory@|]')

                        # Add active jobs
                        if len(active_jobs) == 0:
                            status_line += clr(
                                ' @/@!@{kf}Waiting for jobs...@|')
                        else:
                            active_labels = []

                            for j, (s, t, p) in active_stages.items():
                                d = format_time_delta_short(
                                    cumulative_times[j] + time.time() - t)
                                if p == '':
                                    active_labels.append(
                                        clr('[{}:{} - {}]').format(j, s, d))
                                else:
                                    active_labels.append(
                                        clr('[{}:{} ({}%) - {}]').format(
                                            j, s, p, d))

                            status_line += ' ' + ' '.join(active_labels)

                        # Print the status line
                        # wide_log(status_line)
                        wide_log(status_line, rhs='', end='\r')
                        sys.stdout.flush()

                        # Store this update time
                        last_update_time = time.time()
                    else:
                        time.sleep(
                            max(0.0, min(update_duration - elapsed_time,
                                         0.01)))

                    # Only continue when no event was received
                    continue
            else:
                # Try to get an event from the queue (blocking)
                try:
                    event = self.event_queue.get(True)
                except Empty:
                    break

            # A `None` event is a signal to terminate
            if event is None:
                break

            # Handle the received events
            eid = event.event_id

            if 'JOB_STATUS' == eid:
                queued_jobs = event.data['queued']
                active_jobs = event.data['active']
                completed_jobs = event.data['completed']

                # Check if all jobs have finished in some way
                if all([
                        len(event.data[t]) == 0
                        for t in ['pending', 'queued', 'active']
                ]):
                    break

            elif 'STARTED_JOB' == eid:
                cumulative_times[event.data['job_id']] = 0.0
                wide_log(
                    clr('Starting >>> {:<{}}').format(event.data['job_id'],
                                                      self.max_jid_length))

            elif 'FINISHED_JOB' == eid:
                duration = format_time_delta(
                    cumulative_times[event.data['job_id']])

                if event.data['succeeded']:
                    wide_log(
                        clr('Finished <<< {:<{}} [ {} ]').format(
                            event.data['job_id'], self.max_jid_length,
                            duration))
                else:
                    failed_jobs.append(event.data['job_id'])
                    wide_log(
                        clr('Failed <<< {:<{}} [ {} ]').format(
                            event.data['job_id'], self.max_jid_length,
                            duration))

            elif 'ABANDONED_JOB' == eid:
                # Create a human-readable reason string
                if 'DEP_FAILED' == event.data['reason']:
                    direct = event.data['dep_job_id'] == event.data[
                        'direct_dep_job_id']
                    if direct:
                        reason = clr('Depends on failed job {}').format(
                            event.data['dep_job_id'])
                    else:
                        reason = clr('Depends on failed job {} via {}').format(
                            event.data['dep_job_id'],
                            event.data['direct_dep_job_id'])
                elif 'PEER_FAILED' == event.data['reason']:
                    reason = clr('Unrelated job failed')
                elif 'MISSING_DEPS' == event.data['reason']:
                    reason = clr('Depends on unknown jobs: {}').format(
                        ', '.join([
                            clr('@!{}@|').format(jid)
                            for jid in event.data['dep_ids']
                        ]))

                wide_log(
                    clr('Abandoned <<< {:<{}} [ {} ]').format(
                        event.data['job_id'], self.max_jid_length, reason))

            elif 'STARTED_STAGE' == eid:
                active_stages[event.data['job_id']] = [
                    event.data['stage_label'], event.time, ''
                ]
                start_times[event.data['job_id']] = event.time

                if self.show_stage_events:
                    wide_log(
                        clr('Starting >> {}:{}').format(
                            event.data['job_id'], event.data['stage_label']))

            elif 'STAGE_PROGRESS' == eid:
                active_stages[event.data['job_id']][2] = event.data['percent']

            elif 'SUBPROCESS' == eid:
                if self.show_stage_events:
                    wide_log(
                        clr('Subprocess > {}:{} `{}`').format(
                            event.data['job_id'], event.data['stage_label'],
                            event.data['stage_repro']))

            elif 'FINISHED_STAGE' == eid:
                # Get the stage duration
                duration = event.time - start_times[event.data['job_id']]
                cumulative_times[event.data['job_id']] += duration

                # This is no longer the active stage for this job
                del active_stages[event.data['job_id']]

                header_border = None
                header_border_file = sys.stdout
                header_title = None
                header_title_file = sys.stdout
                lines = []
                footer_title = None
                footer_title_file = sys.stdout
                footer_border = None
                footer_border_file = sys.stdout

                # Generate headers / borders for output
                if event.data['succeeded']:
                    footer_title = clr('Finished << {}:{}').format(
                        event.data['job_id'], event.data['stage_label'])

                    if len(event.data['stderr']) > 0:
                        # Mark that this job warned about something
                        if event.data['job_id'] not in warned_jobs:
                            warned_jobs.append(event.data['job_id'])

                        # Output contains warnings
                        header_border = clr('@!@{yf}' + '_' *
                                            (terminal_width() - 1) + '@|')
                        header_border_file = sys.stderr
                        header_title = clr('Warnings << {}:{} {}').format(
                            event.data['job_id'], event.data['stage_label'],
                            event.data['logfile_filename'])
                        header_title_file = sys.stderr
                        footer_border = clr('@{yf}' + '.' *
                                            (terminal_width() - 1) + '@|')
                        footer_border_file = sys.stderr
                    else:
                        # Normal output, no warnings
                        header_title = clr('Output << {}:{} {}').format(
                            event.data['job_id'], event.data['stage_label'],
                            event.data['logfile_filename'])

                    # Don't print footer title
                    if not self.show_stage_events:
                        footer_title = None
                else:
                    # Output contains errors
                    header_border = clr('@!@{rf}' + '_' *
                                        (terminal_width() - 1) + '@|')
                    header_border_file = sys.stderr
                    header_title = clr('Errors << {}:{} {}').format(
                        event.data['job_id'], event.data['stage_label'],
                        event.data['logfile_filename'])
                    header_title_file = sys.stderr
                    footer_border = clr('@{rf}' + '.' *
                                        (terminal_width() - 1) + '@|')
                    footer_border_file = sys.stderr

                    footer_title = clr(
                        'Failed << {}:{:<{}} [ Exited with code {} ]').format(
                            event.data['job_id'], event.data['stage_label'],
                            max(
                                0, self.max_jid_length -
                                len(event.data['job_id'])),
                            event.data['retcode'])
                    footer_title_file = sys.stderr

                lines_target = sys.stdout
                if self.show_buffered_stdout:
                    if len(event.data['interleaved']) > 0:
                        lines = [
                            line for line in
                            event.data['interleaved'].splitlines(True)
                            if (self.show_compact_io is False
                                or len(line.strip()) > 0)
                        ]
                    else:
                        header_border = None
                        header_title = None
                        footer_border = None
                elif self.show_buffered_stderr:
                    if len(event.data['stderr']) > 0:
                        lines = [
                            line
                            for line in event.data['stderr'].splitlines(True)
                            if (self.show_compact_io is False
                                or len(line.strip()) > 0)
                        ]
                        lines_target = sys.stderr
                    else:
                        header_border = None
                        header_title = None
                        footer_border = None

                if len(lines) > 0:
                    if self.show_repro_cmd:
                        if event.data['repro'] is not None:
                            lines.append(
                                clr('@!@{kf}{}@|\n').format(
                                    event.data['repro']))

                    # Print the output
                    if header_border:
                        wide_log(header_border, file=header_border_file)
                    if header_title:
                        wide_log(header_title, file=header_title_file)
                    if len(lines) > 0:
                        wide_log(''.join(lines), end='\r', file=lines_target)
                    if footer_border:
                        wide_log(footer_border, file=footer_border_file)
                    if footer_title:
                        wide_log(footer_title, file=footer_title_file)

            elif 'STDERR' == eid:
                if self.show_live_stderr and len(event.data['data']) > 0:
                    wide_log(self.format_interleaved_lines(event.data),
                             end='\r',
                             file=sys.stderr)

            elif 'STDOUT' == eid:
                if self.show_live_stdout and len(event.data['data']) > 0:
                    wide_log(self.format_interleaved_lines(event.data),
                             end='\r')

            elif 'MESSAGE' == eid:
                wide_log(event.data['msg'])

        # Print the full summary
        if self.show_full_summary:
            self.print_exec_summary(completed_jobs, warned_jobs, failed_jobs)

        # Print a compact summary
        if self.show_summary or self.show_full_summary:
            self.print_compact_summary(completed_jobs, warned_jobs,
                                       failed_jobs)

        # Print final runtime
        wide_log(
            clr('[{}] Runtime: {} total.').format(
                self.label, format_time_delta(time.time() - start_time)))
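
A note on driving this loop: run() is fed entirely through self.event_queue, whose items carry an event_id, a timestamp, and a data payload, with a bare None acting as the termination sentinel. Below is a minimal sketch of a compatible producer; ExecutionEvent is a hypothetical stand-in that models only the fields run() actually reads.

import time
from queue import Queue


class ExecutionEvent(object):
    """Hypothetical stand-in for the executor's event type."""

    def __init__(self, event_id, **data):
        self.event_id = event_id  # e.g. 'STARTED_JOB', 'MESSAGE'
        self.time = time.time()   # used by run() for stage timing
        self.data = data          # payload accessed via event.data[...]


event_queue = Queue()
event_queue.put(ExecutionEvent('STARTED_JOB', job_id='pkg_a'))
event_queue.put(ExecutionEvent('MESSAGE', msg='worker says hello'))
event_queue.put(None)  # a `None` event tells run() to terminate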
Example #2
def build_isolated_workspace(context,
                             packages=None,
                             start_with=None,
                             no_deps=False,
                             jobs=None,
                             force_cmake=False,
                             force_color=False,
                             quiet=False,
                             interleave_output=False,
                             no_status=False,
                             lock_install=False,
                             no_notify=False):
    """Builds a catkin workspace in isolation

    This function will find all of the packages in the source space, start some
    executors, feed them packages to build based on dependencies and topological
    ordering, and then monitor the output of the executors, handling logging of
    the builds, starting builds, failing builds, and finishing builds of
    packages, and handling the shutdown of the executors when appropriate.

    :param context: context in which to build the catkin workspace
    :type context: :py:class:`catkin_tools.verbs.catkin_build.context.Context`
    :param packages: list of packages to build, by default their dependencies will also be built
    :type packages: list
    :param start_with: package to start with, skipping all packages which precede it in the topological order
    :type start_with: str
    :param no_deps: If True, the dependencies of packages will not be built first
    :type no_deps: bool
    :param jobs: number of parallel package build jobs
    :type jobs: int
    :param force_cmake: forces invocation of CMake if True, default is False
    :type force_cmake: bool
    :param force_color: forces colored output even if terminal does not support it
    :type force_color: bool
    :param quiet: suppresses the output of commands unless there is an error
    :type quiet: bool
    :param interleave_output: prints the output of commands as they are received
    :type interleave_output: bool
    :param no_status: disables status bar
    :type no_status: bool
    :param lock_install: causes executors to synchronize on access of install commands
    :type lock_install: bool
    :param no_notify: suppresses system notifications
    :type no_notify: bool

    :raises: SystemExit if buildspace is a file or no packages were found in the source space
        or if the provided options are invalid
    """
    # If no_deps is given, ensure packages to build are provided
    if no_deps and packages is None:
        sys.exit("With --no-deps, you must specify packages to build.")
    # Make sure there is a build folder and it is not a file
    if os.path.exists(context.build_space_abs):
        if os.path.isfile(context.build_space_abs):
            sys.exit(
                clr("@{rf}Error:@| Build space '{0}' exists but is a file and not a folder."
                    .format(context.build_space_abs)))
    # If it doesn't exist, create it
    else:
        log("Creating build space directory, '{0}'".format(
            context.build_space_abs))
        os.makedirs(context.build_space_abs)

    # Check for catkin_make droppings
    if context.corrupted_by_catkin_make():
        sys.exit(
            clr("@{rf}Error:@| Build space `{0}` exists but appears to have previously been "
                "created by the `catkin_make` or `catkin_make_isolated` tool. "
                "Please choose a different directory to use with `catkin build` "
                "or clean the build space.".format(context.build_space_abs)))

    # Declare a buildspace marker describing the build config for error checking
    buildspace_marker_data = {
        'workspace': context.workspace,
        'profile': context.profile,
        'install': context.install,
        'install_space': context.install_space_abs,
        'devel_space': context.devel_space_abs,
        'source_space': context.source_space_abs
    }

    # Check build config
    if os.path.exists(
            os.path.join(context.build_space_abs, BUILDSPACE_MARKER_FILE)):
        with open(os.path.join(
                context.build_space_abs,
                BUILDSPACE_MARKER_FILE)) as buildspace_marker_file:
            existing_buildspace_marker_data = yaml.safe_load(buildspace_marker_file)
            misconfig_lines = ''
            for (k, v) in existing_buildspace_marker_data.items():
                new_v = buildspace_marker_data.get(k, None)
                if new_v != v:
                    misconfig_lines += (
                        '\n - %s: %s (stored) is not %s (commanded)' %
                        (k, v, new_v))
            if len(misconfig_lines) > 0:
                sys.exit(
                    clr("\n@{rf}Error:@| Attempting to build a catkin workspace using build space: "
                        "\"%s\" but that build space's most recent configuration "
                        "differs from the commanded one in ways which will cause "
                        "problems. Fix the following options or use @{yf}`catkin "
                        "clean -b`@| to remove the build space: %s" %
                        (context.build_space_abs, misconfig_lines)))

    # Write the current build config for config error checking
    with open(os.path.join(context.build_space_abs, BUILDSPACE_MARKER_FILE),
              'w') as buildspace_marker_file:
        buildspace_marker_file.write(
            yaml.dump(buildspace_marker_data, default_flow_style=False))

    # Summarize the context
    summary_notes = []
    if force_cmake:
        summary_notes += [
            clr("@!@{cf}NOTE:@| Forcing CMake to run for each package.")
        ]
    log(context.summary(summary_notes))

    # Find list of packages in the workspace
    packages_to_be_built, packages_to_be_built_deps, all_packages = determine_packages_to_be_built(
        packages, context)
    completed_packages = []
    if no_deps:
        # Consider deps as "completed"
        completed_packages.extend(packages_to_be_built_deps)
    else:
        # Extend packages to be built to include their deps
        packages_to_be_built.extend(packages_to_be_built_deps)
    # Also resort
    packages_to_be_built = topological_order_packages(
        dict(packages_to_be_built))
    max_package_name_length = max(
        [len(pkg.name) for pth, pkg in packages_to_be_built])
    # Assert start_with package is in the workspace
    verify_start_with_option(start_with, packages, all_packages,
                             packages_to_be_built + packages_to_be_built_deps)

    # Setup pool of executors
    executors = {}
    # The communication queue can have ExecutorEvent's or str's passed into it from the executors
    comm_queue = Queue()
    # The job queue has Jobs put into it
    job_queue = Queue()
    # Lock for install space
    install_lock = Lock() if lock_install else FakeLock()
    # Determine the number of executors
    try:
        if jobs:
            jobs = int(jobs)
            if jobs < 1:
                sys.exit(
                    "Specified number of jobs '{0}' is not positive.".format(
                        jobs))
    except ValueError:
        sys.exit("Specified number of jobs '{0}' is not an integer.".format(jobs))
    try:
        jobs = cpu_count() if jobs is None else jobs
    except NotImplementedError:
        log('Failed to determine the cpu_count, falling back to 1 job as the default.')
        jobs = 1 if jobs is None else jobs
    # If only one set of jobs, turn on interleaving to get more responsive feedback
    if jobs == 1:
        # TODO: make the system more intelligent so that it can automatically switch to streaming output
        #       when only one job is building, even if multiple jobs could be building
        quiet = False
        interleave_output = True
    # Start the executors
    for x in range(jobs):
        e = Executor(x, context, comm_queue, job_queue, install_lock)
        executors[x] = e
        e.start()

    try:  # Finally close out now running executors
        # Variables for tracking running jobs and built/building packages
        start = time.time()
        total_packages = len(packages_to_be_built)
        package_count = 0
        running_jobs = {}
        log_dir = os.path.join(context.build_space_abs, 'build_logs')
        color = True
        if not force_color and not is_tty(sys.stdout):
            color = False
        out = OutputController(log_dir,
                               quiet,
                               interleave_output,
                               color,
                               max_package_name_length,
                               prefix_output=(jobs > 1))
        if no_status:
            disable_wide_log()

        # Prime the job_queue
        ready_packages = []
        if start_with is None:
            ready_packages = get_ready_packages(packages_to_be_built,
                                                running_jobs,
                                                completed_packages)
        while start_with is not None:
            ready_packages.extend(
                get_ready_packages(packages_to_be_built, running_jobs,
                                   completed_packages))
            while ready_packages:
                pth, pkg = ready_packages.pop(0)
                if pkg.name != start_with:
                    completed_packages.append(pkg.name)
                    package_count += 1
                    wide_log("[build] Skipping package '{0}'".format(pkg.name))
                else:
                    ready_packages.insert(0, (pth, pkg))
                    start_with = None
                    break
        running_jobs = queue_ready_packages(ready_packages, running_jobs,
                                            job_queue, context, force_cmake)
        assert running_jobs

        error_state = False
        errors = []

        def set_error_state():
            nonlocal error_state
            if error_state:
                return
            # Set the error state to prevent new jobs
            error_state = True
            # Empty the job queue
            while not job_queue.empty():
                job_queue.get()
            # Kill the executors by sending a None to the job queue for each of them
            for x in range(jobs):
                job_queue.put(None)

        # While any executors are running, process executor events
        while executors:
            try:
                # Try to get an event from the communications queue
                try:
                    event = comm_queue.get(True, 0.1)
                except Empty:
                    # timeout occurred, create null event to pass through checks
                    event = ExecutorEvent(None, None, None, None)

                if event.event_type == 'job_started':
                    package_count += 1
                    running_jobs[
                        event.package]['package_number'] = package_count
                    running_jobs[event.package]['start_time'] = time.time()
                    out.job_started(event.package)

                if event.event_type == 'command_started':
                    out.command_started(event.package, event.data['cmd'],
                                        event.data['location'])

                if event.event_type == 'command_log':
                    out.command_log(event.package, event.data['message'])

                if event.event_type == 'command_failed':
                    out.command_failed(event.package, event.data['cmd'],
                                       event.data['location'],
                                       event.data['retcode'])
                    # Add to list of errors
                    errors.append(event)
                    # Remove the command from the running jobs
                    del running_jobs[event.package]
                    # If it hasn't already been done, stop the executors
                    set_error_state()

                if event.event_type == 'command_finished':
                    out.command_finished(event.package, event.data['cmd'],
                                         event.data['location'],
                                         event.data['retcode'])

                if event.event_type == 'job_finished':
                    completed_packages.append(event.package)
                    run_time = format_time_delta(
                        time.time() -
                        running_jobs[event.package]['start_time'])
                    out.job_finished(event.package, run_time)
                    del running_jobs[event.package]
                    # If shutting down, do not add new packages
                    if error_state:
                        continue
                    # Calculate new packages
                    if not no_status:
                        wide_log('[build] Calculating new jobs...', end='\r')
                        sys.stdout.flush()
                    ready_packages = get_ready_packages(
                        packages_to_be_built, running_jobs, completed_packages)
                    running_jobs = queue_ready_packages(
                        ready_packages, running_jobs, job_queue, context,
                        force_cmake)
                    # Make sure there are jobs to be/being processed, otherwise kill the executors
                    if not running_jobs:
                        # Kill the executors by sending a None to the job queue for each of them
                        for x in range(jobs):
                            job_queue.put(None)

                # If an executor exit event, join it and remove it from the executors list
                if event.event_type == 'exit':
                    # If an executor has an exception, set the error state
                    if event.data['reason'] == 'exception':
                        set_error_state()
                        errors.append(event)
                    # Join and remove it
                    executors[event.executor_id].join()
                    del executors[event.executor_id]

                if not no_status:
                    # Update the status bar on the screen
                    executing_jobs = []
                    for name, value in running_jobs.items():
                        number = value['package_number']
                        start_time = value['start_time']
                        if number is None or start_time is None:
                            continue
                        executing_jobs.append({
                            'number': number,
                            'name': name,
                            'run_time': format_time_delta_short(time.time() - start_time)
                        })
                    msg = clr("[build - {run_time}] ").format(
                        run_time=format_time_delta_short(time.time() - start))
                    # If errors post those
                    if errors:
                        for error in errors:
                            msg += clr("[!{package}] ").format(
                                package=error.package)
                    # Print them in order of started number
                    for job_msg_args in sorted(
                            executing_jobs, key=lambda args: args['number']):
                        msg += clr("[{name} - {run_time}] ").format(
                            **job_msg_args)
                    msg_rhs = clr(
                        "[{0}/{1} Active | {2}/{3} Completed]").format(
                            len(executing_jobs), len(executors),
                            len(packages) if no_deps else
                            len(completed_packages), total_packages)
                    # Update title bar
                    sys.stdout.write("\x1b]2;[build] {0}/{1}\x07".format(
                        len(packages) if no_deps else len(completed_packages),
                        total_packages))
                    # Update status bar
                    wide_log(msg, rhs=msg_rhs, end='\r')
                    sys.stdout.flush()
            except KeyboardInterrupt:
                wide_log("[build] User interrupted, stopping.")
                set_error_state()
        # All executors have shutdown
        sys.stdout.write("\x1b]2;\x07")
        if not errors:
            if context.isolate_devel:
                if not context.install:
                    _create_unmerged_devel_setup(context)
                else:
                    _create_unmerged_devel_setup_for_install(context)
            wide_log("[build] Finished.")
            if not no_notify:
                notify("Build Finished",
                       "{0} packages built".format(total_packages))
            return 0
        else:
            wide_log(clr("[build] There were @!@{rf}errors@|:"))
            if not no_notify:
                notify("Build Failed",
                       "there were {0} errors".format(len(errors)))
            for error in errors:
                if error.event_type == 'exit':
                    wide_log(
                        """Executor '{exec_id}' had an unhandled exception while processing package '{package}':

    {data[exc]}
    """.format(exec_id=error.executor_id + 1, **error.__dict__))
                else:
                    wide_log(
                        clr("""
    @{rf}Failed@| to build package '@{cf}{package}@|' because the following command:

        @!@{kf}# Command run in directory: @|{location}
        {cmd.cmd_str}

    @{rf}Exited@| with return code: @!{retcode}@|""").format(
                            package=error.package, **error.data))
            sys.exit(1)
    finally:
        # Ensure executors go down
        for x in range(jobs):
            job_queue.put(None)
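
For reference, a hypothetical invocation of build_isolated_workspace; constructing the Context is deliberately elided here because it varies across catkin_tools versions, so the context variable below is an assumption rather than part of the documented API.

# Sketch only: `context` is assumed to be a catkin_tools Context built
# elsewhere (see the docstring above for the expected type).
retcode = build_isolated_workspace(
    context,
    packages=['my_package'],  # build this package (deps included by default)
    jobs=4,                   # four parallel executors
    quiet=True,               # suppress command output unless a command fails
    no_notify=True,           # skip desktop notifications
)
# Returns 0 on success; on build errors it prints a summary and exits via sys.exit(1).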
Example #3
    def run(self):
        queued_jobs = []
        active_jobs = []
        completed_jobs = {}
        failed_jobs = []
        warned_jobs = []

        cumulative_times = dict()
        start_times = dict()
        active_stages = dict()

        start_time = self.pre_start_time or time.time()
        last_update_time = time.time()

        # If the status rate is too low, just disable it
        if self.active_status_rate < 1E-3:
            self.show_active_status = False
        else:
            update_duration = 1.0 / self.active_status_rate

        # Disable the wide log padding if the status is disabled
        if not self.show_active_status:
            disable_wide_log()

        while True:
            # Check if we should stop
            if not self.keep_running:
                wide_log(clr('[{}] An internal error occurred!').format(self.label))
                return

            # Write a continuously-updated status line
            if self.show_active_status:

                # Try to get an event from the queue (non-blocking)
                try:
                    event = self.event_queue.get(False)
                except Empty:
                    # Determine if the status should be shown based on the desired
                    # status rate
                    elapsed_time = time.time() - last_update_time
                    show_status_now = elapsed_time > update_duration

                    if show_status_now:
                        # Print live status (overwrites last line)
                        status_line = clr('[{} {} s] [{}/{} complete] [{}/{} jobs] [{} queued]').format(
                            self.label,
                            format_time_delta_short(time.time() - start_time),
                            len(completed_jobs),
                            len(self.jobs),
                            job_server.running_jobs(),
                            job_server.max_jobs(),
                            len(queued_jobs) + len(active_jobs) - len(active_stages)
                        )

                        # Show failed jobs
                        if len(failed_jobs) > 0:
                            status_line += clr(' [@!@{rf}{}@| @{rf}failed@|]').format(len(failed_jobs))

                        # Check load / mem
                        if not job_server.load_ok():
                            status_line += clr(' [@!@{rf}High Load@|]')
                        if not job_server.mem_ok():
                            status_line += clr(' [@!@{rf}Low Memory@|]')

                        # Add active jobs
                        if len(active_jobs) == 0:
                            status_line += clr(' @/@!@{kf}Waiting for jobs...@|')
                        else:
                            active_labels = []

                            for j, (s, t, p) in active_stages.items():
                                d = format_time_delta_short(cumulative_times[j] + time.time() - t)
                                if p == '':
                                    active_labels.append(clr('[{}:{} - {}]').format(j, s, d))
                                else:
                                    active_labels.append(clr('[{}:{} ({}%) - {}]').format(j, s, p, d))

                            status_line += ' ' + ' '.join(active_labels)

                        # Print the status line
                        # wide_log(status_line)
                        wide_log(status_line, rhs='', end='\r')
                        sys.stdout.flush()

                        # Store this update time
                        last_update_time = time.time()
                    else:
                        time.sleep(max(0.0, min(update_duration - elapsed_time, 0.01)))

                    # Only continue when no event was received
                    continue
            else:
                # Try to get an event from the queue (blocking)
                try:
                    event = self.event_queue.get(True)
                except Empty:
                    break

            # A `None` event is a signal to terminate
            if event is None:
                break

            # Handle the received events
            eid = event.event_id

            if 'JOB_STATUS' == eid:
                queued_jobs = event.data['queued']
                active_jobs = event.data['active']
                completed_jobs = event.data['completed']

                # Check if all jobs have finished in some way
                if all([len(event.data[t]) == 0 for t in ['pending', 'queued', 'active']]):
                    break

            elif 'STARTED_JOB' == eid:
                cumulative_times[event.data['job_id']] = 0.0
                wide_log(clr('Starting >>> {:<{}}').format(
                    event.data['job_id'],
                    self.max_jid_length))

            elif 'FINISHED_JOB' == eid:
                duration = format_time_delta(cumulative_times[event.data['job_id']])

                if event.data['succeeded']:
                    wide_log(clr('Finished <<< {:<{}} [ {} ]').format(
                        event.data['job_id'],
                        self.max_jid_length,
                        duration))
                else:
                    failed_jobs.append(event.data['job_id'])
                    wide_log(clr('Failed <<< {:<{}} [ {} ]').format(
                        event.data['job_id'],
                        self.max_jid_length,
                        duration))

            elif 'ABANDONED_JOB' == eid:
                # Create a human-readable reason string
                if 'DEP_FAILED' == event.data['reason']:
                    direct = event.data['dep_job_id'] == event.data['direct_dep_job_id']
                    if direct:
                        reason = clr('Depends on failed job {}').format(event.data['dep_job_id'])
                    else:
                        reason = clr('Depends on failed job {} via {}').format(
                            event.data['dep_job_id'],
                            event.data['direct_dep_job_id'])
                elif 'PEER_FAILED' == event.data['reason']:
                    reason = clr('Unrelated job failed')
                elif 'MISSING_DEPS' == event.data['reason']:
                    reason = clr('Depends on unknown jobs: {}').format(
                        ', '.join([clr('@!{}@|').format(jid) for jid in event.data['dep_ids']]))

                wide_log(clr('Abandoned <<< {:<{}} [ {} ]').format(
                    event.data['job_id'],
                    self.max_jid_length,
                    reason))

            elif 'STARTED_STAGE' == eid:
                active_stages[event.data['job_id']] = [event.data['stage_label'], event.time, '']
                start_times[event.data['job_id']] = event.time

                if self.show_stage_events:
                    wide_log(clr('Starting >> {}:{}').format(
                        event.data['job_id'],
                        event.data['stage_label']))

            elif 'STAGE_PROGRESS' == eid:
                active_stages[event.data['job_id']][2] = event.data['percent']

            elif 'SUBPROCESS' == eid:
                if self.show_stage_events:
                    wide_log(clr('Subprocess > {}:{} `{}`').format(
                        event.data['job_id'],
                        event.data['stage_label'],
                        event.data['stage_repro']))

            elif 'FINISHED_STAGE' == eid:
                # Get the stage duration
                duration = event.time - start_times[event.data['job_id']]
                cumulative_times[event.data['job_id']] += duration

                # This is no longer the active stage for this job
                del active_stages[event.data['job_id']]

                header_border = None
                header_border_file = sys.stdout
                header_title = None
                header_title_file = sys.stdout
                lines = []
                footer_title = None
                footer_title_file = sys.stdout
                footer_border = None
                footer_border_file = sys.stdout

                # Generate headers / borders for output
                if event.data['succeeded']:
                    footer_title = clr(
                        'Finished << {}:{}').format(
                            event.data['job_id'],
                            event.data['stage_label'])

                    if len(event.data['stderr']) > 0:
                        # Mark that this job warned about something
                        if event.data['job_id'] not in warned_jobs:
                            warned_jobs.append(event.data['job_id'])

                        # Output contains warnings
                        header_border = clr('@!@{yf}' + '_' * (terminal_width() - 1) + '@|')
                        header_border_file = sys.stderr
                        header_title = clr(
                            'Warnings << {}:{} {}').format(
                                event.data['job_id'],
                                event.data['stage_label'],
                                event.data['logfile_filename'])
                        header_title_file = sys.stderr
                        footer_border = clr('@{yf}' + '.' * (terminal_width() - 1) + '@|')
                        footer_border_file = sys.stderr
                    else:
                        # Normal output, no warnings
                        header_title = clr(
                            'Output << {}:{} {}').format(
                                event.data['job_id'],
                                event.data['stage_label'],
                                event.data['logfile_filename'])

                    # Don't print footer title
                    if not self.show_stage_events:
                        footer_title = None
                else:
                    # Output contains errors
                    header_border = clr('@!@{rf}' + '_' * (terminal_width() - 1) + '@|')
                    header_border_file = sys.stderr
                    header_title = clr(
                        'Errors << {}:{} {}').format(
                            event.data['job_id'],
                            event.data['stage_label'],
                            event.data['logfile_filename'])
                    header_title_file = sys.stderr
                    footer_border = clr('@{rf}' + '.' * (terminal_width() - 1) + '@|')
                    footer_border_file = sys.stderr

                    footer_title = clr(
                        'Failed << {}:{:<{}} [ Exited with code {} ]').format(
                            event.data['job_id'],
                            event.data['stage_label'],
                            max(0, self.max_jid_length - len(event.data['job_id'])),
                            event.data['retcode'])
                    footer_title_file = sys.stderr

                lines_target = sys.stdout
                if self.show_buffered_stdout:
                    if len(event.data['interleaved']) > 0:
                        lines = [
                            line
                            for line in event.data['interleaved'].splitlines(True)
                            if (self.show_compact_io is False or len(line.strip()) > 0)
                        ]
                    else:
                        header_border = None
                        header_title = None
                        footer_border = None
                elif self.show_buffered_stderr:
                    if len(event.data['stderr']) > 0:
                        lines = [
                            line
                            for line in event.data['stderr'].splitlines(True)
                            if (self.show_compact_io is False or len(line.strip()) > 0)
                        ]
                        lines_target = sys.stderr
                    else:
                        header_border = None
                        header_title = None
                        footer_border = None

                if len(lines) > 0:
                    if self.show_repro_cmd:
                        if event.data['repro'] is not None:
                            lines.append(clr('@!@{kf}{}@|\n').format(event.data['repro']))

                    # Print the output
                    if header_border:
                        wide_log(header_border, file=header_border_file)
                    if header_title:
                        wide_log(header_title, file=header_title_file)
                    if len(lines) > 0:
                        wide_log(''.join(lines), end='\r', file=lines_target)
                    if footer_border:
                        wide_log(footer_border, file=footer_border_file)
                    if footer_title:
                        wide_log(footer_title, file=footer_title_file)

            elif 'STDERR' == eid:
                if self.show_live_stderr and len(event.data['data']) > 0:
                    wide_log(self.format_interleaved_lines(event.data), end='\r', file=sys.stderr)

            elif 'STDOUT' == eid:
                if self.show_live_stdout and len(event.data['data']) > 0:
                    wide_log(self.format_interleaved_lines(event.data), end='\r')

            elif 'MESSAGE' == eid:
                wide_log(event.data['msg'])

        # Print the full summary
        if self.show_full_summary:
            self.print_exec_summary(completed_jobs, warned_jobs, failed_jobs)

        # Print a compact summary
        if self.show_summary or self.show_full_summary:
            self.print_compact_summary(completed_jobs, warned_jobs, failed_jobs)

        # Print final runtime
        wide_log(clr('[{}] Runtime: {} total.').format(
            self.label,
            format_time_delta(time.time() - start_time)))
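
The timing logic in run() keeps one cumulative counter per job: each FINISHED_STAGE event folds that stage's duration into cumulative_times, and the live status line adds the elapsed time of whichever stage is still in flight. A self-contained sketch of that bookkeeping, with placeholder names:

import time

cumulative_times = {}  # job_id -> total duration of finished stages
active_stages = {}     # job_id -> (stage_label, start_time, percent)

# STARTED_JOB / STARTED_STAGE
cumulative_times['pkg_a'] = 0.0
active_stages['pkg_a'] = ('cmake', time.time(), '')

time.sleep(0.1)  # the stage "runs"

# Live status: finished-stage total plus the active stage's elapsed time
label, started, _ = active_stages['pkg_a']
print('[pkg_a:{} - {:.1f}s]'.format(
    label, cumulative_times['pkg_a'] + time.time() - started))

# FINISHED_STAGE: fold the stage duration into the job total
cumulative_times['pkg_a'] += time.time() - started
del active_stages['pkg_a']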
Example #4
def build_isolated_workspace(
    context,
    packages=None,
    start_with=None,
    no_deps=False,
    jobs=None,
    force_cmake=False,
    force_color=False,
    quiet=False,
    interleave_output=False,
    no_status=False,
    lock_install=False,
    no_notify=False
):
    """Builds a catkin workspace in isolation

    This function will find all of the packages in the source space, start some
    executors, feed them packages to build based on dependencies and topological
    ordering, and then monitor the output of the executors, handling logging of
    the builds, starting builds, failing builds, and finishing builds of
    packages, and handling the shutdown of the executors when appropriate.

    :param context: context in which to build the catkin workspace
    :type context: :py:class:`catkin_tools.verbs.catkin_build.context.Context`
    :param packages: list of packages to build, by default their dependencies will also be built
    :type packages: list
    :param start_with: package to start with, skipping all packages which precede it in the topological order
    :type start_with: str
    :param no_deps: If True, the dependencies of packages will not be built first
    :type no_deps: bool
    :param jobs: number of parallel package build jobs
    :type jobs: int
    :param force_cmake: forces invocation of CMake if True, default is False
    :type force_cmake: bool
    :param force_color: forces colored output even if terminal does not support it
    :type force_color: bool
    :param quiet: suppresses the output of commands unless there is an error
    :type quiet: bool
    :param interleave_output: prints the output of commands as they are received
    :type interleave_output: bool
    :param no_status: disables status bar
    :type no_status: bool
    :param lock_install: causes executors to synchronize on access of install commands
    :type lock_install: bool
    :param no_notify: suppresses system notifications
    :type no_notify: bool

    :raises: SystemExit if buildspace is a file or no packages were found in the source space
        or if the provided options are invalid
    """
    # If no_deps is given, ensure packages to build are provided
    if no_deps and packages is None:
        sys.exit("With --no-deps, you must specify packages to build.")
    # Make sure there is a build folder and it is not a file
    if os.path.exists(context.build_space_abs):
        if os.path.isfile(context.build_space_abs):
            sys.exit(clr(
                "@{rf}Error:@| Build space '{0}' exists but is a file and not a folder."
                .format(context.build_space_abs)))
    # If it doesn't exist, create it
    else:
        log("Creating build space directory, '{0}'".format(context.build_space_abs))
        os.makedirs(context.build_space_abs)

    # Check for catkin_make droppings
    if context.corrupted_by_catkin_make():
        sys.exit(
            clr("@{rf}Error:@| Build space `{0}` exists but appears to have previously been "
                "created by the `catkin_make` or `catkin_make_isolated` tool. "
                "Please choose a different directory to use with `catkin build` "
                "or clean the build space.".format(context.build_space_abs)))

    # Declare a buildspace marker describing the build config for error checking
    buildspace_marker_data = {
        'workspace': context.workspace,
        'profile': context.profile,
        'install': context.install,
        'install_space': context.install_space_abs,
        'devel_space': context.devel_space_abs,
        'source_space': context.source_space_abs}

    # Check build config
    if os.path.exists(os.path.join(context.build_space_abs, BUILDSPACE_MARKER_FILE)):
        with open(os.path.join(context.build_space_abs, BUILDSPACE_MARKER_FILE)) as buildspace_marker_file:
            existing_buildspace_marker_data = yaml.safe_load(buildspace_marker_file)
            misconfig_lines = ''
            for (k, v) in existing_buildspace_marker_data.items():
                new_v = buildspace_marker_data.get(k, None)
                if new_v != v:
                    misconfig_lines += (
                        '\n - %s: %s (stored) is not %s (commanded)' %
                        (k, v, new_v))
            if len(misconfig_lines) > 0:
                sys.exit(clr(
                    "\n@{rf}Error:@| Attempting to build a catkin workspace using build space: "
                    "\"%s\" but that build space's most recent configuration "
                    "differs from the commanded one in ways which will cause "
                    "problems. Fix the following options or use @{yf}`catkin "
                    "clean -b`@| to remove the build space: %s" %
                    (context.build_space_abs, misconfig_lines)))

    # Write the current build config for config error checking
    with open(os.path.join(context.build_space_abs, BUILDSPACE_MARKER_FILE), 'w') as buildspace_marker_file:
        buildspace_marker_file.write(yaml.dump(buildspace_marker_data, default_flow_style=False))

    # Summarize the context
    summary_notes = []
    if force_cmake:
        summary_notes += [clr("@!@{cf}NOTE:@| Forcing CMake to run for each package.")]
    log(context.summary(summary_notes))

    # Find list of packages in the workspace
    packages_to_be_built, packages_to_be_built_deps, all_packages = determine_packages_to_be_built(packages, context)
    completed_packages = []
    if no_deps:
        # Consider deps as "completed"
        completed_packages.extend(packages_to_be_built_deps)
    else:
        # Extend packages to be built to include their deps
        packages_to_be_built.extend(packages_to_be_built_deps)
    # Also resort
    packages_to_be_built = topological_order_packages(dict(packages_to_be_built))
    max_package_name_length = max([len(pkg.name) for pth, pkg in packages_to_be_built])
    # Assert start_with package is in the workspace
    verify_start_with_option(start_with, packages, all_packages, packages_to_be_built + packages_to_be_built_deps)

    # Setup pool of executors
    executors = {}
    # The communication queue can have ExecutorEvent's or str's passed into it from the executors
    comm_queue = Queue()
    # The job queue has Jobs put into it
    job_queue = Queue()
    # Lock for install space
    install_lock = Lock() if lock_install else FakeLock()
    # Determine the number of executors
    try:
        if jobs:
            jobs = int(jobs)
            if jobs < 1:
                sys.exit("Specified number of jobs '{0}' is not positive.".format(jobs))
    except ValueError:
        sys.exit("Specified number of jobs '{0}' is not an integer.".format(jobs))
    try:
        jobs = cpu_count() if jobs is None else jobs
    except NotImplementedError:
        log('Failed to determine the cpu_count, falling back to 1 job as the default.')
        jobs = 1 if jobs is None else jobs
    # If only one set of jobs, turn on interleaving to get more responsive feedback
    if jobs == 1:
        # TODO: make the system more intelligent so that it can automatically switch to streaming output
        #       when only one job is building, even if multiple jobs could be building
        quiet = False
        interleave_output = True
    # Start the executors
    for x in range(jobs):
        e = Executor(x, context, comm_queue, job_queue, install_lock)
        executors[x] = e
        e.start()

    # Variables for tracking running jobs and built/building packages
    start = time.time()
    total_packages = len(packages_to_be_built)
    package_count = 0
    running_jobs = {}
    log_dir = os.path.join(context.build_space_abs, 'build_logs')
    color = True
    if not force_color and not is_tty(sys.stdout):
        color = False
    out = OutputController(log_dir, quiet, interleave_output, color, max_package_name_length, prefix_output=(jobs > 1))
    if no_status:
        disable_wide_log()

    # Prime the job_queue
    ready_packages = []
    if start_with is None:
        ready_packages = get_ready_packages(packages_to_be_built, running_jobs, completed_packages)
    while start_with is not None:
        ready_packages.extend(get_ready_packages(packages_to_be_built, running_jobs, completed_packages))
        while ready_packages:
            pth, pkg = ready_packages.pop(0)
            if pkg.name != start_with:
                completed_packages.append(pkg.name)
                package_count += 1
                wide_log("[build] Skipping package '{0}'".format(pkg.name))
            else:
                ready_packages.insert(0, (pth, pkg))
                start_with = None
                break
    running_jobs = queue_ready_packages(ready_packages, running_jobs, job_queue, context, force_cmake)
    assert running_jobs

    error_state = False
    errors = []

    def set_error_state():
        nonlocal error_state
        if error_state:
            return
        # Set the error state to prevent new jobs
        error_state = True
        # Empty the job queue
        while not job_queue.empty():
            job_queue.get()
        # Kill the executors by sending a None to the job queue for each of them
        for x in range(jobs):
            job_queue.put(None)

    # While any executors are running, process executor events
    while executors:
        try:
            # Try to get an event from the communications queue
            try:
                event = comm_queue.get(True, 0.1)
            except Empty:
            # timeout occurred, create null event to pass through checks
                event = ExecutorEvent(None, None, None, None)

            if event.event_type == 'job_started':
                package_count += 1
                running_jobs[event.package]['package_number'] = package_count
                running_jobs[event.package]['start_time'] = time.time()
                out.job_started(event.package)

            if event.event_type == 'command_started':
                out.command_started(event.package, event.data['cmd'], event.data['location'])

            if event.event_type == 'command_log':
                out.command_log(event.package, event.data['message'])

            if event.event_type == 'command_failed':
                out.command_failed(event.package, event.data['cmd'], event.data['location'], event.data['retcode'])
                # Add to list of errors
                errors.append(event)
                # Remove the command from the running jobs
                del running_jobs[event.package]
                # If it hasn't already been done, stop the executors
                set_error_state()

            if event.event_type == 'command_finished':
                out.command_finished(event.package, event.data['cmd'], event.data['location'], event.data['retcode'])

            if event.event_type == 'job_finished':
                completed_packages.append(event.package)
                run_time = format_time_delta(time.time() - running_jobs[event.package]['start_time'])
                out.job_finished(event.package, run_time)
                del running_jobs[event.package]
                # If shutting down, do not add new packages
                if error_state:
                    continue
                # Calculate new packages
                if not no_status:
                    wide_log('[build] Calculating new jobs...', end='\r')
                    sys.stdout.flush()
                ready_packages = get_ready_packages(packages_to_be_built, running_jobs, completed_packages)
                running_jobs = queue_ready_packages(ready_packages, running_jobs, job_queue, context, force_cmake)
                # Make sure there are jobs to be/being processed, otherwise kill the executors
                if not running_jobs:
                    # Kill the executors by sending a None to the job queue for each of them
                    for x in range(jobs):
                        job_queue.put(None)

            # If an executor exit event, join it and remove it from the executors list
            if event.event_type == 'exit':
                # If an executor has an exception, set the error state
                if event.data['reason'] == 'exception':
                    set_error_state()
                    errors.append(event)
                # Join and remove it
                executors[event.executor_id].join()
                del executors[event.executor_id]

            if not no_status:
                # Update the status bar on the screen
                executing_jobs = []
                for name, value in running_jobs.items():
                    number, start_time = value['package_number'], value['start_time']
                    if number is None or start_time is None:
                        continue
                    executing_jobs.append({
                        'number': number,
                        'name': name,
                        'run_time': format_time_delta_short(time.time() - start_time)
                    })
                msg = clr("[build - {run_time}] ").format(run_time=format_time_delta_short(time.time() - start))
                # If errors post those
                if errors:
                    for error in errors:
                        msg += clr("[!{package}] ").format(package=error.package)
                # Print them in order of started number
                for job_msg_args in sorted(executing_jobs, key=lambda args: args['number']):
                    msg += clr("[{name} - {run_time}] ").format(**job_msg_args)
                msg_rhs = clr("[{0}/{1} Active | {2}/{3} Completed]").format(
                    len(executing_jobs),
                    len(executors),
                    len(packages) if no_deps else len(completed_packages),
                    total_packages
                )
                # Update title bar
                sys.stdout.write("\x1b]2;[build] {0}/{1}\x07".format(
                    len(packages) if no_deps else len(completed_packages),
                    total_packages
                ))
                # Update status bar
                wide_log(msg, rhs=msg_rhs, end='\r')
                sys.stdout.flush()
        except KeyboardInterrupt:
            wide_log("[build] User interrupted, stopping.")
            set_error_state()
    # All executors have shutdown
    sys.stdout.write("\x1b]2;\x07")
    if not errors:
        if context.isolate_devel:
            if not context.install:
                _create_unmerged_devel_setup(context)
            else:
                _create_unmerged_devel_setup_for_install(context)
        wide_log("[build] Finished.")
        if not no_notify:
            notify("Build Finished", "{0} packages built".format(total_packages))
        return 0
    else:
        wide_log(clr("[build] There were @!@{rf}errors@|:"))
        if not no_notify:
            notify("Build Failed", "there were {0} errors".format(len(errors)))
        for error in errors:
            if error.event_type == 'exit':
                wide_log("""Executor '{exec_id}' had an unhandled exception while processing package '{package}':

{data[exc]}
""".format(exec_id=error.executor_id + 1, **error.__dict__))
            else:
                wide_log(clr("""
@{rf}Failed@| to build package '@{cf}{package}@|' because the following command:

    @!@{kf}# Command run in directory: @|{location}
    {cmd.cmd_str}

@{rf}Exited@| with return code: @!{retcode}@|""").format(package=error.package, **error.data))
        sys.exit(1)
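
The build_isolated_workspace examples above also write to the terminal title bar directly: the "\x1b]2;...\x07" strings are the xterm OSC 2 escape sequence, and the empty-title write on shutdown clears it again. A small helper, shown here only to make that intent explicit:

import sys


def set_terminal_title(title):
    # xterm OSC 2 sequence; terminals that do not support it simply ignore it.
    sys.stdout.write("\x1b]2;{0}\x07".format(title))
    sys.stdout.flush()


set_terminal_title("[build] 3/10")  # mirrors the per-event progress updates
set_terminal_title("")              # mirrors the final reset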