Example #1
    def test_ctor_loop(self):
        loop = mock.Mock()
        lock = asyncio.Lock(loop=loop)
        self.assertIs(lock._loop, loop)

        lock = asyncio.Lock(loop=self.loop)
        self.assertIs(lock._loop, self.loop)
Example #2
    def test_release_no_waiters(self):
        lock = asyncio.Lock(loop=self.loop)
        self.loop.run_until_complete(lock.acquire())
        self.assertTrue(lock.locked())

        lock.release()
        self.assertFalse(lock.locked())
Example #3
    def test_ambiguous_loops(self):
        loop = self.new_test_loop()
        self.addCleanup(loop.close)

        lock = asyncio.Lock(loop=self.loop)
        with self.assertRaises(ValueError):
            asyncio.Condition(lock, loop=loop)
Example #4
    def test_acquire_cancel(self):
        lock = asyncio.Lock(loop=self.loop)
        self.assertTrue(self.loop.run_until_complete(lock.acquire()))

        task = asyncio.Task(lock.acquire(), loop=self.loop)
        self.loop.call_soon(task.cancel)
        self.assertRaises(asyncio.CancelledError, self.loop.run_until_complete,
                          task)
        self.assertFalse(lock._waiters)
Example #5
    def __init__(self, connection_factory, max_connections=6):
        assert max_connections > 0, \
            'max_connections must be positive, got {}'.format(max_connections)

        self._connection_factory = connection_factory
        self.max_connections = max_connections
        self.ready = set()
        self.busy = set()
        self._lock = trollius.Lock()
        self._condition = trollius.Condition(lock=self._lock)
        self._closed = False
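
The constructor above only wires up the pool's trollius Lock and Condition; the checkout logic itself is not shown. As a rough illustration of how a lock-backed condition is typically used for this, here is a minimal sketch in modern async/await syntax. The check_out/check_in method names are hypothetical and not part of the original class:

import asyncio


class SketchConnectionPool:
    def __init__(self, connection_factory, max_connections=6):
        assert max_connections > 0
        self._connection_factory = connection_factory
        self.max_connections = max_connections
        self.ready = set()
        self.busy = set()
        self._lock = asyncio.Lock()
        self._condition = asyncio.Condition(lock=self._lock)

    async def check_out(self):
        # Wait on the condition until a connection can be handed out.
        async with self._condition:
            while not self.ready and len(self.busy) >= self.max_connections:
                await self._condition.wait()
            connection = self.ready.pop() if self.ready else self._connection_factory()
            self.busy.add(connection)
            return connection

    async def check_in(self, connection):
        # Return a connection to the pool and wake one waiter.
        async with self._condition:
            self.busy.discard(connection)
            self.ready.add(connection)
            self._condition.notify()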
Example #6
    def test_context_manager(self):
        lock = asyncio.Lock(loop=self.loop)

        @asyncio.coroutine
        def acquire_lock():
            raise Return((yield From(lock)))

        with self.loop.run_until_complete(acquire_lock()):
            self.assertTrue(lock.locked())

        self.assertFalse(lock.locked())
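
Examples like this one use the trollius-era spellings yield From(lock) and loop=...; both are gone in current asyncio. For comparison, here is a minimal sketch of the same acquire/release pattern in modern Python (3.7+), assuming nothing beyond the standard library:

import asyncio


async def main():
    lock = asyncio.Lock()  # the loop= argument no longer exists

    # "async with" acquires the lock and releases it on exit, replacing
    # the old "with (yield From(lock))" context-manager idiom.
    async with lock:
        assert lock.locked()

    assert not lock.locked()


asyncio.run(main())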
Example #7
    def test_acquire(self):
        lock = asyncio.Lock(loop=self.loop)
        result = []

        self.assertTrue(self.loop.run_until_complete(lock.acquire()))

        @asyncio.coroutine
        def c1(result):
            if (yield From(lock.acquire())):
                result.append(1)
            raise Return(True)

        @asyncio.coroutine
        def c2(result):
            if (yield From(lock.acquire())):
                result.append(2)
            raise Return(True)

        @asyncio.coroutine
        def c3(result):
            if (yield From(lock.acquire())):
                result.append(3)
            raise Return(True)

        t1 = asyncio.Task(c1(result), loop=self.loop)
        t2 = asyncio.Task(c2(result), loop=self.loop)

        test_utils.run_briefly(self.loop)
        self.assertEqual([], result)

        lock.release()
        test_utils.run_briefly(self.loop)
        self.assertEqual([1], result)

        test_utils.run_briefly(self.loop)
        self.assertEqual([1], result)

        t3 = asyncio.Task(c3(result), loop=self.loop)

        lock.release()
        test_utils.run_briefly(self.loop)
        self.assertEqual([1, 2], result)

        lock.release()
        test_utils.run_briefly(self.loop)
        self.assertEqual([1, 2, 3], result)

        self.assertTrue(t1.done())
        self.assertTrue(t1.result())
        self.assertTrue(t2.done())
        self.assertTrue(t2.result())
        self.assertTrue(t3.done())
        self.assertTrue(t3.result())
Example #8
    def test_repr(self):
        lock = asyncio.Lock(loop=self.loop)
        self.assertTrue(repr(lock).endswith('[unlocked]>'))
        self.assertTrue(RGX_REPR.match(repr(lock)))

        @asyncio.coroutine
        def acquire_lock():
            yield From(lock.acquire())

        self.loop.run_until_complete(acquire_lock())
        self.assertTrue(repr(lock).endswith('[locked]>'))
        self.assertTrue(RGX_REPR.match(repr(lock)))
Example #9
    def test_context_manager_no_yield(self):
        lock = asyncio.Lock(loop=self.loop)

        try:
            with lock:
                self.fail('RuntimeError is not raised in with expression')
        except RuntimeError as err:
            self.assertEqual(
                str(err),
                '"yield From" should be used as context manager expression')

        self.assertFalse(lock.locked())
Example #10
    def test_lock(self):
        lock = asyncio.Lock(loop=self.loop)

        @asyncio.coroutine
        def acquire_lock():
            yield From(lock.acquire())
            raise Return(lock)

        res = self.loop.run_until_complete(acquire_lock())

        self.assertTrue(res)
        self.assertTrue(lock.locked())

        lock.release()
        self.assertFalse(lock.locked())
Example #11
    def test_cancel_race(self):
        # Several tasks:
        # - A acquires the lock
        # - B is blocked in acquire()
        # - C is blocked in acquire()
        #
        # Now, concurrently:
        # - B is cancelled
        # - A releases the lock
        #
        # If B's waiter is marked cancelled but not yet removed from
        # _waiters, A's release() call will crash when trying to set
        # B's waiter; instead, it should move on to C's waiter.

        # Setup: A has the lock, B and C are waiting.
        lock = asyncio.Lock(loop=self.loop)

        @asyncio.coroutine
        def lockit(name, blocker):
            yield From(lock.acquire())
            try:
                if blocker is not None:
                    yield From(blocker)
            finally:
                lock.release()

        fa = asyncio.Future(loop=self.loop)
        ta = asyncio.Task(lockit('A', fa), loop=self.loop)
        test_utils.run_briefly(self.loop, 2)
        self.assertTrue(lock.locked())
        tb = asyncio.Task(lockit('B', None), loop=self.loop)
        test_utils.run_briefly(self.loop, 2)
        self.assertEqual(len(lock._waiters), 1)
        tc = asyncio.Task(lockit('C', None), loop=self.loop)
        test_utils.run_briefly(self.loop, 2)
        self.assertEqual(len(lock._waiters), 2)

        # Create the race and check.
        # Without the fix this failed at the last assert.
        fa.set_result(None)
        tb.cancel()
        self.assertTrue(lock._waiters[0].cancelled())
        test_utils.run_briefly(self.loop, 2)
        self.assertFalse(lock.locked())
        self.assertTrue(ta.done())
        self.assertTrue(tb.cancelled())
        self.assertTrue(tc.done())
Example #12
    def test_context_manager_cant_reuse(self):
        lock = asyncio.Lock(loop=self.loop)

        @asyncio.coroutine
        def acquire_lock():
            raise Return((yield From(lock)))

        # This spells "yield From(lock)" outside a generator.
        cm = self.loop.run_until_complete(acquire_lock())
        with cm:
            self.assertTrue(lock.locked())

        self.assertFalse(lock.locked())

        with self.assertRaises(AttributeError):
            with cm:
                pass
Example #13
    def __init__(self,
                 max_host_count=6,
                 resolver=None,
                 connection_factory=None,
                 ssl_connection_factory=None,
                 max_count=100):
        self._max_host_count = max_host_count
        self._resolver = resolver or Resolver()
        self._connection_factory = connection_factory or Connection
        self._ssl_connection_factory = ssl_connection_factory or SSLConnection
        self._max_count = max_count
        self._host_pools = {}
        self._host_pool_waiters = {}
        self._host_pools_lock = trollius.Lock()
        self._release_tasks = set()
        self._closed = False
        self._happy_eyeballs_table = HappyEyeballsTable()
Example #14
    def __init__(self, ui, command, command_change_callback):
        self.ui = ui
        self.parent_command = command
        self.command_change_callback = command_change_callback
        self.scales = [
            400.0, 400.0, 50.0, 100.0, 100.0, 100.0, 45.0, 45.0, 45.0
        ]

        self.update_lock = asyncio.Lock()

        self.config = None
        self.command = None

        self.graphics_scene = graphics_scene.GraphicsScene()
        self.graphics_scene.sceneMouseMoveEvent.connect(self.handle_mouse_move)
        self.graphics_scene.sceneMousePressEvent.connect(
            self.handle_mouse_press)
        self.graphics_view = self.ui.gaitCommandView
        self.graphics_view.setTransform(QtGui.QTransform().scale(1, -1))
        self.graphics_view.setScene(self.graphics_scene)

        self.in_scale_changed = BoolContext()

        for combo in [self.ui.commandXCombo, self.ui.commandYCombo]:
            combo.currentIndexChanged.connect(self.handle_axis_change)

        for spin in [self.ui.commandXScaleSpin, self.ui.commandYScaleSpin]:
            spin.valueChanged.connect(self.handle_scale_change)

        self.axes_item = graphics_scene.AxesItem()
        self.graphics_scene.addItem(self.axes_item)

        self.grid_count = 10
        self.usable_rects = {}
        for x in range(-self.grid_count + 1, self.grid_count):
            for y in range(-self.grid_count + 1, self.grid_count):
                self.usable_rects[(x, y)] = \
                    self.graphics_scene.addRect(
                    (x - 0.5) / self.grid_count,
                    (y - 0.5) / self.grid_count,
                    1.0 / self.grid_count, 1.0 / self.grid_count)

        for rect in self.usable_rects.itervalues():
            rect.setPen(QtGui.QPen(QtCore.Qt.NoPen))
            rect.setZValue(-20)
Example #15
    def __init__(self, *args, **kwargs):
        kwargs['timeout'] = 0
        self.raw_serial = serial.Serial(*args, **kwargs)
        self.write_lock = asyncio.Lock()
        self.read_lock = asyncio.Lock()
Example #16
    def test_explicit_lock(self):
        lock = asyncio.Lock(loop=self.loop)
        cond = asyncio.Condition(lock, loop=self.loop)

        self.assertIs(cond._lock, lock)
        self.assertIs(cond._loop, lock._loop)
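
The test only verifies that the Condition adopts the explicitly passed Lock. To show what that shared lock is for in practice, here is a small sketch of the usual wait/notify handshake over an explicit lock, written in modern asyncio syntax (standard library only):

import asyncio


async def main():
    lock = asyncio.Lock()
    cond = asyncio.Condition(lock)  # the condition reuses the explicit lock
    items = []

    async def consumer():
        async with cond:                        # acquires the shared lock
            await cond.wait_for(lambda: items)  # releases it while waiting
            return items.pop()

    async def producer():
        async with cond:
            items.append('ready')
            cond.notify()

    consumer_task = asyncio.create_task(consumer())
    await asyncio.sleep(0)      # let the consumer start waiting
    await producer()
    print(await consumer_task)  # -> 'ready'


asyncio.run(main())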
Example #17
    def test_ctor_noloop(self):
        asyncio.set_event_loop(self.loop)
        lock = asyncio.Lock()
        self.assertIs(lock._loop, self.loop)
Example #18
def async_job(verb, job, threadpool, locks, event_queue, log_path):
    """Run a sequence of Stages from a Job and collect their output.

    :param job: A Job instance
    :threadpool: A thread pool executor for blocking stages
    :event_queue: A queue for asynchronous events
    """

    # Initialize success flag
    all_stages_succeeded = True

    # Jobs start occupying a jobserver job
    occupying_job = True

    # Execute each stage of this job
    for stage in job.stages:
        # Logger reference in this scope for error reporting
        logger = None

        # Abort the job if one of the stages has failed
        if job.continue_on_failure and not all_stages_succeeded:
            break

        # Check for stage synchronization lock
        if stage.locked_resource is not None:
            lock = locks.setdefault(stage.locked_resource, asyncio.Lock())
            yield asyncio.From(lock)
        else:
            lock = FakeLock()

        try:
            # If the stage doesn't require a job token, release it temporarily
            if stage.occupy_job:
                if not occupying_job:
                    while job_server.try_acquire() is None:
                        yield asyncio.From(asyncio.sleep(0.05))
                    occupying_job = True
            else:
                if occupying_job:
                    job_server.release()
                    occupying_job = False

            # Notify stage started
            event_queue.put(
                ExecutionEvent('STARTED_STAGE',
                               job_id=job.jid,
                               stage_label=stage.label))

            if type(stage) is CommandStage:
                try:
                    # Initiate the command
                    while True:
                        try:
                            # Update the environment for this stage (respects overrides)
                            stage.update_env(job.env)

                            # Get the logger
                            protocol_type = stage.logger_factory(
                                verb, job.jid, stage.label, event_queue,
                                log_path)
                            # Start asynchronous execution
                            transport, logger = yield asyncio.From(
                                async_execute_process(
                                    protocol_type,
                                    **stage.async_execute_process_kwargs))
                            break
                        except OSError as exc:
                            if 'Text file busy' in str(exc):
                                # This is a transient error, try again shortly
                                # TODO: report the file causing the problem (exc.filename)
                                yield asyncio.From(asyncio.sleep(0.01))
                                continue
                            raise

                    # Notify that a subprocess has been created
                    event_queue.put(
                        ExecutionEvent('SUBPROCESS',
                                       job_id=job.jid,
                                       stage_label=stage.label,
                                       stage_repro=stage.get_reproduction_cmd(
                                           verb, job.jid),
                                       **stage.async_execute_process_kwargs))

                    # Asynchronously yield until this command is completed
                    retcode = yield asyncio.From(logger.complete)
                except:
                    logger = IOBufferLogger(verb, job.jid, stage.label,
                                            event_queue, log_path)
                    logger.err(str(traceback.format_exc()))
                    retcode = 3

            elif type(stage) is FunctionStage:
                logger = IOBufferLogger(verb, job.jid, stage.label,
                                        event_queue, log_path)
                try:
                    # Asynchronously yield until this function is completed
                    retcode = yield asyncio.From(get_loop().run_in_executor(
                        threadpool, stage.function, logger, event_queue))
                except:
                    logger.err('Stage `{}` failed with arguments:'.format(
                        stage.label))
                    for arg_val in stage.args:
                        logger.err('  {}'.format(arg_val))
                    for arg_name, arg_val in stage.kwargs.items():
                        logger.err('  {}: {}'.format(arg_name, arg_val))
                    retcode = 3
            else:
                raise TypeError("Bad Job Stage: {}".format(stage))

            # Set whether this stage succeeded
            stage_succeeded = (retcode == 0)

            # Update success tracker from this stage
            all_stages_succeeded = all_stages_succeeded and stage_succeeded

            # Store the results from this stage
            event_queue.put(
                ExecutionEvent('FINISHED_STAGE',
                               job_id=job.jid,
                               stage_label=stage.label,
                               succeeded=stage_succeeded,
                               stdout=logger.get_stdout_log(),
                               stderr=logger.get_stderr_log(),
                               interleaved=logger.get_interleaved_log(),
                               logfile_filename=logger.unique_logfile_name,
                               repro=stage.get_reproduction_cmd(verb, job.jid),
                               retcode=retcode))

            # Close logger
            logger.close()
        finally:
            lock.release()

    # Finally, return whether all stages of the job completed
    raise asyncio.Return(job.jid, all_stages_succeeded)
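
When a stage has no locked_resource, the code above substitutes a FakeLock() and still calls release() in its finally block. The real FakeLock class is not shown here; the following is a hypothetical minimal stand-in that mirrors only the parts of the Lock interface this snippet touches:

class FakeLock:
    """Hypothetical no-op stand-in for the FakeLock used above."""

    def locked(self):
        return False

    async def acquire(self):
        # Included for completeness so the stand-in can be awaited like a
        # real Lock; the snippet above only ever calls release() on it.
        return True

    def release(self):
        # async_job() calls release() unconditionally in its finally
        # block, so this must be a harmless no-op.
        pass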
Example #19
    def test_release_not_acquired(self):
        lock = asyncio.Lock(loop=self.loop)

        self.assertRaises(RuntimeError, lock.release)
Example #20
def build_isolated_workspace(context,
                             packages=None,
                             start_with=None,
                             no_deps=False,
                             unbuilt=False,
                             n_jobs=None,
                             force_cmake=False,
                             pre_clean=False,
                             force_color=False,
                             quiet=False,
                             interleave_output=False,
                             no_status=False,
                             limit_status_rate=10.0,
                             lock_install=False,
                             no_notify=False,
                             continue_on_failure=False,
                             summarize_build=None,
                             relaxed_constraints=False,
                             influx_url=None,
                             influx_db=None):
    """Builds a catkin workspace in isolation

    This function will find all of the packages in the source space, start some
    executors, feed them packages to build based on dependencies and topological
    ordering, and then monitor the output of the executors, handling loggings of
    the builds, starting builds, failing builds, and finishing builds of
    packages, and handling the shutdown of the executors when appropriate.

    :param context: context in which to build the catkin workspace
    :type context: :py:class:`catkin_tools.verbs.catkin_build.context.Context`
    :param packages: list of packages to build, by default their dependencies will also be built
    :type packages: list
    :param start_with: package to start with, skipping all packages which precede it in the topological order
    :type start_with: str
    :param no_deps: If True, the dependencies of packages will not be built first
    :type no_deps: bool
    :param n_jobs: number of parallel package build jobs
    :type n_jobs: int
    :param force_cmake: forces invocation of CMake if True, default is False
    :type force_cmake: bool
    :param force_color: forces colored output even if terminal does not support it
    :type force_color: bool
    :param quiet: suppresses the output of commands unless there is an error
    :type quiet: bool
    :param interleave_output: prints the output of commands as they are received
    :type interleave_output: bool
    :param no_status: disables status bar
    :type no_status: bool
    :param limit_status_rate: rate to which status updates are limited; a value of 0 places no limit.
    :type limit_status_rate: float
    :param lock_install: causes executors to synchronize on access of install commands
    :type lock_install: bool
    :param no_notify: suppresses system notifications
    :type no_notify: bool
    :param continue_on_failure: do not stop building other jobs on error
    :type continue_on_failure: bool
    :param summarize_build: if True, summarizes the build at the end; if None and continue_on_failure is True and
        the build fails, then the build will be summarized, but if False it never will be summarized.
    :type summarize_build: bool
    :param relaxed_constraints: if True, do not use exec_deps for topological ordering
    :type relaxed_constraints: bool
    :param influx_url: URL used to access an InfluxDB instance, of the form user:password@host:port
    :type influx_url: str
    :param influx_db: database name in InfluxDB
    :type influx_db: str

    :raises: SystemExit if buildspace is a file or no packages were found in the source space
        or if the provided options are invalid
    """
    pre_start_time = time.time()

    # Assert that the limit_status_rate is valid
    if limit_status_rate < 0:
        sys.exit(
            "[build] @!@{rf}Error:@| The value of --status-rate must be greater than or equal to zero."
        )

    # Declare a buildspace marker describing the build config for error checking
    buildspace_marker_data = {
        'workspace': context.workspace,
        'profile': context.profile,
        'install': context.install,
        'install_space': context.install_space_abs,
        'devel_space': context.devel_space_abs,
        'source_space': context.source_space_abs
    }

    # Check build config
    if os.path.exists(
            os.path.join(context.build_space_abs, BUILDSPACE_MARKER_FILE)):
        with open(os.path.join(
                context.build_space_abs,
                BUILDSPACE_MARKER_FILE)) as buildspace_marker_file:
            existing_buildspace_marker_data = yaml.safe_load(
                buildspace_marker_file)
            misconfig_lines = ''
            for (k, v) in existing_buildspace_marker_data.items():
                new_v = buildspace_marker_data.get(k, None)
                if new_v != v:
                    misconfig_lines += (
                        '\n - %s: %s (stored) is not %s (commanded)' %
                        (k, v, new_v))
            if len(misconfig_lines) > 0:
                sys.exit(
                    clr("\n@{rf}Error:@| Attempting to build a catkin workspace using build space: "
                        "\"%s\" but that build space's most recent configuration "
                        "differs from the commanded one in ways which will cause "
                        "problems. Fix the following options or use @{yf}`catkin "
                        "clean -b`@| to remove the build space: %s" %
                        (context.build_space_abs, misconfig_lines)))

    # Summarize the context
    summary_notes = []
    if force_cmake:
        summary_notes += [
            clr("@!@{cf}NOTE:@| Forcing CMake to run for each package.")
        ]
    log(context.summary(summary_notes))

    # Make sure there is a build folder and it is not a file
    if os.path.exists(context.build_space_abs):
        if os.path.isfile(context.build_space_abs):
            sys.exit(
                clr("[build] @{rf}Error:@| Build space '{0}' exists but is a file and not a folder."
                    .format(context.build_space_abs)))
    # If it doesn't exist, create it
    else:
        log("[build] Creating build space: '{0}'".format(
            context.build_space_abs))
        os.makedirs(context.build_space_abs)

    # Write the current build config for config error checking
    with open(os.path.join(context.build_space_abs, BUILDSPACE_MARKER_FILE),
              'w') as buildspace_marker_file:
        buildspace_marker_file.write(
            yaml.dump(buildspace_marker_data, default_flow_style=False))

    # Get all the packages in the context source space
    # Suppress warnings since this is a utility function
    workspace_packages = find_packages(context.source_space_abs,
                                       exclude_subspaces=True,
                                       warnings=[])

    # Get packages which have not been built yet
    built_packages, unbuilt_pkgs = get_built_unbuilt_packages(
        context, workspace_packages)

    # Handle unbuilt packages
    if unbuilt:
        # Check if there are any unbuilt
        if len(unbuilt_pkgs) > 0:
            # Add the unbuilt packages
            packages.extend(list(unbuilt_pkgs))
        else:
            log("[build] No unbuilt packages to be built.")
            return

    # If no_deps is given, ensure packages to build are provided
    if no_deps and packages is None:
        log(
            clr("[build] @!@{rf}Error:@| With no_deps, you must specify packages to build."
                ))
        return

    # Find list of packages in the workspace
    packages_to_be_built, packages_to_be_built_deps, all_packages = determine_packages_to_be_built(
        packages, context, workspace_packages)

    if not no_deps:
        # Extend packages to be built to include their deps
        packages_to_be_built.extend(packages_to_be_built_deps)

    # Also re-sort
    try:
        packages_to_be_built = topological_order_packages(
            dict(packages_to_be_built))
    except AttributeError:
        log(
            clr("[build] @!@{rf}Error:@| The workspace packages have a circular "
                "dependency, and cannot be built. Please run `catkin list "
                "--deps` to determine the problematic package(s)."))
        return

    # Check the number of packages to be built
    if len(packages_to_be_built) == 0:
        log(clr('[build] No packages to be built.'))

    # Assert start_with package is in the workspace
    verify_start_with_option(start_with, packages, all_packages,
                             packages_to_be_built)

    # Populate .catkin file if we're not installing
    # NOTE: This is done to prevent the Catkin CMake code from doing it,
    # which isn't parallel-safe. Catkin CMake only modifies this file if
    # its package source path isn't found.
    if not context.install:
        dot_catkin_file_path = os.path.join(context.devel_space_abs, '.catkin')
        # If the file exists, get the current paths
        if os.path.exists(dot_catkin_file_path):
            dot_catkin_paths = open(dot_catkin_file_path,
                                    'r').read().split(';')
        else:
            dot_catkin_paths = []

        # Update the list with the new packages (in topological order)
        packages_to_be_built_paths = [
            os.path.join(context.source_space_abs, path)
            for path, pkg in packages_to_be_built
        ]

        new_dot_catkin_paths = [
            os.path.join(context.source_space_abs, path) for path in [
                os.path.join(context.source_space_abs, path)
                for path, pkg in all_packages
            ] if path in dot_catkin_paths or path in packages_to_be_built_paths
        ]

        # Write the new file if it's different, otherwise, leave it alone
        if dot_catkin_paths == new_dot_catkin_paths:
            wide_log("[build] Package table is up to date.")
        else:
            wide_log("[build] Updating package table.")
            open(dot_catkin_file_path,
                 'w').write(';'.join(new_dot_catkin_paths))

    # Remove packages before start_with
    if start_with is not None:
        for path, pkg in list(packages_to_be_built):
            if pkg.name != start_with:
                wide_log(
                    clr("@!@{pf}Skipping@|  @{gf}---@| @{cf}{}@|").format(
                        pkg.name))
                packages_to_be_built.pop(0)
            else:
                break

    # Get the names of all packages to be built
    packages_to_be_built_names = [p.name for _, p in packages_to_be_built]
    packages_to_be_built_deps_names = [
        p.name for _, p in packages_to_be_built_deps
    ]

    # Generate prebuild and prebuild clean jobs, if necessary
    prebuild_jobs = {}
    setup_util_present = os.path.exists(
        os.path.join(context.devel_space_abs, '_setup_util.py'))
    catkin_present = 'catkin' in (packages_to_be_built_names +
                                  packages_to_be_built_deps_names)
    catkin_built = 'catkin' in built_packages
    prebuild_built = 'catkin_tools_prebuild' in built_packages

    # Handle the prebuild jobs if the develspace is linked
    prebuild_pkg_deps = []
    if context.link_devel:
        prebuild_pkg = None

        # Construct a dictionary to lookup catkin package by name
        pkg_dict = dict([(pkg.name, (pth, pkg)) for pth, pkg in all_packages])

        if setup_util_present:
            # Setup util is already there, determine if it needs to be
            # regenerated
            if catkin_built:
                if catkin_present:
                    prebuild_pkg_path, prebuild_pkg = pkg_dict['catkin']
            elif prebuild_built:
                if catkin_present:
                    # TODO: Clean prebuild package
                    ct_prebuild_pkg_path = get_prebuild_package(
                        context.build_space_abs, context.devel_space_abs,
                        force_cmake)
                    ct_prebuild_pkg = parse_package(ct_prebuild_pkg_path)

                    prebuild_jobs[
                        'catkin_tools_prebuild'] = create_catkin_clean_job(
                            context,
                            ct_prebuild_pkg,
                            ct_prebuild_pkg_path,
                            dependencies=[],
                            dry_run=False,
                            clean_build=True,
                            clean_devel=True,
                            clean_install=True)

                    # TODO: Build catkin package
                    prebuild_pkg_path, prebuild_pkg = pkg_dict['catkin']
                    prebuild_pkg_deps.append('catkin_tools_prebuild')
            else:
                # How did these get here??
                log("Warning: devel space setup files have an unknown origin.")
        else:
            # Setup util needs to be generated
            if catkin_built or prebuild_built:
                log("Warning: generated devel space setup files have been deleted."
                    )

            if catkin_present:
                # Build catkin package
                prebuild_pkg_path, prebuild_pkg = pkg_dict['catkin']
            else:
                # Generate and build an explicit prebuild package
                prebuild_pkg_path = get_prebuild_package(
                    context.build_space_abs, context.devel_space_abs,
                    force_cmake)
                prebuild_pkg = parse_package(prebuild_pkg_path)

        if prebuild_pkg is not None:
            # Create the prebuild job
            prebuild_job = create_catkin_build_job(
                context,
                prebuild_pkg,
                prebuild_pkg_path,
                build_dependencies=prebuild_pkg_deps,
                run_dependencies=[],
                force_cmake=force_cmake,
                pre_clean=pre_clean,
                prebuild=True)

            # Add the prebuild job
            prebuild_jobs[prebuild_job.jid] = prebuild_job

    # Remove prebuild jobs from normal job list
    for prebuild_jid, prebuild_job in prebuild_jobs.items():
        if prebuild_jid in packages_to_be_built_names:
            packages_to_be_built_names.remove(prebuild_jid)

    # Initial jobs list is just the prebuild jobs
    jobs = [] + list(prebuild_jobs.values())

    # Get all build type plugins
    build_job_creators = {
        ep.name: ep.load()['create_build_job']
        for ep in pkg_resources.iter_entry_points(group='catkin_tools.jobs')
    }

    # It's a problem if there aren't any build types available
    if len(build_job_creators) == 0:
        sys.exit(
            'Error: No build types available. Please check your catkin_tools installation.'
        )

    # Construct jobs
    for pkg_path, pkg in all_packages:
        if pkg.name not in packages_to_be_built_names:
            continue

        # Ignore metapackages
        if 'metapackage' in [e.tagname for e in pkg.exports]:
            continue

        # Get actual execution deps
        build_deps = [
            p.name for _, p in get_cached_recursive_build_depends_in_workspace(
                pkg, packages_to_be_built) if p.name not in prebuild_jobs
        ]
        build_for_run_deps = [
            p.name for _, p in get_cached_recursive_run_depends_in_workspace(
                pkg, packages_to_be_built) if p.name not in prebuild_jobs
        ]

        # All jobs depend on the prebuild jobs if they're defined
        if not no_deps:
            if relaxed_constraints:
                build_for_run_deps = [
                    p.name for _, p in
                    get_recursive_build_depends_for_run_depends_in_workspace(
                        [pkg], packages_to_be_built)
                    if p.name not in prebuild_jobs
                ]
            else:
                # revert to interpreting all dependencies as build dependencies
                build_deps = list(set(build_deps + build_for_run_deps))
                build_for_run_deps = []

            for j in prebuild_jobs.values():
                build_deps.append(j.jid)

        # Determine the job parameters
        build_job_kwargs = dict(context=context,
                                package=pkg,
                                package_path=pkg_path,
                                build_dependencies=build_deps,
                                run_dependencies=build_for_run_deps,
                                force_cmake=force_cmake,
                                pre_clean=pre_clean)

        # Create the job based on the build type
        build_type = get_build_type(pkg)

        if build_type in build_job_creators:
            jobs.append(build_job_creators[build_type](**build_job_kwargs))
        else:
            wide_log(
                clr("[build] @!@{yf}Warning:@| Skipping package `{}` because it "
                    "has an unsupported package build type: `{}`").format(
                        pkg.name, build_type))

            wide_log(clr("[build] Note: Available build types:"))
            for bt_name in build_job_creators.keys():
                wide_log(clr("[build]  - `{}`".format(bt_name)))

    # Queue for communicating status
    event_queue = Queue()

    status_queue = Queue()
    monitoring_queue = Queue()

    class ForwardingQueue(threading.Thread):
        def __init__(self, queues):
            super(ForwardingQueue, self).__init__()
            self.keep_running = True
            self.queues = queues

        def run(self):
            while self.keep_running:
                event = event_queue.get(True)
                for queue in self.queues:
                    queue.put(event)
                if event is None:
                    break

    queue_thread = ForwardingQueue([status_queue, monitoring_queue])

    threads = [queue_thread]

    try:
        # Spin up status output thread
        status_thread = ConsoleStatusController(
            'build', ['package', 'packages'],
            jobs,
            n_jobs, [pkg.name for _, pkg in context.packages],
            [p for p in context.whitelist], [p for p in context.blacklist],
            status_queue,
            show_notifications=not no_notify,
            show_active_status=not no_status,
            show_buffered_stdout=not quiet and not interleave_output,
            show_buffered_stderr=not interleave_output,
            show_live_stdout=interleave_output,
            show_live_stderr=interleave_output,
            show_stage_events=not quiet,
            show_full_summary=(summarize_build is True),
            pre_start_time=pre_start_time,
            active_status_rate=limit_status_rate)
        threads.append(status_thread)

        if influx_db is not None:
            if not have_influx_db:
                sys.exit(
                    "[build] @!@{rf}Error:@| InfluxDB monitoring is not possible, cannot import influxdb"
                )

            match = re.match('^(.+):(.+)@(.+):(.+)$', influx_url)
            if not match:
                sys.exit(
                    "[build] @!@{rf}Error:@| The value of --influx has to be of the form username:password@host:port"
                )
            username, password, host, port = match.groups()

            influxdb_thread = InfluxDBStatusController(monitoring_queue,
                                                       influx_db, host, port,
                                                       username, password)

            threads.append(influxdb_thread)

        for thread in threads:
            thread.start()

        # Initialize locks
        locks = {
            'installspace': asyncio.Lock() if lock_install else FakeLock()
        }

        # Block while running N jobs asynchronously
        try:
            all_succeeded = run_until_complete(
                execute_jobs('build',
                             jobs,
                             locks,
                             event_queue,
                             context.log_space_abs,
                             max_toplevel_jobs=n_jobs,
                             continue_on_failure=continue_on_failure,
                             continue_without_deps=False,
                             relaxed_constraints=relaxed_constraints))
        except Exception:
            all_succeeded = False
            for thread in threads:
                thread.keep_running = False
            for thread in threads:
                thread.join(1.0)
            wide_log(str(traceback.format_exc()))

        event_queue.put(None)

        for thread in threads:
            thread.join(1.0)

        # Warn user about new packages
        now_built_packages, now_unbuilt_pkgs = get_built_unbuilt_packages(
            context, workspace_packages)
        new_pkgs = [p for p in unbuilt_pkgs if p not in now_unbuilt_pkgs]
        if len(new_pkgs) > 0:
            log(
                clr("[build] @/@!Note:@| @/Workspace packages have changed, "
                    "please re-source setup files to use them.@|"))

        if all_succeeded:
            # Create isolated devel setup if necessary
            if context.isolate_devel:
                if not context.install:
                    _create_unmerged_devel_setup(context, now_unbuilt_pkgs)
                else:
                    _create_unmerged_devel_setup_for_install(context)
            return 0
        else:
            return 1

    except KeyboardInterrupt:
        wide_log("[build] Interrupted by user!")
        event_queue.put(None)

        return 130  # EOWNERDEAD return code is not part of the errno module.