    def __init__(self, port, host, num_executors=10):
        """
        :param port: The port number the slave service is running on
        :type port: int
        :param host: The hostname at which the slave is reachable
        :type host: str
        :param num_executors: The number of executors this slave should operate with -- this determines how many
            concurrent subjobs the slave can execute.
        :type num_executors: int
        """
        self.port = port
        self.host = host
        self._slave_id = None
        self._num_executors = num_executors
        self._logger = log.get_logger(__name__)

        self._idle_executors = Queue(maxsize=num_executors)
        self.executors = {}
        for executor_id in range(num_executors):
            executor = SubjobExecutor(executor_id)
            self._idle_executors.put(executor)
            self.executors[executor_id] = executor

        self._setup_complete_event = Event()
        self._master_url = None
        self._network = Network(min_connection_poolsize=num_executors)
        self._master_api = None  # wait until we connect to a master first

        self._project_type = None  # this will be instantiated during build setup
        self._current_build_id = None

        UnhandledExceptionHandler.singleton().add_teardown_callback(self._async_teardown_build,
                                                                    should_disconnect_from_master=True)
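
The bounded Queue built in the constructor above is what enforces the executor limit: a subjob can only run after checking an executor out of `_idle_executors`, and the executor goes back when the subjob finishes. A minimal sketch of that check-out/return cycle, assuming a hypothetical `run_subjob` helper (not part of the class above):

from queue import Queue

class _ExecutorPoolSketch:
    """Illustrative only: a bounded Queue caps the number of concurrent subjobs."""

    def __init__(self, num_executors=10):
        self._idle_executors = Queue(maxsize=num_executors)
        for executor_id in range(num_executors):
            self._idle_executors.put(executor_id)

    def run_subjob(self, work):
        executor_id = self._idle_executors.get()  # blocks while all executors are busy
        try:
            work(executor_id)
        finally:
            self._idle_executors.put(executor_id)  # return the executor to the idle pool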
Example #2
    def connect_to_master(self, master_url=None):
        """
        Notify the master that this slave exists.

        :param master_url: The URL of the master service. If none is specified, defaults to localhost:43000.
        :type master_url: str | None
        """
        self.is_alive = True
        self._master_url = master_url or 'localhost:43000'
        self._master_api = UrlBuilder(self._master_url)
        connect_url = self._master_api.url('slave')
        data = {
            'slave': '{}:{}'.format(self.host, self.port),
            'num_executors': self._num_executors,
        }
        response = self._network.post(connect_url, data=data)
        self._slave_id = int(response.json().get('slave_id'))
        self._logger.info('Slave {}:{} connected to master on {}.', self.host,
                          self.port, self._master_url)

        # We disconnect from the master before build_teardown so that the master stops sending subjobs. (Teardown
        # callbacks are executed in the reverse order that they're added, so we add the build_teardown callback first.)
        UnhandledExceptionHandler.singleton().add_teardown_callback(
            self._do_build_teardown_and_reset, timeout=30)
        UnhandledExceptionHandler.singleton().add_teardown_callback(
            self._disconnect_from_master)
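
The ordering comment above is the key detail: teardown callbacks run in the reverse order they were registered, so adding `_do_build_teardown_and_reset` first guarantees `_disconnect_from_master` fires before it. A hypothetical sketch of that last-in-first-out behavior (not the real UnhandledExceptionHandler API):

class _TeardownRegistrySketch:
    """Illustrative only: teardown callbacks execute in reverse registration order."""

    def __init__(self):
        self._callbacks = []

    def add_teardown_callback(self, callback, *args, **kwargs):
        self._callbacks.append((callback, args, kwargs))

    def execute_teardown(self):
        for callback, args, kwargs in reversed(self._callbacks):
            callback(*args, **kwargs)

registry = _TeardownRegistrySketch()
registry.add_teardown_callback(print, 'runs second: build teardown')
registry.add_teardown_callback(print, 'runs first: disconnect from master')
registry.execute_teardown()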
    def setUp(self):
        super().setUp()
        self._set_up_safe_guards()

        # Reset singletons so that they get recreated for every test that uses them.
        Configuration.reset_singleton()
        UnhandledExceptionHandler.reset_singleton()

        # Explicitly initialize UnhandledExceptionHandler singleton here (on the main thread) since it sets up signal
        # handlers that must execute on the main thread.
        UnhandledExceptionHandler.singleton()

        MasterConfigLoader().configure_defaults(Configuration.singleton())
        MasterConfigLoader().configure_postload(Configuration.singleton())
        self.patch('app.util.conf.master_config_loader.MasterConfigLoader.load_from_config_file')

        # Configure logging to go to stdout. This makes debugging easier by allowing us to see logs for failed tests.
        log.configure_logging('DEBUG')
        # Then stub out configure_logging so we don't end up logging to real files during testing.
        self.patch('app.util.log.configure_logging')

        # Set up TestHandler. This allows asserting on log messages in tests.
        self.log_handler = logbook.TestHandler(bubble=True)
        self.log_handler.push_application()

        self._base_setup_called = True
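
The main-thread requirement mentioned in the comment above comes from the standard library: `signal.signal()` raises `ValueError` when called from any thread other than the main thread, which is why the singleton that registers signal handlers must be created here rather than on a worker thread. A small self-contained illustration:

import signal
import threading

def _try_register_handler():
    try:
        signal.signal(signal.SIGTERM, lambda signum, frame: None)
        print('registered signal handler')
    except ValueError as error:
        # Raised off the main thread: "signal only works in main thread ..."
        print('could not register handler:', error)

_try_register_handler()                                   # succeeds on the main thread
worker = threading.Thread(target=_try_register_handler)   # hits the ValueError branch
worker.start()
worker.join()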
Example #4
 def initialize_unhandled_exception_handler():
     # Note: Unfortunately we can't use `self.assertRaises` here since this executes on a different thread.
     # todo: After exceptions in test threads are being caught, simplify this test to use self.assertRaises.
     UnhandledExceptionHandler.reset_singleton()
     try:
         UnhandledExceptionHandler.singleton()
     except Exception:
         nonlocal exception_raised
         exception_raised = True
    def test_handles_platform_does_not_support_SIGINFO(self):
        UnhandledExceptionHandler.reset_singleton()
        mock_signal = self.patch('app.util.unhandled_exception_handler.signal')

        def register_signal_handler(sig, _):
            if sig == process_utils.SIGINFO:
                raise ValueError
        mock_signal.signal.side_effect = register_signal_handler
        UnhandledExceptionHandler.singleton()
    def _write_pid_file(self, filename):
        fs.write_file(str(os.getpid()), filename)

        def remove_pid_file():
            try:
                os.remove(filename)
            except OSError:
                pass
        UnhandledExceptionHandler.singleton().add_teardown_callback(remove_pid_file)
Example #8
 def _execute_git_remote_command_subprocess(self, *args, **kwargs):
     """
     This is the entry point for the subprocess that executes git remote commands using pexpect. Args and kwargs are
     passed directly through to self._execute_git_remote_command().
     """
     # Reset signal handlers -- unhandled exceptions will be handled by the main process when the subprocess dies.
     UnhandledExceptionHandler.reset_signal_handlers()
     self._close_unneeded_network_connections()
     self._execute_git_remote_command(*args, **kwargs)
Example #9
    def setUp(self):
        super().setUp()
        self.addCleanup(patch.stopall)

        self._patched_items = {}
        self._blacklist_methods_not_allowed_in_unit_tests()

        # Stub out a few library dependencies that launch subprocesses.
        self.patch(
            'app.util.autoversioning.get_version').return_value = '0.0.0'
        self.patch('app.util.conf.base_config_loader.platform.node'
                   ).return_value = self._fake_hostname

        if self._do_network_mocks:
            # requests.Session() also makes some subprocess calls on instantiation.
            self.patch('app.util.network.requests.Session')
            # Stub out Network.are_hosts_same() call with a simple string comparison.
            self.patch('app.util.network.Network.are_hosts_same',
                       new=lambda host_a, host_b: host_a == host_b)

        # Reset singletons so that they get recreated for every test that uses them.
        Configuration.reset_singleton()
        UnhandledExceptionHandler.reset_singleton()
        SlaveRegistry.reset_singleton()

        # Explicitly initialize UnhandledExceptionHandler singleton here (on the main thread) since it sets up signal
        # handlers that must execute on the main thread.
        UnhandledExceptionHandler.singleton()

        MasterConfigLoader().configure_defaults(Configuration.singleton())
        MasterConfigLoader().configure_postload(Configuration.singleton())
        self.patch(
            'app.util.conf.master_config_loader.MasterConfigLoader.load_from_config_file'
        )

        # Reset counters
        Slave._slave_id_counter = Counter()
        Build._build_id_counter = Counter()
        analytics._event_id_generator = Counter()

        # Configure logging to go to stdout. This makes debugging easier by allowing us to see logs for failed tests.
        log.configure_logging('DEBUG')
        # Then stub out configure_logging so we don't end up logging to real files during testing.
        self.patch('app.util.log.configure_logging')

        # Set up TestHandler. This allows asserting on log messages in tests.
        self.log_handler = logbook.TestHandler(bubble=True)
        self.log_handler.push_application()

        self._base_setup_called = True
    def test_calling_kill_subprocesses_will_break_out_of_command_execution_wait_loop(
            self):
        self._mock_stdout_and_stderr(b'fake_output', b'fake_error')
        self.mock_popen.pid = 55555
        self._simulate_hanging_popen_process()

        project_type = ProjectType()
        command_thread = SafeThread(
            target=project_type.execute_command_in_project,
            args=('echo The power is yours!', ))

        # This calls execute_command_in_project() on one thread, and calls kill_subprocesses() on another. The
        # kill_subprocesses() call should cause the first thread to exit.
        command_thread.start()
        project_type.kill_subprocesses()

        # This *should* join immediately, but we specify a timeout just in case something goes wrong so that the test
        # doesn't hang. A successful join implies success. We also use the UnhandledExceptionHandler so that exceptions
        # propagate from the child thread to the test thread and fail the test.
        with UnhandledExceptionHandler.singleton():
            command_thread.join(timeout=10)
            if command_thread.is_alive():
                self.mock_killpg(
                )  # Calling killpg() causes the command thread to end.
                self.fail(
                    'project_type.kill_subprocesses should cause the command execution wait loop to exit.'
                )

        self.mock_killpg.assert_called_once_with(
            55555, ANY)  # Note: os.killpg does not accept keyword args.
    def test_executing_build_teardown_multiple_times_will_raise_exception(self):
        self.mock_network.post().status_code = http.client.OK
        slave = self._create_cluster_slave()
        project_type_mock = self.patch('app.slave.cluster_slave.util.create_project_type').return_value
        # This test uses setup_complete_event to detect when the async fetch_project() has executed.
        setup_complete_event = Event()
        project_type_mock.fetch_project.side_effect = self.no_args_side_effect(setup_complete_event.set)
        # This test uses teardown_event to cause a thread to block on the teardown_build() call.
        teardown_event = Event()
        project_type_mock.teardown_build.side_effect = self.no_args_side_effect(teardown_event.wait)

        slave.connect_to_master(self._FAKE_MASTER_URL)
        slave.setup_build(build_id=123, project_type_params={'type': 'Fake'}, build_executor_start_index=0)
        self.assertTrue(setup_complete_event.wait(timeout=5), 'Build setup should complete very quickly.')

        # Start the first thread that does build teardown. This thread will block on teardown_build().
        first_thread = SafeThread(target=slave._do_build_teardown_and_reset)
        first_thread.start()
        # Call build teardown() again and it should raise an exception.
        with self.assertRaises(BuildTeardownError):
            slave._do_build_teardown_and_reset()

        # Cleanup: Unblock the first thread and let it finish. We use the unhandled exception handler just in case any
        # exceptions occurred on the thread (so that they'd be passed back to the main thread and fail the test).
        teardown_event.set()
        with UnhandledExceptionHandler.singleton():
            first_thread.join()
Example #12
def main(args):
    """
    This is the single entry point of the ClusterRunner application. This function feeds the command line parameters as
    keyword args directly into the run() method of the appropriate Subcommand subclass.

    Note that for the master and slave subcommands, we execute all major application logic (including the web server)
    on a separate thread (not the main thread). This frees up the main thread to be responsible for facilitating
    graceful application shutdown by intercepting external signals and executing teardown handlers.
    """
    parsed_args = _parse_args(args)
    _initialize_configuration(parsed_args.pop('subcommand'),
                              parsed_args.pop('config_file'))
    subcommand_class = parsed_args.pop(
        'subcommand_class'
    )  # defined in _parse_args() by subparser.set_defaults()

    try:
        unhandled_exception_handler = UnhandledExceptionHandler.singleton()
        with unhandled_exception_handler:
            subcommand_class().run(**parsed_args)

    finally:
        # The force kill countdown is not an UnhandledExceptionHandler teardown callback because we want it to execute
        # in all situations (not only when there is an unhandled exception).
        _start_app_force_kill_countdown(seconds=10)
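
Because the countdown sits in a `finally` block, it starts on every exit path, clean or not. A minimal sketch of what such a countdown could look like, assuming a daemon timer that hard-kills the process if graceful teardown stalls (hypothetical implementation, not the project's actual `_start_app_force_kill_countdown`):

import os
import threading

def _start_app_force_kill_countdown_sketch(seconds):
    """Illustrative only: hard-exit the process if graceful teardown hangs."""
    def _force_kill():
        os._exit(1)  # bypasses any remaining teardown; last-resort exit

    timer = threading.Timer(seconds, _force_kill)
    timer.daemon = True  # a daemon timer never keeps a cleanly exiting process alive
    timer.start()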
Example #13
    def test_executing_build_teardown_multiple_times_will_raise_exception(
            self):
        self.mock_network.post().status_code = http.client.OK
        slave = self._create_cluster_slave()
        project_type_mock = self.patch(
            'app.slave.cluster_slave.util.create_project_type').return_value
        # This test uses setup_complete_event to detect when the async fetch_project() has executed.
        setup_complete_event = Event()
        project_type_mock.fetch_project.side_effect = self.no_args_side_effect(
            setup_complete_event.set)
        # This test uses teardown_event to cause a thread to block on the teardown_build() call.
        teardown_event = Event()
        project_type_mock.teardown_build.side_effect = self.no_args_side_effect(
            teardown_event.wait)

        slave.connect_to_master(self._FAKE_MASTER_URL)
        slave.setup_build(build_id=123, project_type_params={'type': 'Fake'})
        self.assertTrue(setup_complete_event.wait(timeout=5),
                        'Build setup should complete very quickly.')

        # Start the first thread that does build teardown. This thread will block on teardown_build().
        first_thread = SafeThread(target=slave._do_build_teardown_and_reset)
        first_thread.start()
        # Call build teardown() again and it should raise an exception.
        with self.assertRaises(BuildTeardownError):
            slave._do_build_teardown_and_reset()

        # Cleanup: Unblock the first thread and let it finish. We use the unhandled exception handler just in case any
        # exceptions occurred on the thread (so that they'd be passed back to the main thread and fail the test).
        teardown_event.set()
        with UnhandledExceptionHandler.singleton():
            first_thread.join()
    def _start_application(self, application, port):
        # Note: No significant application logic should be executed before this point. The call to application.listen()
        # will raise an exception if another process is using the same port. We rely on this exception to force us to
        # exit if there are any port conflicts.
        try:
            application.listen(port, '0.0.0.0')
        except OSError:
            self._logger.error('Could not start application on port {}. Is port already in use?'.format(port))
            sys.exit(1)

        ioloop = tornado.ioloop.IOLoop.instance()

        # add a teardown callback that will stop the tornado server
        stop_tornado_ioloop = functools.partial(ioloop.add_callback, callback=ioloop.stop)
        UnhandledExceptionHandler.singleton().add_teardown_callback(stop_tornado_ioloop)
        return ioloop
    def test_calling_kill_subprocesses_will_break_out_of_command_execution_wait_loop(self):

        def fake_communicate(timeout=None):
            # The fake implementation is that communicate() times out forever until os.killpg is called.
            if mock_killpg.call_count == 0 and timeout is not None:
                raise TimeoutExpired(None, timeout)
            elif mock_killpg.call_count > 0:
                return b'fake output', b'fake error'
            self.fail('Popen.communicate() should not be called without a timeout before os.killpg has been called.')

        mock_killpg = self.patch('os.killpg')
        self.mock_popen.communicate.side_effect = fake_communicate
        self.mock_popen.returncode = 1
        self.mock_popen.pid = 55555
        project_type = ProjectType()
        command_thread = SafeThread(target=project_type.execute_command_in_project, args=('echo The power is yours!',))

        # This calls execute_command_in_project() on one thread, and calls kill_subprocesses() on another. The
        # kill_subprocesses() call should cause the first thread to exit.
        command_thread.start()
        project_type.kill_subprocesses()

        # This *should* join immediately, but we specify a timeout just in case something goes wrong so that the test
        # doesn't hang. A successful join implies success. We also use the UnhandledExceptionHandler so that exceptions
        # propagate from the child thread to the test thread and fail the test.
        with UnhandledExceptionHandler.singleton():
            command_thread.join(timeout=10)
            if command_thread.is_alive():
                mock_killpg()  # Calling killpg() causes the command thread to end.
                self.fail('project_type.kill_subprocesses should cause the command execution wait loop to exit.')

        mock_killpg.assert_called_once_with(55555, ANY)  # Note: os.killpg does not accept keyword args.
 def trigger_graceful_app_shutdown(self):
     """
     Helper method to easily trigger graceful shutdown. The side effect of this is that all teardown handlers that
     are registered with UnhandledExceptionHandler will be executed. This method will raise a SystemExit if an
     exception is raised in any of the teardown handlers.
     """
     with UnhandledExceptionHandler.singleton():
         raise _GracefulShutdownTrigger
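
This pattern works because UnhandledExceptionHandler is used as a context manager: raising inside the `with` block hands the exception to `__exit__`, which runs the registered teardown callbacks and can swallow the sentinel exception. A hypothetical sketch of that mechanism (class and method names here are illustrative, not the project's implementation):

class _GracefulShutdownTriggerSketch(Exception):
    """Sentinel exception used only to enter the teardown path."""

class _HandlerSketch:
    def __init__(self):
        self._teardown_callbacks = []

    def add_teardown_callback(self, callback):
        self._teardown_callbacks.append(callback)

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        if exc_type is not None:
            for callback in reversed(self._teardown_callbacks):
                callback()
        return exc_type is _GracefulShutdownTriggerSketch  # swallow only the sentinel

handler = _HandlerSketch()
handler.add_teardown_callback(lambda: print('tearing down'))
with handler:
    raise _GracefulShutdownTriggerSketch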
Example #17
    def _start_build(self):
        """
        Send the build request to the master for execution.
        """
        build_url = self._master_api.url('build')
        # todo: catch connection error
        response = self._network.post_with_digest(build_url, self._request_params, self._secret, error_on_failure=True)
        response_data = response.json()

        if 'error' in response_data:
            error_message = response_data['error']
            raise _BuildRunnerError('Error starting build: ' + error_message)

        self._build_id = response_data['build_id']

        UnhandledExceptionHandler.singleton().add_teardown_callback(self._cancel_build)
        self._logger.info('Build is running. (Build id: {})', self._build_id)
    def setUp(self):
        super().setUp()
        self.addCleanup(patch.stopall)

        self._patched_items = {}
        self._blacklist_methods_not_allowed_in_unit_tests()

        # Stub out a few library dependencies that launch subprocesses.
        self.patch('app.util.autoversioning.get_version').return_value = '0.0.0'
        self.patch('app.util.conf.base_config_loader.platform.node').return_value = self._fake_hostname

        if self._do_network_mocks:
            # requests.Session() also makes some subprocess calls on instantiation.
            self.patch('app.util.network.requests.Session')
            # Stub out Network.are_hosts_same() call with a simple string comparison.
            self.patch('app.util.network.Network.are_hosts_same', new=lambda host_a, host_b: host_a == host_b)

        # Reset singletons so that they get recreated for every test that uses them.
        Configuration.reset_singleton()
        UnhandledExceptionHandler.reset_singleton()
        SlaveRegistry.reset_singleton()

        # Explicitly initialize UnhandledExceptionHandler singleton here (on the main thread) since it sets up signal
        # handlers that must execute on the main thread.
        UnhandledExceptionHandler.singleton()

        MasterConfigLoader().configure_defaults(Configuration.singleton())
        MasterConfigLoader().configure_postload(Configuration.singleton())
        self.patch('app.util.conf.master_config_loader.MasterConfigLoader.load_from_config_file')

        # Reset counters
        Slave._slave_id_counter = Counter()
        Build._build_id_counter = Counter()
        analytics._event_id_generator = Counter()

        # Configure logging to go to stdout. This makes debugging easier by allowing us to see logs for failed tests.
        log.configure_logging('DEBUG')
        # Then stub out configure_logging so we don't end up logging to real files during testing.
        self.patch('app.util.log.configure_logging')

        # Set up TestHandler. This allows asserting on log messages in tests.
        self.log_handler = logbook.TestHandler(bubble=True)
        self.log_handler.push_application()

        self._base_setup_called = True
    def _start_application(self, application, port):
        # Note: No significant application logic should be executed before this point. The call to application.listen()
        # will raise an exception if another process is using the same port. We rely on this exception to force us to
        # exit if there are any port conflicts.
        try:
            # If SSL cert and key files are provided in configuration, ClusterRunner will start with HTTPS protocol.
            # Otherwise ClusterRunner will start with HTTP protocol.
            server = HTTPServer(application, ssl_options=self._get_https_options())
            server.listen(port, '0.0.0.0')
        except OSError:
            self._logger.error('Could not start application on port {}. Is port already in use?'.format(port))
            sys.exit(1)

        ioloop = tornado.ioloop.IOLoop.instance()

        # add a teardown callback that will stop the tornado server
        stop_tornado_ioloop = functools.partial(ioloop.add_callback, callback=ioloop.stop)
        UnhandledExceptionHandler.singleton().add_teardown_callback(stop_tornado_ioloop)
        return ioloop
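
In the HTTPS variant above, Tornado's `HTTPServer` chooses the protocol based on `ssl_options`: a dict with certificate and key paths enables TLS, while `None` keeps plain HTTP. A hedged sketch of what a helper like `_get_https_options()` might return (parameter names and the fallback behavior are assumptions):

def _get_https_options_sketch(https_cert_file=None, https_key_file=None):
    """Illustrative only: build the ssl_options dict that Tornado's HTTPServer accepts."""
    if https_cert_file and https_key_file:
        return {
            'certfile': https_cert_file,  # path to the PEM certificate
            'keyfile': https_key_file,    # path to the matching private key
        }
    return None  # with ssl_options=None, HTTPServer serves plain HTTP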
Example #21
    def _execute_git_remote_command_subprocess(self, command, cwd, timeout, log_msg_queue):
        """
        :type command: str
        :type cwd: str|None
        :type timeout: int
        :type log_msg_queue: multiprocessing.queues.Queue

        This is the entry point for the subprocess that executes git remote commands using pexpect. The arguments are
        passed directly through to self._execute_git_remote_command().
        """
        try:
            # Reset signal handlers -- unhandled exceptions will be handled by the main process when the subprocess dies.
            UnhandledExceptionHandler.reset_signal_handlers()
            log_msg_queue.put(('INFO', 'Closing unneeded network connections...', ()))
            self._close_unneeded_network_connections()
            log_msg_queue.put(('INFO', 'Closed network connections, executing git remote command...', ()))
            self._execute_git_remote_command(command, cwd, timeout, log_msg_queue)
        except Exception as exception:
            log_msg_queue.put(('ERROR', 'Exception in _execute_git_remote_command_subprocess {}', (exception,)))
            raise exception
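
The `(level, message, args)` tuples placed on `log_msg_queue` above imply that the parent process drains the queue and re-emits the records through its own logger, since the pexpect subprocess cannot use the parent's logging setup directly. A hypothetical sketch of such a drain loop (the function name, timeout handling, and stop condition are assumptions):

import queue

def _drain_log_messages_sketch(log_msg_queue, logger, poll_timeout=1.0):
    """Illustrative only: re-emit (level, message, args) tuples from the child process."""
    while True:
        try:
            level, message, args = log_msg_queue.get(timeout=poll_timeout)
        except queue.Empty:
            break  # assume the child is finished once the queue stays empty
        # The messages use '{}' placeholders, so format before dispatching by level name.
        getattr(logger, level.lower())(message.format(*args))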
Example #22
    def _start_application(self, application, port):
        # Note: No significant application logic should be executed before this point. The call to application.listen()
        # will raise an exception if another process is using the same port. We rely on this exception to force us to
        # exit if there are any port conflicts.
        try:
            application.listen(port, '0.0.0.0')
        except OSError:
            self._logger.error(
                'Could not start application on port {}. Is port already in use?'
                .format(port))
            sys.exit(1)

        ioloop = tornado.ioloop.IOLoop.instance()

        # add a teardown callback that will stop the tornado server
        stop_tornado_ioloop = functools.partial(ioloop.add_callback,
                                                callback=ioloop.stop)
        UnhandledExceptionHandler.singleton().add_teardown_callback(
            stop_tornado_ioloop)
        return ioloop
    def connect_to_master(self, master_url=None):
        """
        Notify the master that this slave exists.

        :param master_url: The URL of the master service. If none is specified, defaults to localhost:43000.
        :type master_url: str | None
        """
        self.is_alive = True
        self._master_url = master_url or "localhost:43000"
        self._master_api = UrlBuilder(self._master_url)
        connect_url = self._master_api.url("slave")
        data = {"slave": "{}:{}".format(self.host, self.port), "num_executors": self._num_executors}
        response = self._network.post(connect_url, data=data)
        self._slave_id = int(response.json().get("slave_id"))
        self._logger.info("Slave {}:{} connected to master on {}.", self.host, self.port, self._master_url)

        # We disconnect from the master before build_teardown so that the master stops sending subjobs. (Teardown
        # callbacks are executed in the reverse order that they're added, so we add the build_teardown callback first.)
        UnhandledExceptionHandler.singleton().add_teardown_callback(self._do_build_teardown_and_reset, timeout=30)
        UnhandledExceptionHandler.singleton().add_teardown_callback(self._disconnect_from_master)
Example #24
    def _start_build(self):
        """
        Send the build request to the master for execution.
        """
        build_url = self._master_api.url('build')
        # todo: catch connection error
        response = self._network.post_with_digest(build_url,
                                                  self._request_params,
                                                  self._secret,
                                                  error_on_failure=True)
        response_data = response.json()

        if 'error' in response_data:
            error_message = response_data['error']
            raise _BuildRunnerError('Error starting build: ' + error_message)

        self._build_id = response_data['build_id']

        UnhandledExceptionHandler.singleton().add_teardown_callback(
            self._cancel_build)
        self._logger.info('Build is running. (Build id: {})', self._build_id)
    def test_exception_on_safe_thread_calls_teardown_callbacks(self):
        my_awesome_teardown_callback = MagicMock()
        unhandled_exception_handler = UnhandledExceptionHandler.singleton()
        unhandled_exception_handler.add_teardown_callback(my_awesome_teardown_callback, 'fake arg', fake_kwarg='boop')

        def my_terrible_method():
            raise Exception('Sic semper tyrannis!')

        thread = SafeThread(target=my_terrible_method)
        thread.start()
        thread.join()

        my_awesome_teardown_callback.assert_called_once_with('fake arg', fake_kwarg='boop')
    def test_normal_execution_on_safe_thread_does_not_call_teardown_callbacks(self):
        my_lonely_teardown_callback = MagicMock()
        unhandled_exception_handler = UnhandledExceptionHandler.singleton()
        unhandled_exception_handler.add_teardown_callback(my_lonely_teardown_callback)

        def my_fantastic_method():
            print('Veritas vos liberabit!')

        thread = SafeThread(target=my_fantastic_method)
        thread.start()
        thread.join()

        self.assertFalse(my_lonely_teardown_callback.called,
                         'The teardown callback should not be called unless an exception is raised.')
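
Both tests above rely on SafeThread routing an unhandled exception from the child thread into the UnhandledExceptionHandler, which then runs the teardown callbacks. A hypothetical sketch of such a wrapper, mirroring the `run()` override shown in a later example (the constructor signature here is an assumption):

import threading

class _SafeThreadSketch(threading.Thread):
    """Illustrative only: run the target inside the handler's context manager."""

    def __init__(self, exception_handler, **kwargs):
        super().__init__(**kwargs)
        self._exception_handler = exception_handler

    def run(self):
        # An exception escaping run() enters the handler's __exit__, which
        # executes the registered teardown callbacks.
        with self._exception_handler:
            super().run()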
Example #27
    def __init__(self):
        self._logger = get_logger(__name__)
        self._master_results_path = Configuration['results_directory']
        self._all_slaves_by_url = {}
        self._scheduler_pool = BuildSchedulerPool()
        self._build_request_handler = BuildRequestHandler(self._scheduler_pool)
        self._build_request_handler.start()
        self._slave_allocator = SlaveAllocator(self._scheduler_pool)
        self._slave_allocator.start()

        # Initialize the database connection before we initialize a BuildStore
        Connection.create(Configuration['database_url'])
        UnhandledExceptionHandler.singleton().add_teardown_callback(
            BuildStore.clean_up)

        # The best practice for determining the number of threads to use is
        # the number of threads per core multiplied by the number of physical
        # cores. So for example, with 10 cores, 2 sockets and 2 per core, the
        # max would be 40.
        #
        # Currently we use threads for incrementing/decrementing slave executor
        # counts (lock acquisition) and tearing down the slave (network IO). 32 threads should be
        # plenty for these tasks. In the case of heavy load, the bottleneck will be the number
        # of executors, not the time it takes to lock/unlock the executor counts or the number of
        # teardown requests. Tweak the number to find the sweet spot if you feel this is the case.
        self._thread_pool_executor = ThreadPoolExecutor(max_workers=32)

        # Asynchronously delete (but immediately rename) all old builds when master starts.
        # Remove this if/when build numbers are unique across master starts/stops
        # TODO: We can remove this code since we persist builds across master restarts
        # if os.path.exists(self._master_results_path):
        #     fs.async_delete(self._master_results_path)
        # fs.create_dir(self._master_results_path)

        SlavesCollector.register_slaves_metrics_collector(
            lambda: self.all_slaves_by_id().values())
    def test_exception_on_safe_thread_calls_teardown_callbacks(self):
        my_awesome_teardown_callback = MagicMock()
        unhandled_exception_handler = UnhandledExceptionHandler.singleton()
        unhandled_exception_handler.add_teardown_callback(
            my_awesome_teardown_callback, 'fake arg', fake_kwarg='boop')

        def my_terrible_method():
            raise Exception('Sic semper tyrannis!')

        thread = SafeThread(target=my_terrible_method)
        thread.start()
        thread.join()

        my_awesome_teardown_callback.assert_called_once_with('fake arg',
                                                             fake_kwarg='boop')
Example #29
def main(args):
    """
    This is the single entry point of the ClusterRunner application. This function feeds the command line parameters as
    keyword args directly into the subcommand entry point functions above.

    Note that we execute most application logic off of the main thread. This frees up the main thread to be responsible
    for facilitating a graceful application shutdown by intercepting external signals (currently only SIGINT/Ctrl-C)
    and executing teardown handlers.
    """
    parsed_args = _parse_args(args)
    _initialize_configuration(parsed_args.pop('subcommand'), parsed_args.pop('config_file'))
    subcommand_class = parsed_args.pop('subcommand_class')  # defined in _parse_args() by subparser.set_defaults()

    unhandled_exception_handler = UnhandledExceptionHandler.singleton()
    with unhandled_exception_handler:
        subcommand_class().run(**parsed_args)
    def test_normal_execution_on_safe_thread_does_not_call_teardown_callbacks(
            self):
        my_lonely_teardown_callback = MagicMock()
        unhandled_exception_handler = UnhandledExceptionHandler.singleton()
        unhandled_exception_handler.add_teardown_callback(
            my_lonely_teardown_callback)

        def my_fantastic_method():
            print('Veritas vos liberabit!')

        thread = SafeThread(target=my_fantastic_method)
        thread.start()
        thread.join()

        self.assertFalse(
            my_lonely_teardown_callback.called,
            'The teardown callback should not be called unless an exception is raised.'
        )
    def test_calling_kill_subprocesses_will_break_out_of_command_execution_wait_loop(self):
        self._mock_out_popen_communicate()

        project_type = ProjectType()
        command_thread = SafeThread(target=project_type.execute_command_in_project, args=('echo The power is yours!',))

        # This calls execute_command_in_project() on one thread, and calls kill_subprocesses() on another. The
        # kill_subprocesses() call should cause the first thread to exit.
        command_thread.start()
        project_type.kill_subprocesses()

        # This *should* join immediately, but we specify a timeout just in case something goes wrong so that the test
        # doesn't hang. A successful join implies success. We also use the UnhandledExceptionHandler so that exceptions
        # propagate from the child thread to the test thread and fail the test.
        with UnhandledExceptionHandler.singleton():
            command_thread.join(timeout=10)
            if command_thread.is_alive():
                self.mock_killpg()  # Calling killpg() causes the command thread to end.
                self.fail('project_type.kill_subprocesses should cause the command execution wait loop to exit.')

        self.mock_killpg.assert_called_once_with(55555, ANY)  # Note: os.killpg does not accept keyword args.
Example #32
def main(args):
    """
    This is the single entry point of the ClusterRunner application. This function feeds the command line parameters as
    keyword args directly into the run() method of the appropriate Subcommand subclass.

    Note that for the master and slave subcommands, we execute all major application logic (including the web server)
    on a separate thread (not the main thread). This frees up the main thread to be responsible for facilitating
    graceful application shutdown by intercepting external signals and executing teardown handlers.
    """
    parsed_args = _parse_args(args)
    _initialize_configuration(parsed_args.pop('subcommand'), parsed_args.pop('config_file'))
    subcommand_class = parsed_args.pop('subcommand_class')  # defined in _parse_args() by subparser.set_defaults()

    try:
        unhandled_exception_handler = UnhandledExceptionHandler.singleton()
        with unhandled_exception_handler:
            subcommand_class().run(**parsed_args)

    finally:
        # The force kill countdown is not an UnhandledExceptionHandler teardown callback because we want it to execute
        # in all situations (not only when there is an unhandled exception).
        _start_app_force_kill_countdown(seconds=10)
Example #33
 def run(self):
     unhandled_exception_handler = UnhandledExceptionHandler.singleton()
     with unhandled_exception_handler:
         super().run()
 def setUp(self):
     super().setUp()
     self.exception_handler = UnhandledExceptionHandler.singleton()
Example #36
 def setUp(self):
     super().setUp()
     self.exception_handler = UnhandledExceptionHandler.singleton()