    def __init__(self):
        self._logger = get_logger(__name__)
        self._master_results_path = Configuration['results_directory']
        self._slave_registry = SlaveRegistry.singleton()
        self._scheduler_pool = BuildSchedulerPool()
        self._build_request_handler = BuildRequestHandler(self._scheduler_pool)
        self._build_request_handler.start()
        self._slave_allocator = SlaveAllocator(self._scheduler_pool)
        self._slave_allocator.start()

        # The best practice for determining the number of threads to use is
        # the number of hardware threads per core multiplied by the number of
        # physical cores. For example, with 10 cores per socket, 2 sockets,
        # and 2 threads per core, the max would be 40.
        #
        # Currently we use threads for incrementing/decrementing slave executor
        # counts (lock acquisition) and tearing down the slave (network IO).
        # 32 threads should be plenty for these tasks. Under heavy load, the
        # bottleneck will be the number of executors, not the time it takes to
        # lock/unlock the executor counts or the number of teardown requests.
        # Tweak the number to find the sweet spot if you feel this is the case
        # (see the sizing sketch after this example).
        self._thread_pool_executor = ThreadPoolExecutor(max_workers=32)

        # Asynchronously delete (but immediately rename) all old builds when master starts.
        # Remove this if/when build numbers are unique across master starts/stops
        if os.path.exists(self._master_results_path):
            fs.async_delete(self._master_results_path)
        fs.create_dir(self._master_results_path)

        # Configure heartbeat tracking
        self._unresponsive_slaves_cleanup_interval = Configuration[
            'unresponsive_slaves_cleanup_interval']
        self._hb_scheduler = sched.scheduler()

        SlavesCollector.register_slaves_metrics_collector(
            lambda: self._slave_registry.get_all_slaves_by_id().values())
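For reference, a minimal standalone sketch of the sizing heuristic described in the comment above. It relies only on os.cpu_count(), which already reports logical CPUs (physical cores across all sockets multiplied by hardware threads per core); the helper name pick_max_workers is hypothetical and not part of this codebase.

import os

def pick_max_workers(default=32):
    """Hypothetical helper: approximate 'threads per core x physical cores'.

    os.cpu_count() returns the logical CPU count, which is exactly that
    product: e.g. 2 sockets x 10 cores x 2 threads per core reports 40.
    """
    logical_cpus = os.cpu_count()  # may be None where it cannot be determined
    return logical_cpus or default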
Example #2
    def __init__(self):
        self._logger = get_logger(__name__)
        self._master_results_path = Configuration['results_directory']
        self._all_slaves_by_url = {}
        self._all_builds_by_id = OrderedDict()
        self._build_request_handler = BuildRequestHandler()
        self._build_request_handler.start()
        self._slave_allocator = SlaveAllocator(self._build_request_handler)
        self._slave_allocator.start()

        # Asynchronously delete (but immediately rename) all old builds when master starts.
        # Remove this if/when build numbers are unique across master starts/stops
        if os.path.exists(self._master_results_path):
            fs.async_delete(self._master_results_path)

        fs.create_dir(self._master_results_path)
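The "asynchronously delete (but immediately rename)" comment describes a rename-then-delete pattern: move the directory out of the way with a cheap atomic rename so the path is immediately reusable, then reclaim the space in the background. A minimal sketch of that idea, assuming the renamed copy stays on the same filesystem; this illustrates the pattern and is not ClusterRunner's actual fs.async_delete implementation.

import os
import shutil
import threading
import uuid

def async_delete(path):
    """Sketch: rename `path` immediately, then delete the copy in the background."""
    # Renaming next to the original keeps the move on one filesystem, so it
    # is fast and atomic; the original path is free for reuse right away.
    doomed_path = '{}.deleting.{}'.format(path, uuid.uuid4().hex)
    os.rename(path, doomed_path)

    # The slow recursive delete runs off the calling thread.
    threading.Thread(target=shutil.rmtree, args=(doomed_path,), daemon=True).start()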
Example #3
    def test_prepare_build_async_does_not_call_mark_failed_for_canceled_build(
            self, subjobs):
        mock_project_lock = self.patch('threading.Lock').return_value
        build_scheduler_mock = self.patch(
            'app.master.build_scheduler.BuildScheduler').return_value
        build_request_handler = BuildRequestHandler(build_scheduler_mock)
        build_mock = self.patch('app.master.build.Build').return_value
        build_mock.get_subjobs.return_value = subjobs
        build_mock.is_canceled = True
        build_mock.prepare.side_effect = AtomizerError

        build_request_handler._prepare_build_async(build_mock,
                                                   mock_project_lock)

        self.assertFalse(
            build_mock.mark_failed.called,
            'Build mark_failed should not be called for CANCELED build')
Example #4
    def test_prepare_build_async_does_not_call_finish_for_canceled_or_error_build(
            self):
        subjobs = []
        mock_project_lock = self.patch('threading.Lock').return_value
        build_scheduler_mock = self.patch(
            'app.master.build_scheduler.BuildScheduler').return_value
        build_request_handler = BuildRequestHandler(build_scheduler_mock)
        build_mock = self.patch('app.master.build.Build').return_value
        build_mock.is_stopped = True  # this means the BuildState is CANCELED or ERROR
        build_mock.get_subjobs.return_value = subjobs

        build_request_handler._prepare_build_async(build_mock,
                                                   mock_project_lock)

        self.assertFalse(
            build_mock.finish.called,
            'Build finish should not be called for a CANCELED or ERROR build')
Example #5
    def test_prepare_build_async_calls_finish_only_if_no_subjobs(
            self, subjobs, build_finish_called):
        mock_project_lock = self.patch('threading.Lock').return_value
        build_scheduler_mock = self.patch(
            'app.master.build_scheduler.BuildScheduler').return_value
        build_request_handler = BuildRequestHandler(build_scheduler_mock)
        build_mock = self.patch('app.master.build.Build').return_value
        build_mock.is_stopped = False
        build_mock.get_subjobs.return_value = subjobs

        build_request_handler._prepare_build_async(build_mock,
                                                   mock_project_lock)

        if build_finish_called:
            build_mock.finish.assert_called_once_with()
        else:
            self.assertFalse(build_mock.finish.called)
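Read together, these three tests pin down the guard logic that _prepare_build_async must implement: a canceled build is never marked failed when preparation raises, a stopped (CANCELED or ERROR) build is never finished, and finish() runs only when preparation yields no subjobs. A simplified sketch consistent with all three; the real method does more (e.g. handing subjobs to the scheduler), and the failure message here is illustrative.

def _prepare_build_async(self, build, project_lock):
    """Simplified control flow implied by the tests above."""
    with project_lock:
        try:
            build.prepare()
        except Exception:
            # A canceled build must not be marked failed, even if
            # preparation raised (e.g. an AtomizerError).
            if not build.is_canceled:
                build.mark_failed('Build preparation failed.')
            return

    # A stopped build (CANCELED or ERROR) is never finished here; finish()
    # is called only when preparation produced no subjobs to execute.
    if not build.is_stopped and len(build.get_subjobs()) == 0:
        build.finish()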
Example #6
    def __init__(self):
        self._logger = get_logger(__name__)
        self._master_results_path = Configuration['results_directory']
        self._all_slaves_by_url = {}
        self._scheduler_pool = BuildSchedulerPool()
        self._build_request_handler = BuildRequestHandler(self._scheduler_pool)
        self._build_request_handler.start()
        self._slave_allocator = SlaveAllocator(self._scheduler_pool)
        self._slave_allocator.start()

        # Initialize the database connection before we initialize a BuildStore
        Connection.create(Configuration['database_url'])
        UnhandledExceptionHandler.singleton().add_teardown_callback(
            BuildStore.clean_up)

        # The best practice for determining the number of threads to use is
        # the number of hardware threads per core multiplied by the number of
        # physical cores. For example, with 10 cores per socket, 2 sockets,
        # and 2 threads per core, the max would be 40.
        #
        # Currently we use threads for incrementing/decrementing slave executor
        # counts (lock acquisition) and tearing down the slave (network IO).
        # 32 threads should be plenty for these tasks. Under heavy load, the
        # bottleneck will be the number of executors, not the time it takes to
        # lock/unlock the executor counts or the number of teardown requests.
        # Tweak the number to find the sweet spot if you feel this is the case.
        self._thread_pool_executor = ThreadPoolExecutor(max_workers=32)

        # Asynchronously delete (but immediately rename) all old builds when master starts.
        # Remove this if/when build numbers are unique across master starts/stops
        # TODO: We can remove this code since we persist builds across master restarts
        # if os.path.exists(self._master_results_path):
        #     fs.async_delete(self._master_results_path)
        # fs.create_dir(self._master_results_path)

        SlavesCollector.register_slaves_metrics_collector(
            lambda: self.all_slaves_by_id().values())
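The first example sets up heartbeat tracking (self._hb_scheduler = sched.scheduler() plus an unresponsive_slaves_cleanup_interval) but the recurring loop itself falls outside these snippets. A minimal sketch of a self-rearming periodic task with the standard-library sched module, assuming a hypothetical disconnect_unresponsive_slaves callback:

import sched

def start_heartbeat_tracker(cleanup_interval, disconnect_unresponsive_slaves):
    """Sketch: invoke the cleanup callback every `cleanup_interval` seconds."""
    scheduler = sched.scheduler()  # defaults to time.monotonic / time.sleep

    def _tick():
        disconnect_unresponsive_slaves()
        # Re-arm the event so the cleanup keeps recurring.
        scheduler.enter(cleanup_interval, 1, _tick)

    scheduler.enter(cleanup_interval, 1, _tick)
    scheduler.run()  # blocks, so run it on a dedicated thread in practice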