예제 #1
0
    def __init__(self):
        """Initialize the cluster master.

        Wires up the slave registry, build scheduling pool, request handler,
        and slave allocator, then prepares the on-disk results directory and
        heartbeat tracking.

        Side effects: starts the build request handler and the slave allocator
        background services, asynchronously deletes any pre-existing results
        directory (after an immediate rename), recreates that directory, and
        registers a slaves metrics collector.
        """
        self._logger = get_logger(__name__)
        self._master_results_path = Configuration['results_directory']
        self._slave_registry = SlaveRegistry.singleton()
        self._scheduler_pool = BuildSchedulerPool()
        # The request handler and allocator share the same scheduler pool;
        # each is started immediately after construction.
        self._build_request_handler = BuildRequestHandler(self._scheduler_pool)
        self._build_request_handler.start()
        self._slave_allocator = SlaveAllocator(self._scheduler_pool)
        self._slave_allocator.start()

        # The best practice for determining the number of threads to use is
        # the number of threads per core multiplied by the number of physical
        # cores. So for example, with 10 cores, 2 sockets and 2 per core, the
        # max would be 40.
        #
        # Currently we use threads for incrementing/decrementing slave executor
        # counts (lock acquisition) and tearing down the slave (network IO). 32 threads should be
        # plenty for these tasks. In the case of heavy load, the bottle neck will be the number
        # of executors, not the time it takes to lock/unlock the executor counts or the number of
        # teardown requests. Tweak the number to find the sweet spot if you feel this is the case.
        self._thread_pool_executor = ThreadPoolExecutor(max_workers=32)

        # Asynchronously delete (but immediately rename) all old builds when master starts.
        # Remove this if/when build numbers are unique across master starts/stops
        if os.path.exists(self._master_results_path):
            fs.async_delete(self._master_results_path)
        fs.create_dir(self._master_results_path)

        # Configure heartbeat tracking. The scheduler itself is driven
        # elsewhere; only the interval and scheduler object are set up here.
        self._unresponsive_slaves_cleanup_interval = Configuration[
            'unresponsive_slaves_cleanup_interval']
        self._hb_scheduler = sched.scheduler()

        # Expose the current set of slaves (by id) to the metrics layer.
        SlavesCollector.register_slaves_metrics_collector(
            lambda: self._slave_registry.get_all_slaves_by_id().values())
예제 #2
0
    def __init__(self):
        """Initialize the cluster master.

        Sets up the in-memory slave/build registries and the two worker loops
        (build preparation and slave allocation), then prepares the on-disk
        results directory.

        Side effects: starts two daemon threads and asynchronously deletes any
        pre-existing results directory (after an immediate rename) before
        recreating it.
        """
        self._logger = get_logger(__name__)

        # Registries of known slaves (keyed by URL) and all builds (keyed by id).
        self._all_slaves_by_url = {}
        self._all_builds_by_id = OrderedDict()  # This is an OrderedDict so we can more easily implement get_queue()
        self._builds_waiting_for_slaves = Queue()

        # Incoming build requests are queued and handled serially.
        self._request_queue = Queue()
        self._request_handler = SerialRequestHandler()

        # Daemon thread that drains the request queue and prepares builds.
        self._request_queue_worker_thread = SafeThread(
            target=self._build_preparation_loop, name='RequestHandlerLoop', daemon=True)
        self._request_queue_worker_thread.start()

        # Daemon thread that assigns idle slaves to builds waiting for them.
        self._slave_allocation_worker_thread = SafeThread(
            target=self._slave_allocation_loop, name='SlaveAllocationLoop', daemon=True)
        self._slave_allocation_worker_thread.start()

        self._master_results_path = Configuration['results_directory']

        # It's important that idle slaves are only in the queue once so we use OrderedSet
        self._idle_slaves = OrderedSetQueue()

        # Asynchronously delete (but immediately rename) all old builds when master starts.
        # Remove this if/when build numbers are unique across master starts/stops
        if os.path.exists(self._master_results_path):
            fs.async_delete(self._master_results_path)

        fs.create_dir(self._master_results_path)
예제 #3
0
    def test_async_delete_calls_correct_commands(self):
        """async_delete should move the target into a temp dir, then rm -rf it."""
        mock_popen = self.patch('subprocess.Popen')
        mock_move = self.patch('shutil.move')
        self.patch('os.path.isdir').return_value = True
        self.patch('tempfile.mkdtemp').return_value = '/tmp/dir'

        fs.async_delete('/some/dir')

        mock_popen.assert_called_with(['rm', '-rf', '/tmp/dir'])
        mock_move.assert_called_with('/some/dir', '/tmp/dir')
예제 #4
0
    def test_async_delete_calls_correct_commands(self):
        """async_delete should move the target into a temp dir, then rm -rf it."""
        popen_patch = self.patch("app.util.fs.Popen_with_delayed_expansion")
        move_patch = self.patch("shutil.move")
        self.patch("os.path.isdir").return_value = True
        self.patch("tempfile.mkdtemp").return_value = "/tmp/dir"

        fs.async_delete("/some/dir")

        popen_patch.assert_called_with(["rm", "-rf", "/tmp/dir"])
        move_patch.assert_called_with("/some/dir", "/tmp/dir")
예제 #5
0
    def test_async_delete_calls_correct_commands(self):
        """Deleting a directory asynchronously renames it, then removes it via `rm -rf`."""
        subprocess_popen = self.patch('subprocess.Popen')
        shutil_move = self.patch('shutil.move')
        tempfile_mkdtemp = self.patch('tempfile.mkdtemp')
        tempfile_mkdtemp.return_value = '/tmp/dir'
        self.patch('os.path.isdir').return_value = True

        fs.async_delete('/some/dir')

        shutil_move.assert_called_with('/some/dir', '/tmp/dir')
        subprocess_popen.assert_called_with(['rm', '-rf', '/tmp/dir'])
예제 #6
0
    def __init__(self):
        """Initialize the cluster master.

        Sets up slave/build registries and the build request handler and slave
        allocator services, then prepares the on-disk results directory.

        Side effects: starts the build request handler and the slave allocator
        background services, asynchronously deletes any pre-existing results
        directory (after an immediate rename), and recreates it.
        """
        self._logger = get_logger(__name__)
        self._master_results_path = Configuration['results_directory']
        # Registries of known slaves (keyed by URL) and all builds (keyed by id).
        self._all_slaves_by_url = {}
        self._all_builds_by_id = OrderedDict()
        # The allocator consumes from the request handler; each service is
        # started immediately after construction.
        self._build_request_handler = BuildRequestHandler()
        self._build_request_handler.start()
        self._slave_allocator = SlaveAllocator(self._build_request_handler)
        self._slave_allocator.start()

        # Asynchronously delete (but immediately rename) all old builds when master starts.
        # Remove this if/when build numbers are unique across master starts/stops
        if os.path.exists(self._master_results_path):
            fs.async_delete(self._master_results_path)

        fs.create_dir(self._master_results_path)
예제 #7
0
    def __init__(self):
        """Initialize the cluster master.

        Sets up slave/build registries and the build request handler and slave
        allocator services, then prepares the on-disk results directory.

        Side effects: starts the build request handler and the slave allocator
        background services, asynchronously deletes any pre-existing results
        directory (after an immediate rename), and recreates it.
        """
        self._logger = get_logger(__name__)
        self._master_results_path = Configuration["results_directory"]
        # Registries of known slaves (keyed by URL) and all builds (keyed by id).
        self._all_slaves_by_url = {}
        self._all_builds_by_id = OrderedDict()
        # The allocator consumes from the request handler; each service is
        # started immediately after construction.
        self._build_request_handler = BuildRequestHandler()
        self._build_request_handler.start()
        self._slave_allocator = SlaveAllocator(self._build_request_handler)
        self._slave_allocator.start()

        # Asynchronously delete (but immediately rename) all old builds when master starts.
        # Remove this if/when build numbers are unique across master starts/stops
        if os.path.exists(self._master_results_path):
            fs.async_delete(self._master_results_path)

        fs.create_dir(self._master_results_path)