Code example #1
0
File: mcp.py — Project: zhengpingwan/Tron
 def __init__(self, working_dir, config_path):
     """Build the central daemon state object.

     working_dir -- directory used for working files (e.g. action output).
     config_path -- path handed to the ConfigManager for loading config.
     """
     super(MasterControlProgram, self).__init__()
     # Collection of all known job schedulers.
     self.jobs = JobCollection()
     self.working_dir = working_dir
     # Loads and caches the Tron configuration from config_path.
     self.config = manager.ConfigManager(config_path)
     # Base command context; populated later from the master config.
     self.context = command_context.CommandContext()
     # Persists job state changes to the configured state store.
     self.state_watcher = statemanager.StateChangeWatcher()
     log.info('initialized')
Code example #2
0
File: mcp.py — Project: Yelp/Tron
 def __init__(self, working_dir, config_path):
     """Build the central daemon state object.

     working_dir -- directory used for working files (e.g. action output).
     config_path -- path handed to the ConfigManager for loading config.
     """
     super(MasterControlProgram, self).__init__()
     # Collection of all known job schedulers.
     self.jobs = JobCollection()
     self.working_dir = working_dir
     # Loads and caches the Tron configuration from config_path.
     self.config = manager.ConfigManager(config_path)
     # Base command context; populated later from the master config.
     self.context = command_context.CommandContext()
     # Persists job state changes to the configured state store.
     self.state_watcher = statemanager.StateChangeWatcher()
     log.info('initialized')
Code example #3
0
File: job_collection_test.py — Project: Yelp/Tron
class TestJobCollection(TestCase):
    """Unit tests for JobCollection."""

    @setup
    def setup_collection(self):
        # Fresh, empty collection for every test.
        self.collection = JobCollection()

    def test_load_from_config(self):
        """Each job config yields one scheduler that is built, added twice,
        scheduled and queried for its job."""
        autospec_method(self.collection.jobs.filter_by_name)
        autospec_method(self.collection.add)
        scheduler_factory = mock.create_autospec(JobSchedulerFactory)
        configs = {'a': mock.Mock(), 'b': mock.Mock()}
        loaded = list(
            self.collection.load_from_config(configs, scheduler_factory, True)
        )
        self.collection.jobs.filter_by_name.assert_called_with(configs)
        assert_mock_calls(
            [mock.call(cfg) for cfg in configs.values()],
            scheduler_factory.build.mock_calls,
        )
        assert_length(self.collection.add.mock_calls, len(configs) * 2)
        assert_length(loaded, len(configs))
        # Every other add() call carries the scheduler as its first positional arg.
        for add_call in self.collection.add.mock_calls[::2]:
            scheduler = add_call[1][0]
            scheduler.schedule.assert_called_with()
            scheduler.get_job.assert_called_with()

    def test_move_running_job(self):
        """Moving a job that is still running is refused."""
        target = 'tron.core.job_collection.JobCollection.get_by_name'
        with mock.patch(target, autospec=None) as get_by_name:
            get_by_name.return_value.get_job.return_value.status = Job.STATUS_RUNNING
            outcome = self.collection.move('old.test', 'new.test')
            assert 'Job is still running.' in outcome

    def test_move(self):
        """Moving an enabled job succeeds."""
        target = 'tron.core.job_collection.JobCollection.get_by_name'
        with mock.patch(target, autospec=None) as get_by_name:
            get_by_name.return_value.get_job.return_value.status = Job.STATUS_ENABLED
            get_by_name.get_name.return_value = 'old.test'
            self.collection.add(get_by_name)
            outcome = self.collection.move('old.test', 'new.test')
            assert 'succeeded' in outcome

    def test_update(self):
        """update() looks up the existing scheduler, copies the new
        scheduler's state into it and reschedules."""
        new_scheduler = mock.create_autospec(JobScheduler)
        current_scheduler = mock.create_autospec(JobScheduler)
        autospec_method(
            self.collection.get_by_name,
            return_value=current_scheduler,
        )
        assert self.collection.update(new_scheduler)
        self.collection.get_by_name.assert_called_with(new_scheduler.get_name())
        current_scheduler.update_from_job_scheduler.assert_called_with(
            new_scheduler
        )
        current_scheduler.schedule_reconfigured.assert_called_with()
Code example #4
0
class TestJobCollection(TestCase):
    """Unit tests for JobCollection."""

    @setup
    def setup_collection(self):
        # Fresh, empty collection for every test.
        self.collection = JobCollection()

    def test_load_from_config(self):
        """Each job config yields one scheduler that is built, added twice,
        scheduled and queried for its job."""
        autospec_method(self.collection.jobs.filter_by_name)
        autospec_method(self.collection.add)
        factory = mock.create_autospec(JobSchedulerFactory)
        job_configs = {'a': mock.Mock(), 'b': mock.Mock()}
        result = self.collection.load_from_config(job_configs, factory, True)
        result = list(result)
        self.collection.jobs.filter_by_name.assert_called_with(job_configs)
        # dict.values() replaces the legacy six.itervalues() Python 2 shim;
        # they are equivalent on Python 3 and this matches the sibling tests.
        expected_calls = [mock.call(v) for v in job_configs.values()]
        assert_mock_calls(expected_calls, factory.build.mock_calls)
        assert_length(self.collection.add.mock_calls, len(job_configs) * 2)
        assert_length(result, len(job_configs))
        # Every other add() call carries the scheduler as its first positional arg.
        job_schedulers = [
            call[1][0] for call in self.collection.add.mock_calls[::2]
        ]
        for job_scheduler in job_schedulers:
            job_scheduler.schedule.assert_called_with()
            job_scheduler.get_job.assert_called_with()

    def test_update(self):
        """update() looks up the existing scheduler, copies the new job's
        state into its job and reschedules."""
        mock_scheduler = mock.create_autospec(JobScheduler)
        existing_scheduler = mock.create_autospec(JobScheduler)
        autospec_method(
            self.collection.get_by_name,
            return_value=existing_scheduler,
        )
        assert self.collection.update(mock_scheduler)
        self.collection.get_by_name.assert_called_with(
            mock_scheduler.get_name(),
        )
        existing_scheduler.get_job().update_from_job.assert_called_with(
            mock_scheduler.get_job.return_value,
        )
        existing_scheduler.schedule_reconfigured.assert_called_with()
Code example #5
0
class TestJobCollection(TestCase):
    """Unit tests for JobCollection."""

    @setup
    def setup_collection(self):
        # Fresh, empty collection for every test.
        self.collection = JobCollection()

    def test_load_from_config(self):
        """Each job config yields one scheduler that is built, added twice,
        scheduled and queried for its job."""
        autospec_method(self.collection.jobs.filter_by_name)
        autospec_method(self.collection.add)
        scheduler_factory = mock.create_autospec(JobSchedulerFactory)
        configs = {'a': mock.Mock(), 'b': mock.Mock()}
        loaded = list(
            self.collection.load_from_config(configs, scheduler_factory, True)
        )
        self.collection.jobs.filter_by_name.assert_called_with(configs)
        assert_mock_calls(
            [mock.call(cfg) for cfg in configs.values()],
            scheduler_factory.build.mock_calls,
        )
        assert_length(self.collection.add.mock_calls, len(configs) * 2)
        assert_length(loaded, len(configs))
        # Every other add() call carries the scheduler as its first positional arg.
        for add_call in self.collection.add.mock_calls[::2]:
            scheduler = add_call[1][0]
            scheduler.schedule.assert_called_with()
            scheduler.get_job.assert_called_with()

    def test_move_running_job(self):
        """Moving a job that is still running is refused."""
        target = 'tron.core.job_collection.JobCollection.get_by_name'
        with mock.patch(target, autospec=None) as get_by_name:
            get_by_name.return_value.get_job.return_value.status = Job.STATUS_RUNNING
            outcome = self.collection.move('old.test', 'new.test')
            assert 'Job is still running.' in outcome

    def test_move(self):
        """Moving an enabled job succeeds."""
        target = 'tron.core.job_collection.JobCollection.get_by_name'
        with mock.patch(target, autospec=None) as get_by_name:
            get_by_name.return_value.get_job.return_value.status = Job.STATUS_ENABLED
            get_by_name.get_name.return_value = 'old.test'
            self.collection.add(get_by_name)
            outcome = self.collection.move('old.test', 'new.test')
            assert 'succeeded' in outcome

    def test_update(self):
        """update() looks up the existing scheduler, copies the new
        scheduler's state into it and reschedules."""
        new_scheduler = mock.create_autospec(JobScheduler)
        current_scheduler = mock.create_autospec(JobScheduler)
        autospec_method(
            self.collection.get_by_name,
            return_value=current_scheduler,
        )
        assert self.collection.update(new_scheduler)
        self.collection.get_by_name.assert_called_with(new_scheduler.get_name())
        current_scheduler.update_from_job_scheduler.assert_called_with(
            new_scheduler
        )
        current_scheduler.schedule_reconfigured.assert_called_with()
Code example #6
0
 def setup_collection(self):
     """Create a fresh, empty JobCollection fixture for each test."""
     self.collection = JobCollection()
Code example #7
0
class MasterControlProgram(object):
    """Central state object for the Tron daemon.

    Owns the job collection, configuration manager, command context and
    state persistence watcher, and routes configuration (re)loads through
    to all of them.
    """

    def __init__(self, working_dir, config_path):
        """working_dir -- directory for working files (e.g. action output).
        config_path -- path handed to the ConfigManager for loading config.
        """
        super(MasterControlProgram, self).__init__()
        # Collection of all known job schedulers.
        self.jobs = JobCollection()
        self.working_dir = working_dir
        # Loads and caches the Tron configuration from config_path.
        self.config = manager.ConfigManager(config_path)
        # Base command context; populated later from the master config.
        self.context = command_context.CommandContext()
        # Persists job state changes to the configured state store.
        self.state_watcher = statemanager.StateChangeWatcher()
        # Recorded so uptime can be reported.
        self.boot_time = time.time()
        log.info('initialized')

    def shutdown(self):
        """Stop the event bus, then shut down state persistence."""
        EventBus.shutdown()
        self.state_watcher.shutdown()

    def reconfigure(self, namespace=None):
        """Reconfigure MCP while Tron is already running.

        namespace -- if given, restrict the reconfiguration to that config
        namespace; None reconfigures everything.
        """
        log.info("reconfigured")
        try:
            self._load_config(reconfigure=True,
                              namespace_to_reconfigure=namespace)
        except Exception as e:
            log.exception(f"reconfigure failure: {e.__class__.__name__}: {e}")
            # Bare `raise` re-raises the active exception with its original
            # traceback intact; `raise e` would restart the traceback here.
            raise

    def _load_config(self, reconfigure=False, namespace_to_reconfigure=None):
        """Read config data and apply it.

        State watching is disabled during the apply so partial updates are
        not persisted.
        """
        with self.state_watcher.disabled():
            self.apply_config(
                self.config.load(),
                reconfigure=reconfigure,
                namespace_to_reconfigure=namespace_to_reconfigure)

    def initial_setup(self):
        """When the MCP is initialized the config is applied before the state.
        In this case jobs shouldn't be scheduled until the state is applied.
        """
        self._load_config()
        self.restore_state(
            actioncommand.create_action_runner_factory_from_config(
                self.config.load().get_master().action_runner))
        # Any job with existing state would have been scheduled already. Jobs
        # without any state will be scheduled here.
        self.jobs.run_queue_schedule()

    def apply_config(self,
                     config_container,
                     reconfigure=False,
                     namespace_to_reconfigure=None):
        """Apply a configuration.

        config_container -- the loaded configuration container.
        reconfigure -- True when applied to an already-running daemon.
        namespace_to_reconfigure -- restrict job updates to one namespace.
        """
        # Each directive pairs a handler with the master-config attribute
        # name(s) whose values it receives.
        master_config_directives = [
            (self.update_state_watcher_config, 'state_persistence'),
            (self.set_context_base, 'command_context'),
            (
                node.NodePoolRepository.update_from_config,
                'nodes',
                'node_pools',
                'ssh_options',
            ),
            (MesosClusterRepository.configure, 'mesos_options'),
            (self.configure_eventbus, 'eventbus_enabled'),
        ]
        master_config = config_container.get_master()
        apply_master_configuration(master_config_directives, master_config)

        self.state_watcher.watch(MesosClusterRepository)

        # If the master namespace was updated, we should update jobs in all namespaces
        if namespace_to_reconfigure == MASTER_NAMESPACE:
            namespace_to_reconfigure = None

        # TODO: unify NOTIFY_STATE_CHANGE and simplify this
        self.job_graph = JobGraph(config_container)
        factory = self.build_job_scheduler_factory(master_config,
                                                   self.job_graph)
        updated_jobs = self.jobs.update_from_config(
            config_container.get_jobs(),
            factory,
            reconfigure,
            namespace_to_reconfigure,
        )
        self.state_watcher.watch_all(
            updated_jobs, [Job.NOTIFY_STATE_CHANGE, Job.NOTIFY_NEW_RUN])

    def build_job_scheduler_factory(self, master_config, job_graph):
        """Build a JobSchedulerFactory from master config and the job graph."""
        # Fall back to the working directory when no output dir is configured.
        output_stream_dir = master_config.output_stream_dir or self.working_dir
        action_runner = actioncommand.create_action_runner_factory_from_config(
            master_config.action_runner, )
        return JobSchedulerFactory(
            self.context,
            output_stream_dir,
            master_config.time_zone,
            action_runner,
            job_graph,
        )

    def update_state_watcher_config(self, state_config):
        """Update the StateChangeWatcher, and save all state if the state config
        changed.
        """
        if self.state_watcher.update_from_config(state_config):
            for job_scheduler in self.jobs:
                self.state_watcher.save_job(job_scheduler.get_job())

    def set_context_base(self, command_context):
        """Install the master config's command context as the base context."""
        self.context.base = command_context

    def configure_eventbus(self, enabled):
        """Start the event bus when enabled (idempotent), else shut it down."""
        if enabled:
            if not EventBus.instance:
                EventBus.create(f"{self.working_dir}/_events")
                EventBus.start()
        else:
            EventBus.shutdown()

    def get_job_collection(self):
        """Return the JobCollection owned by this MCP."""
        return self.jobs

    def get_config_manager(self):
        """Return the ConfigManager owned by this MCP."""
        return self.config

    def restore_state(self, action_runner):
        """Use the state manager to retrieve to persisted state and apply it
        to the configured Jobs.
        """
        log.info('restoring')
        states = self.state_watcher.restore(self.jobs.get_names())
        MesosClusterRepository.restore_state(states.get('mesos_state', {}))

        self.jobs.restore_state(states.get('job_state', {}), action_runner)
        self.state_watcher.save_metadata()

    def __str__(self):
        return "MCP"
Code example #8
0
File: mcp.py — Project: Yelp/Tron
class MasterControlProgram(object):
    """Central state object for the Tron daemon.

    Owns the job collection, configuration manager, command context and
    state persistence watcher, and routes configuration (re)loads through
    to all of them.
    """

    def __init__(self, working_dir, config_path):
        """working_dir -- directory for working files (e.g. action output).
        config_path -- path handed to the ConfigManager for loading config.
        """
        super(MasterControlProgram, self).__init__()
        # Collection of all known job schedulers.
        self.jobs = JobCollection()
        self.working_dir = working_dir
        # Loads and caches the Tron configuration from config_path.
        self.config = manager.ConfigManager(config_path)
        # Base command context; populated later from the master config.
        self.context = command_context.CommandContext()
        # Persists job state changes to the configured state store.
        self.state_watcher = statemanager.StateChangeWatcher()
        log.info('initialized')

    def shutdown(self):
        """Stop the event bus, then shut down state persistence."""
        EventBus.shutdown()
        self.state_watcher.shutdown()

    def reconfigure(self):
        """Reconfigure MCP while Tron is already running."""
        log.info("reconfigured")
        try:
            self._load_config(reconfigure=True)
        except Exception as e:
            log.exception(f"reconfigure failure: {e.__class__.__name__}: {e}")
            # Bare `raise` re-raises the active exception with its original
            # traceback intact; `raise e` would restart the traceback here.
            raise

    def _load_config(self, reconfigure=False):
        """Read config data and apply it.

        State watching is disabled during the apply so partial updates are
        not persisted.
        """
        with self.state_watcher.disabled():
            self.apply_config(self.config.load(), reconfigure=reconfigure)

    def initial_setup(self):
        """When the MCP is initialized the config is applied before the state.
        In this case jobs shouldn't be scheduled until the state is applied.
        """
        self._load_config()
        self.restore_state(
            actioncommand.create_action_runner_factory_from_config(
                self.config.load().get_master().action_runner
            )
        )
        # Any job with existing state would have been scheduled already. Jobs
        # without any state will be scheduled here.
        self.jobs.run_queue_schedule()

    def apply_config(self, config_container, reconfigure=False):
        """Apply a configuration.

        config_container -- the loaded configuration container.
        reconfigure -- True when applied to an already-running daemon.
        """
        # Each directive pairs a handler with the master-config attribute
        # name(s) whose values it receives.
        master_config_directives = [
            (self.update_state_watcher_config, 'state_persistence'),
            (self.set_context_base, 'command_context'),
            (
                node.NodePoolRepository.update_from_config,
                'nodes',
                'node_pools',
                'ssh_options',
            ),
            (MesosClusterRepository.configure, 'mesos_options'),
            (self.configure_eventbus, 'eventbus_enabled'),
        ]
        master_config = config_container.get_master()
        apply_master_configuration(master_config_directives, master_config)

        self.state_watcher.watch(MesosClusterRepository)

        # TODO: unify NOTIFY_STATE_CHANGE and simplify this
        self.job_graph = JobGraph(config_container)
        factory = self.build_job_scheduler_factory(master_config, self.job_graph)
        self.apply_collection_config(
            config_container.get_jobs(),
            self.jobs,
            Job.NOTIFY_STATE_CHANGE,
            factory,
            reconfigure,
        )

    def apply_collection_config(self, config, collection, notify_type, *args):
        """Load config into a collection and watch the resulting items."""
        items = collection.load_from_config(config, *args)
        self.state_watcher.watch_all(items, notify_type)

    def build_job_scheduler_factory(self, master_config, job_graph):
        """Build a JobSchedulerFactory from master config and the job graph."""
        # Fall back to the working directory when no output dir is configured.
        output_stream_dir = master_config.output_stream_dir or self.working_dir
        action_runner = actioncommand.create_action_runner_factory_from_config(
            master_config.action_runner,
        )
        return JobSchedulerFactory(
            self.context,
            output_stream_dir,
            master_config.time_zone,
            action_runner,
            job_graph,
        )

    def update_state_watcher_config(self, state_config):
        """Update the StateChangeWatcher, and save all state if the state config
        changed.
        """
        if self.state_watcher.update_from_config(state_config):
            for job_scheduler in self.jobs:
                self.state_watcher.save_job(job_scheduler.get_job())

    def set_context_base(self, command_context):
        """Install the master config's command context as the base context."""
        self.context.base = command_context

    def configure_eventbus(self, enabled):
        """Start the event bus when enabled (idempotent), else shut it down."""
        if enabled:
            if not EventBus.instance:
                EventBus.create(f"{self.working_dir}/_events")
                EventBus.start()
        else:
            EventBus.shutdown()

    def get_job_collection(self):
        """Return the JobCollection owned by this MCP."""
        return self.jobs

    def get_config_manager(self):
        """Return the ConfigManager owned by this MCP."""
        return self.config

    def restore_state(self, action_runner):
        """Use the state manager to retrieve to persisted state and apply it
        to the configured Jobs.
        """
        log.info('restoring')
        states = self.state_watcher.restore(self.jobs.get_names())
        MesosClusterRepository.restore_state(states.get('mesos_state', {}))

        self.jobs.restore_state(states.get('job_state', {}), action_runner)
        self.state_watcher.save_metadata()

    def __str__(self):
        return "MCP"
Code example #9
0
File: job_collection_test.py — Project: Yelp/Tron
 def setup_collection(self):
     """Create a fresh, empty JobCollection fixture for each test."""
     self.collection = JobCollection()