def run_daemon_loop(config: Config, process_holder: KafkaProcess, cmd_helper: CmdHelper, restart_on_init: bool):
    """Wire up all daemon components and hand control to the controller loop.

    Builds the Kafka property set, environment provider and broker manager,
    registers the standard health checks plus any configured optional features,
    and then blocks inside the controller's main loop until it terminates.
    """
    _LOG.info("Using configuration: {}".format(config))
    properties = KafkaProperties(config.kafka_settings_template,
                                 '{}/config/server.properties'.format(config.kafka_dir))
    provider = EnvProvider.create_env_provider(config)
    address_provider = provider.get_address_provider()
    rack = provider.get_rack()
    if rack:
        # Only advertise a rack when the environment actually reports one.
        properties.set_property('broker.rack', rack)
    timeout = StartupTimeout.build(config.timeout)
    _LOG.info("Loading exhibitor configuration")
    with load_exhibitor_proxy(address_provider, config.zk_prefix) as exhibitor:
        _LOG.info("Loading broker_id policy")
        id_policy = provider.create_broker_id_manager(exhibitor, properties)
        _LOG.info("Building broker manager")
        broker = BrokerManager(process_holder, exhibitor, id_policy, properties, timeout)
        _LOG.info("Creating controller")
        controller = Controller(broker, exhibitor, provider)
        # Baseline checks that every deployment gets, regardless of features.
        for check in (CheckBrokerStopped(broker, exhibitor),
                      RemoteCommandExecutorCheck(exhibitor, broker, config.health_port),
                      GenerateDataSizeStatistics(exhibitor, broker, cmd_helper,
                                                 properties.get_property("log.dirs").split(","))):
            controller.add_check(check)
        apply_features(config.health_port, config.features, controller, exhibitor, broker, properties, provider)
        _LOG.info('Starting main controller loop')
        # On a fresh init the broker is restarted once; the lambda disables leadership checks.
        initial_change = RestartBrokerChange(exhibitor, broker, lambda: False) if restart_on_init else None
        controller.loop(initial_change)
def apply_features(api_port, features: dict, controller: Controller, buku_proxy: BukuExhibitor,
                   broker: BrokerManager, kafka_properties: KafkaProperties,
                   env_provider: EnvProvider) -> None:
    """Register the optional checks/settings requested via configuration.

    :param api_port: health/API port, forwarded to checks that expose endpoints
    :param features: mapping of feature name -> feature-specific config dict
    :param controller: controller the feature checks are attached to
    :param buku_proxy: exhibitor/zookeeper facade used by the checks
    :param broker: broker manager the checks operate on
    :param kafka_properties: mutable kafka server properties
    :param env_provider: environment abstraction (ids, addresses)
    :return: ``None`` — the function works purely by side effects.
        (Fixed: the previous ``-> list`` annotation was wrong; nothing is
        ever returned.)

    Unknown feature names are logged and skipped rather than failing startup.
    """
    for feature, config in features.items():
        if feature == 'restart_on_exhibitor':
            controller.add_check(CheckExhibitorAddressChanged(buku_proxy, broker))
        elif feature == 'rebalance_on_start':
            controller.add_check(RebalanceOnStartCheck(buku_proxy, broker))
        elif feature == 'rebalance_on_brokers_change':
            controller.add_check(RebalanceOnBrokerListCheck(buku_proxy, broker))
        elif feature == 'balance_data_size':
            # Config value is in MB; the check expects KB.
            controller.add_check(
                CheckBrokersDiskImbalance(buku_proxy, broker, config["diff_threshold_mb"] * 1024, api_port))
        elif feature == 'graceful_terminate':
            register_terminate_on_interrupt(controller, broker)
        elif feature == 'use_ip_address':
            kafka_properties.set_property('advertised.host.name', env_provider.get_id())
        else:
            _LOG.error('Using of unsupported feature "{}", skipping it'.format(feature))
def run_daemon_loop(config: Config, process_holder: KafkaProcess, cmd_helper: CmdHelper, restart_on_init: bool):
    """Assemble the daemon's components and run the controller's main loop.

    :param config: parsed daemon configuration
    :param process_holder: wrapper around the managed Kafka process
    :param cmd_helper: shell/command helper used by the statistics check
    :param restart_on_init: when True, a broker restart is queued as the
        controller loop's initial change
    """
    _LOG.info("Using configuration: {}".format(config))
    kafka_props = KafkaProperties(
        config.kafka_settings_template,
        '{}/config/server.properties'.format(config.kafka_dir))
    env_provider = EnvProvider.create_env_provider(config)
    address_provider = env_provider.get_address_provider()
    rack = env_provider.get_rack()
    if rack:
        # Only set broker.rack when the environment reports a rack.
        kafka_props.set_property('broker.rack', rack)
    startup_timeout = StartupTimeout.build(config.timeout)
    _LOG.info("Loading exhibitor configuration")
    with load_exhibitor_proxy(address_provider, config.zk_prefix) as zookeeper:
        _LOG.info("Loading broker_id policy")
        broker_id_manager = env_provider.create_broker_id_manager(
            zookeeper, kafka_props)
        _LOG.info("Building broker manager")
        broker = BrokerManager(process_holder, zookeeper, broker_id_manager,
                               kafka_props, startup_timeout)
        _LOG.info("Creating controller")
        controller = Controller(broker, zookeeper, env_provider)
        # Baseline checks registered for every deployment.
        controller.add_check(CheckBrokerStopped(broker, zookeeper))
        controller.add_check(
            RemoteCommandExecutorCheck(zookeeper, broker, config.health_port))
        controller.add_check(
            GenerateDataSizeStatistics(
                zookeeper, broker, cmd_helper,
                kafka_props.get_property("log.dirs").split(",")))
        # Optional feature checks are driven by configuration.
        apply_features(config.health_port, config.features, controller, zookeeper,
                       broker, kafka_props, env_provider)
        _LOG.info('Starting main controller loop')
        # The lambda disables the restart change's extra condition.
        controller.loop(
            RestartBrokerChange(zookeeper, broker, lambda: False
                                ) if restart_on_init else None)
def apply_features(api_port, features: dict, controller: Controller, buku_proxy: BukuExhibitor,
                   broker: BrokerManager, kafka_properties: KafkaProperties,
                   env_provider: EnvProvider) -> None:
    """Attach configured optional features to the running controller.

    :param api_port: health/API port forwarded to checks that need it
    :param features: mapping of feature name -> feature-specific config
    :param controller: controller receiving the feature checks
    :param buku_proxy: exhibitor/zookeeper facade
    :param broker: broker manager the checks act upon
    :param kafka_properties: mutable kafka server properties
    :param env_provider: environment abstraction
    :return: ``None`` — operates purely by side effects. (The previous
        ``-> list`` annotation was incorrect; no value is ever returned.)

    Unrecognised feature names are logged and ignored so startup proceeds.
    """
    for feature, config in features.items():
        if feature == 'restart_on_exhibitor':
            controller.add_check(CheckExhibitorAddressChanged(buku_proxy, broker))
        elif feature == 'rebalance_on_start':
            controller.add_check(RebalanceOnStartCheck(buku_proxy, broker))
        elif feature == 'rebalance_on_brokers_change':
            controller.add_check(RebalanceOnBrokerListCheck(buku_proxy, broker))
        elif feature == 'balance_data_size':
            # Threshold is configured in MB; the check consumes KB.
            controller.add_check(
                CheckBrokersDiskImbalance(buku_proxy, broker, config["diff_threshold_mb"] * 1024, api_port))
        elif feature == 'graceful_terminate':
            register_terminate_on_interrupt(controller, broker)
        elif feature == 'use_ip_address':
            kafka_properties.set_property('advertised.host.name', env_provider.get_id())
        else:
            _LOG.error('Using of unsupported feature "{}", skipping it'.format(feature))
def test_multiple_changes_are_executed_one_by_one():
    """Changes issued by one check must run strictly sequentially, each to completion."""
    remaining = [3, 3, 3]

    class _StubChange(Change):
        # A change that needs three run() calls (per index) before completing.
        def __init__(self, idx):
            self.idx = idx

        def get_name(self):
            return 'fake'

        def can_run(self, current_actions):
            return True

        def run(self, current_actions):
            remaining[self.idx] -= 1
            return remaining[self.idx] > 0

    class _StubCheck(Check):
        # Issues exactly three changes, one per call, then goes quiet.
        def __init__(self):
            super().__init__(0)
            self.changes_limit = 3
            self.changes_issued = 0

        def check(self):
            if self.changes_issued < self.changes_limit:
                self.changes_issued += 1
                return _StubChange(self.changes_issued - 1)

    active = {}
    zk = MagicMock()
    zk.get_running_changes.return_value = active
    zk.register_change = lambda change_name, owner: active.update({change_name: owner})
    zk.unregister_change = lambda change_name: active.pop(change_name)

    controller = Controller(MagicMock(), zk, MagicMock())
    controller.provider_id = 'fake'
    controller.add_check(_StubCheck())

    assert remaining == [3, 3, 3]
    # First step only discovers the change; nothing has run yet.
    controller.make_step()
    assert not active
    assert remaining == [3, 3, 3]
    # Second step registers and starts executing the first change.
    controller.make_step()
    assert active
    assert remaining == [2, 3, 3]
    for snapshot in ([1, 3, 3], [0, 3, 3], [0, 2, 3], [0, 1, 3], [0, 0, 3], [0, 0, 2]):
        controller.make_step()
        assert remaining == snapshot
    controller.make_step()
    assert remaining == [0, 0, 1]
    assert active
    # Last change completes and is unregistered.
    controller.make_step()
    assert remaining == [0, 0, 0]
    assert not active
    # Further steps are no-ops once everything finished.
    controller.make_step()
    assert remaining == [0, 0, 0]
    assert not active
def delete_from_controller_queue(name: str, controller: Controller):
    """Cancel every queued change registered under *name*.

    Returns a JSON-serializable payload with the number of cancelled changes.
    """
    cancelled_total = controller.cancel_changes(name)
    return {'count': cancelled_total}
def load_controller_queue(controller: Controller):
    """Return the controller's currently queued changes, as enumerated by the controller."""
    return controller.enumerate_changes()
def delete_from_controller_queue(name: str, controller: Controller):
    # Cancel all pending changes registered under *name*; report how many were
    # dropped in a JSON-serializable payload.
    return {'count': controller.cancel_changes(name)}