def test_broker_checks_death(self):
    exhibitor = MagicMock()
    states = [2, 2]

    def _load_states():
        for idx in range(0, len(states)):
            states[idx] -= 1
        return [
            ('t1', 0, {'leader': states[0], 'isr': [1, 3] if states[0] >= 1 else [3]}),
            ('t2', 0, {'leader': states[1], 'isr': [1, 3] if states[1] >= 1 else [3]})
        ]

    exhibitor.load_partition_states = _load_states

    id_manager = MagicMock()
    id_manager.get_broker_id = lambda: '1'

    kafka_props = build_test_properties()
    kafka_props.set_property('unclean.leader.election.enable', 'true')

    manager = BrokerManager(FakeProcessManager(), exhibitor, id_manager, kafka_props,
                            StartupTimeout.build({'type': 'linear'}))

    assert not manager.has_leadership()

    kafka_props.set_property('unclean.leader.election.enable', 'false')
    assert manager.has_leadership()
    assert not manager.has_leadership()

def run_daemon_loop(config: Config, process_holder: KafkaProcess, cmd_helper: CmdHelper, restart_on_init: bool):
    _LOG.info("Using configuration: {}".format(config))
    kafka_props = KafkaProperties(config.kafka_settings_template,
                                  '{}/config/server.properties'.format(config.kafka_dir))

    env_provider = EnvProvider.create_env_provider(config)
    address_provider = env_provider.get_address_provider()
    rack = env_provider.get_rack()
    if rack:
        kafka_props.set_property('broker.rack', rack)

    startup_timeout = StartupTimeout.build(config.timeout)

    _LOG.info("Loading exhibitor configuration")
    with load_exhibitor_proxy(address_provider, config.zk_prefix) as zookeeper:
        _LOG.info("Loading broker_id policy")
        broker_id_manager = env_provider.create_broker_id_manager(zookeeper, kafka_props)

        _LOG.info("Building broker manager")
        broker = BrokerManager(process_holder, zookeeper, broker_id_manager, kafka_props, startup_timeout)

        _LOG.info("Creating controller")
        controller = Controller(broker, zookeeper, env_provider)

        controller.add_check(CheckBrokerStopped(broker, zookeeper))
        controller.add_check(RemoteCommandExecutorCheck(zookeeper, broker, config.health_port))
        controller.add_check(GenerateDataSizeStatistics(zookeeper, broker, cmd_helper,
                                                        kafka_props.get_property("log.dirs").split(",")))
        apply_features(config.health_port, config.features, controller, zookeeper, broker, kafka_props, env_provider)

        _LOG.info('Starting main controller loop')
        controller.loop(RestartBrokerChange(zookeeper, broker, lambda: False) if restart_on_init else None)

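# A minimal sketch (an assumption, not taken from the source) of the 'timeout'
# section consumed above via StartupTimeout.build(config.timeout). The keys and
# the string-typed values mirror the dicts used in the tests below; the numbers
# shown are the linear defaults those tests expect.
example_timeout_config = {
    'type': 'linear',   # or 'progressive'
    'initial': '300',   # starting timeout value
    'step': '60',       # added to the timeout after each on_timeout_fail()
}
startup_timeout = StartupTimeout.build(example_timeout_config)
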
def test_linear(self):
    o = StartupTimeout.build({'type': 'linear', 'initial': '10', 'step': 2})
    TestStartupTimeout._verify(o, 10., 12.)

def test_progressive(self):
    o = StartupTimeout.build({'type': 'progressive', 'initial': '16', 'step': '0.25'})
    TestStartupTimeout._verify(o, 16., 20.)

def _verify(o: StartupTimeout, border_value: float, border_value_after_fail: float):
    print(o)
    assert not o.is_timed_out(border_value)
    assert o.is_timed_out(border_value + 1)
    o.on_timeout_fail()
    assert not o.is_timed_out(border_value_after_fail)
    assert o.is_timed_out(border_value_after_fail + 1)

def test_linear(self):
    o = StartupTimeout.build({'type': 'linear', 'initial': '10', 'step': 2})
    TestDataSizeStats._verify(o, 10., 2.)
    o.on_timeout_fail()
    TestDataSizeStats._verify(o, 12., 2.)
    o.on_timeout_fail()
    TestDataSizeStats._verify(o, 14., 2.)

def test_progressive(self):
    o = StartupTimeout.build({'type': 'progressive', 'initial': '16', 'step': '0.25'})
    TestDataSizeStats._verify(o, 16., 4.)
    o.on_timeout_fail()
    TestDataSizeStats._verify(o, 20., 5.)
    o.on_timeout_fail()
    TestDataSizeStats._verify(o, 25., 6.25)

def _prepare_for_start_fail(broker_ids, leader, isr):
    exhibitor = MagicMock()
    exhibitor.get_broker_ids.return_value = broker_ids
    exhibitor.load_partition_states.return_value = [
        ('t0', 0, {'leader': int(leader), 'isr': [int(i) for i in isr]})]

    id_manager = MagicMock()
    id_manager.get_broker_id = lambda: '1'

    kafka_props = build_test_properties()
    broker = BrokerManager(FakeProcessManager(), exhibitor, id_manager, kafka_props,
                           StartupTimeout.build({'type': 'linear'}))

    kafka_props.set_property('unclean.leader.election.enable', 'false')
    return kafka_props, broker

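# A hypothetical use of the helper above (the test name, argument values and
# expectation are illustrative assumptions, not taken from the source). With
# unclean leader election disabled and partition 't0' led by broker '2', the
# local broker '1' holds no leadership, mirroring the final assertion in
# test_broker_checks_death.
def test_no_leadership_when_not_leader_sketch():
    kafka_props, broker = _prepare_for_start_fail(['1', '2', '3'], '2', ['2', '3'])
    assert not broker.has_leadership()
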
def _verify(o: StartupTimeout, value: float, step: float):
    print(o)
    assert o.get_timeout() == value
    assert o.get_step() == step

def test_progressive_defaults(self):
    o = StartupTimeout.build({'type': 'progressive'})
    TestDataSizeStats._verify(o, 300., 150.)

def test_progressive_defaults(self):
    o = StartupTimeout.build({'type': 'progressive'})
    TestStartupTimeout._verify(o, 300., 450.)

def test_linear_defaults(self):
    o = StartupTimeout.build({'type': 'linear'})
    TestStartupTimeout._verify(o, 300., 360.)

def test_linear_defaults(self):
    o = StartupTimeout.build({'type': 'linear'})
    TestDataSizeStats._verify(o, 300., 60.)

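# A minimal sketch (an assumption, not the project's implementation) of timeout
# behaviour consistent with the tests above: get_timeout() starts at 'initial'
# (default 300), is_timed_out(t) is true only once t exceeds the current
# timeout, and on_timeout_fail() grows the timeout by get_step(). The linear
# variant uses a fixed step (default 60); the progressive variant uses a
# fraction of the current timeout (default 0.5, so 300 grows to 450).
# StartupTimeout.build() is assumed to parse the string values and dispatch on
# the 'type' key; the class names below are illustrative only.
class TimeoutSketch:
    def __init__(self, initial: float):
        self.timeout = initial

    def get_timeout(self) -> float:
        return self.timeout

    def is_timed_out(self, seconds: float) -> bool:
        return seconds > self.timeout

    def on_timeout_fail(self):
        self.timeout += self.get_step()

    def get_step(self) -> float:
        raise NotImplementedError


class LinearTimeoutSketch(TimeoutSketch):
    def __init__(self, initial: float = 300., step: float = 60.):
        super().__init__(initial)
        self.step = step

    def get_step(self) -> float:
        return self.step


class ProgressiveTimeoutSketch(TimeoutSketch):
    def __init__(self, initial: float = 300., scale: float = 0.5):
        super().__init__(initial)
        self.scale = scale

    def get_step(self) -> float:
        return self.timeout * self.scale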