def serve_client(client):
    config = DistConfig()
    port = config.port('zookeeper')
    rest_port = config.port('zookeeper-rest')
    host = None
    network_interface = hookenv.config().get('network_interface')
    if network_interface:
        host = get_ip_for_interface(network_interface)
    client.send_connection(port, rest_port, host)
def stop_spark():
    hookenv.status_set('maintenance', 'Stopping Livy REST server')
    dist = DistConfig(data=layer.options('livy'))
    livy = Livy(dist)
    livy.close_ports()
    livy.stop()
    remove_state('livy.started')

    hookenv.status_set('maintenance', 'Stopping Apache Spark')
    dist = DistConfig(data=layer.options('apache-spark'))
    spark = Spark(dist)
    spark.close_ports()
    spark.stop()
    remove_state('spark.started')
def reconfigure_kafka_zk_instances_leaving(zkdeparting, zkavailable):
    try:
        zk_units = zkavailable.get_zookeeper_units()
        hookenv.status_set('maintenance',
                           'Updating Kafka with departing Zookeeper instances')
        kafka = Kafka(DistConfig())
        kafka.configure_kafka(zk_units)
        kafka.restart()
        zkdeparting.dismiss_departing()
        hookenv.status_set('active', 'Ready')
    except Exception:
        # A bare except would also swallow SystemExit/KeyboardInterrupt.
        hookenv.log('Relation with Zookeeper not established. Stopping Kafka.')
        kafka = Kafka(DistConfig())
        kafka.stop()
        remove_state('kafka.started')
        hookenv.status_set('blocked', 'Waiting for connection to Zookeeper')
def restart_zookeeper_if_config_changed():
    """Restart Zookeeper if zoo.cfg has changed.

    As peers come and go, zoo.cfg will be updated. When that file changes,
    restart the Zookeeper service and set an appropriate status message.
    """
    # Possibly update the bind address.
    network_interface = hookenv.config().get('network_interface')
    if data_changed('zookeeper.bind_address', network_interface):
        zk = Zookeeper()
        zk.update_bind_address()

    zoo_cfg = DistConfig().path('zookeeper_conf') / 'zoo.cfg'
    if any_file_changed([zoo_cfg]):
        hookenv.status_set('maintenance',
                           'Server config changed: restarting Zookeeper')
        zk = Zookeeper()
        zk.stop()
        zk.start()
        zk_count = int(zk.get_zk_count())
        extra_status = ''
        if zk_count < 3:
            extra_status = ': less than 3 is suboptimal'
        elif zk_count % 2 == 0:
            extra_status = ': even number is suboptimal'
        hookenv.status_set('active',
                           'Ready (%d zk units%s)' % (zk_count, extra_status))
    else:
        # Make sure Zookeeper is running in any case.
        zk = Zookeeper()
        zk.start()
        zk.open_ports()
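# The restart guard above leans on the change-detection helpers that ship
# with charms.reactive. A minimal, self-contained sketch of the same
# pattern follows; the key name, file path, and `needs_restart` helper are
# hypothetical and used only for illustration.
from charms.reactive.helpers import any_file_changed, data_changed


def needs_restart(config):
    # data_changed() stores a hash of the value under the given key and
    # returns True when the value differs from the last recorded hash.
    option_changed = data_changed('example.option', config.get('option'))
    # any_file_changed() does the same with the content of the listed files.
    file_changed = any_file_changed(['/etc/example/example.cfg'])
    return option_changed or file_changed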
def install_livy(hadoop):  # pylint: disable=w0613
    dist = DistConfig(data=layer.options('livy'))
    livy = Livy(dist)
    if livy.verify_resources():
        hookenv.status_set('maintenance', 'Installing Livy REST server')
        livy.install()
        set_state('livy.installed')
def get_dist_config():
    # Not available until after bootstrap.
    from jujubigdata.utils import DistConfig

    if not getattr(get_dist_config, 'value', None):
        zeppelin_reqs = ['vendor', 'packages', 'dirs', 'ports']
        get_dist_config.value = DistConfig(filename='dist.yaml',
                                           required_keys=zeppelin_reqs)
    return get_dist_config.value
def install_kafka(*args):
    kafka = Kafka(DistConfig())
    if kafka.verify_resources():
        hookenv.status_set('maintenance', 'Installing Kafka')
        kafka.install()
        kafka.open_ports()
        set_state('kafka.installed')
def start_spark(hadoop):  # pylint: disable=w0613
    hookenv.status_set('maintenance', 'Setting up Apache Spark')
    dist = DistConfig(data=layer.options('apache-spark'))
    spark = Spark(dist)
    spark.configure()
    spark.start()
    spark.open_ports()
    set_state('spark.started')
def get_dist_config(required_keys=None):
    required_keys = required_keys or [
        'vendor', 'hadoop_version', 'packages',
        'groups', 'users', 'dirs', 'ports']
    dist = DistConfig(filename='dist.yaml', required_keys=required_keys)

    # Merge layer options into the dist config: scalars are overridden,
    # list-valued keys are unioned, and dict-valued keys are updated.
    opts = layer.options('apache-bigtop-base')
    for key in ('hadoop_version',):
        if key in opts:
            dist.dist_config[key] = opts[key]
    for key in ('packages', 'groups'):
        if key in opts:
            dist.dist_config[key] = list(
                set(dist.dist_config[key]) | set(opts[key]))
    for key in ('users', 'dirs', 'ports'):
        if key in opts:
            dist.dist_config[key].update(opts[key])
    return dist
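# Hypothetical illustration of the merge semantics above (the package and
# port names are made up; real values come from dist.yaml and the layer's
# options):
#
#   dist.yaml:      packages: [openjdk-7-jdk]   ports: {namenode: 8020}
#   layer options:  packages: [rsync]           ports: {webhdfs: 50070}
#   merged result:  packages: [openjdk-7-jdk, rsync]
#                   ports:    {namenode: 8020, webhdfs: 50070}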
def install_spark(hadoop):  # pylint: disable=w0613
    dist = DistConfig(data=layer.options('apache-spark'))
    spark = Spark(dist)
    if spark.verify_resources():
        hookenv.status_set('maintenance', 'Installing Apache Spark')
        spark.install()
        spark.setup_spark_config()
        spark.install_demo()
        set_state('spark.installed')
def start_livy(hadoop):  # pylint: disable=w0613
    hookenv.status_set('maintenance', 'Setting up Livy REST server')
    dist = DistConfig(data=layer.options('livy'))
    livy = Livy(dist)
    mode = hookenv.config()['spark_execution_mode']
    livy.configure(mode)
    livy.start()
    livy.open_ports()
    set_state('livy.started')
    hookenv.status_set('active', 'Ready')
def get_dist_config(required_keys=None):
    required_keys = required_keys or [
        'vendor', 'hadoop_version', 'packages',
        'groups', 'users', 'dirs', 'ports']
    dist = DistConfig(filename='dist.yaml', required_keys=required_keys)

    opts = layer.options('hadoop-base')
    for key in ('hadoop_version',):
        if key in opts:
            dist.dist_config[key] = opts[key]
    for key in ('packages', 'groups'):
        if key in opts:
            dist.dist_config[key] = list(
                set(dist.dist_config[key]) | set(opts[key]))
    for key in ('users', 'dirs', 'ports'):
        if key in opts:
            dist.dist_config[key].update(opts[key])
    return dist
def reconfigure_kafka_new_zk_instances(zkjoining, zkavailable):
    try:
        zk_units = zkavailable.get_zookeeper_units()
        hookenv.status_set('maintenance',
                           'Updating Kafka with new Zookeeper instances')
        kafka = Kafka(DistConfig())
        kafka.configure_kafka(zk_units)
        kafka.restart()
        zkjoining.dismiss_joining()
        hookenv.status_set('active', 'Ready')
    except Exception:
        hookenv.log('Relation with Zookeeper not established')
def reconfigure_spark(hadoop):  # pylint: disable=w0613
    config = hookenv.config()
    if not data_changed('configuration', config):
        return
    hookenv.status_set('maintenance',
                       'Configuring Apache Spark and Livy REST server')
    dist = DistConfig(data=layer.options('apache-spark'))
    spark = Spark(dist)
    dist = DistConfig(data=layer.options('livy'))
    livy = Livy(dist)
    livy.stop()
    spark.stop()
    spark.configure()
    mode = config['spark_execution_mode']
    livy.configure(mode)
    spark.start()
    livy.start()
    hookenv.status_set('active', 'Ready')
def configure_kafka(zkjoining, zkavailable):
    try:
        zk_units = zkavailable.get_zookeeper_units()
        hookenv.status_set('maintenance', 'Setting up Kafka')
        kafka = Kafka(DistConfig())
        kafka.configure_kafka(zk_units)
        kafka.start()
        zkjoining.dismiss_joining()
        hookenv.status_set('active', 'Ready')
        set_state('kafka.started')
    except Exception:
        hookenv.log('Relation with Zookeeper not established')
def serve_client(client, zookeeper):
    kafka_port = DistConfig().port('kafka')
    host = hookenv.config().get('hostname')
    if not host:
        # If we've attempted to bind to a specific ip address, and we
        # haven't set a hostname that will resolve to that ip, just
        # send the ip address along.
        network_interface = hookenv.config().get('network_interface')
        if network_interface:
            host = get_ip_for_interface(network_interface)

    client.send_connection(kafka_port, host=host)
    client.send_zookeepers(zookeeper.zookeepers())
    hookenv.log('Sent Kafka configuration to client')
def update_config(zk):
    """Configure ready Zookeepers and restart Kafka if needed.

    Also restart if network_interface has changed.

    As zks come and go, server.properties will be updated. When that file
    changes, restart Kafka and set appropriate status messages.
    """
    hookenv.log('Checking Zookeeper configuration')
    kafka = Kafka()
    zks = zk.zookeepers()
    network_interface = hookenv.config().get('network_interface')
    kafka.configure_kafka(zks, network_interface)
    server_cfg = DistConfig().path('kafka_conf') / 'server.properties'
    if any_file_changed([server_cfg]):
        hookenv.status_set('maintenance',
                           'Server config changed: restarting Kafka')
        hookenv.log('Server config changed: restarting Kafka')
        kafka.restart()
    hookenv.status_set('active', 'Ready')
def serve_client(client, zookeeper):
    kafka_port = DistConfig().port('kafka')
    client.send_port(kafka_port)
    client.send_zookeepers(zookeeper.zookeepers())
    hookenv.log('Sent Kafka configuration to client')
def __init__(self, dist_config=None):
    self._dist_config = dist_config or DistConfig(
        data=layer.options('apache-bigtop-base'))
    self._roles = ['zookeeper-server', 'zookeeper-client']
    self._hosts = {}
def get_dist_config():
    return DistConfig(data=layer.options('hadoop-client'))
def get_dist_config(keys):
    # Deferred import: jujubigdata is not available until after bootstrap.
    from jujubigdata.utils import DistConfig

    if not getattr(get_dist_config, 'value', None):
        get_dist_config.value = DistConfig(filename='dist.yaml',
                                           required_keys=keys)
    return get_dist_config.value
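# Usage sketch for the memoized helper above; the key list mirrors the
# zeppelin_reqs list used earlier. Repeated calls return the same
# DistConfig instance cached on the function object.
dist = get_dist_config(['vendor', 'packages', 'dirs', 'ports'])
assert dist is get_dist_config(['vendor', 'packages', 'dirs', 'ports'])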
def client_present(client): dist = DistConfig(data=layer.options("livy")) rest_port = dist.port("livy") client.send_rest_port(rest_port) client.set_spark_started()
def client_present(client):
    dist = DistConfig(data=layer.options('livy'))
    rest_port = dist.port('livy')
    client.send_rest_port(rest_port)
    client.set_spark_started()
def serve_client(kafka_client, zookeeper):
    kafka_port = DistConfig().port('kafka')
    kafka_client.send_port(kafka_port)
    kafka_client.send_zookeepers(zookeeper.get_zookeeper_units())
    hookenv.log('Sending configuration to client')
def serve_client(client):
    config = DistConfig()
    port = config.port('zookeeper')
    rest_port = config.port('zookeeper-rest')
    client.send_port(port, rest_port)