def serve_client(client):
    """Send Zookeeper connection details (ports and bind host) to *client*."""
    dist = DistConfig()
    zk_port = dist.port('zookeeper')
    zk_rest_port = dist.port('zookeeper-rest')
    # If a bind interface is configured, resolve it to an IP for the client.
    iface = hookenv.config().get('network_interface')
    bind_host = get_ip_for_interface(iface) if iface else None
    client.send_connection(zk_port, zk_rest_port, bind_host)
Exemplo n.º 2
0
def serve_client(client):
    """Publish the Zookeeper client and REST ports, plus bind host, to *client*."""
    cfg = DistConfig()
    host = None
    nic = hookenv.config().get('network_interface')
    if nic:
        # A specific bind interface was requested; send its IP along.
        host = get_ip_for_interface(nic)
    client.send_connection(cfg.port('zookeeper'),
                           cfg.port('zookeeper-rest'),
                           host)
Exemplo n.º 3
0
def stop_spark():
    """Stop Livy, then Spark: close ports, stop services, clear state flags."""
    hookenv.status_set('maintenance', 'Stopping Livy REST server')
    livy = Livy(DistConfig(data=layer.options('livy')))
    livy.close_ports()
    livy.stop()
    remove_state('livy.started')

    hookenv.status_set('maintenance', 'Stopping Apache Spark')
    spark = Spark(DistConfig(data=layer.options('apache-spark')))
    spark.close_ports()
    spark.stop()
    remove_state('spark.started')
Exemplo n.º 4
0
def reconfigure_kafka_zk_instances_leaving(zkdeparting, zkavailable):
    """Reconfigure Kafka when Zookeeper units depart the relation.

    Pushes the remaining Zookeeper unit list into Kafka's configuration and
    restarts the service. On any failure, stops Kafka, clears its started
    state, and blocks until the Zookeeper relation is re-established.

    :param zkdeparting: relation object with departing ZK units to dismiss.
    :param zkavailable: relation object providing the current ZK unit list.
    """
    try:
        zk_units = zkavailable.get_zookeeper_units()
        hookenv.status_set('maintenance', 'Updating Kafka with departing Zookeeper instances ')
        kafka = Kafka(DistConfig())
        kafka.configure_kafka(zk_units)
        kafka.restart()
        zkdeparting.dismiss_departing()
        hookenv.status_set('active', 'Ready')
    except Exception as e:
        # A bare ``except:`` would also swallow SystemExit/KeyboardInterrupt
        # and hide the failure cause; catch Exception and log the error.
        hookenv.log("Relation with Zookeeper not established. Stopping Kafka.")
        hookenv.log('reconfigure failed: {}'.format(e))
        kafka = Kafka(DistConfig())
        kafka.stop()
        remove_state('kafka.started')
        hookenv.status_set('blocked', 'Waiting for connection to Zookeeper')
Exemplo n.º 5
0
def restart_zookeeper_if_config_changed():
    """Restart Zookeeper if zoo.cfg has changed.

    As peers come and go, zoo.cfg will be updated. When that file changes,
    restart the Zookeeper service and set an appropriate status message.
    """

    # Re-apply the bind address if the configured interface changed.
    iface = hookenv.config().get('network_interface')
    if data_changed("zookeeper.bind_address", iface):
        Zookeeper().update_bind_address()

    zoo_cfg = DistConfig().path('zookeeper_conf') / 'zoo.cfg'
    if not any_file_changed([zoo_cfg]):
        # Config untouched; just make sure the service is up and exposed.
        zk = Zookeeper()
        zk.start()
        zk.open_ports()
        return

    hookenv.status_set('maintenance',
                       'Server config changed: restarting Zookeeper')
    zk = Zookeeper()
    zk.stop()
    zk.start()
    count = int(zk.get_zk_count())
    # Quorum health hint: fewer than 3 units, or an even count, is suboptimal.
    if count < 3:
        note = ": less than 3 is suboptimal"
    elif count % 2 == 0:
        note = ": even number is suboptimal"
    else:
        note = ""
    hookenv.status_set('active',
                       'Ready (%d zk units%s)' % (count, note))
Exemplo n.º 6
0
def install_livy(hadoop):  # pylint: disable=w0613
    """Install the Livy REST server once its resources verify."""
    livy = Livy(DistConfig(data=layer.options('livy')))
    if not livy.verify_resources():
        return
    hookenv.status_set('maintenance', 'Installing Livy REST server')
    livy.install()
    set_state('livy.installed')
Exemplo n.º 7
0
def get_dist_config():
    """Return the DistConfig for dist.yaml, memoized on the function object."""
    # Deferred import: jujubigdata is not available until after bootstrap.
    from jujubigdata.utils import DistConfig

    cached = getattr(get_dist_config, 'value', None)
    if not cached:
        zeppelin_reqs = ['vendor', 'packages', 'dirs', 'ports']
        cached = DistConfig(filename='dist.yaml', required_keys=zeppelin_reqs)
        get_dist_config.value = cached
    return get_dist_config.value
Exemplo n.º 8
0
def install_kafka(*args):
    """Install Kafka and open its ports once resources verify."""
    kafka = Kafka(DistConfig())
    if not kafka.verify_resources():
        return
    hookenv.status_set('maintenance', 'Installing Kafka')
    kafka.install()
    kafka.open_ports()
    set_state('kafka.installed')
Exemplo n.º 9
0
def start_spark(hadoop):  # pylint: disable=w0613
    """Configure, start, and expose Apache Spark; mark it started."""
    hookenv.status_set('maintenance', 'Setting up Apache Spark')
    spark = Spark(DistConfig(data=layer.options('apache-spark')))
    spark.configure()
    spark.start()
    spark.open_ports()
    set_state('spark.started')
Exemplo n.º 10
0
 def get_dist_config(required_keys=None):
     required_keys = required_keys or [
         'vendor', 'hadoop_version', 'packages', 'groups', 'users', 'dirs',
         'ports'
     ]
     dist = DistConfig(filename='dist.yaml', required_keys=required_keys)
     opts = layer.options('apache-bigtop-base')
     for key in ('hadoop_version', ):
         if key in opts:
             dist.dist_config[key] = opts[key]
     for key in ('packages', 'groups'):
         if key in opts:
             dist.dist_config[key] = list(
                 set(dist.dist_config[key]) | set(opts[key]))
     for key in ('users', 'dirs', 'ports'):
         if key in opts:
             dist.dist_config[key].update(opts[key])
     return dist
Exemplo n.º 11
0
def install_spark(hadoop):  # pylint: disable=w0613
    """Install Spark, write its config, and stage the demo job."""
    spark = Spark(DistConfig(data=layer.options('apache-spark')))
    if not spark.verify_resources():
        return
    hookenv.status_set('maintenance', 'Installing Apache Spark')
    spark.install()
    spark.setup_spark_config()
    spark.install_demo()
    set_state('spark.installed')
Exemplo n.º 12
0
def start_livy(hadoop):  # pylint: disable=w0613
    """Configure and start the Livy REST server, then mark it ready."""
    hookenv.status_set('maintenance', 'Setting up Livy REST server')
    livy = Livy(DistConfig(data=layer.options('livy')))
    execution_mode = hookenv.config()['spark_execution_mode']
    livy.configure(execution_mode)
    livy.start()
    livy.open_ports()
    set_state('livy.started')
    hookenv.status_set('active', 'Ready')
Exemplo n.º 13
0
def get_dist_config(required_keys=None):
    """Build a DistConfig from dist.yaml, overlaying hadoop-base layer options.

    :param required_keys: keys that must exist in dist.yaml (sensible default).
    :returns: the merged DistConfig.
    """
    required_keys = required_keys or [
        'vendor', 'hadoop_version', 'packages',
        'groups', 'users', 'dirs', 'ports']
    dist = DistConfig(filename='dist.yaml',
                      required_keys=required_keys)
    opts = layer.options('hadoop-base')
    # Scalar override from the layer, if present.
    if 'hadoop_version' in opts:
        dist.dist_config['hadoop_version'] = opts['hadoop_version']
    # List-valued keys are unioned with the layer's entries.
    for list_key in ('packages', 'groups'):
        if list_key in opts:
            merged = set(dist.dist_config[list_key]) | set(opts[list_key])
            dist.dist_config[list_key] = list(merged)
    # Mapping-valued keys: layer entries win on conflicts.
    for map_key in ('users', 'dirs', 'ports'):
        if map_key in opts:
            dist.dist_config[map_key].update(opts[map_key])
    return dist
Exemplo n.º 14
0
def reconfigure_kafka_new_zk_instances(zkjoining, zkavailable):
    """Reconfigure and restart Kafka when new Zookeeper units join.

    :param zkjoining: relation object with joining ZK units to dismiss.
    :param zkavailable: relation object providing the current ZK unit list.
    """
    try:
        zk_units = zkavailable.get_zookeeper_units()
        hookenv.status_set('maintenance', 'Updating Kafka with new Zookeeper instances')
        kafka = Kafka(DistConfig())
        kafka.configure_kafka(zk_units)
        kafka.restart()
        zkjoining.dismiss_joining()
        hookenv.status_set('active', 'Ready')
    except Exception as e:
        # A bare ``except:`` would also swallow SystemExit/KeyboardInterrupt
        # and hide the failure cause; catch Exception and log the error.
        hookenv.log("Relation with Zookeeper not established")
        hookenv.log('reconfigure failed: {}'.format(e))
Exemplo n.º 15
0
def reconfigure_spark(hadoop):  # pylint: disable=w0613
    """Re-apply charm configuration to Spark and Livy when it has changed."""
    config = hookenv.config()
    if not data_changed('configuration', config):
        return

    hookenv.status_set('maintenance',
                       'Configuring Apache Spark and Livy REST server')
    spark = Spark(DistConfig(data=layer.options('apache-spark')))
    livy = Livy(DistConfig(data=layer.options('livy')))

    # Stop Livy before Spark; reconfigure both; start in the reverse order.
    livy.stop()
    spark.stop()
    spark.configure()
    livy.configure(hookenv.config()['spark_execution_mode'])
    spark.start()
    livy.start()
    hookenv.status_set('active', 'Ready')
Exemplo n.º 16
0
def configure_kafka(zkjoining, zkavailable):
    """Initial Kafka setup once the Zookeeper relation provides units.

    Configures Kafka with the available ZK units, starts it, and marks the
    charm state started; logs and bails if the relation data is unusable.

    :param zkjoining: relation object with joining ZK units to dismiss.
    :param zkavailable: relation object providing the current ZK unit list.
    """
    try:
        zk_units = zkavailable.get_zookeeper_units()
        hookenv.status_set('maintenance', 'Setting up Kafka')
        kafka = Kafka(DistConfig())
        kafka.configure_kafka(zk_units)
        kafka.start()
        zkjoining.dismiss_joining()
        hookenv.status_set('active', 'Ready')
        set_state('kafka.started')
    except Exception as e:
        # A bare ``except:`` would also swallow SystemExit/KeyboardInterrupt
        # and hide the failure cause; catch Exception and log the error.
        hookenv.log("Relation with Zookeeper not established")
        hookenv.log('configure failed: {}'.format(e))
Exemplo n.º 17
0
def serve_client(client, zookeeper):
    """Send Kafka's port, a host, and the Zookeeper list to a client."""
    port = DistConfig().port('kafka')
    host = hookenv.config().get('hostname')
    if not host:
        # When bound to a specific interface without a hostname that
        # resolves to it, fall back to sending the interface's IP address.
        nic = hookenv.config().get('network_interface')
        if nic:
            host = get_ip_for_interface(nic)

    client.send_connection(port, host=host)
    client.send_zookeepers(zookeeper.zookeepers())
    hookenv.log('Sent Kafka configuration to client')
Exemplo n.º 18
0
def update_config(zk):
    """Configure ready zookeepers and restart kafka if needed.

    Also restart if network_interface has changed.

    As zks come and go, server.properties will be updated. When that file
    changes, restart Kafka and set appropriate status messages.
    """
    hookenv.log('Checking Zookeeper configuration')
    kafka = Kafka()
    kafka.configure_kafka(zk.zookeepers(),
                          hookenv.config().get('network_interface'))

    server_cfg = DistConfig().path('kafka_conf') / 'server.properties'
    if not any_file_changed([server_cfg]):
        return
    hookenv.status_set('maintenance', 'Server config changed: restarting Kafka')
    hookenv.log('Server config changed: restarting Kafka')
    kafka.restart()
    hookenv.status_set('active', 'Ready')
Exemplo n.º 19
0
def serve_client(client, zookeeper):
    """Publish Kafka's port and the Zookeeper ensemble to *client*."""
    client.send_port(DistConfig().port('kafka'))
    client.send_zookeepers(zookeeper.zookeepers())
    hookenv.log('Sent Kafka configuration to client')
Exemplo n.º 20
0
    def __init__(self, dist_config=None):
        """Initialize with *dist_config*, defaulting to apache-bigtop-base options."""
        if not dist_config:
            dist_config = DistConfig(data=layer.options('apache-bigtop-base'))
        self._dist_config = dist_config

        # Roles this unit plays; host map is filled in later.
        self._roles = ['zookeeper-server', 'zookeeper-client']
        self._hosts = {}
def get_dist_config():
    """Return a DistConfig built from the hadoop-client layer options."""
    opts = layer.options('hadoop-client')
    return DistConfig(data=opts)
Exemplo n.º 22
0
def get_dist_config(keys):
    """Return a DistConfig for dist.yaml, memoized on the function object.

    :param keys: required keys used only when the config is first built.
    """
    # Deferred import: jujubigdata is only available after bootstrap.
    from jujubigdata.utils import DistConfig

    cached = getattr(get_dist_config, 'value', None)
    if not cached:
        cached = DistConfig(filename='dist.yaml', required_keys=keys)
        get_dist_config.value = cached
    return get_dist_config.value
Exemplo n.º 23
0
def client_present(client):
    """Tell a joined client Livy's REST port and that Spark has started."""
    livy_port = DistConfig(data=layer.options("livy")).port("livy")
    client.send_rest_port(livy_port)
    client.set_spark_started()
Exemplo n.º 24
0
def client_present(client):
    """Notify the client of the Livy REST port and Spark's started state."""
    livy_opts = layer.options('livy')
    dist = DistConfig(data=livy_opts)
    client.send_rest_port(dist.port('livy'))
    client.set_spark_started()
Exemplo n.º 25
0
def serve_client(kafka_client, zookeeper):
    """Send the Kafka port and Zookeeper units to the requesting client."""
    kafka_client.send_port(DistConfig().port('kafka'))
    kafka_client.send_zookeepers(zookeeper.get_zookeeper_units())
    hookenv.log('Sending configuration to client')
Exemplo n.º 26
0
def serve_client(client):
    """Send the Zookeeper client and REST ports to *client*."""
    dist = DistConfig()
    client.send_port(dist.port('zookeeper'),
                     dist.port('zookeeper-rest'))