Example #1
0
 def _url(self, *parts):
     """Build a Zeppelin REST API URL by joining *parts* onto the base.

     The base URL is derived from the 'zeppelin' port in the layer's
     dist config; each part is merged with urljoin, so relative parts
     replace the previous path segment.
     """
     dist = utils.DistConfig(
         data=layer.options('apache-bigtop-base'))
     endpoint = 'http://localhost:{}/api/'.format(dist.port('zeppelin'))
     for segment in parts:
         endpoint = urljoin(endpoint, segment)
     return endpoint
Example #2
0
 def __init__(self, dist_config=None):
     """Record the dist config and the architecture-specific Kafka resource.

     Falls back to a default DistConfig when *dist_config* is falsy.
     """
     if dist_config:
         self.dist_config = dist_config
     else:
         self.dist_config = utils.DistConfig()
     arch = utils.cpu_arch()
     self.resources = {
         'kafka': 'kafka-%s' % arch,
     }
     self.verify_resources = utils.verify_resources(
         *self.resources.values())
def install_deployer():
    """Install the kube deployer: user, directory tree and kv-store keys.

    Creates the deployer user/dirs from the charm's setup.yaml, persists
    the deployer paths and label selectors in the unit kv store, creates
    the global and per-unit directory structure, labels the 'default'
    namespace, and finally sets the 'deployer.installed' flag.
    """
    # Create user and configuration dir
    distconfig = utils.DistConfig(filename=charm_dir() + '/files/setup.yaml')
    distconfig.add_users()
    distconfig.add_dirs()
    # General deployer options: one deployer dir per unit; '/' in the
    # unit name is flattened to '-' so it is a valid directory name.
    deployers_path = '/home/kubedeployer/.config/kubedeployers'
    deployer_path = deployers_path + '/' + os.environ[
        'JUJU_UNIT_NAME'].replace('/', '-')
    # Save them in the kv store so other handlers can look them up.
    namespace_selector = 'ns'
    unitdata.kv().set('deployers_path', deployers_path)
    unitdata.kv().set('deployer_path', deployer_path)
    unitdata.kv().set('juju_app_selector', 'juju-app')
    unitdata.kv().set('deployer_selector', 'deployer')
    unitdata.kv().set('namespace_selector', namespace_selector)
    # Setup dir structure.  exist_ok=True avoids the check-then-create
    # race of the previous os.path.exists() guards and collapses the
    # duplicated loops into simple makedirs calls.
    log('Setting up deployer dirs in: ' + deployer_path)
    for global_dir in ('namespaces', 'network-policies'):
        os.makedirs(os.path.join(deployers_path, global_dir), exist_ok=True)
    for unit_dir in ('resources',):
        os.makedirs(os.path.join(deployer_path, unit_dir), exist_ok=True)
    # Setup the default namespace
    add_label_to_resource('default', namespace_selector + '=default',
                          'namespace', 'default', True)
    set_flag('deployer.installed')
Example #4
0
 def __init__(self, dist_config=None, user='******'):
     """Record the run-as user, dist config and Flume resource names.

     A default DistConfig is built when *dist_config* is falsy.
     """
     self.user = user
     if dist_config:
         self.dist_config = dist_config
     else:
         self.dist_config = utils.DistConfig()
     arch = utils.cpu_arch()
     self.resources = {
         'flume': 'flume-%s' % arch,
     }
     self.verify_resources = utils.verify_resources(
         *self.resources.values())
 def __init__(self):
     """Load the dist config from the hadoop-client layer options."""
     opts = layer.options('hadoop-client')
     self.dist_config = utils.DistConfig(data=opts)
Example #6
0
 def __init__(self):
     """Load the dist config from the apache-bigtop-base layer options."""
     opts = layer.options('apache-bigtop-base')
     self.dist_config = utils.DistConfig(data=opts)
Example #7
0
 def __init__(self, dist_config=None):
     """Use the given dist config, or build a default one when it is falsy."""
     self.dist_config = dist_config if dist_config else utils.DistConfig()
def get_layer_opts():
    """Return a DistConfig built from the apache-bigtop-base layer options."""
    opts = layer.options('apache-bigtop-base')
    return utils.DistConfig(data=opts)
Example #9
0
 def __init__(self, dist_config=None):
     """Store the dist config and the arch-specific Zeppelin resource name.

     A default DistConfig is created when *dist_config* is falsy.
     """
     if dist_config:
         self.dist_config = dist_config
     else:
         self.dist_config = utils.DistConfig()
     arch = utils.cpu_arch()
     self.resources = {
         'zeppelin': 'zeppelin-%s' % arch,
     }
Example #10
0
 def _url(self, *parts):
     """Join *parts* onto the local Zeppelin REST API base URL.

     The port comes from the default DistConfig; parts are merged one
     at a time with urljoin.
     """
     conf = utils.DistConfig()
     endpoint = 'http://localhost:{}/api/'.format(conf.port('zeppelin'))
     for piece in parts:
         endpoint = urljoin(endpoint, piece)
     return endpoint
Example #11
0
def install_kafka():
    """Install, configure and start Kafka on this unit.

    Waits until the configured number of brokers has joined, installs
    Java, unpacks the Kafka resource, creates users/dirs, renders
    server.properties and the systemd unit, then starts the service.
    Sets the 'kafka.installed' flag on success; sets a blocked status
    and returns early when brokers are missing or the service fails to
    start.
    """
    status.maintenance('Installing Kafka')

    # Check if minimum amount of brokers are available.
    # NOTE(review): assumes 'broker-count' has an integer default in
    # config.yaml — a missing key would make the '>' comparison raise.
    min_brokers = config().get('broker-count')
    broker_count = 1
    if min_brokers > 1 and is_flag_set('endpoint.broker.joined'):
        kafka_peers = endpoint_from_flag('endpoint.broker.joined')
        broker_count = kafka_peers.kafka_broker_count()

    if broker_count != min_brokers:
        status.blocked(
            "Waiting for {} units to start bootstrapping.".format(min_brokers))
        return

    # Install Java
    status.maintenance('Installing Java')
    install_java()

    # Unpack Kafka files and setup user/group.  Only extract when the
    # resource was actually attached and is non-empty.
    status.maintenance('Unpacking Kafka files')
    filename = resource_get('apache-kafka')
    filepath = filename and Path(filename)
    if filepath and filepath.exists() and filepath.stat().st_size:
        # Context manager closes the archive even when extractall
        # raises; the previous explicit close() leaked it on error.
        with tarfile.open(filepath, "r:gz") as tar:
            tar.extractall("/usr/lib")

    distconfig = utils.DistConfig("{}/files/setup.yaml".format(charm_dir()))
    distconfig.add_users()
    distconfig.add_dirs()

    if not os.path.exists('/usr/lib/kafka'):
        # Assumes that there is only 1 kafka_* dir; an empty glob here
        # would raise IndexError (i.e. the resource was not unpacked).
        kafka_dir = glob.glob('/usr/lib/kafka_*')[0]
        os.symlink(kafka_dir, '/usr/lib/kafka')

    if not os.path.exists('/usr/lib/kafka/logs'):
        os.makedirs('/usr/lib/kafka/logs')
        os.symlink('/usr/lib/kafka/logs', '/var/log/kafka')
        os.chmod('/var/log/kafka', 0o775)
        shutil.chown('/var/log/kafka', user='******', group='kafka')

    # Create server.properties from the zookeeper endpoints.
    status.maintenance('Creating Kafka config')
    zookeepers = endpoint_from_flag('zookeeper.ready')
    zoo_brokers = []
    for zoo in zookeepers.zookeepers():
        zoo_brokers.append("{}:{}".format(zoo['host'], zoo['port']))

    render(source="server.properties.j2",
           target='/usr/lib/kafka/config/server.properties',
           context={
               'broker_count': min_brokers,
               # Keep the transaction-log min ISR just below the broker
               # count so a single broker failure is tolerated.
               'transaction_min_isr':
               1 if min_brokers == 1 else min_brokers - 1,
               'zookeeper_brokers': ",".join(zoo_brokers),
           })

    # Create systemd service
    render(source='kafka.service.j2',
           target='/etc/systemd/system/kafka.service',
           context={
               'java_home': java_home(),
               'jmx': 1 if config().get('enable-jmx') else 0,
           })

    # Start systemd service
    status.maintenance('Starting Kafka services')
    try:
        check_call(['systemctl', 'daemon-reload'])
        check_call(['systemctl', 'start', 'kafka.service'])
        check_call(['systemctl', 'enable', 'kafka.service'])
    except CalledProcessError as e:
        log(e)
        status.blocked('Could not start Kafka services')
        return

    open_port(9092)
    if config().get('enable-jmx'):
        open_port(9999)
    status.active('Ready')
    set_flag('kafka.installed')