def setup_kafka_config(self):
    """Populate the kafka_conf dir (from dist.yaml) with Kafka's config.

    The default config shipped in the tarball is copied into kafka_conf,
    after which the tarball copy is replaced by a symlink back to
    kafka_conf. Server, log4j, environment, and /etc/hosts settings are
    then filled in for this unit.
    """
    tarball_conf = self.dist_config.path('kafka') / 'config'
    managed_conf = self.dist_config.path('kafka_conf')
    managed_conf.rmtree_p()
    tarball_conf.copytree(managed_conf)
    # Kafka has been seen to keep looking for config under
    # KAFKA_HOME/config, so drop the tarball conf dir and symlink our
    # real conf dir into its place.
    tarball_conf.rmtree_p()
    managed_conf.symlink(tarball_conf)

    # Configure immutable bits
    bin_dir = self.dist_config.path('kafka') / 'bin'
    with utils.environment_edit_in_place('/etc/environment') as env:
        if bin_dir not in env['PATH']:
            env['PATH'] = ':'.join([env['PATH'], bin_dir])
        env['LOG_DIR'] = self.dist_config.path('kafka_app_logs')

    # advertised.host.name is set to the public address so that external
    # (non-Juju) clients are able to connect to Kafka.
    public_address = hookenv.unit_get('public-address')
    private_ip = utils.resolve_private_address(
        hookenv.unit_get('private-address'))
    server_properties = managed_conf / 'server.properties'
    service, unit_num = os.environ['JUJU_UNIT_NAME'].split('/', 1)
    utils.re_edit_in_place(server_properties, {
        r'^broker.id=.*': 'broker.id=%s' % unit_num,
        r'^port=.*': 'port=%s' % self.dist_config.port('kafka'),
        r'^log.dirs=.*': 'log.dirs=%s' % self.dist_config.path('kafka_data_logs'),
        r'^#?advertised.host.name=.*': 'advertised.host.name=%s' % public_address,
    })

    log4j_properties = managed_conf / 'log4j.properties'
    utils.re_edit_in_place(log4j_properties, {
        r'^kafka.logs.dir=.*': 'kafka.logs.dir=%s' % self.dist_config.path('kafka_app_logs'),
    })

    # Fix for lxc containers and some corner cases in the manual
    # provider: make public_address resolvable internally by mapping
    # it to the private IP.
    utils.update_etc_hosts({private_ip: public_address})

    templating.render(
        'upstart.conf',
        '/etc/init/kafka.conf',
        context={
            'kafka_conf': self.dist_config.path('kafka_conf'),
            'kafka_bin': '{}/bin'.format(self.dist_config.path('kafka')),
        },
    )
def configure_hosts_file(self):
    """Map this unit's private IP to a stable hostname in /etc/hosts.

    Java resolves the server's hostname to find its real IP address, so
    an /etc/hosts entry is required. The hostname is derived from the
    unit id with '/' replaced by '-'.
    """
    private_ip = utils.resolve_private_address(
        hookenv.unit_get('private-address'))
    unit_hostname = hookenv.local_unit().replace('/', '-')
    utils.update_etc_hosts({private_ip: unit_hostname})

    # Rename the host to a more semantically meaningful value. Some
    # providers require /etc/hostname to match the /etc/hosts entry,
    # otherwise Hadoop gets confused about where things should run.
    hostname_file = Path('/etc/hostname')
    hostname_file.write_text(unit_hostname)
    check_call(['hostname', '-F', hostname_file])
def setup_flume_config(self):
    """Populate the flume_conf dir (from dist.yaml) with Flume's config.

    Copies the default config shipped in the tarball into flume_conf,
    symlinks it back into place, and fills in the env, conf, and log4j
    files from their templates.
    """
    tarball_conf = self.dist_config.path('flume') / 'conf'
    managed_conf = self.dist_config.path('flume_conf')
    managed_conf.rmtree_p()
    tarball_conf.copytree(managed_conf)
    # Drop the conf included in the tarball and symlink our real conf
    # dir into its place.
    tarball_conf.rmtree_p()
    managed_conf.symlink(tarball_conf)

    flume_env = managed_conf / 'flume-env.sh'
    if not flume_env.exists():
        (managed_conf / 'flume-env.sh.template').copy(flume_env)
    utils.re_edit_in_place(flume_env, {
        r'.*FLUME_CLASSPATH.*': 'FLUME_CLASSPATH={}/*'.format(
            self.dist_config.path('zookeeper')),
    })

    flume_conf_file = managed_conf / 'flume.conf'
    if not flume_conf_file.exists():
        (managed_conf / 'flume-conf.properties.template').copy(flume_conf_file)

    flume_log4j = managed_conf / 'log4j.properties'
    utils.re_edit_in_place(flume_log4j, {
        r'^flume.log.dir.*': 'flume.log.dir={}'.format(
            self.dist_config.path('flume_logs')),
    })

    # fix for lxc containers and some corner cases in manual provider
    utils.update_etc_hosts({hookenv.unit_private_ip(): hostname()})

    templating.render(
        'upstart.conf',
        '/etc/init/flume.conf',
        context={
            'flume': self.dist_config.path('flume'),
            'flume_conf': self.dist_config.path('flume_conf'),
        },
    )
def setup_flume_config(self):
    """Copy Flume's default config to the flume_conf path from dist.yaml.

    The tarball's conf dir is copied to flume_conf and then replaced by
    a symlink to it; flume-env.sh, flume.conf, and log4j.properties are
    seeded from templates and edited for this deployment.
    """
    default_conf = self.dist_config.path('flume') / 'conf'
    flume_conf = self.dist_config.path('flume_conf')
    flume_conf.rmtree_p()
    default_conf.copytree(flume_conf)
    # Now remove the conf included in the tarball and symlink our real
    # conf dir in its place.
    default_conf.rmtree_p()
    flume_conf.symlink(default_conf)

    env_script = flume_conf / 'flume-env.sh'
    if not env_script.exists():
        template = flume_conf / 'flume-env.sh.template'
        template.copy(env_script)
    classpath_edit = {
        r'.*FLUME_CLASSPATH.*': 'FLUME_CLASSPATH={}/*'.format(
            self.dist_config.path('zookeeper')),
    }
    utils.re_edit_in_place(env_script, classpath_edit)

    conf_file = flume_conf / 'flume.conf'
    if not conf_file.exists():
        (flume_conf / 'flume-conf.properties.template').copy(conf_file)

    log_dir_edit = {
        r'^flume.log.dir.*': 'flume.log.dir={}'.format(
            self.dist_config.path('flume_logs')),
    }
    utils.re_edit_in_place(flume_conf / 'log4j.properties', log_dir_edit)

    # fix for lxc containers and some corner cases in manual provider
    utils.update_etc_hosts({hookenv.unit_private_ip(): hostname()})

    templating.render(
        'upstart.conf',
        '/etc/init/flume.conf',
        context={
            'flume': self.dist_config.path('flume'),
            'flume_conf': self.dist_config.path('flume_conf'),
        },
    )
def setup_kafka_config(self):
    """Copy Kafka's default config to the kafka_conf path from dist.yaml.

    After copying, the tarball's config dir is replaced with a symlink
    to kafka_conf, /etc/environment gains Kafka's bin dir and log dir,
    and server.properties / log4j.properties are edited for this unit.
    """
    default_conf = self.dist_config.path('kafka') / 'config'
    kafka_conf = self.dist_config.path('kafka_conf')
    kafka_conf.rmtree_p()
    default_conf.copytree(kafka_conf)
    # Now remove the conf included in the tarball and symlink our real
    # conf dir. We've seen issues where kafka still looks for config in
    # KAFKA_HOME/config.
    default_conf.rmtree_p()
    kafka_conf.symlink(default_conf)

    # Configure immutable bits
    kafka_bin = self.dist_config.path('kafka') / 'bin'
    with utils.environment_edit_in_place('/etc/environment') as env:
        if kafka_bin not in env['PATH']:
            env['PATH'] = ':'.join([env['PATH'], kafka_bin])
        env['LOG_DIR'] = self.dist_config.path('kafka_app_logs')

    # Note: advertised.host.name below is the public address so that
    # external (non-Juju) clients can connect to Kafka.
    public_address = hookenv.unit_get('public-address')
    private_ip = utils.resolve_private_address(
        hookenv.unit_get('private-address'))
    service, unit_num = os.environ['JUJU_UNIT_NAME'].split('/', 1)
    server_edits = {
        r'^broker.id=.*': 'broker.id=%s' % unit_num,
        r'^port=.*': 'port=%s' % self.dist_config.port('kafka'),
        r'^log.dirs=.*': 'log.dirs=%s' % self.dist_config.path('kafka_data_logs'),
        r'^#?advertised.host.name=.*': 'advertised.host.name=%s' % public_address,
    }
    utils.re_edit_in_place(kafka_conf / 'server.properties', server_edits)

    log4j_edits = {
        r'^kafka.logs.dir=.*': 'kafka.logs.dir=%s' % self.dist_config.path('kafka_app_logs'),
    }
    utils.re_edit_in_place(kafka_conf / 'log4j.properties', log4j_edits)

    # Fix for lxc containers and some corner cases in the manual
    # provider: ensure public_address resolves internally by mapping it
    # to the private IP.
    utils.update_etc_hosts({private_ip: public_address})

    templating.render(
        'upstart.conf',
        '/etc/init/kafka.conf',
        context={
            'kafka_conf': self.dist_config.path('kafka_conf'),
            'kafka_bin': '{}/bin'.format(self.dist_config.path('kafka')),
        },
    )