def install_zookeeper():
    """
    Trigger a puppet-based Zookeeper install once Bigtop's initial
    setup is complete. Puppet starts the service as a side effect.
    """
    hookenv.status_set('maintenance', 'installing zookeeper')
    zk = Zookeeper()

    # Prime the kv with current values so later data_changed calls can
    # detect real changes instead of firing on first use.
    cfg = hookenv.config()
    data_changed('zkpeer.nodes', zk.read_peers())
    for option in ('network_interface',
                   'autopurge_purge_interval',
                   'autopurge_snap_retain_count'):
        data_changed('zk.{}'.format(option), cfg.get(option))

    zk.install()
    zk.open_ports()
    set_state('zookeeper.installed')
    set_state('zookeeper.started')
    hookenv.status_set('active', 'ready {}'.format(zk.quorum_check()))

    # Surface the installed package version in juju status output.
    hookenv.application_version_set(
        get_package_version('zookeeper') or 'unknown')
def install_zookeeper():
    """
    After Bigtop finishes its initial setup, run a puppet install via
    our Zookeeper library; puppet starts the service as a side effect.
    """
    hookenv.status_set('maintenance', 'installing zookeeper')
    zookeeper = Zookeeper()

    # Record baseline values so subsequent data_changed calls only
    # trigger on genuine changes.
    config = hookenv.config()
    data_changed('zkpeer.nodes', zookeeper.read_peers())
    data_changed('zk.network_interface',
                 config.get('network_interface'))
    data_changed('zk.autopurge_purge_interval',
                 config.get('autopurge_purge_interval'))
    data_changed('zk.autopurge_snap_retain_count',
                 config.get('autopurge_snap_retain_count'))

    zookeeper.install()
    zookeeper.open_ports()
    set_state('zookeeper.installed')
    set_state('zookeeper.started')
    hookenv.status_set(
        'active', 'ready {}'.format(zookeeper.quorum_check()))

    # Surface the installed package version in juju status output.
    version = get_package_version('zookeeper') or 'unknown'
    hookenv.application_version_set(version)
def set_deployment_mode_state(state):
    """
    Set the given spark deployment-mode state, clearing the opposite
    mode first so yarn and standalone never appear set simultaneously.
    """
    if is_state('spark.yarn.installed'):
        remove_state('spark.standalone.installed')
    if is_state('spark.standalone.installed'):
        remove_state('spark.yarn.installed')
    set_state(state)

    # Surface the spark-core package version in juju status output.
    hookenv.application_version_set(
        get_package_version('spark-core') or 'unknown')
def initial_setup():
    """Install zeppelin, open its ports, and refresh unit status."""
    hookenv.status_set('maintenance', 'installing zeppelin')
    zeppelin = Zeppelin()
    zeppelin.install()
    zeppelin.open_ports()
    set_state('zeppelin.installed')
    update_status()

    # Surface the zeppelin package version in juju status output.
    version = get_package_version('zeppelin') or 'unknown'
    hookenv.application_version_set(version)
def initial_setup():
    """Install zeppelin, set up its environment, and open ports."""
    hookenv.status_set("maintenance", "installing zeppelin")
    z = Zeppelin()
    z.install()
    z.setup_etc_env()
    z.open_ports()
    set_state("zeppelin.installed")
    update_status()

    # Surface the zeppelin package version in juju status output.
    hookenv.application_version_set(
        get_package_version("zeppelin") or "unknown")
def test_get_package_version(self, mock_sub):
    """Verify expected package version is returned."""
    # An empty package name is invalid and must raise.
    with self.assertRaises(BigtopError):
        get_package_version('')

    # Successful check_output: bytes decode to the version string.
    mock_sub.check_output.return_value = b'1.2.3'
    self.assertEqual(get_package_version('foo'), '1.2.3')

    # Failed check_output: a CalledProcessError-alike must yield ''.
    class FakeCalledProcessError(Exception):
        pass
    FakeCalledProcessError.output = "package foo not found"
    mock_sub.CalledProcessError = FakeCalledProcessError

    def raise_error(*args, **kwargs):
        raise FakeCalledProcessError('foo!')
    mock_sub.check_output.side_effect = raise_error
    self.assertEqual(get_package_version('foo'), '')
def install_hbase(hdfs, zk):
    """
    Anytime our dependencies are available, check to see if we have a
    valid reason to (re)install. These include:
    - initial install
    - config change
    - Zookeeper unit has joined/departed
    """
    zks = zk.zookeepers()
    deployment_matrix = {'zookeepers': zks}

    if is_state('hbase.installed'):
        prefix = "configuring"
        # Peer joins/departures are handled by other handlers; they are
        # never a reason to reinstall here.
        if is_state('hbpeer.departed') or is_state('hbpeer.joined'):
            return
        # Bail unless config or the matrix changed. NOTE: the 'or'
        # short-circuit is deliberate — the kv matrix is only updated
        # when config has NOT changed.
        if not (is_state('config.changed') or
                data_changed('deployment_matrix', deployment_matrix)):
            return
    else:
        prefix = "installing"
        # First install: prime the kv with the current matrix so later
        # calls can tell whether a reinstall is needed.
        data_changed('deployment_matrix', deployment_matrix)

    hookenv.status_set('maintenance', '{} hbase'.format(prefix))
    hookenv.log("{} hbase with: {}".format(prefix, deployment_matrix))

    hbase = HBase()
    hosts = {'namenode': hdfs.namenodes()[0]}
    hbase.configure(hosts, zks)

    # Keep our own IP in the regionservers list; restart only when the
    # rs conf file actually changed on disk.
    hbase.update_regionservers([hookenv.unit_private_ip()])
    if any_file_changed(['/etc/hbase/conf/regionservers']):
        hbase.restart()

    # Surface the hbase package version in juju status output.
    hookenv.application_version_set(
        get_package_version('hbase-master') or 'unknown')

    hbase.open_ports()
    report_status()
    set_state('hbase.installed')
def install_hive(hadoop):
    """
    Anytime our dependencies are available, check to see if we have a
    valid reason to (re)install. These include:
    - initial install
    - HBase has joined/departed
    """
    # MySQL names the metastore db after the juju service name, and
    # hive cannot handle '-' in that db name, so refuse to proceed.
    if "-" in hookenv.service_name():
        hookenv.status_set('blocked', "application name may not contain '-'; "
                                      "redeploy with a different name")
        return

    # Grab the hbase connection dict when that relation is ready.
    if is_state('hbase.ready'):
        hbserver = RelationBase.from_state('hbase.ready').hbase_servers()[0]
    else:
        hbserver = None

    # Changes to this matrix drive reinstalls.
    deployment_matrix = {'hbase': hbserver}

    if is_state('hive.installed'):
        prefix = "configuring"
        # Nothing to do if the matrix is unchanged.
        if not data_changed('deployment_matrix', deployment_matrix):
            return
    else:
        prefix = "installing"
        # First install: prime the kv with the current matrix so later
        # calls can tell whether a reinstall is needed.
        data_changed('deployment_matrix', deployment_matrix)

    hookenv.status_set('maintenance', '{} hive'.format(prefix))
    hookenv.log("{} hive with: {}".format(prefix, deployment_matrix))

    hive = Hive()
    hive.install(hbase=hbserver)
    hive.restart()
    hive.open_ports()
    set_state('hive.installed')
    report_status()

    # Surface the hive package version in juju status output.
    hookenv.application_version_set(
        get_package_version('hive') or 'unknown')
def configure_kafka(zk):
    """Configure kafka against the available zookeeper quorum."""
    hookenv.status_set('maintenance', 'setting up kafka')

    # Prime data changed for the network interface setting.
    data_changed('kafka.network_interface',
                 hookenv.config().get('network_interface'))

    kafka = Kafka()
    kafka.configure_kafka(zk.zookeepers())
    kafka.open_ports()
    set_state('kafka.started')
    hookenv.status_set('active', 'ready')

    # Surface the kafka package version in juju status output.
    hookenv.application_version_set(
        get_package_version('kafka') or 'unknown')
def install_mahout():
    """Install mahout via bigtop puppet and export MAHOUT_HOME."""
    hookenv.status_set('maintenance', 'installing mahout')
    bigtop = Bigtop()
    bigtop.render_site_yaml(roles=['mahout-client'])
    bigtop.trigger_puppet()

    # Make MAHOUT_HOME available system-wide.
    with utils.environment_edit_in_place('/etc/environment') as env:
        env['MAHOUT_HOME'] = '/usr/lib/mahout'

    set_state('mahout.installed')
    hookenv.status_set('active', 'ready')

    # Surface the mahout package version in juju status output.
    hookenv.application_version_set(
        get_package_version('mahout') or 'unknown')
def install_mahout():
    """Run a puppet-based mahout install and set MAHOUT_HOME."""
    hookenv.status_set('maintenance', 'installing mahout')

    deployment = Bigtop()
    deployment.render_site_yaml(roles=['mahout-client'])
    deployment.trigger_puppet()

    # Publish MAHOUT_HOME in the system-wide environment.
    with utils.environment_edit_in_place('/etc/environment') as env:
        env['MAHOUT_HOME'] = '/usr/lib/mahout'

    set_state('mahout.installed')
    hookenv.status_set('active', 'ready')

    # Surface the mahout package version in juju status output.
    version = get_package_version('mahout') or 'unknown'
    hookenv.application_version_set(version)
def install_giraph(giraph):
    """Install giraph when prerequisite states are present."""
    hookenv.status_set('maintenance', 'installing giraph')
    bigtop = Bigtop()
    bigtop.render_site_yaml(roles=['giraph-client'])
    bigtop.trigger_puppet()

    # The -doc subpackage ships the giraph-examples jar.
    fetch.apt_install('giraph-doc')

    home = Path('/usr/lib/giraph')
    docdir = Path('/usr/share/doc/giraph')
    libdir = Path(home / 'lib')

    # Collect every giraph jar (needed for -libjars): the examples jar
    # plus the usable jars from the install and lib directories.
    jars = glob('{}/giraph-examples-*.jar'.format(docdir))
    jars += get_good_jars(home, prefix=True)
    jars += get_good_jars(libdir, prefix=True)

    # HADOOP_CLASSPATH may use wildcards (and should, for readability),
    # but GIRAPH_JARS feeds 'hadoop jar -libjars $GIRAPH_JARS' and must
    # be a comma-separated list of jars.
    with utils.environment_edit_in_place('/etc/environment') as env:
        cur_cp = env['HADOOP_CLASSPATH'] if 'HADOOP_CLASSPATH' in env else ""
        env['GIRAPH_HOME'] = home
        env['HADOOP_CLASSPATH'] = "{examples}/*:{home}/*:{libs}/*:{cp}".format(
            examples=docdir, home=home, libs=libdir, cp=cur_cp)
        env['GIRAPH_JARS'] = ','.join(jars)

    set_state('giraph.installed')
    report_status()

    # Surface the giraph package version in juju status output.
    hookenv.application_version_set(
        get_package_version('giraph') or 'unknown')
def install_hbase(zk, hdfs):
    """(Re)configure hbase whenever the zookeeper set changes."""
    zks = zk.zookeepers()
    # Skip when already installed and the zk set is unchanged. NOTE:
    # the short-circuit is deliberate — data_changed (and its kv
    # update) only runs once hbase has been installed.
    if is_state('hbase.installed') and not data_changed('zks', zks):
        return

    if is_state('hbase.installed'):
        msg = "configuring hbase"
    else:
        msg = "installing hbase"
    hookenv.status_set('maintenance', msg)

    hbase = HBase()
    hosts = {'namenode': hdfs.namenodes()[0]}
    hbase.configure(hosts, zks)
    hbase.open_ports()
    set_state('hbase.installed')
    report_status()

    # Surface the hbase package version in juju status output.
    hookenv.application_version_set(
        get_package_version('hbase-master') or 'unknown')
def install_giraph(giraph):
    """Install giraph when prerequisite states are present."""
    hookenv.status_set('maintenance', 'installing giraph')
    bigtop = Bigtop()
    bigtop.render_site_yaml(roles=['giraph-client'])
    bigtop.trigger_puppet()

    # Install the -doc subpackage to pick up giraph-examples.
    fetch.apt_install('giraph-doc')

    giraph_home = Path('/usr/lib/giraph')
    giraph_docdir = Path('/usr/share/doc/giraph')
    giraph_libdir = Path(giraph_home / 'lib')

    # Build the full jar list for -libjars: the examples jar plus the
    # usable jars under the giraph home and lib directories.
    giraph_jars = glob('{}/giraph-examples-*.jar'.format(giraph_docdir))
    for jar_dir in (giraph_home, giraph_libdir):
        giraph_jars.extend(get_good_jars(jar_dir, prefix=True))

    # HADOOP_CLASSPATH tolerates wildcards (use them for readability),
    # but GIRAPH_JARS feeds 'hadoop jar -libjars $GIRAPH_JARS' and must
    # be a comma-separated list of jars.
    with utils.environment_edit_in_place('/etc/environment') as env:
        cur_cp = env['HADOOP_CLASSPATH'] if 'HADOOP_CLASSPATH' in env else ""
        env['GIRAPH_HOME'] = giraph_home
        env['HADOOP_CLASSPATH'] = "{examples}/*:{home}/*:{libs}/*:{cp}".format(
            examples=giraph_docdir, home=giraph_home,
            libs=giraph_libdir, cp=cur_cp)
        env['GIRAPH_JARS'] = ','.join(giraph_jars)

    set_state('giraph.installed')
    report_status()

    # Surface the giraph package version in juju status output.
    giraph_version = get_package_version('giraph') or 'unknown'
    hookenv.application_version_set(giraph_version)