# Module-level imports these handlers rely on. The charms.reactive and
# charmhelpers locations are the standard ones; the layer module providing
# Zeppelin is an assumption (this excerpt does not show where it lives).
import hashlib

from charmhelpers.core import hookenv, unitdata
from charms.layer.apache_bigtop_base import Bigtop, get_package_version
from charms.layer.bigtop_zeppelin import Zeppelin  # assumed layer path
from charms.reactive import is_state, remove_state, set_state
from charms.reactive.helpers import data_changed
def register_notebook(client):
    '''Register any notebooks offered by a related client charm.'''
    zeppelin = Zeppelin()
    for notebook in client.unregistered_notebooks():
        # The md5 of the notebook content serves as its id on both ends.
        notebook_md5 = hashlib.md5(notebook.encode('utf8')).hexdigest()
        if zeppelin.register_notebook(notebook_md5, notebook):
            client.accept_notebook(notebook)
        else:
            client.reject_notebook(notebook)
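# A minimal, standalone illustration of the id scheme used by
# register_notebook/remove_notebook: the md5 hexdigest of the notebook
# payload is deterministic, so the client and this charm can agree on an id
# without exchanging any extra state. The payload below is an example value,
# not a real Zeppelin notebook.
def _example_notebook_id():
    payload = '{"name": "demo"}'
    return hashlib.md5(payload.encode('utf8')).hexdigest()  # 32-char hex string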
def configure_hive(hive):
    '''Configure Zeppelin to use the related HiveServer2 endpoint.'''
    hive_ip = hive.get_private_ip()
    hive_port = hive.get_port()
    hive_url = 'jdbc:hive2://%s:%s' % (hive_ip, hive_port)
    if data_changed('hive.connect', hive_url):
        hookenv.status_set('maintenance', 'configuring hive')
        zeppelin = Zeppelin()
        zeppelin.configure_hive(hive_url)
        set_state('zeppelin.hive.configured')
    update_status()
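# Shape check for the JDBC url built in configure_hive, using example values
# (the host and port here are illustrative, not from a real relation):
assert 'jdbc:hive2://%s:%s' % ('10.0.0.5', 10000) == 'jdbc:hive2://10.0.0.5:10000'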
def unconfigure_spark():
    '''
    Remove remote Spark; reconfigure Zeppelin to use embedded Spark.
    '''
    hookenv.status_set('maintenance', 'removing spark relation')
    zeppelin = Zeppelin()

    # Zepp includes the spark-client role, so reconfigure our built-in spark
    # if our related spark has gone away.
    if is_state('zeppelin.hadoop.configured'):
        local_master = 'yarn-client'
    else:
        local_master = 'local[*]'
    zeppelin.configure_spark(local_master)
    data_changed('spark.master', local_master)  # ensure updated if re-added
    remove_state('zeppelin.spark.configured')
    update_status()
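# data_changed() hashes the given value, compares it to the hash cached in
# unitdata under the given key, and caches the new value as a side effect.
# The call above is made purely for that side effect: it primes the cache so
# a re-added spark relation with the same master is still detected as a
# change. A sketch of the semantics ('example.key' is a throwaway key):
#
#   data_changed('example.key', 'a')  # True: first value seen for the key
#   data_changed('example.key', 'a')  # False: unchanged since last call
#   data_changed('example.key', 'b')  # True: value changed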
def check_repo_version(): """ Configure a bigtop site.yaml if a new version of zeppelin is available. This method will set unitdata if a different version of zeppelin is available in the newly configured bigtop repo. This unitdata allows us to configure site.yaml while gating the actual puppet apply. The user must do the puppet apply by calling the 'reinstall' action. """ repo_ver = Bigtop().check_bigtop_repo_package('zeppelin') if repo_ver: unitdata.kv().set('zeppelin.version.repo', repo_ver) unitdata.kv().flush(True) zeppelin = Zeppelin() zeppelin.trigger_bigtop() else: unitdata.kv().unset('zeppelin.version.repo') update_status()
def initial_setup():
    '''Install Zeppelin, prepare its environment, and open its ports.'''
    hookenv.status_set('maintenance', 'installing zeppelin')
    zeppelin = Zeppelin()
    zeppelin.install()
    zeppelin.setup_etc_env()
    zeppelin.open_ports()
    set_state('zeppelin.installed')
    update_status()

    # set app version string for juju status output
    zeppelin_version = get_package_version('zeppelin') or 'unknown'
    hookenv.application_version_set(zeppelin_version)
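# Every handler in this module ends by calling update_status(), which is not
# shown in this excerpt. The sketch below is an assumption about its shape,
# derived only from the reactive states these handlers set and clear.
def update_status():
    '''Hypothetical sketch: derive unit status from the states used above.'''
    if not is_state('zeppelin.installed'):
        hookenv.status_set('maintenance', 'installing zeppelin')
    elif is_state('spark.master.unusable'):
        hookenv.status_set('blocked', 'related spark is not in standalone mode')
    else:
        hookenv.status_set('active', 'ready')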
def initial_setup(): hookenv.status_set("maintenance", "installing zeppelin") zeppelin = Zeppelin() zeppelin.install() zeppelin.setup_etc_env() zeppelin.open_ports() set_state("zeppelin.installed") update_status() # set app version string for juju status output zeppelin_version = get_package_version("zeppelin") or "unknown" hookenv.application_version_set(zeppelin_version)
def configure_spark(spark):
    '''
    Configure Zeppelin to use remote Spark resources.
    '''
    # NB: Use the master_url string if it already starts with spark://.
    # Otherwise, it means the remote spark is in local or yarn mode -- that's
    # bad because using 'local' or 'yarn' here would cause zepp's spark-submit
    # to use the builtin spark, hence ignoring the remote spark. In this case,
    # set a state so we can inform the user that the remote spark is unusable.
    master_url = spark.get_master_url()
    if master_url.startswith('spark'):
        remove_state('spark.master.unusable')
        # Only (re)configure if our master url has changed.
        if data_changed('spark.master', master_url):
            hookenv.status_set('maintenance', 'configuring spark')
            zeppelin = Zeppelin()
            zeppelin.configure_spark(master_url)
            set_state('zeppelin.spark.configured')
    else:
        remove_state('zeppelin.spark.configured')
        set_state('spark.master.unusable')
    update_status()
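# These functions are reactive handlers; in a charms.reactive layer they are
# registered with @when/@when_not decorators. The predicates below are
# assumptions inferred from the states used above, not the charm's actual
# decorators:
#
#   @when_not('zeppelin.installed')
#   def initial_setup(): ...
#
#   @when('zeppelin.installed', 'spark.joined')
#   def configure_spark(spark): ...
#
#   @when('zeppelin.spark.configured')
#   @when_not('spark.joined')
#   def unconfigure_spark(): ...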
def unconfigure_hive():
    hookenv.status_set('maintenance', 'removing hive relation')
    zeppelin = Zeppelin()
    # Clear the endpoint by writing a JDBC url with an empty host and port.
    zeppelin.configure_hive('jdbc:hive2://:')
    remove_state('zeppelin.hive.configured')
    update_status()
def remove_notebook(client):
    zeppelin = Zeppelin()
    for notebook in client.unremoved_notebooks():
        # Recompute the same md5 id used at registration time.
        notebook_md5 = hashlib.md5(notebook.encode('utf8')).hexdigest()
        zeppelin.remove_notebook(notebook_md5)
        client.remove_notebook(notebook)
def configure_hadoop(hadoop):
    zeppelin = Zeppelin()
    zeppelin.configure_hadoop()
    zeppelin.register_hadoop_notebooks()
    set_state('zeppelin.hadoop.configured')
def unconfigure_hadoop():
    zeppelin = Zeppelin()
    zeppelin.remove_hadoop_notebooks()
    remove_state('zeppelin.hadoop.configured')
def unconfigure_hive(): hookenv.status_set("maintenance", "removing hive relation") zeppelin = Zeppelin() zeppelin.configure_hive("jdbc:hive2://:") remove_state("zeppelin.hive.configured") update_status()