# Common imports assumed by these snippets (charms.reactive / charmhelpers);
# Spark, get_dist_config, ResourceError, and the other undefined helpers come
# from the charm's own library code, which is not shown here.
import subprocess

from charmhelpers.core import hookenv, host
from charms.reactive import RelationBase, is_state, remove_state, set_state
from charms.reactive.helpers import data_changed


def disable_zookeepers():
    hookenv.status_set('maintenance', 'Disabling high availability')
    spark = Spark(get_dist_config())
    spark.stop()
    spark.disable_ha()
    spark.configure()
    spark.start()
    remove_state('zookeeper.configured')
    report_status(spark)
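
Every handler above and below ends by calling report_status(spark), which these snippets never define. A minimal sketch of what such a helper could look like, assuming the unit status is derived from the configured execution mode and the yarn.configured state (the exact logic is an assumption):

def report_status(spark):
    # Hedged sketch, not the charm's actual helper: summarize the current
    # execution mode in the unit's status line.
    mode = hookenv.config()['spark_execution_mode']
    if mode.startswith('yarn') and not is_state('yarn.configured'):
        hookenv.status_set('blocked',
                           'waiting for YARN to configure {}'.format(mode))
    else:
        hookenv.status_set('active', 'ready ({})'.format(mode))
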
def configure_zookeepers(zk):
    zks = zk.zookeepers()
    if data_changed('available.zookeepers', zks):
        spark = Spark(get_dist_config())
        hookenv.status_set('maintenance', 'Updating Apache Spark HA')
        spark.stop()
        spark.configure_ha(zks)
        spark.configure()
        spark.start()
        set_state('zookeeper.configured')
        report_status(spark)
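
These functions read like charms.reactive handlers whose @when/@when_not decorators were stripped during extraction. A hedged reconstruction of how the ZooKeeper pair might be wired; the state names are assumptions based on the set_state/remove_state calls in the bodies:

from charms.reactive import when, when_not

@when('spark.started', 'zookeeper.ready')
def configure_zookeepers(zk):
    ...  # body as above

@when('spark.started', 'zookeeper.configured')
@when_not('zookeeper.ready')
def disable_zookeepers():
    ...  # body as above
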
def install_spark():
    dist = get_dist_config()
    spark = Spark(dist)
    if spark.verify_resources():
        hookenv.status_set('maintenance', 'Installing Apache Spark')
        spark.install()
        spark.setup_spark_config()
        spark.install_demo()
        set_state('spark.installed')
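
This variant silently skips installation when verify_resources() fails, while the variant in Example #16 surfaces a ResourceError as a blocked status. verify_resources() itself is not shown; one plausible shape is a checksum gate over the fetched Spark tarball (the helper below is illustrative, not the charm's code):

import hashlib

def sha256_matches(path, expected_digest):
    # Stream the file so a large Spark tarball is never fully in memory.
    digest = hashlib.sha256()
    with open(path, 'rb') as f:
        for chunk in iter(lambda: f.read(1 << 20), b''):
            digest.update(chunk)
    return digest.hexdigest() == expected_digest
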
def switch_to_yarn(hadoop):
    '''
    Handle the case where spark_execution_mode was switched to YARN
    before the Hadoop plugin relation was connected.
    '''
    mode = hookenv.config()['spark_execution_mode']
    if mode.startswith('yarn'):
        spark = Spark(get_dist_config())
        hookenv.status_set('maintenance', 'Setting up Apache Spark for YARN')
        spark.stop()
        spark.configure_yarn_mode()
        set_state('yarn.configured')
        spark.configure()
        spark.start()
        report_status(spark)
def restart_services():
    dc = get_dist_config()
    spark = Spark(dc)
    peers = RelationBase.from_state('sparkpeers.joined')
    is_scaled = peers and len(peers.get_nodes()) > 0
    is_master = spark.is_master()
    # A unit takes the slave role when it is not the master, or when it is
    # the only node in the deployment (master and slave colocated).
    is_slave = not is_master or not is_scaled
    master_url = spark.get_master()
    master_ip = spark.get_master_ip()
    if data_changed('insightedge.master_url', master_url):
        stop_datagrid_services()
        start_datagrid_services(master_url, master_ip, is_master, is_slave)
    set_state('insightedge.ready')
    hookenv.status_set('active', 'ready')
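
restart_services, like several handlers above, guards reconfiguration behind data_changed, which compares a hash of the value against the last hash stored in the unit's key-value store. A simplified reimplementation to show the semantics (the real helper lives in charms.reactive.helpers and may differ in key naming):

import hashlib
import json

from charmhelpers.core import unitdata

def data_changed_sketch(data_id, data):
    # Returns True (and records the new hash) when `data` differs from
    # what was last recorded under `data_id`.
    kv = unitdata.kv()
    new_hash = hashlib.md5(
        json.dumps(data, sort_keys=True).encode('utf8')).hexdigest()
    old_hash = kv.get('data.changed.%s' % data_id)
    kv.set('data.changed.%s' % data_id, new_hash)
    return old_hash != new_hash
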
Example #6
def upgrade_spark():
    if is_state('not.upgrading'):
        return (False, "Please enter maintenance mode before triggering the upgrade.")

    version = hookenv.config()['spark_version']
    hookenv.status_set('maintenance', 'Upgrading to {}'.format(version))
    hookenv.log("Upgrading to {}".format(version))
    try:
        spark = Spark(get_dist_config())
        spark.switch_version(version)
    except ResourceError:
        return (False, "Download failed")
    hookenv.status_set('maintenance', 'Upgrade complete. You can exit maintenance mode')
    return (True, "ok")
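
The (success, message) tuple suggests upgrade_spark is driven by a Juju action rather than a hook. A hedged sketch of the action entry point that would consume it (the action name and wiring are assumptions):

def upgrade_action():
    # Hypothetical action script: relay the outcome to the action caller.
    ok, message = upgrade_spark()
    if ok:
        hookenv.action_set({'outcome': message})
    else:
        hookenv.action_fail(message)
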
Example #7
def disable_yarn():
    hookenv.status_set('maintenance', 'Disconnecting Apache Spark from YARN')
    spark = Spark(get_dist_config())
    spark.stop()
    spark.disable_yarn_mode()
    spark.start()
    remove_state('yarn.configured')
    report_status(spark)
def start_spark():
    hookenv.status_set('maintenance', 'Setting up Apache Spark')
    spark = Spark(get_dist_config())
    spark.configure()
    spark.start()
    spark.open_ports()
    set_state('spark.started')
    report_status(spark)
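
open_ports() is not defined in these snippets; assuming stock Spark standalone ports, it plausibly wraps hookenv.open_port along these lines (the port list is an assumption):

def open_ports(self):
    # 7077: master RPC, 8080: master web UI, 18080: history server.
    for port in (7077, 8080, 18080):
        hookenv.open_port(port)
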
Example #10
def update_peers_config(nodes):
    nodes.sort()
    if data_changed('available.peers', nodes):
        spark = Spark(get_dist_config())
        # We need to reconfigure spark only if the master changes or if we
        # are in HA mode and a new potential master is added
        if spark.update_peers(nodes):
            hookenv.status_set('maintenance', 'Updating Apache Spark config')
            spark.stop()
            spark.configure()
            spark.start()
            report_status(spark)
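
The comment explains that a restart is only needed when the master changes, or when HA mode gains a new master candidate. A sketch of the decision update_peers might implement, assuming the lowest-sorted peer is treated as the master candidate:

from charmhelpers.core import unitdata

def update_peers(self, nodes):
    # Hedged sketch: persist the sorted peer list and report whether the
    # first entry (the presumed master candidate) has changed.
    kv = unitdata.kv()
    new_master = nodes[0] if nodes else None
    changed = kv.get('spark.master.candidate') != new_master
    kv.set('spark.master.candidate', new_master)
    kv.set('spark.peers', nodes)
    return changed
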
def configure_insightedge_spark():
    hookenv.status_set('maintenance', 'configuring insightedge')
    dc = get_dist_config()
    destination = dc.path('spark')
    spark = Spark(dc)
    with host.chdir(destination):
        # Source common-insightedge.sh and call its get_libs helper with
        # ',' as the separator, yielding one comma-delimited list of jars.
        script = destination / 'sbin' / 'common-insightedge.sh'
        insightedge_jars = subprocess.check_output(
            ['bash', '-c', '. {}; get_libs ,'.format(script)],
            env={'INSIGHTEDGE_HOME': destination},
        ).decode('utf8')
    spark.register_classpaths(insightedge_jars.split(','))
    set_state('insightedge-spark.configured')
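
register_classpaths is not shown; it presumably appends the collected InsightEdge jars to Spark's driver and executor classpaths, roughly as below (the spark-defaults.conf path is an assumption):

def register_classpaths(self, jars):
    # Hedged sketch: extend both classpath settings in spark-defaults.conf.
    classpath = ':'.join(jar.strip() for jar in jars if jar.strip())
    with open('/etc/spark/conf/spark-defaults.conf', 'a') as conf:
        conf.write('spark.driver.extraClassPath {}\n'.format(classpath))
        conf.write('spark.executor.extraClassPath {}\n'.format(classpath))
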
Example #16
def install_spark():
    dist = get_dist_config()
    spark = Spark(dist)
    hookenv.status_set('maintenance', 'Installing Apache Spark')
    try:
        spark.install()
    except ResourceError as e:
        hookenv.status_set('blocked', str(e))
        return
    spark.setup_spark_config()
    spark.install_demo()
    set_state('spark.installed')
    set_state('not.upgrading')
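
Note the contrast with the first install_spark above: a failed resource fetch now surfaces as a blocked status instead of a silent skip, and not.upgrading is primed so that upgrade_spark (Example #6) refuses to run until the handler in Example #19 lifts the state by enabling maintenance mode.
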
Example #18
def reconfigure_spark():
    mode = hookenv.config()['spark_execution_mode']
    hookenv.status_set('maintenance', 'Configuring Apache Spark')
    spark = Spark(get_dist_config())
    spark.stop()
    if is_state('hadoop.ready') and mode.startswith('yarn') and (not is_state('yarn.configured')):
        # was in a mode other than yarn, going to yarn
        hookenv.status_set('maintenance', 'Setting up Apache Spark for YARN')
        spark.configure_yarn_mode()
        set_state('yarn.configured')

    if is_state('hadoop.ready') and (not mode.startswith('yarn')) and is_state('yarn.configured'):
        # was in a yarn mode and going to another mode
        hookenv.status_set('maintenance', 'Disconnecting Apache Spark from YARN')
        spark.disable_yarn_mode()
        remove_state('yarn.configured')

    spark.configure()
    spark.start()
    report_status(spark)
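
reconfigure_spark reads hookenv.config(), so it is presumably the charm's config-changed handler; a hedged guess at the wiring, using the config.changed flag that newer charms.reactive releases set automatically:

from charms.reactive import when

@when('spark.started', 'config.changed')
def reconfigure_spark():
    ...  # body as above
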
Example #19
def reconfigure_spark():
    config = hookenv.config()
    maintenance = config['maintenance_mode']
    if maintenance:
        remove_state('not.upgrading')
        spark = Spark(get_dist_config())
        report_status(spark)
        spark.stop()
        current_version = spark.get_current_version()
        if config['upgrade_immediately'] and config['spark_version'] != current_version:
            upgrade_spark()
        return
    else:
        set_state('not.upgrading')

    mode = hookenv.config()['spark_execution_mode']
    hookenv.status_set('maintenance', 'Configuring Apache Spark')
    spark = Spark(get_dist_config())
    spark.stop()
    if is_state('hadoop.ready') and mode.startswith('yarn') and (not is_state('yarn.configured')):
        # was in a mode other than yarn, going to yarn
        hookenv.status_set('maintenance', 'Setting up Apache Spark for YARN')
        spark.configure_yarn_mode()
        set_state('yarn.configured')

    if is_state('hadoop.ready') and (not mode.startswith('yarn')) and is_state('yarn.configured'):
        # was in a yarn mode and going to another mode
        hookenv.status_set('maintenance', 'Disconnecting Apache Spark from YARN')
        spark.disable_yarn_mode()
        remove_state('yarn.configured')

    spark.configure()
    spark.start()
    report_status(spark)
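
This extended variant layers an upgrade state machine over the YARN logic of Example #18. The configuration options it reads could look like the following, expressed as the dict hookenv.config() would return (values are illustrative, not the charm's defaults):

example_config = {
    'spark_execution_mode': 'standalone',  # or 'yarn-client' / 'yarn-cluster'
    'maintenance_mode': False,    # True unblocks upgrade_spark (Example #6)
    'upgrade_immediately': True,  # upgrade as soon as maintenance begins
    'spark_version': '2.1.0',     # target for Spark.switch_version()
}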