Example 1
def send_principal_yarn_info(principal, resourcemanager):
    """Send YARN data when the resourcemanager becomes ready."""
    principal.set_installed(get_hadoop_version())
    principal.set_yarn_ready(
        resourcemanager.resourcemanagers(), resourcemanager.port(),
        resourcemanager.hs_http(), resourcemanager.hs_ipc())
    set_state('apache-bigtop-plugin.yarn.ready')
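For orientation, a handler like this normally lives in a charms.reactive layer and is registered with state decorators rather than called directly. The wiring below is a minimal sketch; the gate states on the decorators ('plugin.joined', 'resourcemanager.ready') are assumptions for illustration, not taken from the original layer.

# Sketch of the typical reactive wiring for the handler above.
# The decorator states are illustrative assumptions, not from the source charm.
from charms.reactive import when, when_not

@when('plugin.joined', 'resourcemanager.ready')
@when_not('apache-bigtop-plugin.yarn.ready')
def send_principal_yarn_info(principal, resourcemanager):
    ...  # body as shown in the example above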
def start_datanode(namenode):
    """Start the HDFS DataNode and open its ports once it is confirmed running."""
    hookenv.status_set('maintenance', 'starting datanode')
    # NB: service should be started by install, but we want to verify it is
    # running before we set the .started state and open ports. We always
    # restart here, which may seem heavy-handed. However, restart works
    # whether the service is currently started or stopped. It also ensures the
    # service is using the most current config.
    started = host.service_restart('hadoop-hdfs-datanode')
    if started:
        # Create a /user/ubuntu dir in HDFS (this is safe to run multiple times).
        bigtop = Bigtop()
        if not bigtop.check_hdfs_setup():
            try:
                utils.wait_for_hdfs(30)
                bigtop.setup_hdfs()
            except utils.TimeoutError:
                # HDFS is not yet available or is still in safe mode, so we can't
                # do the initial setup (create dirs); skip setting the .started
                # state below so that we try again on the next hook.
                hookenv.status_set('waiting', 'waiting on hdfs')
                return

        # HDFS is ready. Open ports and set .started, status, and app version
        for port in get_layer_opts().exposed_ports('datanode'):
            hookenv.open_port(port)
        set_state('apache-bigtop-datanode.started')
        hookenv.status_set('maintenance', 'datanode started')
        hookenv.application_version_set(get_hadoop_version())
    else:
        hookenv.log('DataNode failed to start')
        hookenv.status_set('blocked', 'datanode failed to start')
        remove_state('apache-bigtop-datanode.started')
        for port in get_layer_opts().exposed_ports('datanode'):
            hookenv.close_port(port)
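The try/except around utils.wait_for_hdfs(30) implies a helper that polls HDFS and raises a timeout error when it stays unavailable or in safe mode. A hypothetical stand-in for that helper is sketched below; the names mirror the example, but the implementation is purely illustrative and may differ from the charm's real utility.

# Hypothetical poll-with-timeout helper in the spirit of utils.wait_for_hdfs.
import subprocess
import time

class TimeoutError(Exception):
    """Mirrors utils.TimeoutError: raised when HDFS is not usable in time."""

def wait_for_hdfs(timeout):
    deadline = time.time() + timeout
    while time.time() < deadline:
        # 'hdfs dfsadmin -safemode get' reports 'Safe mode is OFF' once HDFS is usable.
        result = subprocess.run(['hdfs', 'dfsadmin', '-safemode', 'get'],
                                capture_output=True, text=True)
        if result.returncode == 0 and 'OFF' in result.stdout:
            return
        time.sleep(2)
    raise TimeoutError('HDFS not ready after {} seconds'.format(timeout))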
Example 4
def start_namenode():
    """Start the HDFS NameNode and open its ports."""
    hookenv.status_set('maintenance', 'starting namenode')
    # NB: service should be started by install, but this may be handy in case
    # we have something that removes the .started state in the future. Also
    # note we restart here in case we modify conf between install and now.
    host.service_restart('hadoop-hdfs-namenode')
    for port in get_layer_opts().exposed_ports('namenode'):
        hookenv.open_port(port)
    set_state('apache-bigtop-namenode.started')
    hookenv.application_version_set(get_hadoop_version())
    hookenv.status_set('maintenance', 'namenode started')
Example 6
def start_resourcemanager(namenode):
    """Start the YARN ResourceManager and MapReduce HistoryServer and open ports."""
    hookenv.status_set('maintenance', 'starting resourcemanager')
    # NB: service should be started by install, but this may be handy in case
    # we have something that removes the .started state in the future. Also
    # note we restart here in case we modify conf between install and now.
    host.service_restart('hadoop-yarn-resourcemanager')
    host.service_restart('hadoop-mapreduce-historyserver')
    for port in get_layer_opts().exposed_ports('resourcemanager'):
        hookenv.open_port(port)
    set_state('apache-bigtop-resourcemanager.started')
    hookenv.application_version_set(get_hadoop_version())
    hookenv.status_set('maintenance', 'resourcemanager started')
Example 8
def install_hadoop_client_hdfs(principal, namenode):
    """Install if the namenode has sent its FQDN.

    We only need the namenode FQDN to perform the plugin install, so poll for
    namenodes() data whenever we have a namenode relation. This allows us to
    install asap, even if 'namenode.ready' is not set yet.
    """
    if namenode.namenodes():
        hookenv.status_set('maintenance', 'installing plugin (hdfs)')
        nn_host = namenode.namenodes()[0]
        bigtop = Bigtop()
        hosts = {'namenode': nn_host}
        bigtop.render_site_yaml(hosts=hosts, roles='hadoop-client')
        bigtop.trigger_puppet()
        set_state('apache-bigtop-plugin.hdfs.installed')
        hookenv.application_version_set(get_hadoop_version())
        hookenv.status_set('maintenance', 'plugin (hdfs) installed')
    else:
        hookenv.status_set('waiting', 'waiting for namenode fqdn')
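Once 'apache-bigtop-plugin.hdfs.installed' is set, other handlers in the layer can key off it. The consumer below is a hypothetical example (not part of the original charm) showing how that state would typically be used.

# Hypothetical handler reacting to the state set by the install handler above.
from charms.reactive import when
from charmhelpers.core import hookenv

@when('apache-bigtop-plugin.hdfs.installed')
def report_plugin_ready():
    hookenv.status_set('active', 'hdfs plugin ready')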
def start_nodemanager(namenode, resourcemanager):
    """Start the YARN NodeManager and open its ports once it is confirmed running."""
    hookenv.status_set('maintenance', 'starting nodemanager')
    # NB: service should be started by install, but we want to verify it is
    # running before we set the .started state and open ports. We always
    # restart here, which may seem heavy-handed. However, restart works
    # whether the service is currently started or stopped. It also ensures the
    # service is using the most current config.
    started = host.service_restart('hadoop-yarn-nodemanager')
    if started:
        for port in get_layer_opts().exposed_ports('nodemanager'):
            hookenv.open_port(port)
        set_state('apache-bigtop-nodemanager.started')
        hookenv.status_set('maintenance', 'nodemanager started')
        hookenv.application_version_set(get_hadoop_version())
    else:
        hookenv.log('NodeManager failed to start')
        hookenv.status_set('blocked', 'nodemanager failed to start')
        remove_state('apache-bigtop-nodemanager.started')
        for port in get_layer_opts().exposed_ports('nodemanager'):
            hookenv.close_port(port)
Example 11
def start_namenode():
    """Start the HDFS NameNode and open its ports once it is confirmed running."""
    hookenv.status_set('maintenance', 'starting namenode')
    # NB: service should be started by install, but we want to verify it is
    # running before we set the .started state and open ports. We always
    # restart here, which may seem heavy-handed. However, restart works
    # whether the service is currently started or stopped. It also ensures the
    # service is using the most current config.
    started = host.service_restart('hadoop-hdfs-namenode')
    if started:
        for port in get_layer_opts().exposed_ports('namenode'):
            hookenv.open_port(port)
        set_state('apache-bigtop-namenode.started')
        hookenv.status_set('maintenance', 'namenode started')
        hookenv.application_version_set(get_hadoop_version())
    else:
        hookenv.log('NameNode failed to start')
        hookenv.status_set('blocked', 'namenode failed to start')
        remove_state('apache-bigtop-namenode.started')
        for port in get_layer_opts().exposed_ports('namenode'):
            hookenv.close_port(port)
Example 12
def start_resourcemanager(namenode):
    """Start the ResourceManager and HistoryServer; open ports once the RM is running."""
    hookenv.status_set('maintenance', 'starting resourcemanager')
    # NB: service should be started by install, but we want to verify it is
    # running before we set the .started state and open ports. We always
    # restart here, which may seem heavy-handed. However, restart works
    # whether the service is currently started or stopped. It also ensures the
    # service is using the most current config.
    rm_started = host.service_restart('hadoop-yarn-resourcemanager')
    if rm_started:
        for port in get_layer_opts().exposed_ports('resourcemanager'):
            hookenv.open_port(port)
        set_state('apache-bigtop-resourcemanager.started')
        hookenv.status_set('maintenance', 'resourcemanager started')
        hookenv.application_version_set(get_hadoop_version())
    else:
        hookenv.log('YARN ResourceManager failed to start')
        hookenv.status_set('blocked', 'resourcemanager failed to start')
        remove_state('apache-bigtop-resourcemanager.started')
        for port in get_layer_opts().exposed_ports('resourcemanager'):
            hookenv.close_port(port)

    hs_started = host.service_restart('hadoop-mapreduce-historyserver')
    if not hs_started:
        hookenv.log('YARN HistoryServer failed to start')
Example 13
def send_principal_hdfs_info(principal, namenode):
    """Send HDFS data when the namenode becomes ready."""
    principal.set_installed(get_hadoop_version())
    principal.set_hdfs_ready(namenode.namenodes(), namenode.port())
    set_state('apache-bigtop-plugin.hdfs.ready')