Example #1
0
 def configure_yarn(resourcemanager):
     """Apply the YARN base configuration from the resourcemanager relation.

     While no resourcemanager FQDN has arrived yet, dump the complete
     relation/conversation state to the unit log to aid debugging.
     """
     yarn = YARN(get_bigtop_base())
     utils.update_kv_hosts(resourcemanager.hosts_map())
     utils.manage_etc_hosts()
     if not resourcemanager.resourcemanagers():
         rel_name = resourcemanager.relation_name
         convs = {}
         for conv in resourcemanager.conversations():
             convs[conv.key] = dict({'relation_ids': conv.relation_ids},
                                    **conv.serialize(conv))
         rel_data = {}
         for rid in hookenv.relation_ids(rel_name):
             rel_data[rid] = {
                 unit: hookenv.relation_get(unit=unit, rid=rid)
                 for unit in hookenv.related_units(rid)
             }
         dump = yaml.dump({'relation_name': rel_name,
                           'conversations': convs,
                           'relation_data': rel_data},
                          default_flow_style=False)
         for entry in dump.splitlines():
             hookenv.log(entry)
     yarn.configure_yarn_base(resourcemanager.resourcemanagers()[0],
                              resourcemanager.port(),
                              resourcemanager.hs_http(),
                              resourcemanager.hs_ipc())
     set_state('bigtop.yarn.configured')
Example #2
0
 def configure_hdfs(namenode):
     """Apply the HDFS base configuration from the namenode relation.

     While no namenode FQDN has arrived yet, dump the complete
     relation/conversation state to the unit log to aid debugging.
     """
     hdfs = HDFS(get_bigtop_base())
     utils.update_kv_hosts(namenode.hosts_map())
     utils.manage_etc_hosts()
     if not namenode.namenodes():
         rel_name = namenode.relation_name
         convs = {
             conv.key: dict({'relation_ids': conv.relation_ids},
                            **conv.serialize(conv))
             for conv in namenode.conversations()
         }
         rel_data = {}
         for rid in hookenv.relation_ids(rel_name):
             rel_data[rid] = {
                 unit: hookenv.relation_get(unit=unit, rid=rid)
                 for unit in hookenv.related_units(rid)
             }
         dump = yaml.dump({'relation_name': rel_name,
                           'conversations': convs,
                           'relation_data': rel_data},
                          default_flow_style=False)
         for entry in dump.splitlines():
             hookenv.log(entry)
     hdfs.configure_hdfs_base(namenode.namenodes()[0], namenode.port())
     set_state('bigtop.hdfs.configured')
def send_info(datanode):
    """Publish host information to related DataNode units.

    Updates the local kv host map from the datanode relation, rewrites
    /etc/hosts, sends the resulting hosts map back over the relation,
    and marks this namenode ready.

    Removed: a large block of commented-out dead code (spec/port/ssh-key
    sends and slave registration) and the unused ``hadoop`` local that
    only the dead code referenced.
    """
    # Map every related datanode's ip -> hostname into the kv host map.
    utils.update_kv_hosts({node['ip']: node['host']
                           for node in datanode.nodes()})
    utils.manage_etc_hosts()

    # Echo the merged hosts map back so datanodes can resolve each other.
    datanode.send_hosts_map(utils.get_kv_hosts())

    set_state('namenode.ready')
    hookenv.status_set('active', 'ready')
 def configure_yarn(resourcemanager):
     """Configure the YARN base for this unit.

     If the resourcemanager has not yet published any FQDNs, log the
     full relation state (conversations and per-unit relation data)
     for troubleshooting before configuring.
     """
     bigtop = get_bigtop_base()
     yarn = YARN(bigtop)
     utils.update_kv_hosts(resourcemanager.hosts_map())
     utils.manage_etc_hosts()
     if not resourcemanager.resourcemanagers():
         relation = resourcemanager.relation_name
         snapshot = {
             'relation_name': relation,
             'conversations': {
                 c.key: dict({'relation_ids': c.relation_ids},
                             **c.serialize(c))
                 for c in resourcemanager.conversations()
             },
             'relation_data': {
                 r: {
                     u: hookenv.relation_get(unit=u, rid=r)
                     for u in hookenv.related_units(r)
                 }
                 for r in hookenv.relation_ids(relation)
             },
         }
         text = yaml.dump(snapshot, default_flow_style=False)
         for entry in text.splitlines():
             hookenv.log(entry)
     yarn.configure_yarn_base(
         resourcemanager.resourcemanagers()[0], resourcemanager.port(),
         resourcemanager.hs_http(), resourcemanager.hs_ipc())
     set_state('bigtop.yarn.configured')
def configure_namenode():
    """Open the NameNode service ports and mark the namenode started.

    Removed: an unused ``bigtop = get_bigtop_base()`` local that was
    assigned but never read.
    """
    # TODO: source these from layer options instead of hard-coding.
    # 8020 / 50070 are the conventional HDFS RPC and NameNode web ports.
    hookenv.open_port('8020')
    hookenv.open_port('50070')

    set_state('namenode.started')
def install_hadoop(namenode):
    '''Install only if the namenode has sent its FQDN.'''
    seeds = namenode.namenodes()
    if not seeds:
        # No FQDN published yet; report that we are blocked on the namenode.
        hookenv.status_set('waiting', 'waiting for namenode to become ready')
        return
    hookenv.status_set('maintenance', 'installing datanode')
    bigtop = get_bigtop_base()
    bigtop.install(NN=seeds[0])
    set_state('datanode.installed')
    hookenv.status_set('maintenance', 'datanode installed')
def install_hadoop(resourcemanager):
    '''Install only if the resourcemanager has sent its FQDN.

    Fix: the waiting status previously said "waiting for nodemanager",
    but this unit IS the nodemanager — it is the resourcemanager we are
    waiting on (matches the namenode/datanode sibling's wording).
    '''
    if resourcemanager.resourcemanagers():
        hookenv.status_set('maintenance', 'installing nodemanager')
        hostname = resourcemanager.resourcemanagers()[0]
        bigtop = get_bigtop_base()
        bigtop.install(NN=hostname)
        set_state('nodemanager.installed')
        hookenv.status_set('maintenance', 'nodemanager installed')
    else:
        hookenv.status_set('waiting',
                           'waiting for resourcemanager to become ready')
 def configure_hdfs(namenode):
     """Configure the HDFS base for this unit.

     If the namenode has not yet published any FQDNs, log the full
     relation state (conversations and per-unit relation data) for
     troubleshooting before configuring.
     """
     bigtop = get_bigtop_base()
     hdfs = HDFS(bigtop)
     utils.update_kv_hosts(namenode.hosts_map())
     utils.manage_etc_hosts()
     if not namenode.namenodes():
         relation = namenode.relation_name
         snapshot = {
             'relation_name': relation,
             'conversations': {
                 c.key: dict({'relation_ids': c.relation_ids},
                             **c.serialize(c))
                 for c in namenode.conversations()
             },
             'relation_data': {
                 r: {
                     u: hookenv.relation_get(unit=u, rid=r)
                     for u in hookenv.related_units(r)
                 }
                 for r in hookenv.relation_ids(relation)
             },
         }
         text = yaml.dump(snapshot, default_flow_style=False)
         for entry in text.splitlines():
             hookenv.log(entry)
     hdfs.configure_hdfs_base(namenode.namenodes()[0], namenode.port())
     set_state('bigtop.hdfs.configured')
Example #9
0
 def set_yarn_spec(resourcemanager):
     """Send this unit's Hadoop spec over the resourcemanager relation."""
     resourcemanager.set_local_spec(get_bigtop_base().spec())
Example #10
0
 def set_hdfs_spec(namenode):
     """Send this unit's Hadoop spec over the namenode relation."""
     namenode.set_local_spec(get_bigtop_base().spec())
Example #11
0
def install_hadoop():
    """Install the Bigtop Hadoop base and flag it as installed."""
    get_bigtop_base().install()
    set_state('bigtop.installed')
def fetch_resources():
    """Flag 'resources.available' once the Bigtop resources verify."""
    bigtop = get_bigtop_base()
    if not bigtop.verify_resources():
        return
    set_state('resources.available')
 def set_yarn_spec(resourcemanager):
     """Publish the local Hadoop spec to the resourcemanager."""
     spec = get_bigtop_base().spec()
     resourcemanager.set_local_spec(spec)
 def set_hdfs_spec(namenode):
     """Publish the local Hadoop spec to the namenode."""
     spec = get_bigtop_base().spec()
     namenode.set_local_spec(spec)
def install_hadoop():
    """Install the Bigtop Hadoop base, then record completion."""
    base = get_bigtop_base()
    base.install()
    set_state('bigtop.installed')
Example #16
0
def install_hadoop():
    """Install the Bigtop namenode, reporting progress via unit status."""
    hookenv.status_set('maintenance', 'installing namenode')
    get_bigtop_base().install()
    set_state('namenode.installed')
    hookenv.status_set('maintenance', 'namenode installed')
Example #17
0
def handle_legacy_installed_flag():
    """Set 'bigtop.installed' when a pre-existing install is detected."""
    if get_bigtop_base().is_installed():
        set_state('bigtop.installed')
Example #18
0
def fetch_resources():
    """Mark resources available once Bigtop verifies them."""
    if get_bigtop_base().verify_resources():
        set_state('resources.available')
def install_hadoop():
    """Install the Bigtop base for the namenode and flag it installed."""
    base = get_bigtop_base()
    base.install()
    set_state('namenode.installed')
def handle_legacy_installed_flag():
    """Translate a legacy completed install into 'bigtop.installed'."""
    bigtop = get_bigtop_base()
    if not bigtop.is_installed():
        return
    set_state('bigtop.installed')