Example #1
def launch_hdfs():
    """Format the NameNode and start the HDFS NameNode and DataNode daemons."""

    print('\n====Formatting Namenode====\n')
    print(
        se_docker.exec_command('masternode' + '.' + config.domain_name,
                               "su -l hdfs -c 'hdfs namenode -format -force'"))

    print('\n====Starting Namenode====\n')
    print(
        se_docker.exec_command(
            'masternode' + '.' + config.domain_name,
            "su -l hdfs -c '/usr/hdp/current/hadoop-client/sbin/hadoop-daemon.sh start namenode'"
        ))
    print('Wait for 5 seconds....')
    time.sleep(5)

    print('\n====Starting datanodes====\n')
    for i in se_docker.get_all_containers():
        if 'slavenode' in i.name:
            print(
                se_docker.exec_command(
                    i.name,
                    "su -l hdfs -c '/usr/hdp/current/hadoop-client/sbin/hadoop-daemon.sh start datanode'"
                ))
            print('Wait for 5 seconds....')
            time.sleep(5)
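
The se_docker module itself is not shown in these examples. As a reading aid, here is a minimal sketch of what an exec_command helper could look like, assuming se_docker wraps the official docker Python SDK; the function name and argument order are taken from the calls above, everything else is an assumption.

import docker

_client = docker.from_env()


def exec_command(container_name, command):
    """Run a command inside a running container and return its output."""
    # containers.get() looks the container up by name; exec_run() returns
    # an ExecResult namedtuple of (exit_code, output).
    container = _client.containers.get(container_name)
    exit_code, output = container.exec_run(command)
    return output.decode('utf-8', errors='replace')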
Example #2
def launch_hbase():
    """Start the HBase master, a region server and the Phoenix query server."""
    print('\n====Verify if Zookeeper containers are running====\n')
    num = 0
    for i in se_docker.get_all_containers():
        if 'zookeeper' in i.name:
            num = num + 1
            if 'running' in i.status:
                print('{} : {}'.format(i.name, i.status))
            else:
                print('Error: Container "{}" is in status "{}"\n'.format(
                    i.name, i.status))
                print('Exiting script\n')
                sys.exit(1)
    if num == 0:
        print('No container found starting with name "zookeeper"')
        print('Exiting script\n')
        sys.exit(1)

    print('\n====Launch hbase master====\n')
    print(
        se_docker.exec_command(
            'masternode' + '.' + config.domain_name,
            "su -l hbase -c '/usr/hdp/current/hbase-client/bin/hbase-daemon.sh start master'"
        ))
    print('Wait for 10 seconds....')
    time.sleep(10)

    print('\n====Launch hbase region====\n')
    #se_docker.launch_containers('se_hbase','bash','hbaseregion' + '.' + config.domain_name,'hbaseregion' + '.' + config.domain_name,{os.path.join(config.dest_dir,'hbasenode'):{'bind':'/etc/hbase/conf','mode':'ro'}},'seplatform',True,True,port_map={'16020/tcp': 16020,'16030/tcp': 16030})
    print(
        se_docker.exec_command(
            'masternode' + '.' + config.domain_name,
            "su -l hbase -c '/usr/hdp/current/hbase-client/bin/hbase-daemon.sh start regionserver'"
        ))

    print('\n====Launch Phoenix query server====\n')
    #se_docker.launch_containers('se_hbase','bash','hbaseregion' + '.' + config.domain_name,'hbaseregion' + '.' + config.domain_name,{os.path.join(config.dest_dir,'hbasenode'):{'bind':'/etc/hbase/conf','mode':'ro'}},'seplatform',True,True,port_map={'16020/tcp': 16020,'16030/tcp': 16030})
    #print(se_docker.exec_command('hbasenode' + '.' + config.domain_name,"echo \"create 'hbaseTestTable','status'\" | hbase shell"))
    print(
        se_docker.exec_command(
            'masternode' + '.' + config.domain_name,
            "su -l hbase -c '/usr/hdp/current/phoenix-server/bin/queryserver.py start'"
        ))
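
launch_hbase, launch_kafka, launch_containers and launch_zookeeper all repeat the same "verify containers are running" loop. A hedged sketch of how that loop could be factored into one helper (the helper name is hypothetical; it reuses se_docker.get_all_containers() exactly as the examples do):

import sys


def verify_containers_running(name_substring):
    """Exit the script unless at least one matching container is running."""
    found = 0
    for container in se_docker.get_all_containers():
        if name_substring in container.name:
            found += 1
            if 'running' in container.status:
                print('{} : {}'.format(container.name, container.status))
            else:
                print('Error: Container "{}" is in status "{}"\n'.format(
                    container.name, container.status))
                print('Exiting script\n')
                sys.exit(1)
    if found == 0:
        print('No container found with name containing "{}"'.format(
            name_substring))
        print('Exiting script\n')
        sys.exit(1)

For example, the check at the top of launch_hbase would become verify_containers_running('zookeeper').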
Example #3
def launch_kafka():
    """Set up and configure Kafka nodes, launch broker containers and create topics."""

    print('\n====Running kafka_setup module====\n')
    setup_kafka_dirs()
    time.sleep(3)

    print('\n====Running kafka_config module====\n')
    for i in range(0, config.kafka_nodes):
        print("Updating configs for node 'kafkanode{}'\n".format(i))
        config_kafka(i)
    time.sleep(3)

    print('\n====Creating SE_Platform Network if not already created====\n')
    hadoop_net = config.hadoop_network_range + '/24'
    lst = config.hadoop_network_range.split('.')
    lst[3] = '1'
    hadoop_gateway = '.'.join(lst)
    se_docker.create_network('hadoopnet', hadoop_net, hadoop_gateway)

    print('\n====Launching containers and attaching bind mounts====\n')
    for i in range(0, config.kafka_nodes):
        se_docker.launch_containers(
            'kmahesh2611/kafka',
            '/kafka_2.11-2.1.0/bin/kafka-server-start.sh /kafka_2.11-2.1.0/config/server.properties',
            'kafkanode' + str(i) + '.' + config.domain_name,
            'kafkanode' + str(i) + '.' + config.domain_name, {
                os.path.join(config.dest_dir, 'kafkanode' + str(i)): {
                    'bind': '/kafka_2.11-2.1.0/config',
                    'mode': 'rw'
                }
            }, 'hadoopnet', True, True)

    print('Wait for 10 seconds....')
    time.sleep(10)

    print('\n====Verify if containers are running====\n')
    num = 0
    for i in se_docker.get_all_containers():
        if 'kafkanode' in i.name:
            num = num + 1
            if 'running' in i.status:
                print('{} : {}'.format(i.name, i.status))
            else:
                print('Error: Container "{}" is in status "{}"\n'.format(
                    i.name, i.status))
                print('Exiting script\n')
                sys.exit(1)
    if num == 0:
        print('No container found starting with name "kafkanode"')
        print('Exiting script\n')
        sys.exit(1)
    ### Creating Kafka topics ###
    print('\n====Creating Kafka Topics====\n')
    for i in config.kafka_topics:
        print(
            se_docker.exec_command(
                'kafkanode0' + '.' + config.domain_name,
                "/kafka_2.11-2.1.0/bin/kafka-topics.sh --create --zookeeper {} --replication-factor {} --partitions  {} --topic {}"
                .format(zookeeper_nodes, str(config.kafka_nodes),
                        str(config.kafka_default_partitions), i)))

    print("Created topics: {}\n".format(
        [topics for topics in config.kafka_topics]))
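
Both launch_kafka and launch_zookeeper derive a /24 subnet and a .1 gateway from config.hadoop_network_range (e.g. '172.20.0.0' becomes '172.20.0.0/24' with gateway '172.20.0.1') before calling se_docker.create_network. Only the call signature appears in these examples; a plausible implementation on top of the docker SDK's IPAM types might look like this (a sketch, not the original module):

import docker


def create_network(name, subnet, gateway):
    """Create a user-defined bridge network unless one with this name exists."""
    client = docker.from_env()
    if client.networks.list(names=[name]):
        print('Network "{}" already exists'.format(name))
        return
    ipam_pool = docker.types.IPAMPool(subnet=subnet, gateway=gateway)
    ipam_config = docker.types.IPAMConfig(pool_configs=[ipam_pool])
    client.networks.create(name, driver='bridge', ipam=ipam_config)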
Example #4
def del_kafka_containers():
    """Force-remove every Kafka broker container."""
    print('\n====Stopping and deleting Containers for kafka====\n')
    for i in se_docker.get_all_containers():
        if 'kafkanode' in i.name:
            print('Stopping and deleting Container: {}\n'.format(i.name))
            i.remove(force=True)
Example #5
def del_hadoop_containers():
    """Force-remove the masternode, slavenode and clientnode containers."""
    print('\n====Stopping and deleting Containers for hdfs====\n')
    for i in se_docker.get_all_containers():
        if 'masternode' in i.name or 'slavenode' in i.name or 'clientnode' in i.name:
            print('Stopping and deleting Container: {}\n'.format(i.name))
            i.remove(force=True)
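
Examples #4, #5 and #8 differ only in the name substrings they match. A hedged sketch of a single teardown helper covering all three (the helper name is not part of the original module):

def del_containers_by_name(*substrings):
    """Force-remove every container whose name contains one of the substrings."""
    for container in se_docker.get_all_containers():
        if any(s in container.name for s in substrings):
            print('Stopping and deleting Container: {}\n'.format(container.name))
            container.remove(force=True)


# Usage matching Example #5:
# del_containers_by_name('masternode', 'slavenode', 'clientnode')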
Example #6
def launch_containers():
    """Launch the masternode, slavenode and clientnode containers with their bind mounts."""
    print(
        '\n====Launching masternode container and attaching bind mounts====\n')
    se_docker.launch_containers(
        'kmahesh2611/hdp2.6.5',
        'bash',
        'masternode' + '.' + config.domain_name,
        'masternode' + '.' + config.domain_name, {
            os.path.join(config.dest_dir, 'hadoopnode'): {
                'bind': '/etc/hadoop/conf',
                'mode': 'rw'
            },
            os.path.join(config.dest_dir, 'hbasenode'): {
                'bind': '/etc/hbase/conf',
                'mode': 'rw'
            },
            os.path.join(config.dest_dir, 'sparknode'): {
                'bind': '/etc/spark2/conf',
                'mode': 'rw'
            }
        },
        'hadoopnet',
        True,
        True,
        port_map={
            '50070/tcp': 50070,
            '8088/tcp': 8088,
            '8765/tcp': 8765,
            '18080/tcp': 18080,
            '16010/tcp': 16010
        })
    print('Wait for 5 seconds....')
    time.sleep(5)

    # create_proxy_dirs()
    # proxy_config()
    # create_log_dirs()
    print(
        '\n====Launching slavenode container and attaching bind mounts====\n')
    for i in range(0, config.slavenodes):
        #se_docker.launch_containers('se_hadoop',"bash",'slavenode' + str(i) + '.' + config.domain_name,'slavenode' + str(i) + '.' + config.domain_name,{os.path.join(config.proxy_dir,'slavenode'):{'bind':'/opt/se_proxy','mode':'ro'},os.path.join(config.dest_dir,'hadoopnode'):{'bind':'/etc/hadoop/conf','mode':'rw'},os.path.join(config.dest_dir,'hbasenode'):{'bind':'/etc/hbase/conf','mode':'rw'},os.path.join(config.dest_dir,'sparknode'):{'bind':'/etc/spark2/conf','mode':'rw'}},'seplatform',True,True)
        se_docker.launch_containers(
            'kmahesh2611/hdp2.6.5', "bash",
            'slavenode' + str(i) + '.' + config.domain_name,
            'slavenode' + str(i) + '.' + config.domain_name, {
                os.path.join(config.dest_dir, 'hadoopnode'): {
                    'bind': '/etc/hadoop/conf',
                    'mode': 'rw'
                },
                os.path.join(config.dest_dir, 'hbasenode'): {
                    'bind': '/etc/hbase/conf',
                    'mode': 'rw'
                },
                os.path.join(config.dest_dir, 'sparknode'): {
                    'bind': '/etc/spark2/conf',
                    'mode': 'rw'
                }
            }, 'hadoopnet', True, True)
        time.sleep(3)
        print(
            se_docker.exec_command(
                'slavenode' + str(i) + '.' + config.domain_name,
                "chmod +x /etc/hadoop/conf/health_check"))

    print('\n====Launching client container and attaching bind mounts====\n')
    se_docker.launch_containers(
        'kmahesh2611/hadoopclient', 'bash',
        'clientnode' + '.' + config.domain_name,
        'clientnode' + '.' + config.domain_name, {
            os.path.join(config.dest_dir, 'hadoopnode'): {
                'bind': '/etc/hadoop/conf',
                'mode': 'rw'
            },
            os.path.join(config.dest_dir, 'hbasenode'): {
                'bind': '/etc/hbase/conf',
                'mode': 'rw'
            },
            os.path.join(config.dest_dir, 'sparknode'): {
                'bind': '/etc/spark2/conf',
                'mode': 'rw'
            },
            os.path.join(config.dest_dir, 'sparkclient'): {
                'bind': '/opt/spark',
                'mode': 'rw'
            },
            os.path.join(config.dest_dir, 'sparkwatcher'): {
                'bind': '/nfs',
                'mode': 'rw'
            }
        }, 'hadoopnet', True, True)
    print(
        se_docker.exec_command('clientnode' + '.' + config.domain_name,
                               "setfacl -m u:spark:rwX /nfs"))
    print(
        se_docker.exec_command('clientnode' + '.' + config.domain_name,
                               "setfacl -d -m u:spark:rwX /nfs"))
    print(
        se_docker.exec_command('clientnode' + '.' + config.domain_name,
                               "setfacl -m u:spark:rwX /opt/spark"))
    print(
        se_docker.exec_command('clientnode' + '.' + config.domain_name,
                               "setfacl -d -m u:spark:rwX /opt/spark"))
    print('Wait for 5 seconds....')
    time.sleep(5)

    print('\n====Verify if container running====\n')
    num = 0
    for i in se_docker.get_all_containers():
        if 'masternode' in i.name or 'slavenode' in i.name:
            num = num + 1
            if 'running' in i.status:
                print('{} : {}'.format(i.name, i.status))
            else:
                print('Error: Container "{}" is in status "{}"\n'.format(
                    i.name, i.status))
                print('Exiting script\n')
                sys.exit(1)
    if num == 0:
        print(
            'No container found starting with name "masternode" or "slavenode"'
        )
        print('Exiting script\n')
        sys.exit(1)
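
se_docker.launch_containers is called throughout with the same positional pattern: image, command, container name, hostname, bind mounts, network, detach, tty, and an optional port_map. A sketch of how such a wrapper could map onto the docker SDK's containers.run (the parameter order is inferred from the calls above and is an assumption):

import docker


def launch_containers(image, command, name, hostname, volumes, network,
                      detach, tty, port_map=None):
    """Start one container with the given bind mounts, network and port mappings."""
    client = docker.from_env()
    return client.containers.run(
        image,
        command,
        name=name,
        hostname=hostname,
        volumes=volumes,       # {host_path: {'bind': container_path, 'mode': 'rw'}}
        network=network,
        detach=detach,
        tty=tty,
        ports=port_map or {})  # {'50070/tcp': 50070, ...}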
Example #7
def launch_zookeeper():
    """Configure ZooKeeper, launch the ensemble containers and start the service on each node."""
    print('\n====Running zookeeper setup module====\n')
    setup_zookeeper_dirs()
    config_zookeeper()

    print('\n====Creating SE_Platform Network if not already created====\n')
    hadoop_net = config.hadoop_network_range + '/24'
    lst = config.hadoop_network_range.split('.')
    lst[3] = '1'
    hadoop_gateway = '.'.join(lst)
    se_docker.create_network('hadoopnet', hadoop_net, hadoop_gateway)

    print(
        '\n====Launching zookeeper containers and attaching bind mounts====\n')
    for i in range(0, config.zookeeper_nodes):
        port_num = 2181 + i
        print('Launching zookeeper node {}\n'.format(str(i)))
        se_docker.launch_containers(
            'kmahesh2611/zookeeper',
            'bash',
            'zookeepernode' + str(i) + '.' + config.domain_name,
            'zookeepernode' + str(i) + '.' + config.domain_name, {
                os.path.join(config.dest_dir, 'zookeepernode'): {
                    'bind': '/etc/zookeeper/conf',
                    'mode': 'ro'
                }
            },
            'hadoopnet',
            True,
            True,
            port_map={'2181/tcp': port_num})
        print('Creating myid file for the zookeeper node {}\n'.format(str(i)))
        print(
            se_docker.exec_command(
                'zookeepernode' + str(i) + '.' + config.domain_name,
                "su -l zookeeper -c 'echo {} > /var/lib/zookeeper/myid'".
                format(str(i + 1))))
        print('Starting Zookeeper service on zookeeper node {}\n'.format(
            str(i)))
        print(
            se_docker.exec_command(
                'zookeepernode' + str(i) + '.' + config.domain_name,
                "su -l zookeeper -c '/usr/hdp/current/zookeeper-client/bin/zkServer.sh start'"
            ))
    print('Wait for 5 seconds....')
    time.sleep(5)

    print('\n====Verify if containers are running====\n')
    num = 0
    for i in se_docker.get_all_containers():
        if 'zookeepernode' in i.name:
            num = num + 1
            if 'running' in i.status:
                print('{} : {}'.format(i.name, i.status))
            else:
                print('Error: Container "{}" is in status "{}"\n'.format(
                    i.name, i.status))
                print('Exiting script\n')
                sys.exit(1)
    if num == 0:
        print('No container found starting with name "zookeepernode"')
        print('Exiting script\n')
        sys.exit(1)
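
Taken together, the launch functions suggest a bring-up order: network and ZooKeeper first, then the Hadoop containers and HDFS daemons, then HBase and finally Kafka. A sketch of a driver stringing them together in that order (the sequence is inferred from the checks inside each function, e.g. launch_hbase exits unless ZooKeeper containers are running; it is not part of the original project):

def launch_platform():
    launch_zookeeper()     # Example #7: creates the network and the ZooKeeper ensemble
    launch_containers()    # Example #6: masternode, slavenode and clientnode containers
    launch_hdfs()          # Example #1: formats the NameNode and starts the HDFS daemons
    launch_hbase()         # Example #2: requires the ZooKeeper containers to be running
    launch_kafka()         # Example #3: Kafka brokers and topic creation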
Example #8
def del_zookeeper_containers():
    """Force-remove every ZooKeeper container."""
    print('\n====Stopping and deleting Containers for zookeeper====\n')
    for i in se_docker.get_all_containers():
        if 'zookeepernode' in i.name:
            print('Stopping and deleting Container: {}\n'.format(i.name))
            i.remove(force=True)
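
For completeness, the remaining se_docker call used by the teardown examples, get_all_containers, maps directly onto the docker SDK if that is indeed what the module wraps (a sketch under that assumption; Container objects returned by containers.list() expose the .name, .status and .remove() used above):

import docker


def get_all_containers():
    """Return every container on the host, running or stopped."""
    return docker.from_env().containers.list(all=True)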