Ejemplo n.º 1
0
def list_clusters(env_name=None, dep_name=None):
    """Return a flat list of clusters across one or all deployments.

    Args:
        env_name: Environment name; defaults to horton.cdcred.name.
        dep_name: Deployment name; when omitted, every deployment in the
            environment (from list_deployments) is queried.

    Returns:
        list: All clusters found, concatenated across the queried deployments.
    """
    # Using default if available
    env_name = env_name if env_name else horton.cdcred.name
    dep_names = [dep_name] if dep_name else list_deployments(env_name)
    clusters = []
    for d in dep_names:
        # Each API call yields an iterable of clusters; collect them flat
        # rather than abusing a list comprehension for its side effects.
        clusters.extend(
            cd.ClustersApi(horton.cad).list(environment=env_name, deployment=d)
        )
    return clusters
Ejemplo n.º 2
0
def get_cluster_status(clus_name, dep_name=None, env_name=None):
    """Fetch the status of a cluster, or None if it does not exist.

    Args:
        clus_name: Cluster name to query.
        dep_name: Deployment name; defaults to the cluster name.
        env_name: Environment name; defaults to horton.cdcred.name.

    Returns:
        The API status object, or None when the cluster is not found (404).

    Raises:
        ApiException: For any API error other than 404.
    """
    env_name = env_name if env_name else horton.cdcred.name
    dep_name = dep_name if dep_name else clus_name
    try:
        return cd.ClustersApi(horton.cad).get_status(environment=env_name,
                                                     deployment=dep_name,
                                                     cluster=clus_name)
    except ApiException as e:
        if e.status == 404:
            # Log the cluster name, not the deployment name — the message
            # says "Cluster", and the sibling get_cluster() does the same.
            log.error("Cluster %s not found", clus_name)
            return None
        else:
            raise e
Ejemplo n.º 3
0
def get_cluster(clus_name, dep_name=None, env_name=None):
    """Fetch the redacted details of a cluster.

    Args:
        clus_name: Cluster name to look up.
        dep_name: Deployment name; defaults to the cluster name.
        env_name: Environment name; defaults to horton.cdcred.name.

    Returns:
        The redacted cluster object, or an empty list on 404.

    Raises:
        ApiException: For any API error other than 404.
    """
    if not env_name:
        env_name = horton.cdcred.name
    if not dep_name:
        dep_name = clus_name
    log.info("Attempting to get Cluster %s", clus_name)
    try:
        api = cd.ClustersApi(horton.cad)
        return api.get_redacted(environment=env_name,
                                deployment=dep_name,
                                cluster=clus_name)
    except ApiException as e:
        if e.status != 404:
            raise e
        log.error("Cluster %s not found, error: %s", clus_name, e.body)
        return []
Ejemplo n.º 4
0
def create_cluster(cluster_def,
                   dep_name,
                   workers=3,
                   env_name=None,
                   scripts=None):
    """Create a cluster in the Director deployment from a definition dict.

    Args:
        cluster_def: Dict describing the cluster. Expects keys 'name',
            'services', 'products' (must include 'CDH'); optionally
            'parcels', 'servicesconfigs', and 'post_create_scripts'.
        dep_name: Deployment to create the cluster under.
        workers: Number of worker virtual instances (default 3).
        env_name: Environment name; defaults to horton.cdcred.name.
        scripts: Mapping of script name -> script body, looked up for each
            entry in cluster_def['post_create_scripts'].

    Raises:
        ValueError: If the CDH major version is not 5 or 6, or the cluster
            already exists (API 409).
        ApiException: For any other API error.
    """
    env_name = env_name if env_name else horton.cdcred.name
    cdh_ver = str(cluster_def['products']['CDH'])
    services = cluster_def['services']
    cluster_name = cluster_def['name']
    products = cluster_def['products']
    # .get() covers both "key absent" and "key present but None"
    if cluster_def.get('post_create_scripts') is not None:
        log.info("Including post_create_scripts in cluster of %s",
                 cluster_def['post_create_scripts'])
        post_create_scripts = []
        for script_name in cluster_def['post_create_scripts']:
            log.info("Adding script %s like [%s]", script_name,
                     scripts[script_name][:50])
            post_create_scripts.append(cd.Script(content=scripts[script_name]))
    else:
        post_create_scripts = None
    # Parcel repository URL layout differs between CDH5 and CDH6
    if cdh_ver[0] == '5':
        load_parcels = [
            'https://archive.cloudera.com/cdh5/parcels/' + cdh_ver + '/'
        ]
    elif cdh_ver[0] == '6':
        load_parcels = [
            'https://archive.cloudera.com/cdh6/' + cdh_ver + '/parcels/'
        ]
    else:
        raise ValueError("Only CDH versions 5 or 6 supported")
    if 'parcels' in cluster_def:
        load_parcels += cluster_def['parcels']
    # Default Role and Service configs
    services_configs = {}
    master_setups = {}
    master_configs = {}
    worker_setups = {}
    worker_configs = {}
    if 'HDFS' in services:
        master_setups['HDFS'] = ['NAMENODE', 'SECONDARYNAMENODE']
        worker_setups['HDFS'] = ['DATANODE']
    if 'YARN' in services:
        master_setups['YARN'] = ['RESOURCEMANAGER', 'JOBHISTORY']
        worker_setups['YARN'] = ['NODEMANAGER']
    if 'ZOOKEEPER' in services:
        master_setups['ZOOKEEPER'] = ['SERVER']
    if 'HBASE' in services:
        master_setups['HBASE'] = ['MASTER']
        worker_setups['HBASE'] = ['REGIONSERVER']
    if 'HIVE' in services:
        master_setups['HIVE'] = ['HIVESERVER2', 'HIVEMETASTORE']
    if 'HUE' in services:
        master_setups['HUE'] = ['HUE_SERVER']
    if 'KUDU' in services:
        master_setups['KUDU'] = ['KUDU_MASTER']
        worker_setups['KUDU'] = ['KUDU_TSERVER']
        master_configs['KUDU'] = {
            'KUDU_MASTER': {
                'fs_wal_dir': "/data0/kudu/masterwal",
                'fs_data_dirs': "/data1/kudu/master"
            }
        }
        worker_configs['KUDU'] = {
            'KUDU_TSERVER': {
                'fs_wal_dir': "/data0/kudu/tabletwal",
                'fs_data_dirs': "/data1/kudu/tablet"
            }
        }
    if 'IMPALA' in services:
        master_setups['IMPALA'] = ['CATALOGSERVER', 'STATESTORE']
        worker_setups['IMPALA'] = ['IMPALAD']
    if 'NIFI' in services:
        worker_setups['NIFI'] = ['NIFI_NODE']
    if 'NIFIREGISTRY' in services:
        master_setups['NIFIREGISTRY'] = ['NIFI_REGISTRY_SERVER']
    if 'NIFITOOLKITCA' in services:
        master_setups['NIFITOOLKITCA'] = ['NIFI_TOOLKIT_SERVER']
        services_configs['NIFITOOLKITCA'] = {
            'nifi.toolkit.tls.ca.server.token':
            security.get_secret('MASTERKEY')
        }
    if 'KAFKA' in services:
        worker_setups['KAFKA'] = ['KAFKA_BROKER']
        services_configs['KAFKA'] = {'producer.metrics.enable': True}
    if 'SCHEMAREGISTRY' in services:
        master_setups['SCHEMAREGISTRY'] = ['SCHEMA_REGISTRY_SERVER']
    # Handle Services Configs overrides
    if 'servicesconfigs' in cluster_def:
        overrides = cluster_def['servicesconfigs']
        # Accept either a dict or an iterable of (key, value) pairs.
        # Iterating a bare dict with `for k, v in d` only yields keys and
        # would raise a ValueError at runtime, so unwrap .items() here.
        pairs = overrides.items() if isinstance(overrides, dict) else overrides
        for k, v in pairs:
            services_configs[k] = v
    # Handle virtual instance generation
    master_vi = [
        create_virtual_instance(tem_name=dep_name + '-' + cluster_name +
                                '-master',
                                scripts=[
                                    '''sudo -i
            yum install mysql mariadb-server epel-release -y  # MariaDB
            yum -y install npm gcc-c++ make  # SMM-UI
            npm install forever -g  # SMM-UI
            systemctl enable mariadb
            service mariadb start
            mysql --execute="CREATE DATABASE registry DEFAULT CHARACTER SET utf8"
            mysql --execute="CREATE USER 'registry'@'localhost' IDENTIFIED BY 'registry'"
            mysql --execute="GRANT ALL PRIVILEGES ON registry.* TO 'registry'@'localhost' identified by 'registry'"
            mysql --execute="GRANT ALL PRIVILEGES ON registry.* TO 'registry'@'localhost' WITH GRANT OPTION"
            mysql --execute="CREATE DATABASE streamsmsgmgr DEFAULT CHARACTER SET utf8"
            mysql --execute="CREATE USER 'streamsmsgmgr'@'localhost' IDENTIFIED BY 'streamsmsgmgr'"
            mysql --execute="GRANT ALL PRIVILEGES ON streamsmsgmgr.* TO 'streamsmsgmgr'@'%' identified by 'streamsmsgmgr'"
            mysql --execute="GRANT ALL PRIVILEGES ON streamsmsgmgr.* TO 'streamsmsgmgr'@'%' WITH GRANT OPTION"
            mysql --execute="GRANT ALL PRIVILEGES ON streamsmsgmgr.* TO 'streamsmsgmgr'@'localhost' identified by 'streamsmsgmgr'"
            mysql --execute="GRANT ALL PRIVILEGES ON streamsmsgmgr.* TO 'streamsmsgmgr'@'localhost' WITH GRANT OPTION"
            mysql --execute="FLUSH PRIVILEGES"
            mysql --execute="COMMIT"'''
                                ])
    ]
    worker_vi = [
        create_virtual_instance(tem_name=dep_name + '-' + cluster_name +
                                '-worker') for _ in range(0, workers)
    ]
    try:
        cd.ClustersApi(horton.cad).create(
            environment=env_name,
            deployment=dep_name,
            cluster_template=cd.ClusterTemplate(
                name=cluster_name,
                product_versions=products,
                parcel_repositories=load_parcels,
                services=services,
                services_configs=services_configs,
                virtual_instance_groups={
                    'masters':
                    cd.VirtualInstanceGroup(
                        name='masters',
                        min_count=1,
                        service_type_to_role_types=master_setups,
                        role_types_configs=master_configs,
                        virtual_instances=master_vi),
                    'workers':
                    cd.VirtualInstanceGroup(
                        name='workers',
                        min_count=workers,
                        service_type_to_role_types=worker_setups,
                        role_types_configs=worker_configs,
                        virtual_instances=worker_vi)
                },
                post_create_scripts=post_create_scripts))
    except ApiException as e:
        if e.status == 409:
            log.error("Cluster %s already exists", cluster_name)
            # Format eagerly: ValueError does not do %-substitution on args
            raise ValueError("Cluster %s already exists" % cluster_name)
        else:
            raise e
Ejemplo n.º 5
0
def create_cluster(cdh_ver,
                   workers=3,
                   services=None,
                   env_name=None,
                   dep_name=None,
                   clus_name=None):
    """Create a CDH cluster, deriving names and role layout from arguments.

    Args:
        cdh_ver: CDH version string; the major version must be '5' or '6'.
        workers: Number of worker virtual instances (default 3).
        services: Service names to enable; defaults to ['HDFS', 'YARN'].
        env_name: Environment name; defaults to horton.cadcred.name.
        dep_name: Deployment name; defaults to '<env>-<cdh_ver with dashes>'.
        clus_name: Cluster name; defaults to
            '<credential name>-<cdh_ver with dashes>'.

    Raises:
        ValueError: If the CDH major version is not 5 or 6, or the cluster
            already exists (API 409).
        ApiException: For any other API error.
    """
    env_name = env_name if env_name else horton.cadcred.name
    dep_name = dep_name if dep_name else env_name + '-' + cdh_ver.replace(
        '.', '-')
    services = services if services else ['HDFS', 'YARN']
    # Map each requested service to its master/worker role types
    master_setups = {}
    master_configs = {}
    worker_setups = {}
    worker_configs = {}
    if 'HDFS' in services:
        master_setups['HDFS'] = ['NAMENODE', 'SECONDARYNAMENODE']
        worker_setups['HDFS'] = ['DATANODE']
    if 'YARN' in services:
        master_setups['YARN'] = ['RESOURCEMANAGER', 'JOBHISTORY']
        worker_setups['YARN'] = ['NODEMANAGER']
    if 'ZOOKEEPER' in services:
        master_setups['ZOOKEEPER'] = ['SERVER']
    if 'HBASE' in services:
        master_setups['HBASE'] = ['MASTER']
        worker_setups['HBASE'] = ['REGIONSERVER']
    if 'HIVE' in services:
        master_setups['HIVE'] = ['HIVESERVER2', 'HIVEMETASTORE']
    if 'HUE' in services:
        master_setups['HUE'] = ['HUE_SERVER']
    if 'KUDU' in services:
        master_setups['KUDU'] = ['KUDU_MASTER']
        worker_setups['KUDU'] = ['KUDU_TSERVER']
        master_configs['KUDU'] = {
            'KUDU_MASTER': {
                'fs_wal_dir': "/data0/kudu/masterwal",
                'fs_data_dirs': "/data1/kudu/master"
            }
        }
        worker_configs['KUDU'] = {
            'KUDU_TSERVER': {
                'fs_wal_dir': "/data0/kudu/tabletwal",
                'fs_data_dirs': "/data1/kudu/tablet"
            }
        }
    if 'IMPALA' in services:
        master_setups['IMPALA'] = ['CATALOGSERVER', 'STATESTORE']
        worker_setups['IMPALA'] = ['IMPALAD']
    clus_name = clus_name if clus_name else \
        horton.cadcred.name + '-' + str(cdh_ver).replace('.', '-')
    # Parcel repository URL layout differs between CDH5 and CDH6
    if cdh_ver[0] == '5':
        parcels = [
            'https://archive.cloudera.com/cdh5/parcels/' + cdh_ver + '/'
        ]
    elif cdh_ver[0] == '6':
        parcels = [
            'https://archive.cloudera.com/cdh6/' + cdh_ver + '/parcels/'
        ]
    else:
        raise ValueError("Only CDH versions 5 or 6 supported")
    try:
        cd.ClustersApi(horton.cad).create(
            environment=env_name,
            deployment=dep_name,
            cluster_template=cd.ClusterTemplate(
                name=clus_name,
                product_versions={'CDH': cdh_ver},
                parcel_repositories=parcels,
                services=services,
                services_configs={},
                virtual_instance_groups={
                    'masters':
                    cd.VirtualInstanceGroup(
                        name='masters',
                        min_count=1,
                        service_type_to_role_types=master_setups,
                        role_types_configs=master_configs,
                        virtual_instances=[create_virtual_instance()]),
                    'workers':
                    cd.VirtualInstanceGroup(
                        name='workers',
                        min_count=workers,
                        service_type_to_role_types=worker_setups,
                        role_types_configs=worker_configs,
                        virtual_instances=[
                            create_virtual_instance()
                            for _ in range(0, workers)
                        ])
                }))
    except ApiException as e:
        if e.status == 409:
            log.error("Cluster %s already exists", clus_name)
            # Format eagerly: ValueError does not do %-substitution on args
            raise ValueError("Cluster %s already exists" % clus_name)
        else:
            raise e
Ejemplo n.º 6
0
def list_clusters(env_name=None, dep_name=None):
    """List clusters in a deployment.

    Args:
        env_name: Environment name; defaults to horton.cadcred.name.
        dep_name: Deployment name; defaults to horton.cadcred.name.

    Returns:
        The API's cluster listing for the deployment.
    """
    # Using default if available
    env_name = env_name if env_name else horton.cadcred.name
    dep_name = dep_name if dep_name else horton.cadcred.name
    # Return the listing — the original computed it and dropped the result,
    # so the function always returned None despite its name.
    return cd.ClustersApi(horton.cad).list(environment=env_name,
                                           deployment=dep_name)
Ejemplo n.º 7
0
def create_cluster(cdh_ver,
                   workers=3,
                   services=None,
                   env_name=None,
                   dep_name=None,
                   clus_name=None,
                   parcels=None):
    """Create a CDH cluster, including CFM/Kafka/SchemaRegistry products.

    Args:
        cdh_ver: CDH version string; the major version must be '5' or '6'.
        workers: Number of worker virtual instances (default 3).
        services: Service names to enable; defaults to ['HDFS', 'YARN'].
        env_name: Environment name; defaults to horton.cdcred.name.
        dep_name: Deployment name; defaults to '<env>-<cdh_ver with dashes>'.
        clus_name: Cluster name; defaults to
            '<credential name>-<cdh_ver with dashes>'.
        parcels: Extra parcel repository URLs appended to the CDH repo
            (Kafka parcels are dropped on CDH6, which bundles Kafka).

    Raises:
        ValueError: If the CDH major version is not 5 or 6, or the cluster
            already exists (API 409).
        ApiException: For any other API error.
    """
    env_name = env_name if env_name else horton.cdcred.name
    dep_name = dep_name if dep_name else env_name + '-' + cdh_ver.replace(
        '.', '-')
    services = services if services else ['HDFS', 'YARN']
    # Parcel repository URL layout differs between CDH5 and CDH6
    if cdh_ver[0] == '5':
        load_parcels = [
            'https://archive.cloudera.com/cdh5/parcels/' + cdh_ver + '/'
        ]
    elif cdh_ver[0] == '6':
        load_parcels = [
            'https://archive.cloudera.com/cdh6/' + cdh_ver + '/parcels/'
        ]
    else:
        raise ValueError("Only CDH versions 5 or 6 supported")
    if parcels:
        load_parcels += parcels
        if cdh_ver[0] == '6':
            # CDH6 already has Kafka
            load_parcels = [x for x in load_parcels if 'kafka' not in x]
    # Additional product versions implied by the requested services
    products = {'CDH': cdh_ver}
    if 'NIFI' in str(services):
        products['CFM'] = '1'
    if 'KAFKA' in str(services) and cdh_ver[0] == '5':
        products['KAFKA'] = '4'
    if 'SCHEMAREGISTRY' in str(services):
        products['SCHEMAREGISTRY'] = '0.7'
    # Map each requested service to its master/worker role types
    services_configs = {}
    master_setups = {}
    master_configs = {}
    worker_setups = {}
    worker_configs = {}
    if 'HDFS' in services:
        master_setups['HDFS'] = ['NAMENODE', 'SECONDARYNAMENODE']
        worker_setups['HDFS'] = ['DATANODE']
    if 'YARN' in services:
        master_setups['YARN'] = ['RESOURCEMANAGER', 'JOBHISTORY']
        worker_setups['YARN'] = ['NODEMANAGER']
    if 'ZOOKEEPER' in services:
        master_setups['ZOOKEEPER'] = ['SERVER']
    if 'HBASE' in services:
        master_setups['HBASE'] = ['MASTER']
        worker_setups['HBASE'] = ['REGIONSERVER']
    if 'HIVE' in services:
        master_setups['HIVE'] = ['HIVESERVER2', 'HIVEMETASTORE']
    if 'HUE' in services:
        master_setups['HUE'] = ['HUE_SERVER']
    if 'KUDU' in services:
        master_setups['KUDU'] = ['KUDU_MASTER']
        worker_setups['KUDU'] = ['KUDU_TSERVER']
        master_configs['KUDU'] = {
            'KUDU_MASTER': {
                'fs_wal_dir': "/data0/kudu/masterwal",
                'fs_data_dirs': "/data1/kudu/master"
            }
        }
        worker_configs['KUDU'] = {
            'KUDU_TSERVER': {
                'fs_wal_dir': "/data0/kudu/tabletwal",
                'fs_data_dirs': "/data1/kudu/tablet"
            }
        }
    if 'IMPALA' in services:
        master_setups['IMPALA'] = ['CATALOGSERVER', 'STATESTORE']
        worker_setups['IMPALA'] = ['IMPALAD']
    if 'NIFI' in services:
        worker_setups['NIFI'] = ['NIFI_NODE']
    if 'NIFIREGISTRY' in services:
        master_setups['NIFIREGISTRY'] = ['NIFI_REGISTRY_SERVER']
    if 'NIFITOOLKITCA' in services:
        master_setups['NIFITOOLKITCA'] = ['NIFI_TOOLKIT_SERVER']
        services_configs['NIFITOOLKITCA'] = {
            'nifi.toolkit.tls.ca.server.token':
            security.get_secret('MASTERKEY')
        }
    if 'KAFKA' in services:
        worker_setups['KAFKA'] = ['KAFKA_BROKER']
    if 'SCHEMAREGISTRY' in services:
        master_setups['SCHEMAREGISTRY'] = ['SCHEMA_REGISTRY_SERVER']
    clus_name = clus_name if clus_name else \
        horton.cdcred.name + '-' + str(cdh_ver).replace('.', '-')
    # Handle virtual instance generation
    master_vi = [
        create_virtual_instance(tem_name='master',
                                scripts=[
                                    '''sudo -i
            yum install mysql mariadb-server -y
            systemctl enable mariadb
            service mariadb start
            mysql --execute="CREATE DATABASE registry DEFAULT CHARACTER SET utf8"
            mysql --execute="CREATE USER 'registry'@'localhost' IDENTIFIED BY 'registry'"
            mysql --execute="GRANT ALL PRIVILEGES ON registry.* TO 'registry'@'localhost' identified by 'registry'"
            mysql --execute="GRANT ALL PRIVILEGES ON registry.* TO 'registry'@'localhost' WITH GRANT OPTION"
            mysql --execute="FLUSH PRIVILEGES"
            mysql --execute="COMMIT"'''
                                ])
    ]
    worker_vi = [create_virtual_instance() for _ in range(0, workers)]
    try:
        cd.ClustersApi(horton.cad).create(
            environment=env_name,
            deployment=dep_name,
            cluster_template=cd.ClusterTemplate(
                name=clus_name,
                product_versions=products,
                parcel_repositories=load_parcels,
                services=services,
                services_configs=services_configs,
                virtual_instance_groups={
                    'masters':
                    cd.VirtualInstanceGroup(
                        name='masters',
                        min_count=1,
                        service_type_to_role_types=master_setups,
                        role_types_configs=master_configs,
                        virtual_instances=master_vi),
                    'workers':
                    cd.VirtualInstanceGroup(
                        name='workers',
                        min_count=workers,
                        service_type_to_role_types=worker_setups,
                        role_types_configs=worker_configs,
                        virtual_instances=worker_vi)
                }))
    except ApiException as e:
        if e.status == 409:
            log.error("Cluster %s already exists", clus_name)
            # Format eagerly: ValueError does not do %-substitution on args
            raise ValueError("Cluster %s already exists" % clus_name)
        else:
            raise e