Example #1
def main():
    #print sys.argv[0]
    #for i in range(1, len(sys.argv)):
    #    print "param ", i, sys.argv[i]

    # get a handle on the instance of CM that we have running
    api = ApiResource(cm_host, cm_port, cm_username, cm_password, version=13)

    # get the CM instance
    cm = ClouderaManager(api)

    cluster = api.get_cluster(cluster_name)

    # distribution_parcels(api, cluster)

    cmd = cluster.first_run()

    while cmd.success is None:
        cmd = cmd.fetch()

    if cmd.success != True:
        print "The first run command failed: " + cmd.resultMessage
        exit(1)

    print "First run successfully executed. Your cluster has been set up!"
Example #2
def main():
    try:
        print '###############################\n\n'
        print 'STAGE 0. Services Prerequisite'
        # api_prerequisite()
        template = ApiClusterTemplate(resource).from_json_dict(data, resource)
        cms = ClouderaManager(resource)

        print 'STAGE 1. Installing Hosts'
        # Union of all hosts
        CLUSTER_HOSTS = list(set(MGMT_1.split()) | set(MGMT_2.split()) | set(MGMT_3.split()) | set(HOST_DATA))
        print CLUSTER_HOSTS
        logger('info', 'Cluster hosts that are part of this install')
        logger('info', 'Adding hosts to Cloudera Manager')
        cm_install_hosts(logger, cms, HOST_USER, CLUSTER_HOSTS, HOST_PASS)

        print 'STAGE 2. CM API importing cluster template'
        logger('info', 'CM API importing cluster template ' + OUTPUT_TEMPLATE)
        command = cms.import_cluster_template(template)
        command = command.wait()

        print "Active: %s. Success: %s" % (command.active, command.success)
        print "Cluster installation triggered!"
        logger('info', "Cluster template " + CLUSTER_NM + " import finished!")
    except Exception as err:
        logger('error', "Got an exception; unable to install cluster " + CLUSTER_NM)
        logger('error', str(err))
        print "Failed to import cluster\n\n"
        print "Got exceptions... check the log file: " + LOG_FILE
        sys.exit(1)
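cm_install_hosts() above is a project-local helper that is not shown; a minimal sketch of what it plausibly wraps (ClouderaManager.host_install is the underlying cm_api call):

def cm_install_hosts(logger, cms, user, hosts, password):
    # Assumed helper: install the CM agent on each host and block until the
    # host-install command finishes.
    cmd = cms.host_install(user, hosts, password=password).wait()
    logger('info', 'Host install finished. Success: %s' % cmd.success)
    return cmd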
Example #3
 def __init__(self, module):
     self.changed = False
     self.module = module
     self.cm_host = module.params.get('cm_host')
     self.cm_port = module.params.get('cm_port')
     self.cm_username = module.params.get('cm_username')
     self.cm_password = module.params.get('cm_password')
     self.cm_tls = module.params.get('cm_tls')
     self.cm_version = module.params.get('cm_version')
     self.hosts = module.params.get('hosts')
     self.username = module.params.get('username')
     self.sshport = module.params.get('sshport')
     self.password = module.params.get('password')
     self.private_key = module.params.get('private_key')
     self.passphrase = module.params.get('passphrase')
     self.cm_repo_url = module.params.get('cm_repo_url')
     self.gpg_key_custom_url = module.params.get('gpg_key_custom_url')
     self.java_strategy = module.params.get('java_strategy')
     self.unlimited_jce = module.params.get('unlimited_jce')
     self.parallel_install_count = module.params.get(
         'parallel_install_count')
     self.hosts_to_install = self.hosts
     self.hosts_reply = dict()
     try:
         self.cm_conn = ApiResource(self.cm_host,
                                    server_port=self.cm_port,
                                    username=self.cm_username,
                                    password=self.cm_password,
                                    use_tls=self.cm_tls,
                                    version=self.cm_version)
         self.cms = ClouderaManager(self.cm_conn)
     except ApiException as e:
         self.module.fail_json(changed=self.changed,
                               msg="Can't connect to API: {}".format(e))
Example #4
    def test_all_hosts_config(self):
        SUMMARY = """
      {
        "items" : [ {
          "name" : "blacklisted_parcel_products",
          "value" : "foo,bar"
        } ]
      }
      """
        FULL = """
      {
        "items" : [ {
          "name" : "blacklisted_parcel_products",
          "value" : "foo,bar",
          "required" : false,
          "default" : "",
          "displayName" : "Blacklisted Products",
          "description" : "Parcels for blacklisted products will not be distributed to the host, nor activated for process execution. Already distributed parcels will be undistributed. Already running process will not be affected until the next restart.",
          "validationState" : "OK"
        }, {
          "name" : "rm_enabled",
          "required" : false,
          "default" : "false",
          "displayName" : "Enable Resource Management",
          "description" : "Enables resource management for all roles on this host.",
          "validationState" : "OK",
          "validationWarningsSuppressed" : false
        } ]
      }
      """

        resource = utils.MockResource(self)
        cms = ClouderaManager(resource)

        resource.expect("GET",
                        "/cm/allHosts/config",
                        retdata=json.loads(SUMMARY))
        cfg = cms.get_all_hosts_config()
        self.assertIsInstance(cfg, dict)
        self.assertEqual(1, len(cfg))
        self.assertEqual('foo,bar', cfg.get('blacklisted_parcel_products'))

        resource.expect("GET",
                        "/cm/allHosts/config",
                        params={'view': 'full'},
                        retdata=json.loads(FULL))
        cfg = cms.get_all_hosts_config(view='full')
        self.assertIsInstance(cfg, dict)
        self.assertEqual(2, len(cfg))
        self.assertIsInstance(cfg['blacklisted_parcel_products'], ApiConfig)
        self.assertFalse(cfg['blacklisted_parcel_products'].required)
        self.assertEqual('OK', cfg['rm_enabled'].validationState)

        cfg = {'blacklisted_parcel_products': 'bar'}
        resource.expect("PUT",
                        "/cm/allHosts/config",
                        data=config_to_json(cfg),
                        retdata=json.loads(SUMMARY))
        cms.update_all_hosts_config(cfg)
Example #5
 def test_import_cluster_v12(self):
     resource = utils.MockResource(self, version=12)
     cms = ClouderaManager(resource)
     data = ApiClusterTemplate(resource).to_json_dict()
     resource.expect(method="POST",
                     reqpath="/cm/importClusterTemplate",
                     params=dict(addRepositories=True),
                     data=data,
                     retdata=ApiCommand(resource).to_json_dict())
     cms.import_cluster_template(data, True)
Example #6
def cdh_manager(op):
    cm = ClouderaManager(api)
    cm_service = cm.get_service()
    print('cm_service=', cm_service)
    # stop or restart the management service
    if op == 'stop':
        print('cm service - stop')
        cm_service.stop().wait()
    elif op == 'start':
        print('cm service - restart')
        cm_service.restart().wait()
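A usage sketch (assuming `api` is an authenticated ApiResource global, as elsewhere in these examples):

cdh_manager('stop')   # stop the Cloudera Management Service
cdh_manager('start')  # note: this branch issues a restart, not a plain start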
Example #7
def main():
    #print sys.argv[0]
    #for i in range(1, len(sys.argv)):
    #    print "param ", i, sys.argv[i]

    # get a handle on the instance of CM that we have running
    api = ApiResource(cm_host, cm_port, cm_username, cm_password, version=13)

    # get the CM instance
    cm = ClouderaManager(api)

    cluster = api.get_cluster(cluster_name)
    distribution_parcels(api, cluster)
Example #8
  def test_host_commission_with_start(self):
    resource = utils.MockResource(self)
    cms = ClouderaManager(resource)

    resource.expect("POST", "/cm/commands/hostsDecommission",
        data=[ "host1", "host2" ],
        retdata={})
    cms.hosts_decommission([ "host1", "host2" ])

    resource.expect("POST", "/cm/commands/hostsRecommissionWithStart",
        data=[ "host1", "host2" ],
        retdata={})
    cms.hosts_recommission_with_start([ "host1", "host2" ])
Example #9
def main():
    api = ApiResource(cm_host, cm_port, cm_username, cm_password, version=api_num)
    cm = ClouderaManager(api)
    #cm.host_install(host_username, host_list, password=host_password, cm_repo_url=cm_repo_url)
    MANAGER = api.get_cloudera_manager()
    #MANAGER.update_config(...)
    print "Connected to CM host on " + cm_host + " and updated CM configuration"

    #CLUSTER = init_cluster(api, cluster_name , cdh_version, host_list ,host_list)

    deploy_management(MANAGER, MGMT_SERVICENAME, MGMT_SERVICE_CONFIG,
                      MGMT_ROLE_CONFIG, AMON_ROLENAME, AMON_ROLE_CONFIG,
                      APUB_ROLENAME, APUB_ROLE_CONFIG, ESERV_ROLENAME,
                      ESERV_ROLE_CONFIG, HMON_ROLENAME, HMON_ROLE_CONFIG,
                      SMON_ROLENAME, SMON_ROLE_CONFIG, NAV_ROLENAME,
                      NAV_ROLE_CONFIG, NAVMS_ROLENAME, NAVMS_ROLE_CONFIG,
                      RMAN_ROLENAME, RMAN_ROLE_CONFIG)

    print "Deployed CM management service " + MGMT_SERVICENAME + " to run on " + cm_host + "now service is stop!"
Example #10
 def import_cluster_template(self, template_filename, cluster_name):
     """
     Import a cluster template configuration into the given cluster.
     :param template_filename:
     :param cluster_name:
     :return:
     """
     cluster = self._cloudera_manager_oconnect.get_cluster(cluster_name)
     with open(template_filename) as data_file:
         data = json.load(data_file)
     # ApiClusterTemplate and ClouderaManager need the API resource root,
     # not the cluster object, so reuse the stored connection
     template = ApiClusterTemplate(self._cloudera_manager_oconnect).from_json_dict(
         data, self._cloudera_manager_oconnect)
     cms = ClouderaManager(self._cloudera_manager_oconnect)
     command = cms.import_cluster_template(template)
     print(command)
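The method only prints the ApiCommand; if the import should be synchronous, the command can be waited on, as the other examples do:

     command = command.wait()
     print("Import finished. Active: %s. Success: %s" % (command.active, command.success))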
Example #11
def set_up_cluster():
    # get a handle on the instance of CM that we have running
    api = ApiResource(cm_host, cm_port, cm_username, cm_password, version=13)

    # get the CM instance
    cm = ClouderaManager(api)

    # activate the CM trial license
    #cm.begin_trial()

    cmservice = None
    try:
        cmservice = cm.get_service()
    except Exception as e:
        print Exception, ":", e
Example #12
    def test_host_offline(self):
        resource = utils.MockResource(self)
        cms = ClouderaManager(resource)

        resource.expect("POST",
                        "/cm/commands/hostsOfflineOrDecommission",
                        data=["host1", "host2"],
                        retdata={})
        cms.hosts_offline_or_decommission(["host1", "host2"])

        resource.expect("POST",
                        "/cm/commands/hostsOfflineOrDecommission",
                        data=["host1", "host2"],
                        params={'timeout': 123456},
                        retdata={})
        cms.hosts_offline_or_decommission(["host1", "host2"], timeout=123456)
Example #13
def clean(api):
    for cluster in api.get_all_clusters():
        list_hosts = cluster.list_hosts()
        print "hosts:"  # keep these hostIds to add back when re-creating the cluster
        print list_hosts
        print "cluster:" + cluster.name + " ,cluster version:" + cluster.version
        for service in cluster.get_all_services():
            service.stop().wait()
            cluster.delete_service(service.name)
        #delete cloudera mgmt service
        cm = ClouderaManager(api)
        try:
            cm.delete_mgmt_service()
        except ApiException:
            print "mgmt service doesn't exist"
        api.delete_cluster(cluster.name)
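A usage sketch for clean(), reusing the connection pattern from the other examples:

api = ApiResource(cm_host, cm_port, cm_username, cm_password, version=13)
clean(api)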
Example #14
def main():
    #print sys.argv[0]
    #for i in range(1, len(sys.argv)):
    #    print "param ", i, sys.argv[i]

    # get a handle on the instance of CM that we have running
    api = ApiResource(cm_host, cm_port, cm_username, cm_password, version=13)

    # get the CM instance
    cm = ClouderaManager(api)

    cluster = create_cluster(api, cluster_name, cdh_version)

    cluster.auto_assign_roles()
    cluster.auto_configure()

    print "auto_assign_roles successfully executed. Your cluster has been set up!"
Example #15
    def test_peer_v10(self):
        json_peer = _make_cm_v10_format_peer("peer1", "url1")

        resource = utils.MockResource(self, version=10)
        cms = ClouderaManager(resource)
        peer = ApiCmPeer(resource,
                         name="peer1",
                         url="url1",
                         username="******",
                         password="******")

        # Create peer
        resource.expect("POST",
                        "/cm/peers",
                        data=peer,
                        retdata=json.loads(json_peer))
        cms.create_peer("peer1", "url1", "username", "password")

        # Delete peer
        resource.expect("DELETE",
                        "/cm/peers/peer1",
                        retdata=json.loads(json_peer))
        cms.delete_peer("peer1")

        # Update peer
        resource.expect("PUT",
                        "/cm/peers/peer1",
                        data=peer,
                        retdata=json.loads(json_peer))
        cms.update_peer("peer1", "peer1", "url1", "username", "password")

        # Read peer
        resource.expect("GET",
                        "/cm/peers/peer1",
                        retdata=json.loads(json_peer))
        cms.get_peer("peer1")

        # Test peer connectivity
        resource.expect("POST",
                        "/cm/peers/peer1/commands/test",
                        retdata=json.loads(SAMPLE_COMMAND_JSON))
        cms.test_peer_connectivity("peer1")
Example #16
 def test_get_licensed_feature_usage(self):
     resource = utils.MockResource(self)
     cms = ClouderaManager(resource)
     json_string = {
         "totals": {
             "Core": 8,
             "HBase": 8,
             "Impala": 8,
             "Search": 2,
             "Spark": 5,
             "Accumulo": 0,
             "Navigator": 8
         },
         "clusters": {
             "Cluster 1": {
                 "Core": 4,
                 "HBase": 4,
                 "Impala": 4,
                 "Search": 1,
                 "Spark": 1,
                 "Accumulo": 0,
                 "Navigator": 4
             },
             "Cluster 2": {
                 "Core": 4,
                 "HBase": 4,
                 "Impala": 4,
                 "Search": 1,
                 "Spark": 4,
                 "Accumulo": 0,
                 "Navigator": 4
             }
         }
     }
     resource.expect("GET",
                     "/cm/getLicensedFeatureUsage",
                     retdata=json_string)
     cms.get_licensed_feature_usage()
Example #17
                                        'clouderadbhost')
cloudera_manager_repo = config.get('REPOS', 'cloudera_manager_repo')
cloudera_manager_repo_gpg = config.get('REPOS', 'cloudera_manager_repo_gpg')
cdh_parcel_repo = config.get('PARCELS', 'parcel_repo_url')
cdh_parcel_version = config.get('PARCELS', 'cdh_parcel_version')
kafka_parcel_repo = config.get('PARCELS', 'kafka_parcel_repo')
kafka_parcel_version = config.get('PARCELS', 'kafka_parcel_version')
api_version = config.get('CLOUDERA_PROPERTIES', 'api_version')

# Set API client and ClouderaManager object
logging.info('Setting initial Cloudera API Resource object with default user')
api = ApiResource(cloudera_manager_server_api,
                  username="******",
                  password="******",
                  version=api_version)
cloudera_manager = ClouderaManager(api)

# Create new admin user and delete default admin user
logging.info('Creating new admin user')
api.create_user(management_console_username, management_console_password,
                ["ROLE_ADMIN"])

logging.info('Setting Cloudera API Resource object using new user')
api = ApiResource(cloudera_manager_server_api,
                  username=management_console_username,
                  password=management_console_password,
                  version=api_version)
cloudera_manager = ClouderaManager(api)

logging.info('Deleting default admin user')
api.delete_user("admin")
Example #18
def set_up_cluster(cm_host, host_list):
    print "Setting up CDH cluster..."

    api = ApiResource(cm_host, cm_port, cm_username, cm_password, version=7)
    cm = ClouderaManager(api)

    print "Creating mgmg service."
    try:
        service_setup = ApiServiceSetupInfo(name=cm_service_name, type="MGMT")
        cm.create_mgmt_service(service_setup)
    except ApiException as exc:
        if exc.code != 400:
            print "create MGMT service failed: " + str(exc)
            exit(1)

    print "Installing hosts. This might take a while."
    cmd = cm.host_install(host_username, host_list,
                          password=host_password).wait()
    if cmd.success != True:
        print "cm_host_install failed: " + cmd.resultMessage
        exit(2)

    print "Auto-assign roles and auto-configure the CM service"
    if not is_cluster_installed(api):
        cm.auto_assign_roles()
        cm.auto_configure()

    print "Creating cluster."
    if not is_cluster_installed(api):
        cluster = create_cluster(api, cluster_name, cdh_version)
        cluster.add_hosts(host_list)
    cluster = api.get_cluster(cluster_name)

    cdh_parcel = get_cdh_parcel(cluster)

    print "Downloading CDH parcel. This might take a while."
    if cdh_parcel.stage == "AVAILABLE_REMOTELY":
        cdh_parcel = wait_for_parcel(cdh_parcel.start_download(), api,
                                     cdh_parcel, cluster_name, 'DOWNLOADED')

    print "Distributing CDH parcel. This might take a while."
    if cdh_parcel.stage == "DOWNLOADED":
        cdh_parcel = wait_for_parcel(cdh_parcel.start_distribution(), api,
                                     cdh_parcel, cluster_name, 'DISTRIBUTED')

    print "Activating CDH parcel. This might take a while."
    if cdh_parcel.stage == "DISTRIBUTED":
        cdh_parcel = wait_for_parcel(cdh_parcel.activate(), api, cdh_parcel,
                                     cluster_name, 'ACTIVATED')


#  if cdh_parcel.stage != "ACTIVATED":
#    print "CDH parcel activation failed. Parcel in stage: " + cdh_parcel.stage
#    exit(14)

    print "Inspecting hosts. This might take a few minutes."
    cmd = cm.inspect_hosts()
    while cmd.success is None:
        cmd = cmd.fetch()
    if cmd.success != True:
        print "Host inspection failed!"
        exit(8)
    print "Hosts successfully inspected: \n" + cmd.resultMessage

    print "Creating specified services."
    for s in service_types_and_names.keys():
        try:
            cluster.get_service(service_types_and_names[s])
        except:
            print "Creating service: " + service_types_and_names[s]
            service = cluster.create_service(service_types_and_names[s], s)

    slaves = [host for host in host_list if 'slave' in host]
    edges = [host for host in host_list if 'edge' in host]

    #assign master roles to master node
    for service in cluster.get_all_services():
        if service.name == 'HDFS-1':
            service.create_role('NAMENODE-1', 'NAMENODE', cm_host)
            service.create_role('SECONDARYNAMENODE', 'SECONDARYNAMENODE',
                                cm_host)
            service.create_role('BALANCER-1', 'BALANCER', cm_host)
            service.create_role('HTTPFS-1', 'HTTPFS', cm_host)
            service.create_role('HDFS-GW_MASTER1', 'GATEWAY', cm_host)
            for (i, edge) in enumerate(edges):
                service.create_role('HDFS-GW_EDGE%s' % i, 'GATEWAY', edge)
            for (i, slave) in enumerate(slaves):
                service.create_role('DATANODE-%s' % i, 'DATANODE', slave)

        if service.name == 'ZOOKEEPER-1':
            service.create_role('ZOOKEEPERSERVER-1', 'SERVER', cm_host)

        if service.name == 'HBASE-1':
            service.create_role('MASTER-1', 'MASTER', cm_host)
            service.create_role('HBASETHRIFTSERVER-1', 'HBASETHRIFTSERVER',
                                cm_host)
            for (i, slave) in enumerate(slaves):
                service.create_role('HBASE-RS-%s' % i, 'REGIONSERVER', slave)

        if service.name == 'HUE-1':
            service.create_role('HUE-MASTER1', 'HUE_SERVER', cm_host)
            service.create_role('HUE-LB_MASTER1', 'HUE_LOAD_BALANCER', cm_host)
            for (i, edge) in enumerate(edges):
                service.create_role('HUE-EDGE%s' % i, 'HUE_SERVER', edge)

        if service.name == 'HIVE-1':
            service.create_role('HIVEMETASTORE-1', 'HIVEMETASTORE', cm_host)
            service.create_role('HIVESERVER-1', 'HIVESERVER2', cm_host)
            service.create_role('HIVE-GW_MASTER1', 'GATEWAY', cm_host)
            for (i, edge) in enumerate(edges):
                service.create_role('HIVE-GW_EDGE%s' % i, 'GATEWAY', edge)
            for (i, slave) in enumerate(slaves):
                service.create_role('HIVE-GW_SLAVE%s' % i, 'GATEWAY', slave)

        if service.name == 'IMPALA-1':
            service.create_role('STATESTORE-1', 'STATESTORE', cm_host)
            service.create_role('CATALOGSERVER-1', 'CATALOGSERVER', cm_host)
            for (i, slave) in enumerate(slaves):
                service.create_role('IMPALAD-%s' % i, 'IMPALAD', slave)

        if service.name == 'OOZIE-1':
            service.create_role('OOZIE_SERVER-1', 'OOZIE_SERVER', cm_host)

        if service.name == 'SPARK_ON_YARN-1':
            service.create_role('SPARK_YARN_HISTORY_SERVER-1',
                                'SPARK_YARN_HISTORY_SERVER', cm_host)
            service.create_role('SPARK_ON_YARN-GW_MASTER1', 'GATEWAY',
                                cm_host)
            for (i, edge) in enumerate(edges):
                service.create_role('SPARK_ON_YARN-GW_EDGE%s' % i, 'GATEWAY',
                                    edge)
            for (i, slave) in enumerate(slaves):
                service.create_role('SPARK_ON_YARN-GW_SLAVE%s' % i, 'GATEWAY',
                                    slave)

        if service.name == 'SQOOP-1':
            service.create_role('SQOOP_SERVER-1', 'SQOOP_SERVER', cm_host)

        if service.name == 'YARN-1':
            service.create_role('RESOURCEMANAGER-1', 'RESOURCEMANAGER',
                                cm_host)
            service.create_role('JOBHISTORY-1', 'JOBHISTORY', cm_host)
            for (i, slave) in enumerate(slaves):
                service.create_role('NODEMANAGER-%s' % i, 'NODEMANAGER', slave)

    #print "Auto assigning roles."
    #cluster.auto_assign_roles()
    cluster.auto_configure()

    print "Updating Hive config."
    hive_metastore_host = cm_host  # let's assume that
    hive = cluster.get_service(service_types_and_names["HIVE"])
    hive_config = { "hive_metastore_database_host" : hive_metastore_host, \
                    "hive_metastore_database_name" : hive_metastore_name, \
                    "hive_metastore_database_user" : hive_metastore_user, \
                    "hive_metastore_database_password" : hive_metastore_password, \
                    "hive_metastore_database_port" : hive_metastore_database_port, \
                    "hive_metastore_database_type" : hive_metastore_database_type }
    hive.update_config(hive_config)

    print "Updating Hue config."
    hue_db_host = cm_host  # let's assume that
    hue = cluster.get_service(service_types_and_names["HUE"])
    hue_config = {  "database_host" : hue_db_host, \
                    "database_name" : hue_db_name, \
                    "database_user" : hue_db_user, \
                    "database_password" : hue_db_password, \
                    "database_port" : hue_db_port, \
                    "database_type" : hue_db_type }
    hue.update_config(hue_config)

    # Set Java version to OpenJDK
    cm.update_all_hosts_config({'java_home': '/usr/lib/jvm/java-openjdk'})

    print "Starting management service."
    cm_service = cm.get_service()
    cm_service.start().wait()

    print "Excuting first run command. This might take a while."
    cmd = cluster.first_run().wait()
    if cmd.success != True:
        print "The first run command failed: " + cmd.resultMessage
        exit(11)

    print "First run successfully executed. Your cluster has been set up!"

    config = cm.get_config(view='full')
    repolist = config['REMOTE_PARCEL_REPO_URLS']
    value = repolist.value or repolist.default
    value += ',' + anaconda_repo
    cm.update_config({'REMOTE_PARCEL_REPO_URLS': value})
    sleep(10)

    cluster = api.get_cluster(cluster_name)
    parcel = cluster.get_parcel('Anaconda', anaconda_parcel_version)

    print "Downloading Anaconda parcel. This might take a while."
    if parcel.stage == "AVAILABLE_REMOTELY":
        parcel = wait_for_parcel(parcel.start_download(), api, parcel,
                                 cluster_name, 'DOWNLOADED')

    print "Distributing Anaconda parcel. This might take a while."
    if parcel.stage == "DOWNLOADED":
        parcel = wait_for_parcel(parcel.start_distribution(), api, parcel,
                                 cluster_name, 'DISTRIBUTED')

    print "Activating Anaconda parcel. This might take a while."
    if parcel.stage == "DISTRIBUTED":
        parcel = wait_for_parcel(parcel.activate(), api, parcel, cluster_name,
                                 'ACTIVATED')

    print "Anaconda is now installed."
Example #19
import pycurl
from cm_api.api_client import ApiResource
from cm_api.endpoints.cms import ClouderaManager
from datetime import date

cluster_name = "cluster"
host_url = "host-10-17-100-146.coe.cloudera.com"

api = ApiResource(
    host_url,
    username="******",
    password="******",
)

end_time = date(2017, 12, 18)
cm = ClouderaManager(api)
cmd = cm.collect_diagnostic_data_45(end_time, 1073741824, cluster_name)

if not cmd.wait().success:
    raise Exception("Failed to run cluster diagnostic bundle collection")

result_data_url = "http://" + host_url + ":7180/cmf/command/" + str(
    cmd.id) + "/download"
print result_data_url
print getattr(cmd, "id")
print getattr(cmd, "name")
print cmd.to_json_dict(True)

print cmd.id
print cmd.name
print cmd.resultDataUrl
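pycurl is imported above but never used; presumably the bundle was meant to be fetched from result_data_url. A sketch of that download (the credentials and output filename are placeholders):

c = pycurl.Curl()
c.setopt(pycurl.URL, result_data_url)
c.setopt(pycurl.USERPWD, "******:******")  # placeholder CM credentials
f = open("diagnostic-bundle.zip", "wb")
c.setopt(pycurl.WRITEDATA, f)
c.perform()
c.close()
f.close()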
Example #20
def main():
    module = AnsibleModule(argument_spec=dict(
        cm_host=dict(required=True, type='str'),
        cm_port=dict(required=False, type='int', default=7180),
        cm_username=dict(required=True, type='str'),
        cm_password=dict(required=True, type='str', no_log=True),
        cm_tls=dict(required=False, type='bool', default=False),
        cm_version=dict(required=False, type='int', default=10),
        cdh_version=dict(required=False, type='str'),
        cluster_name=dict(required=False, type='str', default='cluster'),
        state=dict(default='present',
                   choices=[
                       'present', 'absent', 'restarted', 'stopped', 'started',
                       'refreshed'
                   ])))

    cm_host = module.params.get('cm_host')
    cm_port = module.params.get('cm_port')
    cm_username = module.params.get('cm_username')
    cm_password = module.params.get('cm_password')
    cm_tls = module.params.get('cm_tls')
    cm_version = module.params.get('cm_version')
    cdh_version = module.params.get('cdh_version')
    cluster_name = module.params.get('cluster_name')
    state = module.params.get('state')

    changed = False

    if not CM_API:
        module.fail_json(changed=changed,
                         msg='cm_api required for this module')

    try:
        api = ApiResource(cm_host,
                          server_port=cm_port,
                          username=cm_username,
                          password=cm_password,
                          use_tls=cm_tls,
                          version=cm_version)
        cms = ClouderaManager(api)
    except ApiException as e:
        module.fail_json(changed=changed,
                         msg="Can't connect to CM API: {0}".format(e))

    if state == "present":
        try:
            api.create_cluster(cluster_name, version=cdh_version)
            module.exit_json(changed=True, rc=0)
        except Exception as e:
            module.fail_json(changed=changed, msg="{0}".format(e))
    elif state == "absent":
        try:
            api.delete_cluster(cluster_name)
            module.exit_json(changed=True, rc=0)
        except Exception as e:
            module.fail_json(changed=False, msg="{0}".format(e))
    elif state == "restarted":
        try:
            cluster = api.get_cluster(cluster_name)
            restart_cluster(cluster)
        except Exception as e:
            module.fail_json(changed=False, msg="{0}".format(e))
    elif state == "stopped":
        try:
            cluster = api.get_cluster(cluster_name)
            stop_cluster(cluster)
        except Exception as e:
            module.fail_json(changed=False, msg="{0}".format(e))
    elif state == "started":
        try:
            cluster = api.get_cluster(cluster_name)
            start_cluster(cluster)
        except Exception as e:
            module.fail_json(changed=False, msg="{0}".format(e))
    elif state == "refreshed":
        try:
            cluster = api.get_cluster(cluster_name)
            refresh_cluster(cluster)
        except Exception as e:
            module.fail_json(changed=False, msg="{0}".format(e))

    module.exit_json(changed=False, settings=cms.get_config('summary'))
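restart_cluster(), stop_cluster(), start_cluster(), and refresh_cluster() are referenced above but not defined; minimal sketches, each blocking until the command completes (interpreting "refreshed" as a client-config redeploy is an assumption):

def restart_cluster(cluster):
    cluster.restart().wait()

def stop_cluster(cluster):
    cluster.stop().wait()

def start_cluster(cluster):
    cluster.start().wait()

def refresh_cluster(cluster):
    # Assumption: "refreshed" means redeploying client configuration.
    cluster.deploy_client_config().wait()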
Example #21
def set_up_cluster():
    # get a handle on the instance of CM that we have running
    api = ApiResource(cm_host, cm_port, cm_username, cm_password, version=19)

    # get the CM instance
    cm = ClouderaManager(api)

    print "*************************************"
    print " Starting Auto Deployment of Cluster "
    print "*************************************"

    # {'owner': ROAttr(), 'uuid': ROAttr(), 'expiration': ROAttr(),}
    TRIAL = False
    try:

        trial_active = cm.get_license()
        print trial_active

        if trial_active.owner == "Trial License":
            print "Trial License is already set - will NOT continue now."
            print "Assuming Cluster is already setup"
            TRIAL = True
        else:
            print "Setting up `Trial License`."
            cm.begin_trial()
    except:
        cm.begin_trial()

    if TRIAL:
        exit(0)

    # create the management service
    service_setup = ApiServiceSetupInfo(name=cm_service_name, type="MGMT")

    try:
        if not cm.get_service().name:
            cm.create_mgmt_service(service_setup)
        else:
            print "Service already exist."
    except:
        cm.create_mgmt_service(service_setup)

    # install hosts on this CM instance
    cmd = cm.host_install(host_username,
                          host_list,
                          password=host_password,
                          cm_repo_url=cm_repo_url,
                          unlimited_jce=True)
    print "Installing hosts. This might take a while."
    while cmd.success is None:
        sleep(5)
        cmd = cmd.fetch()
        print cmd

    if cmd.success != True:
        print "cm_host_install failed: " + cmd.resultMessage
        exit(1)

    print "cm_host_install succeeded"

    # first auto-assign roles and auto-configure the CM service
    cm.auto_assign_roles()
    cm.auto_configure()

    # create a cluster on that instance
    cluster = create_cluster(api, cluster_name, cdh_version)

    # add all our hosts to the cluster
    cluster.add_hosts(host_list)

    cluster = api.get_cluster(cluster_name)

    parcels_list = []
    # get and list all available parcels
    print "Available parcels:"
    for p in cluster.get_all_parcels():
        print '\t' + p.product + ' ' + p.version
        if p.version.startswith(cdh_version_number) and p.product == "CDH":
            parcels_list.append(p)

    if len(parcels_list) == 0:
        print "No " + cdh_version + " parcel found!"
        exit(1)

    cdh_parcel = parcels_list[0]
    for p in parcels_list:
        if p.version > cdh_parcel.version:
            cdh_parcel = p

    # download the parcel
    print "Starting parcel download. This might take a while."
    cmd = cdh_parcel.start_download()
    if cmd.success != True:
        print "Parcel download failed!"
        exit(1)

    # make sure the download finishes
    while cdh_parcel.stage != 'DOWNLOADED':
        sleep(5)
        cdh_parcel = get_parcel(api, cdh_parcel.product, cdh_parcel.version,
                                cluster_name)

    print cdh_parcel.product + ' ' + cdh_parcel.version + " downloaded"

    # distribute the parcel
    print "Starting parcel distribution. This might take a while."
    cmd = cdh_parcel.start_distribution()
    if cmd.success != True:
        print "Parcel distribution failed!"
        exit(1)

    # make sure the distribution finishes
    while cdh_parcel.stage != "DISTRIBUTED":
        sleep(5)
        cdh_parcel = get_parcel(api, cdh_parcel.product, cdh_parcel.version,
                                cluster_name)

    print cdh_parcel.product + ' ' + cdh_parcel.version + " distributed"

    # activate the parcel
    cmd = cdh_parcel.activate()
    if cmd.success != True:
        print "Parcel activation failed!"
        exit(1)

    # make sure the activation finishes
    while cdh_parcel.stage != "ACTIVATED":
        cdh_parcel = get_parcel(api, cdh_parcel.product, cdh_parcel.version,
                                cluster_name)

    print cdh_parcel.product + ' ' + cdh_parcel.version + " activated"

    # inspect hosts and print the result
    print "Inspecting hosts. This might take a few minutes."

    cmd = cm.inspect_hosts()
    while cmd.success is None:
        cmd = cmd.fetch()

    if cmd.success != True:
        print "Host inspection failed!"
        exit(1)

    print "Hosts successfully inspected: \n" + cmd.resultMessage

    # create all the services we want to add; we will only create one instance
    # of each
    for s in service_types_and_names.keys():
        service = cluster.create_service(service_types_and_names[s], s)

    # we will auto-assign roles; you can manually assign roles using the
    # /clusters/{clusterName}/services/{serviceName}/role endpoint or by using
    # ApiService.createRole()
    cluster.auto_assign_roles()
    cluster.auto_configure()

    # # this will set up the Hive and the reports manager databases because we
    # # can't auto-configure those two things
    # hive = cluster.get_service(service_types_and_names["HIVE"])
    # hive_config = {"hive_metastore_database_host": hive_metastore_host, \
    #                "hive_metastore_database_name": hive_metastore_name, \
    #                "hive_metastore_database_password": hive_metastore_password, \
    #                "hive_metastore_database_port": hive_metastore_database_port, \
    #                "hive_metastore_database_type": hive_metastore_database_type}
    # hive.update_config(hive_config)

    # start the management service
    cm_service = cm.get_service()
    cm_service.start().wait()

    # this will set the Reports Manager database password
    # first we find the correct role
    rm_role = None
    for r in cm.get_service().get_all_roles():
        if r.type == "REPORTSMANAGER":
            rm_role = r

    if rm_role is None:
        print "No REPORTSMANAGER role found!"
        exit(1)

    # then we get the corresponding role config group -- even though there is
    # only one instance of each CM management service, we do this just in case
    # it is not placed in the base group
    rm_role_group = rm_role.roleConfigGroupRef
    rm_rcg = get_role_config_group(api, rm_role.type, \
                                   rm_role_group.roleConfigGroupName, None)

    # update the appropriate fields in the config
    rm_rcg_config = {"headlamp_database_host": reports_manager_host, \
                     "headlamp_database_name": reports_manager_name, \
                     "headlamp_database_user": reports_manager_username, \
                     "headlamp_database_password": reports_manager_password, \
                     "headlamp_database_type": reports_manager_database_type}

    rm_rcg.update_config(rm_rcg_config)

    # restart the management service with new configs
    cm_service.restart().wait()

    # execute the first run command
    print "Excuting first run command. This might take a while."
    cmd = cluster.first_run()

    while cmd.success is None:
        cmd = cmd.fetch()

    if cmd.success != True:
        print "The first run command failed: " + cmd.resultMessage
        exit(1)

    print "First run successfully executed. Your cluster has been set up!"
Example #22
def install_cds(cm_host, host_list):
    print "Installing CDS for Spark2"

    api = ApiResource(cm_host, cm_port, cm_username, cm_password, version=7)
    cm = ClouderaManager(api)
    config = cm.get_config(view='full')

    # Add parcel repository
    repolist = config['REMOTE_PARCEL_REPO_URLS']
    value = repolist.value or repolist.default
    value += ',' + cds_parcel_repo
    cm.update_config({'REMOTE_PARCEL_REPO_URLS': value})
    sleep(10)

    # Install CDS parcel
    cluster = api.get_cluster(cluster_name)
    parcel = cluster.get_parcel(cds_parcel_name, cds_parcel_version)

    print "Downloading CDS parcel. This might take a while."
    if parcel.stage == "AVAILABLE_REMOTELY":
        parcel = wait_for_parcel(parcel.start_download(), api, parcel,
                                 cluster_name, 'DOWNLOADED')

    print "Distributing CDS parcel. This might take a while."
    if parcel.stage == "DOWNLOADED":
        parcel = wait_for_parcel(parcel.start_distribution(), api, parcel,
                                 cluster_name, 'DISTRIBUTED')

    print "Activating CDS parcel. This might take a while."
    if parcel.stage == "DISTRIBUTED":
        parcel = wait_for_parcel(parcel.activate(), api, parcel, cluster_name,
                                 'ACTIVATED')

    service = cluster.create_service(cds_service_name, cds_service_type)

    slaves = [host for host in host_list if 'slave' in host]
    edges = [host for host in host_list if 'edge' in host]

    service.create_role('SPARK2_YARN_HISTORY_SERVER-1',
                        'SPARK2_YARN_HISTORY_SERVER', cm_host)
    service.create_role('SPARK2_ON_YARN-GW_MASTER1', 'GATEWAY', cm_host)
    for (i, edge) in enumerate(edges):
        service.create_role('SPARK2_ON_YARN-GW_EDGE%s' % i, 'GATEWAY', edge)
    for (i, slave) in enumerate(slaves):
        service.create_role('SPARK2_ON_YARN-GW_SLAVE%s' % i, 'GATEWAY', slave)

    cluster.auto_configure()

    # Restart Cloudera Management Service and cluster
    cm_service = cm.get_service()
    cm_service.restart().wait()
    cluster.restart(restart_only_stale_services=True,
                    redeploy_client_configuration=True).wait()

    # Due to (presumably) CM bug, auto_configure() after Kafka installation creates additional
    # role config group for HDFS gateway, which breaks further use of auto_configure().
    # Below we remove it if it exists.
    try:
        hdfs_service = cluster.get_service("HDFS-1")
        hdfs_service.delete_role_config_group("HDFS-1-GATEWAY-1")
    except cm_api.api_client.ApiException:
        print(
            "Not removing HDFS Gateway role config group as it doesn't exist")
    else:
        print("Removed additional HDFS Gateway role config group")

    print "CDS is now installed."
Example #23
  def test_peer_v11(self):
    resource = utils.MockResource(self, version=11)
    cms = ClouderaManager(resource)

    json_peer1 = _make_cm_v11_format_peer("peer1", "url1", "REPLICATION")
    json_peer2 = _make_cm_v11_format_peer("peer2", "url2", "STATUS_AGGREGATION")

    peer1 = ApiCmPeer(resource,
        name="peer1",
        url="url1",
        username="******",
        password="******",
        type="REPLICATION")
    peer2 = ApiCmPeer(resource,
        name="peer2",
        url="url2",
        username="******",
        password="******",
        type="STATUS_AGGREGATION")

    params_replication = {
      'type':   "REPLICATION",
    }
    params_status_aggregation = {
      'type':   "STATUS_AGGREGATION",
    }

    # Create peer
    resource.expect("POST", "/cm/peers",
        data=peer1,
        retdata=json.loads(json_peer1))
    cms.create_peer("peer1", "url1", "username", "password")

    resource.expect("POST", "/cm/peers",
        data=peer2,
        retdata=json.loads(json_peer2))
    cms.create_peer("peer2", "url2", "username", "password",
        peer_type="STATUS_AGGREGATION")

    # Delete peer
    resource.expect("DELETE", "/cm/peers/peer1",
        params=params_replication, retdata=json.loads(json_peer1))
    cms.delete_peer("peer1")
    resource.expect("DELETE", "/cm/peers/peer2",
        params=params_status_aggregation, retdata=json.loads(json_peer2))
    cms.delete_peer("peer2", peer_type="STATUS_AGGREGATION")

    # Update peer
    resource.expect("PUT", "/cm/peers/peer1",
        data=peer1,
        retdata=json.loads(json_peer1))
    cms.update_peer("peer1", "peer1", "url1", "username", "password")

    resource.expect("PUT", "/cm/peers/peer2",
        data=peer2,
        retdata=json.loads(json_peer2))
    cms.update_peer("peer2", "peer2", "url2", "username", "password",
        peer_type="STATUS_AGGREGATION")

    # Read peer
    resource.expect("GET", "/cm/peers/peer1", params=params_replication,
        retdata=json.loads(json_peer1))
    cms.get_peer("peer1")
    resource.expect("GET", "/cm/peers/peer2",
        params=params_status_aggregation, retdata=json.loads(json_peer2))
    cms.get_peer("peer2", peer_type="STATUS_AGGREGATION")

    # Test peer connectivity
    resource.expect("POST", "/cm/peers/peer1/commands/test",
        params=params_replication,
        retdata=json.loads(SAMPLE_COMMAND_JSON))
    cms.test_peer_connectivity("peer1")
    resource.expect("POST", "/cm/peers/peer2/commands/test",
        params=params_status_aggregation,
        retdata=json.loads(SAMPLE_COMMAND_JSON))
    cms.test_peer_connectivity("peer2", peer_type="STATUS_AGGREGATION")
Example #24
def main():
    module = AnsibleModule(
        argument_spec=dict(
            cm_host=dict(required=True, type='str'),
            cm_port=dict(required=False, type='int', default=7180),
            cm_username=dict(required=True, type='str'),
            cm_password=dict(required=True, type='str', no_log=True),
            cm_tls=dict(required=False, type='bool', default=False),
            cluster_name=dict(required=False, type='str',default='cluster01'),
            hostname=dict(required=True, type='str'),
            service_name=dict(required=True, type='str'),
            role_type=dict(required=True, type='str'),
            role_name=dict(required=True, type='str'),
            cm_version=dict(required=False, type='int', default=13),
            restart_role=dict(required=False, type='bool', default=True),
            action=dict(choices=['create', 'delete', 'update'])
        )
    )

    cm_host = module.params.get('cm_host')
    cm_port = module.params.get('cm_port')
    cm_username = module.params.get('cm_username')
    cm_password = module.params.get('cm_password')
    cm_tls = module.params.get('cm_tls')
    cluster_name = module.params.get('cluster_name')
    hostname = module.params.get('hostname')
    service_name = module.params.get('service_name')
    role_type = module.params.get('role_type')
    role_name = module.params.get('role_name')
    restart_role = module.params.get('restart_role')
    cm_version = module.params.get('cm_version')
    action = module.params.get('action')

    changed = False
    start_roles = True
    cmd_timeout = 420

    if not api_enabled:
        module.fail_json(changed=changed, msg='cm_api required for this module')

    try:
        resource = ApiResource(cm_host, server_port=cm_port,
                                  username=cm_username,
                                  password=cm_password,
                                  use_tls=cm_tls,
                                  version=cm_version)
        cms = ClouderaManager(resource)
        cluster = resource.get_cluster(cluster_name)
        service = cluster.get_service(service_name)
    except ApiException as e:
        module.fail_json(changed=changed,
                         msg="Can't connect to CM API: {0}".format(e))

    def restart_roles():
        global service
        print "Restarting role %s on host %s" % (role_name, hostname)
        try:
            role = service.get_role(role_name)
            cmds = service.restart_roles(role.name)
            for cmd in cmds:
                print "Waiting for restart..."
                cmd.wait(cmd_timeout)
                print "Role restarted."
        except ApiException as err:
            print "Failed to restart role %s on host %s: %s" % (role_name, hostname, err.message)
Example #25
def main():
    api = ApiResource(cm_host,
                      cm_port,
                      cm_username,
                      cm_password,
                      version=api_num)
    cm = ClouderaManager(api)
    #cm.host_install(host_username, host_list, password=host_password, cm_repo_url=cm_repo_url)
    MANAGER = api.get_cloudera_manager()
    #MANAGER.update_config(...)
    print "Connected to CM host on " + cm_host + " and updated CM configuration"

    CLUSTER = init_cluster(api, cluster_name, cdh_version, host_list,
                           host_list)

    deploy_management(MANAGER, MGMT_SERVICENAME, MGMT_SERVICE_CONFIG,
                      MGMT_ROLE_CONFIG, AMON_ROLENAME, AMON_ROLE_CONFIG,
                      APUB_ROLENAME, APUB_ROLE_CONFIG, ESERV_ROLENAME,
                      ESERV_ROLE_CONFIG, HMON_ROLENAME, HMON_ROLE_CONFIG,
                      SMON_ROLENAME, SMON_ROLE_CONFIG, NAV_ROLENAME,
                      NAV_ROLE_CONFIG, NAVMS_ROLENAME, NAVMS_ROLE_CONFIG,
                      RMAN_ROLENAME, RMAN_ROLE_CONFIG)

    print "Deployed CM management service " + MGMT_SERVICENAME + " to run on " + cm_host + "now service is stop!"

    deploy_parcels(CLUSTER, PARCELS)
    print "Downloaded and distributed parcels: "
    PRETTY_PRINT.pprint(PARCELS)

    zookeeper_service = deploy_zookeeper(CLUSTER, ZOOKEEPER_SERVICE_NAME,
                                         ZOOKEEPER_HOSTS,
                                         ZOOKEEPER_SERVICE_CONFIG,
                                         ZOOKEEPER_ROLE_CONFIG)
    print "Deployed ZooKeeper " + ZOOKEEPER_SERVICE_NAME + " to run on: "
    PRETTY_PRINT.pprint(ZOOKEEPER_HOSTS)

    hdfs_service = deploy_hdfs(CLUSTER, HDFS_SERVICE_NAME, HDFS_SERVICE_CONFIG,
                               HDFS_NAMENODE_SERVICE_NAME, HDFS_NAMENODE_HOST,
                               HDFS_NAMENODE_CONFIG,
                               HDFS_SECONDARY_NAMENODE_HOST,
                               HDFS_SECONDARY_NAMENODE_CONFIG,
                               HDFS_DATANODE_HOSTS, HDFS_DATANODE_CONFIG,
                               HDFS_GATEWAY_HOSTS, HDFS_GATEWAY_CONFIG)
    print "Deployed HDFS service " + HDFS_SERVICE_NAME + " using NameNode on " + HDFS_NAMENODE_HOST + ", SecondaryNameNode on " + HDFS_SECONDARY_NAMENODE_HOST + ", and DataNodes running on: "
    PRETTY_PRINT.pprint(HDFS_DATANODE_HOSTS)
    init_hdfs(hdfs_service, HDFS_SERVICE_NAME, 600)
    # Note: this final step was moved here from the post_startup function
    #hdfs_service.create_hdfs_tmp()
    print "Initialized HDFS service"

    yarn_service = deploy_yarn(CLUSTER, YARN_SERVICE_NAME, YARN_SERVICE_CONFIG,
                               YARN_RM_HOST, YARN_RM_CONFIG, YARN_JHS_HOST,
                               YARN_JHS_CONFIG, YARN_NM_HOSTS, YARN_NM_CONFIG,
                               YARN_GW_HOSTS, YARN_GW_CONFIG)
    print "Deployed YARN service " + YARN_SERVICE_NAME + " using ResourceManager on " + YARN_RM_HOST + ", JobHistoryServer on " + YARN_JHS_HOST + ", and NodeManagers on "
    PRETTY_PRINT.pprint(YARN_NM_HOSTS)

    #deploy_hbase(CLUSTER, HBASE_SERVICE_NAME, HBASE_SERVICE_CONFIG, HBASE_HM_HOST, HBASE_HM_CONFIG, HBASE_RS_HOSTS, HBASE_RS_CONFIG, HBASE_THRIFTSERVER_SERVICE_NAME, HBASE_THRIFTSERVER_HOST, HBASE_THRIFTSERVER_CONFIG, HBASE_GW_HOSTS, HBASE_GW_CONFIG)
    deploy_hbase(CLUSTER, HBASE_SERVICE_NAME, HBASE_SERVICE_CONFIG,
                 HBASE_HM_HOST, HBASE_HM_CONFIG, HBASE_RS_HOSTS,
                 HBASE_RS_CONFIG, HBASE_GW_HOSTS, HBASE_GW_CONFIG)
    print "Deployed HBase service " + HBASE_SERVICE_NAME + " using HMaster on " + HBASE_HM_HOST + " and RegionServers on "
    PRETTY_PRINT.pprint(HBASE_RS_HOSTS)

    hive_service = deploy_hive(CLUSTER, HIVE_SERVICE_NAME, HIVE_SERVICE_CONFIG,
                               HIVE_HMS_HOST, HIVE_HMS_CONFIG, HIVE_HS2_HOST,
                               HIVE_HS2_CONFIG, HIVE_GW_HOSTS, HIVE_GW_CONFIG)
    print "Depoyed Hive service " + HIVE_SERVICE_NAME + " using HiveMetastoreServer on " + HIVE_HMS_HOST + " and HiveServer2 on " + HIVE_HS2_HOST
    init_hive(hive_service)
    print "Initialized Hive service"

    impala_service = deploy_impala(CLUSTER, IMPALA_SERVICE_NAME,
                                   IMPALA_SERVICE_CONFIG, IMPALA_SS_HOST,
                                   IMPALA_SS_CONFIG, IMPALA_CS_HOST,
                                   IMPALA_CS_CONFIG, IMPALA_ID_HOSTS,
                                   IMPALA_ID_CONFIG)
    print "Deployed Impala service " + IMPALA_SERVICE_NAME + " using StateStore on " + IMPALA_SS_HOST + ", CatalogServer on " + IMPALA_CS_HOST + ", and ImpalaDaemons on "
    PRETTY_PRINT.pprint(IMPALA_ID_HOSTS)

    #CLUSTER.stop().wait()
    CLUSTER.start().wait()
    #post_startup(CLUSTER, hdfs_service, oozie_service)

    oozie_service = deploy_oozie(CLUSTER, OOZIE_SERVICE_NAME,
                                 OOZIE_SERVICE_CONFIG, OOZIE_SERVER_HOST,
                                 OOZIE_SERVER_CONFIG)
    print "Deployed Oozie service " + OOZIE_SERVICE_NAME + " using OozieServer on " + OOZIE_SERVER_HOST

    hue_service = deploy_hue(CLUSTER, HUE_SERVICE_NAME, HUE_SERVICE_CONFIG,
                             HUE_SERVER_HOST, HUE_SERVER_CONFIG, HUE_KTR_HOST,
                             HUE_KTR_CONFIG)
    print "Deployed HUE service " + HUE_SERVICE_NAME + " using HueServer on " + HUE_SERVER_HOST

    #post_startup(CLUSTER, hdfs_service)
    print "About to restart cluster."
    CLUSTER.stop().wait()
    CLUSTER.start().wait()
    print "Done restarting cluster."

    post_startup(CLUSTER, hdfs_service, oozie_service)
Example #26
from cm_api.api_client import ApiResource
from cm_api.endpoints.cms import ClouderaManager

cm_host = "127.0.0.1"

api = ApiResource(cm_host, username="******", password="******")

cms = ClouderaManager(api)

cmd = cms.get_service().restart()
cmd = cmd.wait()
print "Cloudera Manager Restart. Active: %s. Success: %s" % (cmd.active,
                                                             cmd.success)

cluster = api.get_cluster("Spark")
print cluster

restart_cluster = cluster.restart()
restart_cluster = restart_cluster.wait()
print "Cluster %s. Status - restart success: %s." % (cluster.name,
                                                     restart_cluster.success)

print "Cluster %s. Status - Configuration Stale -- Redeploying configurations" % cluster.name
redeploy_config = cluster.deploy_client_config()
redeploy_config = redeploy_config.wait()
print "New configuration success: %s." % redeploy_config.success
Example #27
def main():
    module = AnsibleModule(
        argument_spec=dict(
            cm_host=dict(required=True, type='str'),
            cm_port=dict(required=False, type='int', default=7180),
            cm_username=dict(required=True, type='str'),
            cm_password=dict(required=True, type='str', no_log=True),
            cm_tls=dict(required=False, type='bool', default=False),
            cluster_name=dict(required=False, type='str',default='cluster'),
            hostname=dict(required=True, type='str'),
            template_name=dict(required=True, type='str'),
            cm_version=dict(required=False, type='int', default=13),
            redeploy_config=dict(required=False, type='bool', default=True),
            action=dict(choices=['create', 'apply', 'delete', 'config'])
        )
    )

    cm_host = module.params.get('cm_host')
    cm_port = module.params.get('cm_port')
    cm_username = module.params.get('cm_username')
    cm_password = module.params.get('cm_password')
    cm_tls = module.params.get('cm_tls')
    cluster_name = module.params.get('cluster_name')
    hostname = module.params.get('hostname')
    template_name = module.params.get('template_name')
    cm_version = module.params.get('cm_version')   
    redeploy_config = module.params.get('redeploy_config')
    action = module.params.get('action')

    changed = False
    CMD_TIMEOUT = 420  # assumed timeout (seconds) for apply_host_template's wait below

    if not api_enabled:
        module.fail_json(changed=changed, msg='cm_api required for this module')

    try:
        resource = ApiResource(cm_host, server_port=cm_port,
                                  username=cm_username,
                                  password=cm_password,
                                  use_tls=cm_tls,
                                  version=cm_version)
        cluster = resource.get_cluster(cluster_name)
        template = cluster.get_host_template(template_name)
        cms = ClouderaManager(resource)
    except ApiException as e:
        module.fail_json(changed=changed,
                         msg="Can't connect to CM API: {0}".format(e))

    def redeploy_client_config(cluster):
        cluster.deploy_client_config()

    if action == "apply":
        try:
          host = list()
          host.append(hostname)
	  cmd = template.apply_host_template(host,True)
          while True:
                if cmd.wait(CMD_TIMEOUT).success:
                        break
          cluster.deploy_client_config()
	  module.exit_json(changed=True, rc=0)
        except Exception as e:
          module.fail_json(changed=changed, msg="{0}".format(e))

    if action == "config":
	try:
		redeploy_client_config(cluster)
		module.exit_json(changed=True, rc=0)
	except Exception as e:
		module.fail_json(changed=changed, msg="{0}".format(e))

    module.exit_json(changed=False, settings=cms.get_config('summary'))