Code Example #1
 def __init__(self):
     import cm_client as cm
     cm.configuration.username = "******"
     cm.configuration.password = "******"
     self.api_client = cm.ApiClient(self.api_url)
     self.services_api_health = cm.ServicesResourceApi(self.api_client)
     self.services_api_IO = cm.TimeSeriesResourceApi(self.api_client)
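A minimal usage sketch (not from the original project): it assumes a hypothetical wrapper class and endpoint, since the __init__ above expects self.api_url to be set before it runs.

import cm_client

class CMMonitor(object):  # hypothetical name for the class excerpted above
    api_url = 'http://cm-host.example.com:7180/api/v33'  # assumed endpoint

    def __init__(self):
        import cm_client as cm
        cm.configuration.username = 'admin'  # placeholder credentials
        cm.configuration.password = 'admin'
        self.api_client = cm.ApiClient(self.api_url)
        self.services_api_health = cm.ServicesResourceApi(self.api_client)
        self.services_api_IO = cm.TimeSeriesResourceApi(self.api_client)

mon = CMMonitor()
print(mon.services_api_health.read_services('cluster1', view='SUMMARY'))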
Code Example #2
import cm_client


def cluster_stats():
	"""Gets the total YARN meory & VCores using the Cloudera anager API"""

	#req = requests.get("http://nightly6x-unsecure-1.nightly6x-unsecure.root.hwx.site:7180/api/v40/tools/echo?message=hello")
	#print req.content

	cm_client.configuration.username = '******'
	cm_client.configuration.password = '******'

	# Create an instance of the API class
	api_host = 'http://nightly6x-unsecure-1.nightly6x-unsecure.root.hwx.site'
	port = '7180'
	api_version = 'v33'
	# Construct base URL for API
	# e.g. http://cmhost:7180/api/v33
	api_url = api_host + ':' + port + '/api/' + api_version
	print api_url
	api_client = cm_client.ApiClient(api_url)
	cluster_api_instance = cm_client.ClustersResourceApi(api_client)

	api_response = cluster_api_instance.read_clusters(view='SUMMARY')
	for cluster in api_response.items:
		print cluster.name, "-", cluster.full_version

	services_api_instance = cm_client.ServicesResourceApi(api_client)
	services = services_api_instance.read_services(cluster.name, view='FULL')

	for service in services.items:
		
		if service.type=="YARN":
			yarn = service

	print yarn.name
    

	# get_metrics only exists on the old v5 API, hence a second client
	api_url_v5 = api_host + ':' + port + '/api/' + 'v5'
	api_client_v5 = cm_client.ApiClient(api_url_v5)
	print api_url_v5
	services_api_instance_v5 = cm_client.ServicesResourceApi(api_client_v5)
	#print services_api_instance_v5.get_metrics(cluster.name, hdfs.name)
	metrics = services_api_instance_v5.get_metrics(cluster.name, yarn.name)
	for m in metrics.items:
		print "%s (%s)" % (m.name, m.unit)
Code Example #3
File: cm_info.py  Project: xingangzhang/HDFSManage
 def init_conn(self, host, port, version):
     """初始化连接,构建cm的连接实例
     """
     url = host + ':' + port + '/api/' + version
     client = cm_client.ApiClient(url)
     # Create the resource API instances
     self._cluster_api_instance = cm_client.ClustersResourceApi(client)
     # Resource API used to fetch the NameNode
     self._services_api_instance = cm_client.ServicesResourceApi(client)
     self._roles_api_instance = cm_client.RolesResourceApi(client)
     self._host_api_instance = cm_client.HostsResourceApi(client)
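A usage sketch under assumptions: the owning class name is hypothetical, and credentials have to be set on cm_client.configuration before init_conn builds the client.

import cm_client

cm_client.configuration.username = 'admin'  # placeholder credentials
cm_client.configuration.password = 'admin'

cm_info = CmInfo()  # hypothetical class name for the method above
cm_info.init_conn('http://cm-host.example.com', '7180', 'v19')
print(cm_info._cluster_api_instance.read_clusters(view='SUMMARY'))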
Code Example #4
 def get_service_api(self, cluster_name):
     try:
         services_api_instance = cm_client.ServicesResourceApi(
             self.api_client)
         services_api_response = services_api_instance.read_services(
             cluster_name, view='full')
         return services_api_response
     except ApiException as e:
         self.log.error(
             'ERROR with ServicesResourceApi > read_services at {}'.format(
                 self.url))
         raise e
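The except clause above presumes ApiException is already imported; in the swagger-generated cm_client package it comes from the rest module.

# Import assumed by the snippet above.
from cm_client.rest import ApiException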
Code Example #5
 def _get_available_metrics(self):
     if self.services:
         new_available_metrics_map = {}
         for service in self.services:
             services_api_instance = cm_client.ServicesResourceApi(
                 self.metrics_api_client)
             metrics_api_response = services_api_instance.get_metrics(
                 self.cluster.name, service.name)
             new_metrics_list = []
             for m in metrics_api_response.items:
                 if m.data:
                     new_metrics_list.append(m.name)
             new_available_metrics_map[service.name] = new_metrics_list
         self.availableMetrics = new_available_metrics_map
Code Example #6
    def inspect_roles(self, cluster, service):
        roles = self.roles_api_instance.read_roles(cluster.display_name,
                                                   service.name)
        for role in roles.items:

            if role.health_summary == 'BAD':
                self.bad_alert_count += 1
                hostname = self.host_details[role.host_ref.host_id]
                msg = "Role name: {}\nState: {}\nHealth: {}\n".format(
                    role.name, role.role_state, role.health_summary, hostname)
                self.msg_body += "Role name: {}</br>State: {}</br>Health: {}</br>".format(
                    role.name, role.role_state, role.health_summary, hostname)
                print(self.msg_body)
Code Example #7
File: Bot_Cloudera_v1.py  Project: ozzienich/Telegram
def cm_clusterZ1_services_command(update: Update,
                                  context: CallbackContext) -> None:
    chat_ids = update.message.chat_id
    api_client = cm_client.ApiClient(api_url_drc)
    api_instance = cm_client.ServicesResourceApi(api_client)
    api_response = api_instance.read_services('clusterZ1')
    mo = 'Cluster: <b>clusterZ1</b>:\n'
    for service in api_response.items:
        if service.health_summary == 'GOOD':
            mo = mo + '\U00002705'

        mo = mo + ('[<b>' + service.display_name + '</b>] => ' +
                   service.health_summary + '=> ' + service.service_state +
                   '\n')

    context.bot.send_message(chat_id=chat_ids,
                             parse_mode=ParseMode.HTML,
                             text=(mo))
Code Example #8
 def _get_services(self):
     services_api_instance = cm_client.ServicesResourceApi(self.api_client)
     api_response = services_api_instance.read_services(self.cluster.name,
                                                        view='FULL')
     self.services = api_response.items
     if self.services:
         for service in self.services:
             if service.health_checks:
                 for health_check in service.health_checks:
                     health_check_event = {}
                     health_check_event["serviceName"] = service.name
                     health_check_event["serviceType"] = service.type
                     health_check_event[
                         "explanation"] = health_check.explanation
                     health_check_event["name"] = health_check.name
                     health_check_event["value"] = health_check.summary
                     health_check_event["timestamp"] = time.time()
                     self.health_checks_processor.process(
                         self._append_global_fields(health_check_event))
Code Example #9
 def __init__(self, cm_host, env, send_alert):
     cm_client.configuration.username = '******'
     cm_client.configuration.password = '******'
     self.env = env
     api_host = 'http://' + cm_host
     port = '7180'
     api_version = 'v19'
     api_url = api_host + ':' + port + '/api/' + api_version
     self.send_alert_email = send_alert
     print(api_url)
     self.api_client = cm_client.ApiClient(api_url)
     self.cluster_api_instance = cm_client.ClustersResourceApi(
         self.api_client)
     self.services_api_instance = cm_client.ServicesResourceApi(
         self.api_client)
     self.roles_api_instance = cm_client.RolesResourceApi(self.api_client)
     self.host_api_instance = cm_client.HostsResourceApi(self.api_client)
     self.host_details = self.get_hostname_by_id()
     self.bad_alert_count = 0
     self.msg_body = ""
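A hedged sketch of the get_hostname_by_id helper called above (its real definition is elsewhere in the project); a {host_id: hostname} map like this is what Code Example #6 indexes as self.host_details.

 def get_hostname_by_id(self):
     # Sketch only: map host_id -> hostname via the hosts API.
     hosts = self.host_api_instance.read_hosts(view='SUMMARY')
     return {h.host_id: h.hostname for h in hosts.items}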
Code Example #10
def main():
    cm_client.configuration.username = ADMIN_USER
    cm_client.configuration.password = ADMIN_PASS
    # Create an instance of the API class
    api_host = 'http://'+ CM_HOST
    port = '7180'
    api_version = 'v18'
    api_url = api_host + ':' + port + '/api/' + api_version
    api_client = cm_client.ApiClient(api_url)
    host_api_instance = cm_client.HostsResourceApi(api_client)
    service_api_instance = cm_client.ServicesResourceApi(api_client)
    sentry_host_id = fetch_hostid(HostsApi=host_api_instance, hostname=SENTRY_HA_HOST)
    cluster_name = CLUSTER_NAME
    service_name = "Sentry"
    body = cm_client.ApiEnableSentryHaArgs(new_sentry_host_id=sentry_host_id, new_sentry_role_name="Sentry-Server-2", zk_service_name="ZOOKEEPER")
    setup_logger.info("Enabling Sentry HA.....")
    time.sleep(60)
    res = service_api_instance.enable_sentry_ha_command(cluster_name, service_name, body=body)
    time.sleep(60)
    setup_logger.info("Sentry HA Enabled...")
Code Example #11
import argparse
import datetime

import cm_client
# cm_api imports assumed for the BDR calls below; ApiResource and the
# replication argument types live in the legacy cm_api package
from cm_api.api_client import ApiResource
from cm_api.endpoints.types import (ApiHdfsReplicationArguments,
                                    ApiHiveReplicationArguments,
                                    ApiHiveTable, ApiServiceRef)


def main(passed_username, passed_password, passed_database):

    PEER_NAME = 'PRODUCTION'                        # Previously
    TARGET_CLUSTER_NAME = 'DEV'                     #     defined
    SOURCE_CLUSTER_NAME = 'cluster'                 #       at Experian

    cm_host = 'br1andvhmn11.passporthealth.com'


    cm_client.configuration.username = passed_username

    # Ensure that password is quoted
    cm_client.configuration.password = "******" + passed_password + "'"

    sourceDatabase = passed_database

    # Setup authentication for SSL
    cm_client.configuration.verify_ssl = True
    cm_client.configuration.ssl_ca_cert = '/opt/cloudera/security/pki/x509/truststore.pem'

    # Create an instance of the API class
    api_host = 'https://br1andvhmn11.passporthealth.com'
    port = '7183'
    api_version = 'v30'

    impala_host = 'br1anprhsn02.passporthealth.com'

    # Construct base URL for API
    # http://cmhost:7180/api/v30
    api_url = api_host + ':' + port + '/api/' + api_version
    api_client = cm_client.ApiClient(api_url)
    cluster_api_instance = cm_client.ClustersResourceApi(api_client)

    # Lists all known clusters.
    api_response = cluster_api_instance.read_clusters(view='SUMMARY')
    for cluster in api_response.items:
        print cluster.name, "-", cluster.full_version

        services_api_instance = cm_client.ServicesResourceApi(api_client)
        services = services_api_instance.read_services(cluster.name, view='FULL')
        for service in services.items:
            # print service.display_name, "-", service.type
            if service.type == 'HIVE':
                targetHive = service
                targetCluster = cluster

                print targetHive.name, targetHive.service_state, targetHive.health_summary

                for health_check in targetHive.health_checks:
                    print health_check.name, "---", health_check.summary

    #		print "Source database = " + sourceDatabase

    ###show_statement = "'show tables in " + sourceDatabase +"'"
    ###streamOperand = "impala-shell -i " + impala_host + " -d default -k --ssl --ca_cert=/opt/cloudera/security/pki/x509/truststore.pem -q " + show_statement
    ###stream=os.popen(streamOperand)
    ###
    ###output=stream.readlines()
    ###lineno =0
    ###numtables = 0
    ###tablenames = []
    ###for line in output:
    ###    if lineno <= 2:     # skip heading lines
    ###        pass
    ###    elif line[0:3] == "+--":                    # skip last line
    ###        pass
    ###    else:                                       # strip out tablename
    ###        name = line[2:]
    ###        blank = name.index(' ')
    ###        tablenames.append(name[0:blank])
    ###        numtables +=1
    ###    lineno +=1
    ###print str(numtables) + " tables in database " + sourceDatabase
    ###for table in tablenames:
    ###	print table


    tablenames = ["test", "test2"]


    api_root = ApiResource(cm_host, username=passed_username, password=passed_password,  use_tls=True)

    PEER_NAME = 'PRODUCTION'
    SOURCE_HDFS_NAME = 'hdfs'
    TARGET_HDFS_NAME = 'hdfs'
    SOURCE_HIVE_NAME = 'hive'
    TARGET_HIVE_NAME = 'hive'
    SOURCE_CLUSTER_NAME = 'cluster'
    TARGET_CLUSTER_NAME = 'DEV'
    TARGET_YARN_SERVICE = 'yarn'




    # Setup for Hive replication
    hive =  api_root.get_cluster(TARGET_CLUSTER_NAME).get_service(TARGET_HIVE_NAME)
    hive_args = ApiHiveReplicationArguments(None)
    hdfs_args = ApiHdfsReplicationArguments(None)                   # Needed for replicating table data stored in HDFS
    hive_args.sourceService = ApiServiceRef(None, peerName=PEER_NAME, clusterName=SOURCE_CLUSTER_NAME, serviceName=SOURCE_HIVE_NAME)

    # Define tables to replicate
    table_filters = []
    table = ApiHiveTable(None)

    for tab in tablenames:
        table.database = (passed_database)
        table.tableName = (tab)
        table_filters = []
        table_filters.append(table)
        print "Replicating " + passed_database + "." + tab

        hive_args.tableFilters = table_filters
        hive_args.force = True                                          # Overwrite existing tables
        hive_args.replicateData = True                                  # Replicate table data stored in HDFS
        hdfs_args.skipChecksumChecks = True
        hdfs_args.skipListingChecksumChecks = True
        hdfs_args.preserveBlockSize = True
        hdfs_args.preserveReplicationCount = True
        hdfs_args.preservePermissions = True

        # Define HDFS portion of the Hive replication as needed
        hdfs_args.destinationPath = '/user/bob.marshall/repltest'       # Argument? Path relative to servicename?
        hdfs_args.mapreduceServiceName = TARGET_YARN_SERVICE
        hdfs_args.userName = passed_username
        hdfs_args.sourceUser = passed_username

        hive_args.hdfsArguments = hdfs_args

        start = datetime.datetime.now()
        end = start + datetime.timedelta(days=1)
        interval = "DAY"
        numinterval = 1
        pause = True

        print "Creating Hive Replication Schedule"
        schedule = hive.create_replication_schedule(start, end, interval, numinterval, pause, hive_args)
        print "Starting Hive Replication"
        cmd = hive.trigger_replication_schedule(schedule.id)
        print "Waiting for completion"
        cmd = cmd.wait()
        print "Getting result"
        result = hive.get_replication_schedule(schedule.id).history[0].hiveResult
        print result

        print "Cleanup... Remove Hive replication schedule"
        sch = hive.delete_replication_schedule(schedule.id)
        print sch

    exit(0)

    #scheds = hive.get_replication_schedules()
    #sch = hive.delete_replication_schedule(162)


    # Setup for HDFS replication. Note: unreachable while the exit(0) above
    # remains; indented into main so it cannot run at import time with
    # api_root undefined.
    hdfs = api_root.get_cluster(TARGET_CLUSTER_NAME).get_service(TARGET_HDFS_NAME)
    hdfs_args = ApiHdfsReplicationArguments(None)
    hdfs_args.sourceService = ApiServiceRef(None, peerName=PEER_NAME, clusterName=SOURCE_CLUSTER_NAME, serviceName=SOURCE_HDFS_NAME)
    hdfs_args.sourcePath = '/user/bob.marshall/repltest'
    hdfs_args.destinationPath = '/user/bob.marshall/repltest'
    hdfs_args.mapreduceServiceName = TARGET_YARN_SERVICE
    hdfs_args.userName = args.username
    hdfs_args.sourceUser = args.username
    hdfs_args.preserveBlockSize = True
    hdfs_args.preserveReplicationCount = True
    hdfs_args.preservePermissions = True
    hdfs_args.skipChecksumChecks = True
    hdfs_args.skipListingChecksumChecks = True
    start = datetime.datetime.now()
    end = start + datetime.timedelta(days=1)
    interval = "DAY"
    numinterval = 1
    pause = True
    #schedule = hdfs.create_replication_schedule(start, end, interval, interval, pause, hdfs_args)
    print "Creating HDFS Replication Schedule"
    schedule = hdfs.create_replication_schedule(start, end, "DAY", 1, True, hdfs_args)
    print "Starting HDFS Replication"
    cmd = hdfs.trigger_replication_schedule(schedule.id)
    print "Waiting for completion"
    cmd = cmd.wait()
    print "Getting result"
    result = hdfs.get_replication_schedule(schedule.id).history[0].hdfsResult
    print result

    print "Cleanup... Remove HDFS replication schedule"
    sch = hdfs.delete_replication_schedule(schedule.id)
    print sch

    #scheds = hdfs.get_replication_schedules()
    #sch = hdfs.delete_replication_schedule(27)

if __name__ == "__main__":

    parser = argparse.ArgumentParser(description='Perform BDR jobs while getting around BDR limitations.')
    parser.add_argument("username")
    parser.add_argument("password")
    parser.add_argument("database")
    args = parser.parse_args()

    main(args.username, args.password, args.database)
Code Example #12
File: prepare_env.py  Project: mwiewior/perf-report
# Create an instance of the API class
api_host = options.url
port = options.port
api_version = 'v19'
api_url = api_host + ':' + port + '/api/' + api_version
print(api_url)
api_client = cm_client.ApiClient(api_url)
cluster_api_instance = cm_client.ClustersResourceApi(api_client)

#

# Lists all known clusters.
api_response = cluster_api_instance.read_clusters(view='SUMMARY')
cluster = api_response.items[0]
services_api_instance = cm_client.ServicesResourceApi(api_client)
services = services_api_instance.read_services(cluster.name, view='FULL')
for service in services.items:
    # print service.display_name, "-", service.type
    if service.type == 'YARN':
        yarn = service
        # roles = services_api_instance.read_roles(cluster.name, hdfs.name)
roles_api_instance = cm_client.RolesResourceApi(api_client)
command_api_instance = cm_client.RoleCommandsResourceApi(api_client)
roles = roles_api_instance.read_roles(cluster.name, yarn.name)
# print (roles)

dn_roles = [
    role.name for role in roles.items
    if role.type == 'NODEMANAGER' and role.host_ref.host_id in host_ids_action
]
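A hedged continuation, not in the original file: one plausible use of dn_roles is restarting those NodeManagers through the role-commands API; the method and body type below follow the swagger client's naming.

# Assumed follow-up: restart the selected NODEMANAGER roles.
body = cm_client.ApiRoleNameList(items=dn_roles)
bulk = command_api_instance.restart_command(cluster.name, yarn.name, body=body)
for cmd in bulk.items:
    print(cmd.name, cmd.active)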
Code Example #13
File: HiveBDRcurrent.py  Project: rmarshasatx/HiveBDR
import datetime

import cm_client
# cm_api imports assumed for the BDR calls below; ApiResource and the
# replication argument types live in the legacy cm_api package
from cm_api.api_client import ApiResource
from cm_api.endpoints.types import (ApiHdfsReplicationArguments,
                                    ApiHiveReplicationArguments,
                                    ApiHiveTable, ApiServiceRef)


def main(passed_username, passed_password, passed_database):

    PEER_NAME = 'PRODUCTION'                        # Previously
    TARGET_CLUSTER_NAME = 'DEV'                     #     defined
    SOURCE_CLUSTER_NAME = 'cluster'                 #       at Experian

    cm_host = 'br1andvhmn11.passporthealth.com'


    cm_client.configuration.username = passed_username

    cm_client.configuration.password = passed_password

    sourceDatabase = passed_database

    # Setup authentication for SSL
    cm_client.configuration.verify_ssl = True
    cm_client.configuration.ssl_ca_cert = '/opt/cloudera/security/pki/x509/truststore.pem'

    # Create an instance of the API class
    api_host = 'https://br1andvhmn11.passporthealth.com'
    port = '7183'
    api_version = 'v30'

#For testing, until firewall opened, pull tablenames from target cluster. We are therefore simulating an overwrite. 
#For real, change impalahost to pull tablenames from source cluster.
#   impala_host = 'br1anprhsn02.passporthealth.com'

    impala_host = 'br1andvhmn02.passporthealth.com'   
 
    # Construct base URL for API
    # http://cmhost:7180/api/v30
    api_url = api_host + ':' + port + '/api/' + api_version
    api_client = cm_client.ApiClient(api_url)
    cluster_api_instance = cm_client.ClustersResourceApi(api_client)

    # Lists all known clusters.
    api_response = cluster_api_instance.read_clusters(view='SUMMARY')
    for cluster in api_response.items:
        print cluster.name, "-", cluster.full_version

        services_api_instance = cm_client.ServicesResourceApi(api_client)
        services = services_api_instance.read_services(cluster.name, view='FULL')
        for service in services.items:
            # print service.display_name, "-", service.type
            if service.type == 'HIVE':
                targetHive = service
                targetCluster = cluster

                print targetHive.name, targetHive.service_state, targetHive.health_summary

                for health_check in targetHive.health_checks:
                    print health_check.name, "---", health_check.summary

    #		print "Source database = " + sourceDatabase

    ###show_statement = "'show tables in " + sourceDatabase +"'"
    ###streamOperand = "impala-shell -i " + impala_host + " -d default -k --ssl --ca_cert=/opt/cloudera/security/pki/x509/truststore.pem -q " + show_statement
    ###stream=os.popen(streamOperand)
    ###
    ###output=stream.readlines()
    ###lineno =0
    ###numtables = 0
    ###tablenames = []
    ###for line in output:
    ###    if lineno <= 2:     # skip heading lines
    ###        pass
    ###    elif line[0:3] == "+--":                    # skip last line
    ###        pass
    ###    else:                                       # strip out tablename
    ###        name = line[2:]
    ###        blank = name.index(' ')
    ###        tablenames.append(name[0:blank])
    ###        numtables +=1
    ###    lineno +=1
    ###print str(numtables) + " tables in database " + sourceDatabase
    ###for table in tablenames:
    ###	print table


    tablenames = [
        'b05006', 'b05007_bgpr_cape_rename', 'b07001_bgpr_cape',
        'b13008_bgpr_cape', 'c16001_bgpr_cape', 'c24070_bgpr_cape',
        'cde2018_bg_a1', 'cde2018_bg_a2', 'cde2018_bg_a3', 'cde2018_bg_a4',
        'cde2018_bg_b', 'cde2018_bg_c1', 'cde2018_bg_c2', 'cde2018_bg_c3',
        'cde2018_bg_d',
        'cons_exp_annual_cye2018_bg', 'cons_exp_annual_fyp2018_bg',
        'cons_exp_avgannual_cye2018_bg', 'cons_exp_avgannual_fyp2018_bg',
        'cons_exp_gifts_cye2018_bg', 'cons_exp_gifts_fyp2018_bg',
        'cye2018_bg_a1', 'cye2018_bg_a2', 'cye2018_bg_a3', 'cye2018_bg_a4',
        'cye2018_bg_b', 'cye2018_bg_c1', 'cye2018_bg_c2', 'cye2018_bg_c3',
        'cye2018_bg_d',
        'fyp2018_bg_a1', 'fyp2018_bg_a2', 'fyp2018_bg_a3', 'fyp2018_bg_a4',
        'fyp2018_bg_b', 'fyp2018_bg_c1', 'fyp2018_bg_c2', 'fyp2018_bg_c3',
        'fyp2018_bg_d',
    ]

    api_root = ApiResource(cm_host, username=passed_username, password=passed_password,  use_tls=True)
    
    PEER_NAME = 'PRODUCTION'
    SOURCE_HDFS_NAME = 'hdfs'
    TARGET_HDFS_NAME = 'hdfs'
    SOURCE_HIVE_NAME = 'hive'
    TARGET_HIVE_NAME = 'hive'
    SOURCE_CLUSTER_NAME = 'cluster'
    TARGET_CLUSTER_NAME = 'DEV'
    TARGET_YARN_SERVICE = 'yarn'




    # Setup for Hive replication
    hive =  api_root.get_cluster(TARGET_CLUSTER_NAME).get_service(TARGET_HIVE_NAME)
    hive_args = ApiHiveReplicationArguments(None)
    hdfs_args = ApiHdfsReplicationArguments(None)                   # Needed for replicating table data stored in HDFS
    hive_args.sourceService = ApiServiceRef(None, peerName=PEER_NAME, clusterName=SOURCE_CLUSTER_NAME, serviceName=SOURCE_HIVE_NAME)

    # Define tables to replicate
    table_filters = []
    table = ApiHiveTable(None)

    for tab in tablenames:
#       table.database = (passed_database)
#
#  Hardwire database name for 2020-11-24 replication
#
        table.database = 'lake_consumerview'
        table.tableName = (tab)
        table_filters = []
        table_filters.append(table)
        print "Replicating " + passed_database + "." + tab

        hive_args.tableFilters = table_filters
        hive_args.force = True                                          # Overwrite existing tables
        hive_args.replicateData = True                                  # Replicate table data stored in HDFS
        hdfs_args.skipChecksumChecks = True
        hdfs_args.skipListingChecksumChecks = True
        hdfs_args.preserveBlockSize = True
        hdfs_args.preserveReplicationCount = True
        hdfs_args.preservePermissions = True

        # Define HDFS portion of the Hive replication as needed
#        hdfs_args.destinationPath = '/'
        hdfs_args.mapreduceServiceName = TARGET_YARN_SERVICE
        hdfs_args.userName = passed_username
        hdfs_args.sourceUser = passed_username

        hive_args.hdfsArguments = hdfs_args

        start = datetime.datetime.now()
        end = start + datetime.timedelta(days=1)
        interval = "DAY"
        numinterval = 1
        pause = True

        print "Creating Hive Replication Schedule"
        schedule = hive.create_replication_schedule(start, end, interval, numinterval, pause, hive_args)
        print "Starting Hive Replication"
        cmd = hive.trigger_replication_schedule(schedule.id)
        print "Waiting for completion"
        cmd = cmd.wait()
        print "Getting result"
        result = hive.get_replication_schedule(schedule.id).history[0].hiveResult
        print result

        print "Cleanup... Remove Hive replication schedule"
        sch = hive.delete_replication_schedule(schedule.id)
        print sch

    exit(0)

    #scheds = hive.get_replication_schedules()
    #sch = hive.delete_replication_schedule(162)
    
    
    # Setup for HDFS replication
    hdfs = api_root.get_cluster(TARGET_CLUSTER_NAME).get_service(TARGET_HDFS_NAME)
    hdfs_args = ApiHdfsReplicationArguments(None)
    hdfs_args.sourceService = ApiServiceRef(None, peerName=PEER_NAME, clusterName=SOURCE_CLUSTER_NAME, serviceName=SOURCE_HDFS_NAME)
    hdfs_args.sourcePath = '/user/bob.marshall/repltest'
    hdfs_args.destinationPath = '/user/bob.marshall/repltest'
    hdfs_args.mapreduceServiceName = TARGET_YARN_SERVICE
    hdfs_args.userName = args.username
    hdfs_args.sourceUser = args.username
    hdfs_args.preserveBlockSize = True
    hdfs_args.preserveReplicationCount = True
    hdfs_args.preservePermissions = True
    hdfs_args.skipChecksumChecks = True
    hdfs_args.skipListingChecksumChecks = True
    start = datetime.datetime.now()
    end = start + datetime.timedelta(days=1)
    interval = "DAY"
    numinterval = 1
    pause = True
    #schedule = hdfs.create_replication_schedule(start, end, interval, interval, pause, hdfs_args)
    print "Creating HDFS Replication Schedule"
    schedule = hdfs.create_replication_schedule(start, end, "DAY", 1, True, hdfs_args)
    print "Starting HDFS Replication"
    cmd = hdfs.trigger_replication_schedule(schedule.id)
    print "Waiting for completion"
    cmd = cmd.wait()
    print "Getting result"
    result = hdfs.get_replication_schedule(schedule.id).history[0].hdfsResult
    print result
    
    print "Cleanup... Remove HDFS replication schedule"
    sch = hdfs.delete_replication_schedule(schedule.id)
    print sch
Code Example #14
def get_cluster_info():
    api_response = cluster_api_instance.read_clusters(view='SUMMARY')
    global cluster
    for cluster in api_response.items:
        logger.info('Cluster name: ' +  cluster.name)
        logger.info('Cluster version: ' + cluster.full_version)

    if not api_response.items:
        logger.info('No cluster found, exiting')
        sys.exit()

    if cluster.full_version.startswith("6."):
        global services_api_instance
        services_api_instance = cm_client.ServicesResourceApi(api_client)

        # get a list of the deployed services
        services = services_api_instance.read_services(cluster.name, view='FULL')
        global service
        for service in services.items:
            #print service.display_name, "-", service.type
            logger.info('service display name: ' + service.display_name +  ", Service type: " + service.type)

            #uncomment these lines to see role info
            #roles = services_api_instance.read_roles(cluster.name, service.name)
            #for role in roles.items:
            #   print "Role name: %s\nState: %s\nHealth: %s\nHost: %s" % (role.name, role.role_state, role.health_summary, role.host_ref.host_id)

            #create handles for services
            if service.type == 'HDFS':
                global hdfs
                global hdfs_service_state
                hdfs = service.name
                hdfs_service_state = service.service_state
            elif service.type == 'HIVE':
                global hive
                global hive_service_state
                hive = service.name
                hive_service_state = service.service_state
            elif service.type == 'SPARK_ON_YARN':
                global sparkOnYarn
                global sparkOnYarn_service_state
                sparkOnYarn = service.name
                sparkOnYarn_service_state = service.service_state
            elif service.type == 'ZOOKEEPER':
                global zookeeper
                global zookeeper_service_state
                zookeeper = service.name
                zookeeper_service_state = service.service_state
            elif service.type == 'OOZIE':
                global oozie
                global oozie_service_state
                oozie = service.name
                oozie_service_state = service.service_state
            elif service.type == 'KAFKA':
                global kafka
                global kafka_service_state
                kafka = service.name
                kafka_service_state = service.service_state
            elif service.type == 'KUDU':
                global kudu
                global kudu_service_state
                kudu = service.name
                kudu_service_state = service.service_state
            elif service.type == 'HUE':
                global hue
                global hue_service_state
                hue = service.name
                hue_service_state = service.service_state
            elif service.type == 'IMPALA':
                global impala
                global impala_service_state
                impala = service.name
                impala_service_state = service.service_state
            elif service.type == 'YARN':
                global yarn
                global yarn_service_state
                yarn = service.name
                yarn_service_state = service.service_state
            elif service.type == 'HBASE':
                global hbase
                global hbase_service_state
                hbase = service.name
                hbase_service_state = service.service_state
            else:
                logger.info('Unhandled service type: ' + service.type)
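As a design note, the same information the elif chain gathers can be collected in one dict keyed by service type; a compact alternative sketch, not the original author's approach.

# Sketch: one lookup table instead of per-type globals.
service_info = {s.type: (s.name, s.service_state) for s in services.items}
hdfs_name, hdfs_state = service_info.get('HDFS', (None, None))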