Example 1
 def init_conn(self, host, port, version):
     """初始化连接,构建cm的连接实例
     """
     url = host + ':' + port + '/api/' + version
     client = cm_client.ApiClient(url)
     # Create the clusters resource API
     self._cluster_api_instance = cm_client.ClustersResourceApi(client)
     # Resource APIs used to look up the NameNode
     self._services_api_instance = cm_client.ServicesResourceApi(client)
     self._roles_api_instance = cm_client.RolesResourceApi(client)
     self._host_api_instance = cm_client.HostsResourceApi(client)
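
Note: init_conn assumes HTTP basic auth has already been set on cm_client's module-level configuration, as the later examples do. A minimal sketch with placeholder credentials and host:

import cm_client

cm_client.configuration.username = 'admin'  # placeholder
cm_client.configuration.password = 'admin'  # placeholder
client = cm_client.ApiClient('http://cm-host:7180/api/v30')  # placeholder URL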
Example 2
 def _get_cluster(self):
     cluster_api_instance = cm_client.ClustersResourceApi(self.api_client)
     api_response = cluster_api_instance.read_clusters(
         view='SUMMARY', _request_timeout=20.0)
     clusterResponse = None
     for cluster in api_response.items:
         if cluster.cluster_type == self.cluster_type:
             clusterResponse = cluster
     if clusterResponse:
         self.cluster = clusterResponse
     else:
         raise Exception("Not found any clusters yet.")
Example 3
 def get_cluster_api(self):
     try:
         cluster_api_instance = cm_client.ClustersResourceApi(
             self.api_client)
         cluster_api_response = cluster_api_instance.read_clusters(
             view='full')
         return cluster_api_response
     except ApiException as e:
         self.log.error(
             'ERROR with ClustersResourceApi > read_clusters at {}'.format(
                 self.url))
         raise e
Example 4
def cm_kafkaXX1_host_command(update: Update, context: CallbackContext) -> None:
    chat_ids = update.message.chat_id
    api_client = cm_client.ApiClient(api_url_dc)
    api_instance = cm_client.ClustersResourceApi(api_client)
    api_response = api_instance.list_hosts('kafkaXX1')
    print(api_response)
    mo = 'Cluster: <b>kafkaXX1</b>:\n'
    for host in api_response.items:
        mo = mo + 'Host: ' + host.hostname + '\n'

    context.bot.send_message(chat_id=chat_ids,
                             parse_mode=ParseMode.HTML,
                             text=(mo))
Example 5
def saveClusterTemplate():
    cluster_api_instance = cm_client.ClustersResourceApi(api_client)

    # Lists all known clusters.
    api_response = cluster_api_instance.read_clusters(view='SUMMARY')
    cluster_name = ''
    for cluster in api_response.items:
        cluster_name = cluster.name
        template = cluster_api_instance.export(cluster_name)

        json_dict = api_client.sanitize_for_serialization(template)
        with open('/tmp/cluster_template.json', 'w') as out_file:
            json.dump(json_dict, out_file, indent=4, sort_keys=True)

        print(cluster.name, "-", cluster.full_version)
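
Note: a template exported like this is normally re-imported on another Cloudera Manager instance. A minimal sketch, assuming the same api_client and using cm_client's ClouderaManagerResourceApi:

import json
import cm_client

with open('/tmp/cluster_template.json') as in_file:
    template = json.load(in_file)
cm_api_instance = cm_client.ClouderaManagerResourceApi(api_client)
# Starts an asynchronous import command; add_repositories also registers
# the parcel repositories referenced by the template.
command = cm_api_instance.import_cluster_template(add_repositories=True, body=template)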
Example 6
def cm_drc_cluster_command(update: Update, context: CallbackContext) -> None:
    chat_ids = update.message.chat_id
    api_client = cm_client.ApiClient(api_url_drc)
    cluster_api_instance = cm_client.ClustersResourceApi(api_client)
    api_response = cluster_api_instance.read_clusters(view='full')
    for cluster in api_response.items:
        mo = ('Site: <b>DRC</b>:\n')
        mo = mo + ('Name: <b>' + cluster.name + '</b>\n')
        mo = mo + ('Version: ' + cluster.full_version + '\n')
        mo = mo + ('Status: ' + cluster.entity_status + '\n')
        mo = mo + ('Maintenance: ' + str(cluster.maintenance_mode) + '\n')
        mo = mo + ('URL: ' + cluster.cluster_url + '\n')
        context.bot.send_message(chat_id=chat_ids,
                                 parse_mode=ParseMode.HTML,
                                 text=(mo))
Example 7
def refresh():
    cluster_api_instance = cm_client.ClustersResourceApi(api_client)
    restart_command = cluster_api_instance.deploy_client_config(cluster_name)
    print("Deploy client config ...")
    wait(restart_command)
    print("Active: %s. Success: %s" % (restart_command.active, restart_command.success))
    restart_command = cluster_api_instance.refresh(cluster_name)
    print("Refresh ...")
    wait(restart_command)
    print("Active: %s. Success: %s" % (restart_command.active, restart_command.success))
    restart_args = cm_client.ApiRestartClusterArgs()
    print("Restarting the cluster...")
    restart_command = cluster_api_instance.restart_command(cluster_name, body=restart_args)
    wait(restart_command)
    print("Active: %s. Success: %s" % (restart_command.active, restart_command.success))
Example 8
def stop_all_clusters(api_client, api_host):
    api_instance = cm_client.ClustersResourceApi(api_client)
    try:
        # Stop the Clusters managed by this Cloudera Manager.
        api_response = api_instance.read_clusters(view='summary')
        print(f'Stopping Cluster(s) managed by {api_host}:')
        for cluster in api_response.items:
            print(f'Stopping {cluster.display_name}', end='', flush=True)
            api_instance.stop_command(cluster.name)
            while True:
                print('.', end='', flush=True)
                time.sleep(1)
                api_response = api_instance.read_cluster(cluster.name)
                if api_response.entity_status == 'STOPPED':
                    print('\nStopped.', flush=True)
                    break
    except ApiException as e:
        print(f'Exception while calling ClustersResourceApi->read_clusters/stop_command: {e}')
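
Note: starting clusters is symmetric. A hypothetical start_all_clusters helper, following the same pattern with ClustersResourceApi.start_command:

def start_all_clusters(api_client, api_host):
    api_instance = cm_client.ClustersResourceApi(api_client)
    print(f'Starting Cluster(s) managed by {api_host}:')
    for cluster in api_instance.read_clusters(view='summary').items:
        print(f'Starting {cluster.display_name}')
        # start_command returns an ApiCommand that can be polled as above
        api_instance.start_command(cluster.name)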
Example 9
def cluster_stats():
	"""Gets the total YARN meory & VCores using the Cloudera anager API"""

	#req = requests.get("http://nightly6x-unsecure-1.nightly6x-unsecure.root.hwx.site:7180/api/v40/tools/echo?message=hello")
	#print req.content

	cm_client.configuration.username = '******'
	cm_client.configuration.password = '******'

	# Create an instance of the API class
	api_host = 'http://nightly6x-unsecure-1.nightly6x-unsecure.root.hwx.site'
	port = '7180'
	api_version = 'v33'
	# Construct base URL for API
	# http://cmhost:7180/api/v30
	api_url = api_host + ':' + port + '/api/' + api_version
	print(api_url)
	api_client = cm_client.ApiClient(api_url)
	cluster_api_instance = cm_client.ClustersResourceApi(api_client)

	api_response = cluster_api_instance.read_clusters(view='SUMMARY')
	for cluster in api_response.items:
		print(cluster.name, "-", cluster.full_version)

	services_api_instance = cm_client.ServicesResourceApi(api_client)
	services = services_api_instance.read_services(cluster.name, view='FULL')

	for service in services.items:
		
		if service.type=="YARN":
			yarn = service

	print(yarn.name)
    

	api_url_v5 = api_host + ':' + port + '/api/v5'
	api_client_v5 = cm_client.ApiClient(api_url_v5)
	print(api_url_v5)
	services_api_instance_v5 = cm_client.ServicesResourceApi(api_client_v5)
	#print services_api_instance_v5.get_metrics(cluster.name, hdfs.name)
	metrics = services_api_instance_v5.get_metrics(cluster.name, yarn.name)
	for m in metrics.items:
		print "%s (%s)" % (m.name, m.unit)
Example 10
 def __init__(self, cm_host, env, send_alert):
     cm_client.configuration.username = '******'
     cm_client.configuration.password = '******'
     self.env = env
     api_host = 'http://' + cm_host
     port = '7180'
     api_version = 'v19'
     api_url = api_host + ':' + port + '/api/' + api_version
     self.send_alert_email = send_alert
     print(api_url)
     self.api_client = cm_client.ApiClient(api_url)
     self.cluster_api_instance = cm_client.ClustersResourceApi(
         self.api_client)
     self.services_api_instance = cm_client.ServicesResourceApi(
         self.api_client)
     self.roles_api_instance = cm_client.RolesResourceApi(self.api_client)
     self.host_api_instance = cm_client.HostsResourceApi(self.api_client)
     self.host_details = self.get_hostname_by_id()
     self.bad_alert_count = 0
     self.msg_body = ""
Example 11
def restart_cluster():
    def wait(cmd, timeout=None):
        SYNCHRONOUS_COMMAND_ID = -1
        if cmd.id == SYNCHRONOUS_COMMAND_ID:
            return cmd

        SLEEP_SECS = 5
        if timeout is None:
            deadline = None
        else:
            deadline = time.time() + timeout

        try:
            cmd_api_instance = cm_client.CommandsResourceApi(api_client)
            while True:
                cmd = cmd_api_instance.read_command(int(cmd.id))
                pprint(cmd)
                if not cmd.active:
                    return cmd

                if deadline is not None:
                    now = time.time()
                    if deadline < now:
                        return cmd
                    else:
                        time.sleep(min(SLEEP_SECS, deadline - now))
                else:
                    time.sleep(SLEEP_SECS)
        except Exception as e:
            print("Exception reading and waiting for command %s\n" % e)

    # Restart cluster

    clusters_api_instance = cm_client.ClustersResourceApi(api_client)
    restart_args = cm_client.ApiRestartClusterArgs(restart_only_stale_services=True)
    restart_command = clusters_api_instance.restart_command(cluster,
                                                            body=restart_args)
    wait(restart_command)
Example 12
import cm_client
from cm_client.rest import ApiException
from collections import namedtuple
from pprint import pprint
import json

# Configure HTTP basic authorization: basic
cm_client.configuration.username = '******'
cm_client.configuration.password = '******'

api_url = "http://10.209.239.13:7180/api/v17"
api_client = cm_client.ApiClient(api_url)

# Create an instance of the API class
cluster_name = 'Cluster 1'
clusters_api_instance = cm_client.ClustersResourceApi(api_client)
template = clusters_api_instance.export(cluster_name)

# The following step lets Python fields with under_scores map to the
# matching camelCase names in the API model before writing to the JSON file.
json_dict = api_client.sanitize_for_serialization(template)
with open('/home/tpc/hs/tpcxBB-pipeline/13sep-2019-cluster_template.json', 'w') as out_file:
    json.dump(json_dict, out_file, indent=4, sort_keys=True)
Example 13
# Assumed imports for this snippet, which mixes the cm_client swagger client
# with the legacy cm_api client for the replication calls (not shown in the
# original source):
from __future__ import print_function
import argparse
import datetime
import cm_client
from cm_api.api_client import ApiResource
from cm_api.endpoints.types import (ApiHdfsReplicationArguments,
                                    ApiHiveReplicationArguments,
                                    ApiHiveTable, ApiServiceRef)


def main(passed_username, passed_password, passed_database):

    PEER_NAME = 'PRODUCTION'                        # Previously
    TARGET_CLUSTER_NAME = 'DEV'                     #     defined
    SOURCE_CLUSTER_NAME = 'cluster'                 #       at Experian

    cm_host = 'br1andvhmn11.passporthealth.com'


    cm_client.configuration.username = passed_username
    cm_client.configuration.password = passed_password

    sourceDatabase = passed_database

    # Setup authentication for SSL
    cm_client.configuration.verify_ssl = True
    cm_client.configuration.ssl_ca_cert = '/opt/cloudera/security/pki/x509/truststore.pem'

    # Create an instance of the API class
    api_host = 'https://br1andvhmn11.passporthealth.com'
    port = '7183'
    api_version = 'v30'

    impala_host = 'br1anprhsn02.passporthealth.com'

    # Construct base URL for API
    # http://cmhost:7180/api/v30
    api_url = api_host + ':' + port + '/api/' + api_version
    api_client = cm_client.ApiClient(api_url)
    cluster_api_instance = cm_client.ClustersResourceApi(api_client)

    # Lists all known clusters.
    api_response = cluster_api_instance.read_clusters(view='SUMMARY')
    for cluster in api_response.items:
        print(cluster.name, "-", cluster.full_version)

        services_api_instance = cm_client.ServicesResourceApi(api_client)
        services = services_api_instance.read_services(cluster.name, view='FULL')
        for service in services.items:
            # print(service.display_name, "-", service.type)
            if service.type == 'HIVE':
                targetHive = service
                targetCluster = cluster

                print(targetHive.name, targetHive.service_state, targetHive.health_summary)

                for health_check in targetHive.health_checks:
                    print(health_check.name, "---", health_check.summary)

    #		print "Source database = " + sourceDatabase

    ###show_statement = "'show tables in " + sourceDatabase +"'"
    ###streamOperand = "impala-shell -i " + impala_host + " -d default -k --ssl --ca_cert=/opt/cloudera/security/pki/x509/truststore.pem -q " + show_statement
    ###stream=os.popen(streamOperand)
    ###
    ###output=stream.readlines()
    ###lineno =0
    ###numtables = 0
    ###tablenames = []
    ###for line in output:
    ###    if lineno <= 2:     # skip heading lines
    ###        pass
    ###    elif line[0:3] == "+--":                    # skip last line
    ###        pass
    ###    else:                                       # strip out tablename
    ###        name = line[2:]
    ###        blank = name.index(' ')
    ###        tablenames.append(name[0:blank])
    ###        numtables +=1
    ###    lineno +=1
    ###print str(numtables) + " tables in database " + sourceDatabase
    ###for table in tablenames:
    ###	print table


    tablenames = []
    tablenames.append("test")
    tablenames.append("test2")


    api_root = ApiResource(cm_host, username=passed_username, password=passed_password,  use_tls=True)

    PEER_NAME = 'PRODUCTION'
    SOURCE_HDFS_NAME = 'hdfs'
    TARGET_HDFS_NAME = 'hdfs'
    SOURCE_HIVE_NAME = 'hive'
    TARGET_HIVE_NAME = 'hive'
    SOURCE_CLUSTER_NAME = 'cluster'
    TARGET_CLUSTER_NAME = 'DEV'
    TARGET_YARN_SERVICE = 'yarn'




    # Setup for Hive replication
    hive =  api_root.get_cluster(TARGET_CLUSTER_NAME).get_service(TARGET_HIVE_NAME)
    hive_args = ApiHiveReplicationArguments(None)
    hdfs_args = ApiHdfsReplicationArguments(None)                   # Needed for replicating table data stored in HDFS
    hive_args.sourceService = ApiServiceRef(None, peerName=PEER_NAME, clusterName=SOURCE_CLUSTER_NAME, serviceName=SOURCE_HIVE_NAME)

    # Define tables to replicate
    table_filters = []
    table = ApiHiveTable(None)

    for tab in tablenames:
        table.database = (passed_database)
        table.tableName = (tab)
        table_filters = []
        table_filters.append(table)
        print "Replicating " + passed_database + "." + tab

        hive_args.tableFilters = table_filters
        hive_args.force = True                                          # Overwrite existing tables
        hive_args.replicateData = True                                  # Replicate table data stored in HDFS
        hdfs_args.skipChecksumChecks = True
        hdfs_args.skipListingChecksumChecks = True
        hdfs_args.preserveBlockSize = True
        hdfs_args.preserveReplicationCount = True
        hdfs_args.preservePermissions = True

        # Define HDFS portion of the Hive replication as needed
        hdfs_args.destinationPath = '/user/bob.marshall/repltest'       # Argument? Path relative to servicename?
        hdfs_args.mapreduceServiceName = TARGET_YARN_SERVICE
        hdfs_args.userName = passed_username
        hdfs_args.sourceUser = passed_username

        hive_args.hdfsArguments = hdfs_args

        start = datetime.datetime.now()
        end = start + datetime.timedelta(days=1)
        interval = "DAY"
        numinterval = 1
        pause = True

        print "Creating Hive Replication Schedule"
        schedule = hive.create_replication_schedule(start, end, interval, numinterval, pause, hive_args)
        print "Starting Hive Replication"
        cmd = hive.trigger_replication_schedule(schedule.id)
        print "Waiting for completion"
        cmd = cmd.wait()
        print "Getting result"
        result = hive.get_replication_schedule(schedule.id).history[0].hiveResult
        print result

        print "Cleanup... Remove Hive replication schedule"
        sch = hive.delete_replication_schedule(schedule.id)
        print sch

    exit(0)

    #scheds = hive.get_replication_schedules()
    #sch = hive.delete_replication_schedule(162)


    # Setup for HDFS replication (unreachable after exit(0) above; kept for reference)
    hdfs = api_root.get_cluster(TARGET_CLUSTER_NAME).get_service(TARGET_HDFS_NAME)
    hdfs_args = ApiHdfsReplicationArguments(None)
    hdfs_args.sourceService = ApiServiceRef(None, peerName=PEER_NAME, clusterName=SOURCE_CLUSTER_NAME, serviceName=SOURCE_HDFS_NAME)
    hdfs_args.sourcePath = '/user/bob.marshall/repltest'
    hdfs_args.destinationPath = '/user/bob.marshall/repltest'
    hdfs_args.mapreduceServiceName = TARGET_YARN_SERVICE
    hdfs_args.userName = passed_username
    hdfs_args.sourceUser = passed_username
    hdfs_args.preserveBlockSize = True
    hdfs_args.preserveReplicationCount = True
    hdfs_args.preservePermissions = True
    hdfs_args.skipChecksumChecks = True
    hdfs_args.skipListingChecksumChecks = True
    start = datetime.datetime.now()
    end = start + datetime.timedelta(days=1)
    interval = "DAY"
    numinterval = 1
    pause = True
    #schedule = hdfs.create_replication_schedule(start, end, interval, interval, pause, hdfs_args)
    print("Creating HDFS Replication Schedule")
    schedule = hdfs.create_replication_schedule(start, end, "DAY", 1, True, hdfs_args)
    print("Starting HDFS Replication")
    cmd = hdfs.trigger_replication_schedule(schedule.id)
    print("Waiting for completion")
    cmd = cmd.wait()
    print("Getting result")
    result = hdfs.get_replication_schedule(schedule.id).history[0].hdfsResult
    print(result)

    print("Cleanup... Remove HDFS replication schedule")
    sch = hdfs.delete_replication_schedule(schedule.id)
    print(sch)

    #scheds = hdfs.get_replication_schedules()
    #sch = hdfs.delete_replication_schedule(27)

if __name__ == "__main__":

    parser = argparse.ArgumentParser(description='Perform BDR jobs while getting around BDR limitations.')
    parser.add_argument("username")
    parser.add_argument("password")
    parser.add_argument("database")
    args = parser.parse_args()

    main(args.username, args.password, args.database)
Example 14
 def cluster_api(self):
     if self._cluster_api is None:
         self._cluster_api = cm_client.ClustersResourceApi(self.api_client)
     return self._cluster_api
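
Note: this lazy accessor is usually declared with @property on the owning class (the decorator is not shown in the snippet). Assumed usage, with wrapper as a hypothetical instance:

clusters = wrapper.cluster_api.read_clusters(view='SUMMARY')  # builds the API once, then reuses it
for c in clusters.items:
    print(c.name, c.full_version)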
Example 15
# Assumed imports for this snippet, which mixes the cm_client swagger client
# with the legacy cm_api client for the replication calls (not shown in the
# original source):
from __future__ import print_function
import datetime
import cm_client
from cm_api.api_client import ApiResource
from cm_api.endpoints.types import (ApiHdfsReplicationArguments,
                                    ApiHiveReplicationArguments,
                                    ApiHiveTable, ApiServiceRef)


def main(passed_username, passed_password, passed_database):

    PEER_NAME = 'PRODUCTION'                        # Previously
    TARGET_CLUSTER_NAME = 'DEV'                     #     defined
    SOURCE_CLUSTER_NAME = 'cluster'                 #       at Experian

    cm_host = 'br1andvhmn11.passporthealth.com'


    cm_client.configuration.username = passed_username

    cm_client.configuration.password = passed_password

    sourceDatabase = passed_database

    # Setup authentication for SSL
    cm_client.configuration.verify_ssl = True
    cm_client.configuration.ssl_ca_cert = '/opt/cloudera/security/pki/x509/truststore.pem'

    # Create an instance of the API class
    api_host = 'https://br1andvhmn11.passporthealth.com'
    port = '7183'
    api_version = 'v30'

#For testing, until firewall opened, pull tablenames from target cluster. We are therefore simulating an overwrite. 
#For real, change impalahost to pull tablenames from source cluster.
#   impala_host = 'br1anprhsn02.passporthealth.com'

    impala_host = 'br1andvhmn02.passporthealth.com'   
 
    # Construct base URL for API
    # http://cmhost:7180/api/v30
    api_url = api_host + ':' + port + '/api/' + api_version
    api_client = cm_client.ApiClient(api_url)
    cluster_api_instance = cm_client.ClustersResourceApi(api_client)

    # Lists all known clusters.
    api_response = cluster_api_instance.read_clusters(view='SUMMARY')
    for cluster in api_response.items:
        print(cluster.name, "-", cluster.full_version)

        services_api_instance = cm_client.ServicesResourceApi(api_client)
        services = services_api_instance.read_services(cluster.name, view='FULL')
        for service in services.items:
            # print(service.display_name, "-", service.type)
            if service.type == 'HIVE':
                targetHive = service
                targetCluster = cluster

                print(targetHive.name, targetHive.service_state, targetHive.health_summary)

                for health_check in targetHive.health_checks:
                    print(health_check.name, "---", health_check.summary)

    #		print "Source database = " + sourceDatabase

    ###show_statement = "'show tables in " + sourceDatabase +"'"
    ###streamOperand = "impala-shell -i " + impala_host + " -d default -k --ssl --ca_cert=/opt/cloudera/security/pki/x509/truststore.pem -q " + show_statement
    ###stream=os.popen(streamOperand)
    ###
    ###output=stream.readlines()
    ###lineno =0
    ###numtables = 0
    ###tablenames = []
    ###for line in output:
    ###    if lineno <= 2:     # skip heading lines
    ###        pass
    ###    elif line[0:3] == "+--":                    # skip last line
    ###        pass
    ###    else:                                       # strip out tablename
    ###        name = line[2:]
    ###        blank = name.index(' ')
    ###        tablenames.append(name[0:blank])
    ###        numtables +=1
    ###    lineno +=1
    ###print str(numtables) + " tables in database " + sourceDatabase
    ###for table in tablenames:
    ###	print table


    tablenames = [
        'b05006',
        'b05007_bgpr_cape_rename',
        'b07001_bgpr_cape',
        'b13008_bgpr_cape',
        'c16001_bgpr_cape',
        'c24070_bgpr_cape',
        'cde2018_bg_a1',
        'cde2018_bg_a2',
        'cde2018_bg_a3',
        'cde2018_bg_a4',
        'cde2018_bg_b',
        'cde2018_bg_c1',
        'cde2018_bg_c2',
        'cde2018_bg_c3',
        'cde2018_bg_d',
        'cons_exp_annual_cye2018_bg',
        'cons_exp_annual_fyp2018_bg',
        'cons_exp_avgannual_cye2018_bg',
        'cons_exp_avgannual_fyp2018_bg',
        'cons_exp_gifts_cye2018_bg',
        'cons_exp_gifts_fyp2018_bg',
        'cye2018_bg_a1',
        'cye2018_bg_a2',
        'cye2018_bg_a3',
        'cye2018_bg_a4',
        'cye2018_bg_b',
        'cye2018_bg_c1',
        'cye2018_bg_c2',
        'cye2018_bg_c3',
        'cye2018_bg_d',
        'fyp2018_bg_a1',
        'fyp2018_bg_a2',
        'fyp2018_bg_a3',
        'fyp2018_bg_a4',
        'fyp2018_bg_b',
        'fyp2018_bg_c1',
        'fyp2018_bg_c2',
        'fyp2018_bg_c3',
        'fyp2018_bg_d',
    ]

    api_root = ApiResource(cm_host, username=passed_username, password=passed_password,  use_tls=True)
    
    PEER_NAME = 'PRODUCTION'
    SOURCE_HDFS_NAME = 'hdfs'
    TARGET_HDFS_NAME = 'hdfs'
    SOURCE_HIVE_NAME = 'hive'
    TARGET_HIVE_NAME = 'hive'
    SOURCE_CLUSTER_NAME = 'cluster'
    TARGET_CLUSTER_NAME = 'DEV'
    TARGET_YARN_SERVICE = 'yarn'




    # Setup for Hive replication
    hive =  api_root.get_cluster(TARGET_CLUSTER_NAME).get_service(TARGET_HIVE_NAME)
    hive_args = ApiHiveReplicationArguments(None)
    hdfs_args = ApiHdfsReplicationArguments(None)                   # Needed for replicating table data stored in HDFS
    hive_args.sourceService = ApiServiceRef(None, peerName=PEER_NAME, clusterName=SOURCE_CLUSTER_NAME, serviceName=SOURCE_HIVE_NAME)

    # Define tables to replicate
    table_filters = []
    table = ApiHiveTable(None)

    for tab in tablenames:
#       table.database = (passed_database)
#
#  Hardwire database name for 2020-11-24 replication
#
        table.database = 'lake_consumerview'
        table.tableName = (tab)
        table_filters = []
        table_filters.append(table)
        print "Replicating " + passed_database + "." + tab

        hive_args.tableFilters = table_filters
        hive_args.force = True                                          # Overwrite existing tables
        hive_args.replicateData = True                                  # Replicate table data stored in HDFS
        hdfs_args.skipChecksumChecks = True
        hdfs_args.skipListingChecksumChecks = True
        hdfs_args.preserveBlockSize = True
        hdfs_args.preserveReplicationCount = True
        hdfs_args.preservePermissions = True

        # Define HDFS portion of the Hive replication as needed
#        hdfs_args.destinationPath = '/'
        hdfs_args.mapreduceServiceName = TARGET_YARN_SERVICE
        hdfs_args.userName = passed_username
        hdfs_args.sourceUser = passed_username

        hive_args.hdfsArguments = hdfs_args

        start = datetime.datetime.now()
        end = start + datetime.timedelta(days=1)
        interval = "DAY"
        numinterval = 1
        pause = True

        print "Creating Hive Replication Schedule"
        schedule = hive.create_replication_schedule(start, end, interval, numinterval, pause, hive_args)
        print "Starting Hive Replication"
        cmd = hive.trigger_replication_schedule(schedule.id)
        print "Waiting for completion"
        cmd = cmd.wait()
        print "Getting result"
        result = hive.get_replication_schedule(schedule.id).history[0].hiveResult
        print result

        print "Cleanup... Remove Hive replication schedule"
        sch = hive.delete_replication_schedule(schedule.id)
        print sch

    exit(0)

    #scheds = hive.get_replication_schedules()
    #sch = hive.delete_replication_schedule(162)
    
    
    # Setup for HDFS replication
    hdfs = api_root.get_cluster(TARGET_CLUSTER_NAME).get_service(TARGET_HDFS_NAME)
    hdfs_args = ApiHdfsReplicationArguments(None)
    hdfs_args.sourceService = ApiServiceRef(None, peerName=PEER_NAME, clusterName=SOURCE_CLUSTER_NAME, serviceName=SOURCE_HDFS_NAME)
    hdfs_args.sourcePath = '/user/bob.marshall/repltest'
    hdfs_args.destinationPath = '/user/bob.marshall/repltest'
    hdfs_args.mapreduceServiceName = TARGET_YARN_SERVICE
    hdfs_args.userName = passed_username
    hdfs_args.sourceUser = passed_username
    hdfs_args.preserveBlockSize = True
    hdfs_args.preserveReplicationCount = True
    hdfs_args.preservePermissions = True
    hdfs_args.skipChecksumChecks = True
    hdfs_args.skipListingChecksumChecks = True
    start = datetime.datetime.now()
    end = start + datetime.timedelta(days=1)
    interval = "DAY"
    numinterval = 1
    pause = True
    #schedule = hdfs.create_replication_schedule(start, end, interval, interval, pause, hdfs_args)
    print "Creating HDFS Replication Schedule"
    schedule = hdfs.create_replication_schedule(start, end, "DAY", 1, True, hdfs_args)
    print "Starting HDFS Replication"
    cmd = hdfs.trigger_replication_schedule(schedule.id)
    print "Waiting for completion"
    cmd = cmd.wait()
    print "Getting result"
    result = hdfs.get_replication_schedule(schedule.id).history[0].hdfsResult
    print result
    
    print "Cleanup... Remove HDFS replication schedule"
    sch = hdfs.delete_replication_schedule(schedule.id)
    print sch