def download_dist_activate_parcel(cluster, api, parcel):
    """
    Input: a parcel object.
    Output: the parcel should show up as activated in the Cloudera Manager UI.
    """
    print "Downloading %s %s" % (parcel.product, parcel.version)
    cmd = parcel.start_download()
    while parcel.stage != "DOWNLOADED":
        sleep(5)
        print parcel.stage
        print ".",
        parcel = get_parcel(api, parcel.product, parcel.version, cluster.name)
    print "parcel downloaded:", parcel.product + ", version: " + parcel.version + " DOWNLOADED"

    cmd = parcel.start_distribution()
    print "Distributing"
    while parcel.stage != "DISTRIBUTED":
        sleep(5)
        print ".",
        sys.stdout.flush()
        parcel = get_parcel(api, parcel.product, parcel.version, cluster.name)
    print "parcel distributed:", parcel.product + ", version: " + parcel.version + " DISTRIBUTED"

    cmd = parcel.activate()
    if cmd.success != True:
        print "activation failed!!!"
        exit(0)
    print "Activating"
    while parcel.stage != "ACTIVATED":
        sleep(5)
        print ".",
        parcel = get_parcel(api, parcel.product, parcel.version, cluster.name)
    print "parcel activated:", parcel.product + ", version: " + parcel.version + " ACTIVATED"
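Note: each snippet on this page is excerpted from a larger deployment script, so names such as api, cluster_name, cm_host, and log are defined elsewhere in those scripts. A minimal import header that would satisfy most of the examples is sketched below; it assumes the cm_api Python client package is installed, and the exact set of imports varies per example.

# Shared imports assumed by the examples on this page (sketch only).
from time import sleep
import sys

from cm_api.api_client import ApiResource
from cm_api.endpoints.cms import ClouderaManager
from cm_api.endpoints.clusters import create_cluster
from cm_api.endpoints.parcels import get_parcel
from cm_api.endpoints.role_config_groups import get_role_config_group
from cm_api.endpoints.services import ApiServiceSetupInfo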
def distribution_parcels(api_resource, cluster):
    # get and list all available parcels
    print "Available parcels:"
    for cdh_parcel in cluster.get_all_parcels():
        print '\t' + cdh_parcel.product + ' ' + cdh_parcel.version

    for cdh_parcel in cluster.get_all_parcels():
        parcel_full_name = cdh_parcel.product + ' ' + cdh_parcel.version

        # download the parcel
        print "Downloading parcel " + parcel_full_name + ". This might take a while."
        cmd = cdh_parcel.start_download()
        if cmd.success != True:
            print "Parcel " + parcel_full_name + " download failed!"
            exit(0)
        # make sure the download finishes
        while cdh_parcel.stage != 'DOWNLOADED':
            sleep(5)
            cdh_parcel = get_parcel(api_resource, cdh_parcel.product, cdh_parcel.version, cluster.name)
        print "Parcel " + parcel_full_name + " downloaded"

        # distribute the parcel
        print "Starting distribution of parcel " + parcel_full_name + ". This might take a while."
        cmd = cdh_parcel.start_distribution()
        if cmd.success != True:
            print "Parcel " + parcel_full_name + " distribution failed!"
            exit(0)
        # make sure the distribution finishes
        while cdh_parcel.stage != "DISTRIBUTED":
            sleep(5)
            cdh_parcel = get_parcel(api_resource, cdh_parcel.product, cdh_parcel.version, cluster.name)
        print "Parcel " + parcel_full_name + " distributed."

        # activate the parcel
        cmd = cdh_parcel.activate()
        if cmd.success != True:
            print "Parcel " + parcel_full_name + " activation failed!"
            exit(0)
        # make sure the activation finishes
        while cdh_parcel.stage != "ACTIVATED":
            cdh_parcel = get_parcel(api_resource, cdh_parcel.product, cdh_parcel.version, cluster.name)
        print "Parcel " + parcel_full_name + " activated"
def wait_for_parcel(cmd, api, parcel, cluster_name, stage):
    if cmd.success != True:
        print "Parcel stage transition to %s failed" % stage
        exit(7)
    while parcel.stage != stage:
        sleep(5)
        parcel = get_parcel(api, parcel.product, parcel.version, cluster_name)
    return parcel
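For illustration, a hedged usage sketch of wait_for_parcel, showing how it collapses the three polling loops from the examples above into one call per stage; the api handle, parcel object, and cluster_name are assumed to be defined as in those examples.

# Hypothetical usage of wait_for_parcel(); `api`, `parcel`, and `cluster_name`
# are assumed to exist as in the surrounding examples.
parcel = wait_for_parcel(parcel.start_download(), api, parcel, cluster_name, "DOWNLOADED")
parcel = wait_for_parcel(parcel.start_distribution(), api, parcel, cluster_name, "DISTRIBUTED")
parcel = wait_for_parcel(parcel.activate(), api, parcel, cluster_name, "ACTIVATED")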
def get_parcel(self, product, version):
    """
    Lookup a parcel by product and version.

    @param product: the product name
    @param version: the product version
    @return: An ApiParcel object
    """
    return parcels.get_parcel(self._get_resource_root(), product, version, self.name)
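The method above is the cluster-level counterpart of the module-level cm_api.endpoints.parcels.get_parcel helper used throughout the other examples. A small sketch of both lookups follows; the host, cluster name, and parcel version strings are illustrative only.

# Two equivalent parcel lookups; names and version string are illustrative.
from cm_api.api_client import ApiResource
from cm_api.endpoints.parcels import get_parcel

api = ApiResource("cm-host.example.com", username="admin", password="admin")
cluster = api.get_cluster("cluster1")

p1 = cluster.get_parcel("CDH", "5.8.0-1.cdh5.8.0.p0.42")           # ApiCluster method
p2 = get_parcel(api, "CDH", "5.8.0-1.cdh5.8.0.p0.42", "cluster1")  # module-level helper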
if p.product == "KAFKA": PARCEL = p PARCEL_PROCUCT = p.product PARCEL_VERSION = p.version print PARCEL print "Starting parcel download. This may take a while" cmd = PARCEL.start_download() if cmd.success != True: print "Parcel download failed!" exit(0) while PARCEL.stage != "DOWNLOADED": sleep(5) PARCEL = get_parcel(api, PARCEL_PROCUCT, PARCEL_VERSION, cluster_name) print "Parcel downloaded" print "Starting parcel distribution. This might take a while" cmd = PARCEL.start_distribution() if cmd.success != True: print "Parcel distribution failed!" exit(0) while PARCEL.stage != "DISTRIBUTED": sleep(5) PARCEL = get_parcel(api, PARCEL_PROCUCT, PARCEL_VERSION, cluster_name) print "Parcel distributed"
def set_up_cluster():
    # get a handle on the instance of CM that we have running
    api = ApiResource(cm_host, cm_port, cm_username, cm_password, version=19)

    # get the CM instance
    cm = ClouderaManager(api)

    print "*************************************"
    print " Starting Auto Deployment of Cluster "
    print "*************************************"

    # {'owner': ROAttr(), 'uuid': ROAttr(), 'expiration': ROAttr(),}
    TRIAL = False
    try:
        trial_active = cm.get_license()
        print trial_active
        if trial_active.owner == "Trial License":
            print "Trial License is already set - will NOT continue now."
            print "Assuming Cluster is already setup"
            TRIAL = True
        else:
            print "Setting up `Trial License`."
            cm.begin_trial()
    except:
        cm.begin_trial()
    if TRIAL:
        exit(0)

    # create the management service
    service_setup = ApiServiceSetupInfo(name=cm_service_name, type="MGMT")
    try:
        if not cm.get_service().name:
            cm.create_mgmt_service(service_setup)
        else:
            print "Service already exists."
    except:
        cm.create_mgmt_service(service_setup)

    # install hosts on this CM instance
    cmd = cm.host_install(host_username, host_list, password=host_password,
                          cm_repo_url=cm_repo_url, unlimited_jce=True)
    print "Installing hosts. This might take a while."
    while cmd.success == None:
        sleep(5)
        cmd = cmd.fetch()
        print cmd

    if cmd.success != True:
        print "cm_host_install failed: " + cmd.resultMessage
        exit(0)
    print "cm_host_install succeeded"

    # first auto-assign roles and auto-configure the CM service
    cm.auto_assign_roles()
    cm.auto_configure()

    # create a cluster on that instance
    cluster = create_cluster(api, cluster_name, cdh_version)

    # add all our hosts to the cluster
    cluster.add_hosts(host_list)
    cluster = api.get_cluster(cluster_name)

    parcels_list = []
    # get and list all available parcels
    print "Available parcels:"
    for p in cluster.get_all_parcels():
        print '\t' + p.product + ' ' + p.version
        if p.version.startswith(cdh_version_number) and p.product == "CDH":
            parcels_list.append(p)

    if len(parcels_list) == 0:
        print "No " + cdh_version + " parcel found!"
        exit(0)

    cdh_parcel = parcels_list[0]
    for p in parcels_list:
        if p.version > cdh_parcel.version:
            cdh_parcel = p

    # download the parcel
    print "Starting parcel download. This might take a while."
    cmd = cdh_parcel.start_download()
    if cmd.success != True:
        print "Parcel download failed!"
        exit(0)
    # make sure the download finishes
    while cdh_parcel.stage != 'DOWNLOADED':
        sleep(5)
        cdh_parcel = get_parcel(api, cdh_parcel.product, cdh_parcel.version, cluster_name)
    print cdh_parcel.product + ' ' + cdh_parcel.version + " downloaded"

    # distribute the parcel
    print "Starting parcel distribution. This might take a while."
    cmd = cdh_parcel.start_distribution()
    if cmd.success != True:
        print "Parcel distribution failed!"
        exit(0)
    # make sure the distribution finishes
    while cdh_parcel.stage != "DISTRIBUTED":
        sleep(5)
        cdh_parcel = get_parcel(api, cdh_parcel.product, cdh_parcel.version, cluster_name)
    print cdh_parcel.product + ' ' + cdh_parcel.version + " distributed"

    # activate the parcel
    cmd = cdh_parcel.activate()
    if cmd.success != True:
        print "Parcel activation failed!"
        exit(0)
    # make sure the activation finishes
    while cdh_parcel.stage != "ACTIVATED":
        cdh_parcel = get_parcel(api, cdh_parcel.product, cdh_parcel.version, cluster_name)
    print cdh_parcel.product + ' ' + cdh_parcel.version + " activated"

    # inspect hosts and print the result
    print "Inspecting hosts. This might take a few minutes."
    cmd = cm.inspect_hosts()
    while cmd.success == None:
        cmd = cmd.fetch()
    if cmd.success != True:
        print "Host inspection failed!"
        exit(0)
    print "Hosts successfully inspected: \n" + cmd.resultMessage

    # create all the services we want to add; we will only create one instance
    # of each
    for s in service_types_and_names.keys():
        service = cluster.create_service(service_types_and_names[s], s)

    # we will auto-assign roles; you can manually assign roles using the
    # /clusters/{clusterName}/services/{serviceName}/role endpoint or by using
    # ApiService.createRole()
    cluster.auto_assign_roles()
    cluster.auto_configure()

    # # this will set up the Hive and the reports manager databases because we
    # # can't auto-configure those two things
    # hive = cluster.get_service(service_types_and_names["HIVE"])
    # hive_config = {"hive_metastore_database_host": hive_metastore_host,
    #                "hive_metastore_database_name": hive_metastore_name,
    #                "hive_metastore_database_password": hive_metastore_password,
    #                "hive_metastore_database_port": hive_metastore_database_port,
    #                "hive_metastore_database_type": hive_metastore_database_type}
    # hive.update_config(hive_config)

    # start the management service
    cm_service = cm.get_service()
    cm_service.start().wait()

    # this will set the Reports Manager database password
    # first we find the correct role
    rm_role = None
    for r in cm.get_service().get_all_roles():
        if r.type == "REPORTSMANAGER":
            rm_role = r
    if rm_role == None:
        print "No REPORTSMANAGER role found!"
        exit(0)

    # then we get the corresponding role config group -- even though there is
    # only one instance of each CM management service, we do this just in case
    # it is not placed in the base group
    rm_role_group = rm_role.roleConfigGroupRef
    rm_rcg = get_role_config_group(api, rm_role.type,
                                   rm_role_group.roleConfigGroupName, None)

    # update the appropriate fields in the config
    rm_rcg_config = {"headlamp_database_host": reports_manager_host,
                     "headlamp_database_name": reports_manager_name,
                     "headlamp_database_user": reports_manager_username,
                     "headlamp_database_password": reports_manager_password,
                     "headlamp_database_type": reports_manager_database_type}
    rm_rcg.update_config(rm_rcg_config)

    # restart the management service with new configs
    cm_service.restart().wait()

    # execute the first run command
    print "Executing first run command. This might take a while."
    cmd = cluster.first_run()
    while cmd.success == None:
        cmd = cmd.fetch()
    if cmd.success != True:
        print "The first run command failed: " + cmd.resultMessage
        exit(0)

    print "First run successfully executed. Your cluster has been set up!"
def create_default_cluster(self):
    """
    Create a default cluster and Cloudera Manager Service on master host
    """
    log.info("Creating a new Cloudera Cluster")

    # create the management service
    # first check if the management service already exists
    service_setup = ApiServiceSetupInfo(name=self.cm_service_name, type="MGMT")
    self.cm_manager.create_mgmt_service(service_setup)

    # install hosts on this CM instance
    cmd = self.cm_manager.host_install(self.host_username, self.host_list,
                                       password=self.host_password, cm_repo_url=self.cm_repo_url)
    log.debug("Installing hosts. This might take a while...")
    while cmd.success is None:
        sleep(5)
        cmd = cmd.fetch()

    if cmd.success is not True:
        log.error("Adding hosts to Cloudera Manager failed: {0}".format(cmd.resultMessage))
    log.info("Host added to Cloudera Manager")

    # first auto-assign roles and auto-configure the CM service
    self.cm_manager.auto_assign_roles()
    self.cm_manager.auto_configure()

    # create a cluster on that instance
    cluster = self.cm_api_resource.create_cluster(self.cluster_name, self.cdh_version)
    log.info("Cloudera cluster: {0} created".format(self.cluster_name))

    # add all hosts on the cluster
    cluster.add_hosts(self.host_list)
    cluster = self.cm_api_resource.get_cluster(self.cluster_name)

    # get and list all available parcels
    parcels_list = []
    log.debug("Installing parcels...")
    for p in cluster.get_all_parcels():
        print '\t' + p.product + ' ' + p.version
        if p.version.startswith(self.cdh_version_number) and p.product == "CDH":
            parcels_list.append(p)

    if len(parcels_list) == 0:
        log.error("No {0} parcel found!".format(self.cdh_version))

    cdh_parcel = parcels_list[0]
    for p in parcels_list:
        if p.version > cdh_parcel.version:
            cdh_parcel = p

    # download the parcel
    log.debug("Starting parcel downloading...")
    cmd = cdh_parcel.start_download()
    if cmd.success is not True:
        log.error("Parcel download failed!")
    # make sure the download finishes
    while cdh_parcel.stage != 'DOWNLOADED':
        sleep(5)
        cdh_parcel = get_parcel(self.cm_api_resource, cdh_parcel.product, cdh_parcel.version, self.cluster_name)
    log.info("Parcel: {0} {1} downloaded".format(cdh_parcel.product, cdh_parcel.version))

    # distribute the parcel
    log.info("Distributing parcels...")
    cmd = cdh_parcel.start_distribution()
    if cmd.success is not True:
        log.error("Parcel distribution failed!")
    # make sure the distribution finishes
    while cdh_parcel.stage != "DISTRIBUTED":
        sleep(5)
        cdh_parcel = get_parcel(self.cm_api_resource, cdh_parcel.product, cdh_parcel.version, self.cluster_name)
    log.info("Parcel: {0} {1} distributed".format(cdh_parcel.product, cdh_parcel.version))

    # activate the parcel
    log.info("Activating parcels...")
    cmd = cdh_parcel.activate()
    if cmd.success is not True:
        log.error("Parcel activation failed!")
    # make sure the activation finishes
    while cdh_parcel.stage != "ACTIVATED":
        cdh_parcel = get_parcel(self.cm_api_resource, cdh_parcel.product, cdh_parcel.version, self.cluster_name)
    log.info("Parcel: {0} {1} activated".format(cdh_parcel.product, cdh_parcel.version))

    # inspect hosts and print the result
    log.info("Inspecting hosts. This might take a few minutes")
    cmd = self.cm_manager.inspect_hosts()
    while cmd.success is None:
        sleep(5)
        cmd = cmd.fetch()
    if cmd.success is not True:
        log.error("Host inspection failed!")
    log.info("Hosts successfully inspected:\n{0}".format(cmd.resultMessage))
    log.info("Cluster {0} installed".format(self.cluster_name))
def set_up_cluster():
    # get a handle on the instance of CM that we have running
    api = ApiResource(cm_host, cm_port, cm_username, cm_password, version=7)

    # get the CM instance
    cm = ClouderaManager(api)

    # activate the CM trial license
    cm.begin_trial()

    # create the management service
    service_setup = ApiServiceSetupInfo(name=cm_service_name, type="MGMT")
    cm.create_mgmt_service(service_setup)

    # install hosts on this CM instance
    cmd = cm.host_install(host_username, host_list, password=host_password, cm_repo_url=cm_repo_url)
    print "Installing hosts. This might take a while."
    while cmd.success == None:
        sleep(5)
        cmd = cmd.fetch()

    if cmd.success != True:
        print "cm_host_install failed: " + cmd.resultMessage
        exit(0)
    print "cm_host_install succeeded"

    # first auto-assign roles and auto-configure the CM service
    cm.auto_assign_roles()
    cm.auto_configure()

    # create a cluster on that instance
    cluster = create_cluster(api, cluster_name, cdh_version)

    # add all our hosts to the cluster
    cluster.add_hosts(host_list)
    cluster = api.get_cluster("Cluster 1")

    parcels_list = []
    # get and list all available parcels
    print "Available parcels:"
    for p in cluster.get_all_parcels():
        print '\t' + p.product + ' ' + p.version
        if p.version.startswith(cdh_version_number) and p.product == "CDH":
            parcels_list.append(p)

    if len(parcels_list) == 0:
        print "No " + cdh_version + " parcel found!"
        exit(0)

    cdh_parcel = parcels_list[0]
    for p in parcels_list:
        if p.version > cdh_parcel.version:
            cdh_parcel = p

    # download the parcel
    print "Starting parcel download. This might take a while."
    cmd = cdh_parcel.start_download()
    if cmd.success != True:
        print "Parcel download failed!"
        exit(0)
    # make sure the download finishes
    while cdh_parcel.stage != 'DOWNLOADED':
        sleep(5)
        cdh_parcel = get_parcel(api, cdh_parcel.product, cdh_parcel.version, cluster_name)
    print cdh_parcel.product + ' ' + cdh_parcel.version + " downloaded"

    # distribute the parcel
    print "Starting parcel distribution. This might take a while."
    cmd = cdh_parcel.start_distribution()
    if cmd.success != True:
        print "Parcel distribution failed!"
        exit(0)
    # make sure the distribution finishes
    while cdh_parcel.stage != "DISTRIBUTED":
        sleep(5)
        cdh_parcel = get_parcel(api, cdh_parcel.product, cdh_parcel.version, cluster_name)
    print cdh_parcel.product + ' ' + cdh_parcel.version + " distributed"

    # activate the parcel
    cmd = cdh_parcel.activate()
    if cmd.success != True:
        print "Parcel activation failed!"
        exit(0)
    # make sure the activation finishes
    while cdh_parcel.stage != "ACTIVATED":
        cdh_parcel = get_parcel(api, cdh_parcel.product, cdh_parcel.version, cluster_name)
    print cdh_parcel.product + ' ' + cdh_parcel.version + " activated"

    # inspect hosts and print the result
    print "Inspecting hosts. This might take a few minutes."
    cmd = cm.inspect_hosts()
    while cmd.success == None:
        cmd = cmd.fetch()
    if cmd.success != True:
        print "Host inspection failed!"
        exit(0)
    print "Hosts successfully inspected: \n" + cmd.resultMessage

    # create all the services we want to add; we will only create one instance
    # of each
    for s in service_types_and_names.keys():
        service = cluster.create_service(service_types_and_names[s], s)

    # we will auto-assign roles; you can manually assign roles using the
    # /clusters/{clusterName}/services/{serviceName}/role endpoint or by using
    # ApiService.createRole()
    cluster.auto_assign_roles()
    cluster.auto_configure()

    # this will set up the Hive and the reports manager databases because we
    # can't auto-configure those two things
    hive = cluster.get_service(service_types_and_names["HIVE"])
    hive_config = {"hive_metastore_database_host": hive_metastore_host,
                   "hive_metastore_database_name": hive_metastore_name,
                   "hive_metastore_database_password": hive_metastore_password,
                   "hive_metastore_database_port": hive_metastore_database_port,
                   "hive_metastore_database_type": hive_metastore_database_type}
    hive.update_config(hive_config)

    # start the management service
    cm_service = cm.get_service()
    cm_service.start().wait()

    # this will set the Reports Manager database password
    # first we find the correct role
    rm_role = None
    for r in cm.get_service().get_all_roles():
        if r.type == "REPORTSMANAGER":
            rm_role = r
    if rm_role == None:
        print "No REPORTSMANAGER role found!"
        exit(0)

    # then we get the corresponding role config group -- even though there is
    # only one instance of each CM management service, we do this just in case
    # it is not placed in the base group
    rm_role_group = rm_role.roleConfigGroupRef
    rm_rcg = get_role_config_group(api, rm_role.type,
                                   rm_role_group.roleConfigGroupName, None)

    # update the appropriate fields in the config
    rm_rcg_config = {"headlamp_database_host": reports_manager_host,
                     "headlamp_database_name": reports_manager_name,
                     "headlamp_database_user": reports_manager_username,
                     "headlamp_database_password": reports_manager_password,
                     "headlamp_database_type": reports_manager_database_type}
    rm_rcg.update_config(rm_rcg_config)

    # restart the management service with new configs
    cm_service.restart().wait()

    # execute the first run command
    print "Executing first run command. This might take a while."
    cmd = cluster.first_run()
    while cmd.success == None:
        cmd = cmd.fetch()
    if cmd.success != True:
        print "The first run command failed: " + cmd.resultMessage
        exit(0)

    print "First run successfully executed. Your cluster has been set up!"
def create_cluster(self):
    """
    Create a cluster and Cloudera Manager Service on master host
    """
    log.info("Creating Cloudera cluster: '{0}'. Please wait...".format(self.cluster_name))

    ### CM Definitions ###
    CM_CONFIG = {
        'TSQUERY_STREAMS_LIMIT': 1000,
    }

    ### Create and deploy new cluster ###
    ar = self.cm_api_resource
    manager = self.cm_manager
    manager.update_config(CM_CONFIG)
    log.info("Connected to CM host on " + self.cm_host + " and updated CM configuration")

    ## Initialize a cluster ##
    cluster = self.init_cluster(ar)
    log.info("Initialized cluster " + self.cluster_name +
             " which uses CDH version " + self.cdh_version_number)

    ## Deploy management service ##
    deploy_management(manager)
    log.info("Deployed CM management service " + self.mgmt_service_name +
             " to run on " + self.cm_host)

    # install hosts on this CM instance
    cmd = self.cm_manager.host_install(self.host_username, self.host_list,
                                       password=self.host_password, cm_repo_url=self.cm_repo_url)
    log.debug("Installing hosts. This might take a while...")
    while cmd.success is None:
        sleep(5)
        cmd = cmd.fetch()

    if cmd.success is not True:
        log.error("Adding hosts to Cloudera Manager failed: {0}".format(cmd.resultMessage))
    log.debug("Host added to Cloudera Manager")

    # first auto-assign roles and auto-configure the CM service
    self.cm_manager.auto_assign_roles()
    self.cm_manager.auto_configure()

    # create a cluster on that instance
    cluster = self.cm_api_resource.create_cluster(self.cluster_name, self.cdh_version)
    log.debug("Cloudera cluster: {0} created".format(self.cluster_name))

    # add all hosts on the cluster
    cluster.add_hosts(self.host_list)
    cluster = self.cm_api_resource.get_cluster(self.cluster_name)

    # get and list all available parcels
    parcels_list = []
    log.debug("Installing parcels...")
    for p in cluster.get_all_parcels():
        print '\t' + p.product + ' ' + p.version
        if p.version.startswith(self.cdh_version_number) and p.product == "CDH":
            parcels_list.append(p)

    if len(parcels_list) == 0:
        log.error("No {0} parcel found!".format(self.cdh_version))

    cdh_parcel = parcels_list[0]
    for p in parcels_list:
        if p.version > cdh_parcel.version:
            cdh_parcel = p

    # download the parcel
    log.debug("Starting parcel downloading...")
    cmd = cdh_parcel.start_download()
    if cmd.success is not True:
        log.error("Parcel download failed!")
    # make sure the download finishes
    while cdh_parcel.stage != 'DOWNLOADED':
        sleep(5)
        cdh_parcel = get_parcel(self.cm_api_resource, cdh_parcel.product, cdh_parcel.version, self.cluster_name)
    log.debug("Parcel: {0} {1} downloaded".format(cdh_parcel.product, cdh_parcel.version))

    # distribute the parcel
    log.debug("Distributing parcels...")
    cmd = cdh_parcel.start_distribution()
    if cmd.success is not True:
        log.error("Parcel distribution failed!")
    # make sure the distribution finishes
    while cdh_parcel.stage != "DISTRIBUTED":
        sleep(5)
        cdh_parcel = get_parcel(self.cm_api_resource, cdh_parcel.product, cdh_parcel.version, self.cluster_name)
    log.debug("Parcel: {0} {1} distributed".format(cdh_parcel.product, cdh_parcel.version))

    # activate the parcel
    log.debug("Activating parcels...")
    cmd = cdh_parcel.activate()
    if cmd.success is not True:
        log.error("Parcel activation failed!")
    # make sure the activation finishes
    while cdh_parcel.stage != "ACTIVATED":
        cdh_parcel = get_parcel(self.cm_api_resource, cdh_parcel.product, cdh_parcel.version, self.cluster_name)
    log.debug("Parcel: {0} {1} activated".format(cdh_parcel.product, cdh_parcel.version))

    # inspect hosts and print the result
    log.debug("Inspecting hosts. This might take a few minutes")
    cmd = self.cm_manager.inspect_hosts()
    while cmd.success is None:
        sleep(5)
        cmd = cmd.fetch()
    if cmd.success is not True:
        log.error("Host inspection failed!")
    log.debug("Hosts successfully inspected:\n{0}".format(cmd.resultMessage))
    log.info("Cluster '{0}' installed".format(self.cluster_name))
def create_default_cluster(self):
    """
    Create a default cluster and Cloudera Manager Service on master host
    """
    log.info("Creating a new Cloudera Cluster")

    # self.cm_host = socket.gethostname()
    log.debug("Cloudera adding host: {0}".format(self.cm_host))
    self.host_list.append(self.cm_host)

    # create the management service
    # first check if the management service already exists
    service_setup = ApiServiceSetupInfo(name=self.cm_service_name, type="MGMT")
    self.cm_manager.create_mgmt_service(service_setup)

    # install hosts on this CM instance
    cmd = self.cm_manager.host_install(self.host_username, self.host_list,
                                       password=self.host_password, cm_repo_url=self.cm_repo_url)
    log.debug("Installing hosts. This might take a while...")
    while cmd.success is None:
        sleep(5)
        cmd = cmd.fetch()

    if cmd.success is not True:
        log.error("Adding hosts to Cloudera Manager failed: {0}".format(cmd.resultMessage))
    log.info("Host added to Cloudera Manager")

    # first auto-assign roles and auto-configure the CM service
    self.cm_manager.auto_assign_roles()
    self.cm_manager.auto_configure()

    # create a cluster on that instance
    cluster = self.cm_api_resource.create_cluster(self.cluster_name, self.cdh_version)
    log.info("Cloudera cluster: {0} created".format(self.cluster_name))

    # add all hosts on the cluster
    cluster.add_hosts(self.host_list)
    cluster = self.cm_api_resource.get_cluster(self.cluster_name)

    # get and list all available parcels
    parcels_list = []
    log.debug("Installing parcels...")
    for p in cluster.get_all_parcels():
        print '\t' + p.product + ' ' + p.version
        if p.version.startswith(self.cdh_version_number) and p.product == "CDH":
            parcels_list.append(p)

    if len(parcels_list) == 0:
        log.error("No {0} parcel found!".format(self.cdh_version))

    cdh_parcel = parcels_list[0]
    for p in parcels_list:
        if p.version > cdh_parcel.version:
            cdh_parcel = p

    # download the parcel
    log.debug("Starting parcel downloading...")
    cmd = cdh_parcel.start_download()
    if cmd.success is not True:
        log.error("Parcel download failed!")
    # make sure the download finishes
    while cdh_parcel.stage != 'DOWNLOADED':
        sleep(5)
        cdh_parcel = get_parcel(self.cm_api_resource, cdh_parcel.product, cdh_parcel.version, self.cluster_name)
    log.info("Parcel: {0} {1} downloaded".format(cdh_parcel.product, cdh_parcel.version))

    # distribute the parcel
    log.info("Distributing parcels...")
    cmd = cdh_parcel.start_distribution()
    if cmd.success is not True:
        log.error("Parcel distribution failed!")
    # make sure the distribution finishes
    while cdh_parcel.stage != "DISTRIBUTED":
        sleep(5)
        cdh_parcel = get_parcel(self.cm_api_resource, cdh_parcel.product, cdh_parcel.version, self.cluster_name)
    log.info("Parcel: {0} {1} distributed".format(cdh_parcel.product, cdh_parcel.version))

    # activate the parcel
    log.info("Activating parcels...")
    cmd = cdh_parcel.activate()
    if cmd.success is not True:
        log.error("Parcel activation failed!")
    # make sure the activation finishes
    while cdh_parcel.stage != "ACTIVATED":
        cdh_parcel = get_parcel(self.cm_api_resource, cdh_parcel.product, cdh_parcel.version, self.cluster_name)
    log.info("Parcel: {0} {1} activated".format(cdh_parcel.product, cdh_parcel.version))

    # inspect hosts and print the result
    log.info("Inspecting hosts. This might take a few minutes")
    cmd = self.cm_manager.inspect_hosts()
    while cmd.success is None:
        sleep(5)
        cmd = cmd.fetch()
    if cmd.success is not True:
        log.error("Host inspection failed!")
    log.info("Hosts successfully inspected:\n{0}".format(cmd.resultMessage))
    log.info("Cluster {0} installed".format(self.cluster_name))
def deploy_parcels(self):
    """
    Downloads and distributes parcels
    """
    # get and list all available parcels
    parcels_list = []
    for p in self.cluster.get_all_parcels():
        print '\t' + p.product + ' ' + p.version
        if p.version.startswith(self.cdh_version_number) and p.product == "CDH":
            parcels_list.append(p)

    if len(parcels_list) == 0:
        log.error("No {0} parcel found!".format(self.cdh_version))

    cdh_parcel = parcels_list[0]
    for p in parcels_list:
        if p.version > cdh_parcel.version:
            cdh_parcel = p

    # download the parcel
    log.debug("Starting parcel downloading...")
    cmd = cdh_parcel.start_download()
    if cmd.success is not True:
        log.error("Parcel download failed!")
    # make sure the download finishes
    while cdh_parcel.stage != 'DOWNLOADED':
        sleep(5)
        cdh_parcel = get_parcel(self.cm_api_resource, cdh_parcel.product, cdh_parcel.version, self.cluster_name)
    log.debug("Parcel: {0} {1} downloaded".format(cdh_parcel.product, cdh_parcel.version))

    # distribute the parcel
    log.debug("Distributing parcels...")
    cmd = cdh_parcel.start_distribution()
    if cmd.success is not True:
        log.error("Parcel distribution failed!")
    # make sure the distribution finishes
    while cdh_parcel.stage != "DISTRIBUTED":
        sleep(5)
        cdh_parcel = get_parcel(self.cm_api_resource, cdh_parcel.product, cdh_parcel.version, self.cluster_name)
    log.debug("Parcel: {0} {1} distributed".format(cdh_parcel.product, cdh_parcel.version))

    # activate the parcel
    log.debug("Activating parcels...")
    cmd = cdh_parcel.activate()
    if cmd.success is not True:
        log.error("Parcel activation failed!")
    # make sure the activation finishes
    while cdh_parcel.stage != "ACTIVATED":
        cdh_parcel = get_parcel(self.cm_api_resource, cdh_parcel.product, cdh_parcel.version, self.cluster_name)
    log.debug("Parcel: {0} {1} activated".format(cdh_parcel.product, cdh_parcel.version))