def vm_placement(self, obj_attr_list):
    '''
    Resolve the availability zone for a new instance, optionally pinning it
    to a specific compute node ("zone:host") when "compute_node" is given.
    Always returns True; the result is written to obj_attr_list["availability_zone"].
    '''
    zone = None
    # An availability zone shorter than 2 characters is treated as unset.
    if len(obj_attr_list["availability_zone"]) > 1:
        zone = obj_attr_list["availability_zone"]

    if zone and "compute_node" in obj_attr_list:
        # Find the hypervisor whose hostname contains the requested compute node.
        for hypervisor in self.oskconncompute[obj_attr_list["name"]].hypervisors.list():
            if hypervisor.hypervisor_hostname.count(obj_attr_list["compute_node"]):
                obj_attr_list["host_name"] = hypervisor.hypervisor_hostname
                break

        if "host_name" in obj_attr_list:
            # Nova accepts "zone:host" to force placement on a specific host.
            zone = zone + ':' + obj_attr_list["host_name"]
        else:
            cbwarn("Unable to find the compute_node \"" + obj_attr_list["compute_node"]
                   + "\", indicated during the instance creation. Will let"
                   + " the scheduler pick a compute node")

    obj_attr_list["availability_zone"] = zone
    return True
def vmcreate(self, obj_attr_list):
    '''
    Create the container via the parent KubCmds implementation, then record
    in the datastore which physical k8s node (droplet) the container landed
    on, so its placement can be tracked later.
    '''
    status, msg = 0, ""
    try:
        # Delegate the actual creation to the parent class first.
        status, msg = KubCmds.vmcreate(self, obj_attr_list)
        if not status:
            vmc_info = self.osci.get_object(
                obj_attr_list["cloud_name"], "VMC", False,
                obj_attr_list["vmc"], False)
            # Match the scheduled k8s node against the VMC's droplet list.
            for entry in loads(vmc_info["droplets"]):
                if entry["name"] != obj_attr_list["node"]:
                    continue
                obj_attr_list["host_name"] = entry["id"]
                cbdebug(
                    "Container " + obj_attr_list["name"] + " sent to Node " +
                    obj_attr_list["node"] + " = " +
                    str(obj_attr_list["host_name"]), True)
                obj_attr_list["droplet"] = dumps(entry)
                break
    except Exception:
        # Best-effort bookkeeping: log the traceback, never fail the create here.
        for line in traceback.format_exc().splitlines():
            cbwarn(line, True)
    finally:
        status, msg = self.common_messages("VM", obj_attr_list, "created",
                                           status, msg)
    return status, msg
def vmcregister(self, obj_attr_list) :
    '''
    Register a VMC: find its entry in the CLUSTERS configuration list,
    create the managed k8s cluster if no kubeconfig exists yet, then
    delegate to KubCmds.vmcregister. Returns (status, msg).
    '''
    status = 0
    msg = ""
    try :
        cluster_list = obj_attr_list["clusters"].lower().strip().split(",")
        region = False
        version = False
        worker_size = False
        nb_workers = False
        # FIX: the loop assigns "region" on every iteration, so the old
        # "if not region" check never detected a missing VMC and silently
        # used the last cluster's settings. Track the match explicitly.
        found = False
        for cluster in cluster_list :
            name, region, version, worker_size, nb_workers = cluster.split(":")
            if name == obj_attr_list["name"] :
                found = True
                cbdebug("VMC " + name + " in " + region + " using version " + version + " and " + nb_workers + " workers each of size " + worker_size, True)
                break

        if not found :
            # FIX: concatenating the list itself raised TypeError.
            return 104, "VMC " + obj_attr_list["name"] + " not found in CLUSTERS configuration list. Please correct and try again: " + ",".join(cluster_list)

        obj_attr_list["region"] = region
        obj_attr_list["nb_workers"] = nb_workers
        obj_attr_list["k8s_version"] = version
        obj_attr_list["k8s_worker_size"] = worker_size

        if "kubeconfig" not in obj_attr_list :
            self.access = obj_attr_list["access"]
            self.headers = {"Authorization" : "Bearer " + obj_attr_list["credentials"]}
            obj_attr_list["kuuid"], obj_attr_list["kubeconfig"] = self.create_cluster(obj_attr_list)
            if not obj_attr_list["kuuid"] :
                return 458, "vmcregister did not find a UUID, No k8s for you."

        status, msg = KubCmds.vmcregister(self, obj_attr_list)
    except Exception as e :
        for line in traceback.format_exc().splitlines() :
            cbwarn(line, True)

    status, msg = self.common_messages("VMC", obj_attr_list, "registered", status, msg)
    # FIX: the captured code dropped off the end returning None; callers
    # expect a (status, msg) tuple as in the sibling vmcreate.
    return status, msg
def vmcregister(self, obj_attr_list) :
    '''
    Register a VMC: find its entry in the CLUSTERS configuration list,
    create the managed k8s cluster if no kubeconfig exists yet, then
    delegate to KubCmds.vmcregister. Returns (status, msg).
    '''
    status = 0
    msg = ""
    try :
        cluster_list = obj_attr_list["clusters"].lower().strip().split(",")
        region = False
        version = False
        worker_size = False
        nb_workers = False
        # FIX: the loop assigns "region" on every iteration, so the old
        # "if not region" check never detected a missing VMC and silently
        # used the last cluster's settings. Track the match explicitly.
        found = False
        for cluster in cluster_list :
            name, region, version, worker_size, nb_workers = cluster.split(":")
            if name == obj_attr_list["name"] :
                found = True
                cbdebug("VMC " + name + " in " + region + " using version " + version + " and " + nb_workers + " workers each of size " + worker_size, True)
                break

        if not found :
            # FIX: concatenating the list itself raised TypeError.
            return 104, "VMC " + obj_attr_list["name"] + " not found in CLUSTERS configuration list. Please correct and try again: " + ",".join(cluster_list)

        obj_attr_list["region"] = region
        obj_attr_list["nb_workers"] = nb_workers
        obj_attr_list["k8s_version"] = version
        obj_attr_list["k8s_worker_size"] = worker_size

        if "kubeconfig" not in obj_attr_list :
            self.access = obj_attr_list["access"]
            self.headers = {"Authorization" : "Bearer " + obj_attr_list["credentials"]}
            obj_attr_list["kuuid"], obj_attr_list["kubeconfig"] = self.create_cluster(obj_attr_list)
            if not obj_attr_list["kuuid"] :
                return 458, "vmcregister did not find a UUID, No k8s for you."

        status, msg = KubCmds.vmcregister(self, obj_attr_list)
    except Exception as e :
        for line in traceback.format_exc().splitlines() :
            cbwarn(line, True)

    status, msg = self.common_messages("VMC", obj_attr_list, "registered", status, msg)
    # FIX: the captured code dropped off the end returning None; callers
    # expect a (status, msg) tuple as in the sibling vmcreate.
    return status, msg
# Open (or reuse) a connection to the MySQL server and select self.database,
# tolerating the database not existing yet (it is created later).
# NOTE(review): this definition is whitespace-mangled onto one physical line;
# the inline "#if tout ..." comment makes the true statement boundaries
# unrecoverable here, so the code is left untouched. Observations:
#   - "tout" is only used in the log message; the commented-out code suggests
#     it was meant to set a connection timeout — confirm against upstream.
#   - "use " + self.database builds SQL by concatenation; the database name
#     comes from configuration, but this is injection-prone if it ever
#     becomes user-controlled.
#   - ER_ACCESS_DENIED_ERROR raises MetricStoreMgdConnException(.., 1);
#     any other mysql.connector.Error raises code 2.
def connect(self, tout): try: #if tout and tout > 0: # MysqlMgdConn.conn.set_connection_timeout(tout) if not self.mysql_conn or not self.mysql_conn.is_connected(): cbdebug("Opening to: " + self.database) self.mysql_conn = mysql.connector.connect( host=self.host, port=self.port, user=self.username, password=self.password) cursor = self.mysql_conn.cursor() try: cursor.execute("use " + self.database) except mysql.connector.Error as err: if err.errno == mysql.connector.errorcode.ER_BAD_DB_ERROR: cbwarn("Database not found. Will create later.") cursor.close() _msg = "A connection to MySQL running on host " _msg += self.host + ", port " + str(self.port) _msg += ", with a timeout of " _msg += str(tout) + "s was established." cbdebug(_msg) except mysql.connector.Error as err: if err.errno == mysql.connector.errorcode.ER_ACCESS_DENIED_ERROR: _msg = "Something is wrong with your MySQL user name or password." cberr(_msg) raise MetricStoreMgdConnException(str(_msg), 1) else: _msg = "Unknown MySQL error: " + str(err) cberr(_msg) raise MetricStoreMgdConnException(str(_msg), 2)
def get_instances(self, obj_attr_list, obj_type="vm", endpoints="all", identifier="all"):
    '''
    Fetch container instance(s) from one LXD endpoint or from every known
    endpoint. Returns the instance list (or the single instance from
    containers.get()); returns [] and warns after a tolerated API error,
    raising CldOpsException once the per-identifier error budget is exceeded.
    '''
    _instances = []
    _fmsg = "Error while getting instances"
    _call = "NA"

    target_endpoints = list(self.lxdconn.keys()) if endpoints == "all" else [endpoints]

    try:
        # NOTE(review): with several endpoints each iteration overwrites
        # _instances, so only the last endpoint's result is returned —
        # preserved as-is; confirm whether accumulation was intended.
        for endpoint in target_endpoints:
            if identifier == "all":
                _call = "containers.all()"
                _instances = self.lxdconn[endpoint].containers.all()
            else:
                _call = "containers.get()"
                _instances = self.lxdconn[endpoint].containers.get(identifier)
        _status = 0
    except CldOpsException as obj:
        _status = obj.status
        _xfmsg = str(obj.msg)
    except LXDError.LXDAPIException as obj:
        _status = 18127
        _xfmsg = str(obj)
    except Exception as e:
        _status = 23
        _xfmsg = str(e)
    finally:
        if not _status:
            return _instances

        _fmsg = "(While getting instance(s) through API call \"" + _call + "\") " + _xfmsg
        # Count consecutive API errors per identifier; give up past the limit.
        self.api_error_counter[identifier] = self.api_error_counter.get(identifier, 0) + 1
        if self.api_error_counter[identifier] > self.max_api_errors:
            raise CldOpsException(_fmsg, _status)
        cbwarn(_fmsg)
        return []
def update_document(self, table, document, disconnect_finish=False):
    '''
    Update "document" (a JSON blob) in "table" under the row id stored in
    document["original_mysql_id"]. If the id is unknown, it is looked up by
    the document's "_id"; if still unknown, the document is inserted instead.
    Serialized by self.operation_mutex. Raises MetricStoreMgdConnException
    on failure.
    '''
    table = table.replace('-', "dash")
    self.operation_mutex.acquire()
    try:
        cursor = self.conn_check()

        if "_id" in document and isinstance(document["_id"], bytes):
            document["_id"] = document["_id"].decode("utf-8")

        if "original_mysql_id" not in document:
            if "_id" in document:
                # Attempt to find the original ID first.
                # NOTE(review): SQL built by string concatenation — fine for
                # internal ids, but injection-prone; consider parameterizing.
                statement = "select id from " + table + " where _id = '" + document["_id"] + "'"
                cursor.execute(statement)
                while True:
                    rows = cursor.fetchmany(1)
                    if not len(rows):
                        break
                    for (original_mysql_id, ) in rows:
                        document["original_mysql_id"] = original_mysql_id

            if "original_mysql_id" not in document:
                cursor.close()
                self.operation_mutex.release()
                cbwarn("This document does not have a pre-existing identifier. Cannot update. Will insert first")
                document["original_mysql_id"] = self.add_document(table, document, disconnect_finish=disconnect_finish)
                return

        statement = "update " + table + " set document = '" + json.dumps(document) + "' where id = " + str(document["original_mysql_id"])
        result = cursor.execute(statement)
        cursor.close()
        self.mysql_conn.commit()

        if disconnect_finish:
            self.disconnect()
    except mysql.connector.Error as err:
        self.operation_mutex.release()
        _msg = "Unable to update documents from the table \""
        _msg += table + ": " + str(err)
        cberr(_msg)
        raise MetricStoreMgdConnException(str(_msg), 8)
    except Exception as e:
        self.operation_mutex.release()
        # FIX: the original referenced _msg here before assignment, raising
        # NameError and masking the real error. Build the message from "e".
        _msg = "Unable to update documents from the table \""
        _msg += table + ": " + str(e)
        cberr(_msg)
        raise MetricStoreMgdConnException(str(_msg), 67)
    self.operation_mutex.release()
def connect(self, access, credentials, vmc_name, extra_parms = None, diag = False, generate_rc = False) :
    '''
    Parse the kubeconfig (when present and not in diagnostic mode) into
    extra_parms["kubeyaml"], then delegate to KubCmds.connect.
    Exceptions are logged line-by-line and re-raised.
    '''
    # FIX: the original default "extra_parms = {}" was a mutable default that
    # the body mutates, leaking state across calls. None-sentinel instead.
    if extra_parms is None :
        extra_parms = {}
    try :
        extra_parms["kubeyaml"] = False
        if not diag :
            if "kubeconfig" in extra_parms and extra_parms["kubeconfig"] :
                extra_parms["kubeyaml"] = yaml.safe_load(extra_parms["kubeconfig"])
        return KubCmds.connect(self, access, credentials, vmc_name, extra_parms, diag, generate_rc)
    except Exception as e :
        for line in traceback.format_exc().splitlines() :
            cbwarn(line, True)
        # FIX: bare raise preserves the original traceback ("raise e" rewrote it).
        raise
def connect(self, access, credentials, vmc_name, extra_parms = None, diag = False, generate_rc = False) :
    '''
    Parse the kubeconfig (when present and not in diagnostic mode) into
    extra_parms["kubeyaml"], then delegate to KubCmds.connect.
    Exceptions are logged line-by-line and re-raised.
    '''
    # FIX: the original default "extra_parms = {}" was a mutable default that
    # the body mutates, leaking state across calls. None-sentinel instead.
    if extra_parms is None :
        extra_parms = {}
    try :
        extra_parms["kubeyaml"] = False
        if not diag :
            if "kubeconfig" in extra_parms and extra_parms["kubeconfig"] :
                extra_parms["kubeyaml"] = yaml.safe_load(extra_parms["kubeconfig"])
        return KubCmds.connect(self, access, credentials, vmc_name, extra_parms, diag, generate_rc)
    except Exception as e :
        for line in traceback.format_exc().splitlines() :
            cbwarn(line, True)
        # FIX: bare raise preserves the original traceback ("raise e" rewrote it).
        raise
def select_url(source, depsdict):
    '''
    Probe the configured repository addresses for "source" ("repo" for the
    package repository, anything else for the pip repository) and record the
    last reachable one in depsdict[source + "_addr"], plus derived dropbox
    and credential URLs. Always returns True.
    '''
    depsdict[source + "_addr_list"] = []
    depsdict[source + "_addr"] = False

    if source == "repo":
        _element = "package repository"
    else:
        _element = "python pip repository"

    print('\n')
    cbinfo("Selecting " + _element + " address....")

    # Collect "<source>-addrN" keys in index order.
    for _key in sorted(depsdict.keys()):
        if _key.count(source + "-addr"):
            _index = int(_key.replace(source + "-addr", ''))
            depsdict[source + "_addr_list"].insert(_index, depsdict[_key])

    for _repo_addr in depsdict[source + "_addr_list"]:
        if check_url("http://" + _repo_addr, "ARCH", depsdict["carch"]):
            depsdict[source + "_addr"] = _repo_addr

    if len(depsdict[source + "_addr_list"]):
        if depsdict[source + "_addr"]:
            _msg = "A " + _element + " in \"" + depsdict[source + "_addr"] + "\" seems to be up"
            depsdict[source + "_dropbox"] = "http://" + depsdict[source + "_addr"] + "/dropbox"
            depsdict[source + "_credentials_url"] = "http://" + depsdict[source + "_addr"] + "/dropbox/ssh_keys"
            cbinfo(_msg)
        else:
            # FIX: .replace() previously bound only to the trailing literal
            # (which contains no "repository"), so it was dead code;
            # parenthesize so it pluralizes the whole message as intended.
            _msg = ("##### None of the indicated " + _element + " was available. ").replace("repository", "repositories")
            if source == "repo":
                _msg += "Will ignore any repository URL that has the keyword REPO_ADDR..."
            cbwarn(_msg)
    else:
        _msg = ("##### No " + _element + " specified. ").replace("repository", "repositories")
        if source == "repo":
            _msg += "Will ignore any repository URL that has the keyword REPO_ADDR..."
        cbwarn(_msg)
    return True
def disconnect(self):
    '''
    Close the adapter's connection. There is nothing to release for this
    backend, so this is a deliberate no-op that only keeps the common
    status-variable shape used by the other adapters. Returns None.
    '''
    try:
        _status = 100
        _fmsg = "An error has occurred, but no error message was captured"
        # Nothing to do
        _status = 0
    except AttributeError:
        # If the "close" method does not exist, proceed normally.
        cbwarn("The \"close\" method does not exist or is not callable")
        _status = 0
    except Exception as e:
        # FIX: was Python-2-only "except Exception, e" syntax.
        _status = 23
        _fmsg = str(e)
def vmcunregister(self, obj_attr_list) :
    '''
    Unregister the VMC via KubCmds, then destroy the backing managed k8s
    cluster when one was created for it. Returns (status, msg).
    '''
    status = 0
    msg = ""
    try :
        self.access = obj_attr_list["access"]
        self.headers = {"Authorization" : "Bearer " + obj_attr_list["credentials"]}
        status, msg = KubCmds.vmcunregister(self, obj_attr_list, force_all = True)
        if status == 0 and "kubeconfig" in obj_attr_list :
            if obj_attr_list["kuuid"] :
                success = self.destroy_cluster(obj_attr_list, obj_attr_list["kuuid"])
                if not success :
                    status = 463
                    msg = "Failed to destroy k8s cluster"
    except Exception as e :
        # FIX: was Python-2-only "except Exception, e" syntax.
        for line in traceback.format_exc().splitlines() :
            cbwarn(line, True)

    status, msg = self.common_messages("VMC", obj_attr_list, "unregistered", status, msg)
    # FIX: the captured code returned None; callers expect (status, msg).
    return status, msg
def vmcunregister(self, obj_attr_list) :
    '''
    Unregister the VMC via KubCmds, then destroy the backing managed k8s
    cluster when one was created for it. Returns (status, msg).
    '''
    status = 0
    msg = ""
    try :
        self.access = obj_attr_list["access"]
        self.headers = {"Authorization" : "Bearer " + obj_attr_list["credentials"]}
        status, msg = KubCmds.vmcunregister(self, obj_attr_list, force_all = True)
        if status == 0 and "kubeconfig" in obj_attr_list :
            if obj_attr_list["kuuid"] :
                success = self.destroy_cluster(obj_attr_list, obj_attr_list["kuuid"])
                if not success :
                    status = 463
                    msg = "Failed to destroy k8s cluster"
    except Exception as e :
        # FIX: was Python-2-only "except Exception, e" syntax.
        for line in traceback.format_exc().splitlines() :
            cbwarn(line, True)

    status, msg = self.common_messages("VMC", obj_attr_list, "unregistered", status, msg)
    # FIX: the captured code returned None; callers expect (status, msg).
    return status, msg
def extra_vmccleanup(self, obj_attr_list):
    '''
    Delete leftover public IPs belonging to this user/cloud from every
    tenant's resource group, retrying with backoff until none remain.
    Always returns True (or raises CldOpsException on a malformed API reply).
    '''
    _wait = int(obj_attr_list["update_frequency"])
    _existing_pips = True

    while _existing_pips:
        _existing_pips = False

        for credentials_list in obj_attr_list["credentials"].split(";"):
            tenant = credentials_list.split(":")[0]
            self.common_messages("VMC", obj_attr_list, "cleaning up vvs", 0, '')
            obj_attr_list["tenant"] = tenant

            owned_prefix = "cb-" + obj_attr_list["username"] + "-" + obj_attr_list["cloud_name"]
            for _pip in self.get_adapter(credentials_list).ex_list_public_ips(obj_attr_list["resource_group"]):
                if not _pip.name.count(owned_prefix):
                    # Not one of ours — leave it alone.
                    cbdebug("Cleaning up " + self.get_description() + ". Ignoring Public IP: " + _pip.name)
                    continue
                try:
                    cbdebug("Destroying: " + _pip.name + " (" + tenant + ")", True)
                    self.get_adapter(credentials_list).ex_delete_public_ip(_pip)
                except MalformedResponseError:
                    self.dump_httplib_headers(credentials_list)
                    raise CldOpsException("The Cloud's API is misbehaving", 1483)
                except Exception:
                    # Transient failure: log it and retry on the next pass.
                    for line in traceback.format_exc().splitlines():
                        cbwarn(line, True)
                    self.dump_httplib_headers(credentials_list)
                    _existing_pips = True

        if _existing_pips:
            _wait = self.backoff(obj_attr_list, _wait)

    return True
def post_vmdelete_process(self, obj_attr_list, connection):
    '''
    After a VM is deleted, remove the public IP that was allocated for it
    (identified by obj_attr_list["cloud_pip_name"]), best-effort.
    Always returns True.
    '''
    if "cloud_pip_name" in obj_attr_list:
        # FIX: _pip_inst was previously unbound when no IP matched, causing a
        # NameError (and a second NameError inside the warn handler).
        _pip_inst = None
        for _pip in connection.ex_list_public_ips(obj_attr_list["resource_group"]):
            if _pip.name == obj_attr_list["cloud_pip_name"]:
                _pip_inst = _pip
                break

        if _pip_inst is not None:
            try:
                connection.ex_delete_public_ip(_pip_inst)
            except Exception as e:
                # Best-effort: deletion failure must not fail the VM delete.
                cbwarn("While attempting to delete Public IP \"" +
                       _pip_inst.name + "\": " + str(e))
    return True
def select_url(source, depsdict) :
    '''
    Probe the configured repository addresses for "source" ("repo" for the
    package repository, anything else for the pip repository) and record the
    last reachable one in depsdict[source + "_addr"], plus derived dropbox
    and credential URLs. Always returns True.
    '''
    depsdict[source + "_addr_list"] = []
    depsdict[source + "_addr"] = False

    if source == "repo" :
        _element = "package repository"
    else :
        _element = "python pip repository"

    # FIX: was Python-2-only "print '\n'" statement.
    print('\n')
    cbinfo("Selecting " + _element + " address....")

    # Collect "<source>-addrN" keys in index order.
    for _key in sorted(depsdict.keys()) :
        if _key.count(source + "-addr") :
            _index = int(_key.replace(source + "-addr", ''))
            depsdict[source + "_addr_list"].insert(_index, depsdict[_key])

    for _repo_addr in depsdict[source + "_addr_list"] :
        if check_url("http://" + _repo_addr, "ARCH", depsdict["carch"]) :
            depsdict[source + "_addr"] = _repo_addr

    if len(depsdict[source + "_addr_list"]) :
        if depsdict[source + "_addr"] :
            _msg = "A " + _element + " in \"" + depsdict[source + "_addr"] + "\" seems to be up"
            depsdict[source + "_dropbox"] = "http://" + depsdict[source + "_addr"] + "/dropbox"
            depsdict[source + "_credentials_url"] = "http://" + depsdict[source + "_addr"] + "/dropbox/ssh_keys"
            cbinfo(_msg)
        else :
            # FIX: .replace() previously bound only to the trailing literal
            # (which contains no "repository"), so it was dead code;
            # parenthesize so it pluralizes the whole message as intended.
            _msg = ("##### None of the indicated " + _element + " was available. ").replace("repository", "repositories")
            if source == "repo" :
                _msg += "Will ignore any repository URL that has the keyword REPO_ADDR..."
            cbwarn(_msg)
    else :
        _msg = ("##### No " + _element + " specified. ").replace("repository", "repositories")
        if source == "repo" :
            _msg += "Will ignore any repository URL that has the keyword REPO_ADDR..."
        cbwarn(_msg)
    return True
def vmcreate(self, obj_attr_list) :
    '''
    Create the container via the parent KubCmds implementation, then record
    which physical k8s node (droplet) the container maps to, so its
    placement can be tracked in the database. Returns (status, msg).
    '''
    status = 0
    msg = ""
    try :
        # Then just call the original function to actually do the create.
        status, msg = KubCmds.vmcreate(self, obj_attr_list)
        if status == 0 :
            _vmc_attr_list = self.osci.get_object(obj_attr_list["cloud_name"], "VMC", False, obj_attr_list["vmc"], False)
            for droplet in loads(_vmc_attr_list["droplets"]) :
                if droplet["name"] == obj_attr_list["node"] :
                    obj_attr_list["host_name"] = droplet["id"]
                    cbdebug("Container " + obj_attr_list["name"] + " sent to Node " + obj_attr_list["node"] + " = " + str(obj_attr_list["host_name"]), True)
                    obj_attr_list["droplet"] = dumps(droplet)
                    break
    except Exception as e :
        # FIX: was Python-2-only "except Exception, e" syntax.
        for line in traceback.format_exc().splitlines() :
            cbwarn(line, True)
    finally :
        # FIX: made consistent with the sibling implementation of vmcreate,
        # which reports via common_messages and returns (status, msg);
        # the captured version fell off the end returning None.
        status, msg = self.common_messages("VM", obj_attr_list, "created", status, msg)
    return status, msg
def destroy_cluster(self, temp_obj_attr_list, kuuid) :
    '''
    Tear down the managed k8s cluster "kuuid": clean up CB objects on it,
    purge the connection, issue the API delete and poll until the cluster
    is gone (HTTP 404). Returns True on success, False on any failure.
    '''
    try :
        # Work on a copy so the caller's attribute list is not mutated.
        obj_attr_list = copy.deepcopy(temp_obj_attr_list)
        s = self.get_session()
        cbdebug("Destroying Cluster: " + kuuid, True)
        obj_attr_list["kubeconfig"] = self.get_kubeconfig(kuuid)
        KubCmds.vmccleanup(self, obj_attr_list, force_all = True)
        KubCmds.purge_connection(self, obj_attr_list)
        r = s.delete(self.access + "/kubernetes/clusters/" + kuuid)
        if r.status_code not in [202, 204] :
            cbdebug("Failed to delete: " + str(r.status_code), True)
            raise CldOpsException("Destroy cluster failed.", 462)

        # Check for delete complete....
        cbdebug("Waiting for delete to finish...")
        while True :
            r = s.get(self.access + "/kubernetes/clusters/" + kuuid)
            if r.status_code == 404 :
                cbdebug("Done.")
                break
            sleep(5)

        cbdebug("Deleted " + kuuid)
        return True
    except Exception as e :
        # FIX: was Python-2-only "except Exception, e" syntax.
        for line in traceback.format_exc().splitlines() :
            cbwarn(line, True)
        cberr("Failure to destroy k8s cluster: " + str(e), True)
        return False
def destroy_cluster(self, temp_obj_attr_list, kuuid) :
    '''
    Tear down the managed k8s cluster "kuuid": clean up CB objects on it,
    purge the connection, issue the API delete and poll until the cluster
    is gone (HTTP 404). Returns True on success, False on any failure.
    '''
    try :
        # Work on a copy so the caller's attribute list is not mutated.
        obj_attr_list = copy.deepcopy(temp_obj_attr_list)
        s = self.get_session()
        cbdebug("Destroying Cluster: " + kuuid, True)
        obj_attr_list["kubeconfig"] = self.get_kubeconfig(kuuid)
        KubCmds.vmccleanup(self, obj_attr_list, force_all = True)
        KubCmds.purge_connection(self, obj_attr_list)
        r = s.delete(self.access + "/kubernetes/clusters/" + kuuid)
        if r.status_code not in [202, 204] :
            cbdebug("Failed to delete: " + str(r.status_code), True)
            raise CldOpsException("Destroy cluster failed.", 462)

        # Check for delete complete....
        cbdebug("Waiting for delete to finish...")
        while True :
            r = s.get(self.access + "/kubernetes/clusters/" + kuuid)
            if r.status_code == 404 :
                cbdebug("Done.")
                break
            sleep(5)

        cbdebug("Deleted " + kuuid)
        return True
    except Exception as e :
        # FIX: was Python-2-only "except Exception, e" syntax.
        for line in traceback.format_exc().splitlines() :
            cbwarn(line, True)
        cberr("Failure to destroy k8s cluster: " + str(e), True)
        return False
def vmcreate(self, obj_attr_list):
    '''
    Create a DigitalOcean Droplet for this VM: build its cloud name, find
    the requested image (refreshing the image cache once if needed), issue
    the create, then wait for the instance to become ready and boot.
    '''
    # NOTE(review): the tail of this function (generic exception handling /
    # finally / return) appears truncated in the captured source — confirm
    # against upstream before relying on its error paths.
    try:
        _status = 100
        _fmsg = "An error has occurred when creating new Droplet, but no error message was captured"
        obj_attr_list["cloud_vm_uuid"] = "NA"
        _instance = False

        # Cloud VM name: cb-<user>-vm<N>-<role>[-<ai_name>], '_' -> '-'.
        obj_attr_list["cloud_vm_name"] = "cb-" + obj_attr_list["username"]
        obj_attr_list["cloud_vm_name"] += '-' + "vm" + obj_attr_list["name"].split("_")[1]
        obj_attr_list["cloud_vm_name"] += '-' + obj_attr_list["role"]
        if obj_attr_list["ai"] != "none":
            obj_attr_list["cloud_vm_name"] += '-' + obj_attr_list["ai_name"]
        obj_attr_list["cloud_vm_name"] = obj_attr_list["cloud_vm_name"].replace("_", "-")

        obj_attr_list["last_known_state"] = "about to connect to DigitalOcean"
        access_token = obj_attr_list["credentials"]
        cbdebug("Connecting to DigitalOcean...")
        self.connect(access_token)

        _time_mark_prs = int(time())
        obj_attr_list["mgt_002_provisioning_request_sent"] = _time_mark_prs - int(obj_attr_list["mgt_001_provisioning_request_originated"])
        obj_attr_list["last_known_state"] = "about to send create request"

        _msg = "Attempting to create a Droplet "
        _msg += obj_attr_list["imageid1"]
        _msg += " on DigitalOcean, creating a vm named "
        _msg += obj_attr_list["cloud_vm_name"]
        cbdebug(_msg, True)

        _msg = "Looking for an existing image named "
        _msg += obj_attr_list["imageid1"]
        cbdebug(_msg, True)

        # Match by image name or id against the cached image list.
        image = False
        for x in self.images:
            if x.name == obj_attr_list["imageid1"] or x.id == obj_attr_list["imageid1"]:
                image = x
                break

        if not image:
            # Cache may be stale: refresh once and retry the lookup.
            cbdebug("Image is missing. Refreshing image list...", True)
            self.images = catalogs.digitalocean.list_images()
            for x in self.images:
                if x.name == obj_attr_list["imageid1"] or x.id == obj_attr_list["imageid1"]:
                    image = x
                    break

        if not image:
            raise CldOpsException("Image doesn't exist at DigitalOcean. Check your configuration: " + obj_attr_list["imageid1"], _status)

        cbdebug("Launching new Droplet with hostname " + obj_attr_list["cloud_vm_name"], True)
        _reservation = catalogs.digitalocean.create_node(
            image=image,
            name=obj_attr_list["cloud_vm_name"],
            size=[x for x in self.sizes if x.id == obj_attr_list["size"]][0],
            location=[x for x in self.locations if x.id == obj_attr_list["vmc_name"]][0],
            ex_user_data=self.populate_cloudconfig(obj_attr_list),
            ex_create_attr={"ssh_keys": obj_attr_list["key_name"].split(","), "private_networking": True})

        obj_attr_list["last_known_state"] = "sent create request"
        cbdebug("Sent command to create node, waiting for creation...", True)

        if _reservation:
            obj_attr_list["last_known_state"] = "vm created"
            sleep(int(obj_attr_list["update_frequency"]))
            obj_attr_list["cloud_vm_uuid"] = _reservation.uuid
            cbdebug("Success. New instance UUID is " + _reservation.uuid, True)
            self.take_action_if_requested("VM", obj_attr_list, "provision_started")
            _time_mark_prc = self.wait_for_instance_ready(obj_attr_list, _time_mark_prs)
            self.wait_for_instance_boot(obj_attr_list, _time_mark_prc)
            obj_attr_list["host_name"] = "unknown"
            if "instance_obj" in obj_attr_list:
                del obj_attr_list["instance_obj"]
            _status = 0
        else:
            obj_attr_list["last_known_state"] = "vm creation failed"
            _fmsg = "Failed to obtain instance's (cloud-assigned) uuid. The "
            _fmsg += "instance creation failed for some unknown reason."
            cberr(_fmsg)
            _status = 100
    except CldOpsException as obj:
        # FIX: was Python-2-only "except CldOpsException, obj" syntax.
        _status = obj.status
        _fmsg = str(obj.msg)
        cbwarn("Error during reservation creation: " + _fmsg)
def dependency_checker_installer(hostname, depsdict, username, operation, options):
    '''
    Parse the dependency configuration files, detect the local platform
    (distro, architecture, docker), select the dependencies relevant to this
    node's role/tags, then "configure" (and optionally "install") each one.
    Sets _status to the number of still-missing dependencies.
    '''
    # NOTE(review): the captured source ends at the KeyError handler with no
    # return statement; confirm the full error handling against upstream.
    try:
        _status = 100
        _dep_missing = -1
        _fmsg = "An error has occurred, but no error message was captured"

        # Expand workload shorthands into their concrete dependency sets.
        if len(options.wks) > 1:
            if options.wks.count("_ycsb"):
                options.wks += ",ycsb"
            if options.wks.count(",ycsb"):
                options.wks += ",mongo_ycsb,cassandra_ycsb,redis_ycsb"
            if options.wks.count(",acmeair"):
                options.wks += ",mongo_acmeair"

        deps_file_parser(depsdict, username, options, "127.0.0.1")
        docker_file_parser(depsdict, username, options, "127.0.0.1")
        preparation_file_parser(depsdict, username, options, "127.0.0.1")

        if "Filestore_ip" not in depsdict:
            depsdict["Filestore_ip"], depsdict["Filestore_port"], depsdict["Filestore_username"] = options.filestore.split('-')

        depsdict["cdistkind"], depsdict["cdistver"], depsdict["cdistmajorver"], depsdict["cdistnam"], depsdict["carch"] = get_linux_distro()

        depsdict["3rdpartydir"] = options.tpdir
        depsdict["scriptsdir"] = options.wksdir
        depsdict["credentialsdir"] = options.creddir
        depsdict["username"] = username

        if options.addr:
            depsdict["repo-addr1"] = options.addr
            depsdict["pip-addr1"] = options.addr

        # Architecture aliases used by download URLs / package names.
        if depsdict["carch"] == "x86_64":
            depsdict["carch1"] = "x86_64"
            depsdict["carch2"] = "x86-64"
            depsdict["carch3"] = "amd64"
        elif depsdict["carch"] == "ppc64le":
            depsdict["carch1"] = "ppc64le"
            depsdict["carch2"] = "ppc64"
            depsdict["carch3"] = "ppc64"
        else:
            depsdict["carch1"] = "aarch64"
            depsdict["carch2"] = "aarch64"
            depsdict["carch3"] = "aarch64"

        _missing_dep = []
        _dep_list = [0] * 5000

        if str(options.addr) != "bypass":
            select_url("repo", depsdict)
            select_url("pip", depsdict)
            _raise_exception = True
        else:
            depsdict["pip_addr"] = None
            depsdict["repo_addr"] = None
            _raise_exception = False

        # Order dependencies by their "<dep>-order" keys (sparse insert).
        for _key in depsdict.keys():
            if _key.count("-order"):
                _dependency = _key.replace("-order", '')
                _order = int(depsdict[_key]) * 20
                _dep_list.insert(_order, _dependency)

        _dep_list = [x for x in _dep_list if x != 0]

        # FIX: was Python-2-only "print '\n'" statement (here and below).
        print('\n')

        if options.role.count("workload"):
            options.tag = "base," + options.role
            _msg = "##### This node will be used to play a role in the Virtual Applications"
            _msg += " (AIs) \"" + str(options.wks) + "\". Only a subset of the depedencies"
            _msg += " will be " + operation + "ed. This node cannot be used as an Orchestrator Node\n"
            _msg += "\n"
            cbinfo(_msg)
        else:
            options.tag = "base," + options.role + ',' + options.clouds
            _msg = "##### This node will be prepared as an Orchestration Node."
            _msg += " The full set of dependencies will be " + operation + "ed. "
            _msg += "\n"
            cbinfo(_msg)

        # Keep only dependencies whose tag list intersects this node's tags.
        options.tag = options.tag.split(',')
        _selected_dep_list = []
        for _dep in _dep_list:
            for _tag in options.tag:
                if _dep + "-tag" in depsdict:
                    _dep_tag_list = depsdict[_dep + "-tag"].split(',')
                else:
                    _dep_tag_list = ["workload"]
                if _tag in _dep_tag_list:
                    if _dep not in _selected_dep_list:
                        _selected_dep_list.append(_dep)
        _dep_list = _selected_dep_list

        # Detect whether we are running inside a docker container.
        _process_manager = ProcessManagement(hostname)
        _status, _std_out, _y = _process_manager.run_os_command("sudo cat /proc/1/cgroup | grep -c docker", raise_exception=False)
        if _status:
            depsdict["indocker"] = False
        else:
            if str(_std_out.replace("\n", '')) == '0':
                depsdict["indocker"] = False
            else:
                depsdict["indocker"] = True

        _msg = "##### DETECTED OPERATING SYSTEM KIND: " + depsdict["cdistkind"]
        cbinfo(_msg)
        _msg = "##### DETECTED OPERATING SYSTEM VERSION: " + depsdict["cdistver"] + " (" + depsdict["cdistmajorver"] + ')'
        cbinfo(_msg)
        _msg = "##### DETECTED OPERATING SYSTEM NAME: " + depsdict["cdistnam"]
        cbinfo(_msg)
        _msg = "##### DETECTED ARCHITECTURE: " + depsdict["carch"]
        cbinfo(_msg)
        _msg = "##### DETECTED RUNNING INSIDE DOCKER: " + str(depsdict["indocker"])
        cbinfo(_msg)

        print('\n')

        if operation == "configure":
            if "repo" in _dep_list:
                _dep_list.remove("repo")

        if depsdict["cdistkind"] == "AMI":
            _msg = "This node runs the \"" + depsdict["cdistkind"] + "\" Linux "
            _msg += "distribution. Will treat it as \"rhel\", but will disable"
            _msg += " the repository manipulation."
            cbinfo(_msg)
            depsdict["cdistkind"] = "rhel"
            if "repo" in _dep_list:
                _dep_list.remove("repo")

        # FIX: was "mongdob" (typo), which made this branch dead code — and
        # had it ever run, remove("mongodb") would have raised ValueError.
        if depsdict["carch"].count("ppc") and "mongodb" in _dep_list:
            _msg = "##### The processors on this node have a \"Power\" architecture."
            _msg += "Removing MongoDB and Chef (client) from the dependency list"
            cbwarn(_msg)
            _dep_list.remove("mongodb")
            # NOTE(review): raises ValueError if chef-client is absent —
            # preserved from the original; confirm intended.
            _dep_list.remove("chef-client")

        if "java" in _dep_list and "oraclejava" in _dep_list:
            _msg = "Since both \"java\" and \"oraclejava\" are listed as dependencies"
            _msg += ", only \"oraclejava\" will be used"
            cbinfo(_msg)
            _dep_list.remove("java")
            _dep_list.remove("java-home")

        _fmsg = ""
        _dep_missing = 0
        for _dep in _dep_list:
            _status, _msg = execute_command("configure", _dep, depsdict,
                                            hostname="127.0.0.1",
                                            username=username,
                                            venv=options.venv,
                                            raise_exception=_raise_exception)
            if _status:
                _dep_missing += 1
                _missing_dep.append(_dep)
                cberr(_msg)
                if operation == "install":
                    # Try to install the missing dependency, then re-count.
                    _status, _msg = execute_command("install", _dep, depsdict,
                                                    hostname="127.0.0.1",
                                                    username=username,
                                                    venv=options.venv,
                                                    raise_exception=_raise_exception)
                    if not _status:
                        _dep_missing -= 1
                        _missing_dep.remove(_dep)
                        cbinfo(_msg)
                    else:
                        cberr(_msg)
            else:
                cbinfo(_msg)

        _status = _dep_missing
        _fmsg += ','.join(_missing_dep)
    except KeyError as e:
        # FIX: was Python-2-only "except KeyError, e" syntax.
        _status = 22
        _fmsg = "Unable to find entry " + str(e) + " in dependencies dictionary. Check you dependencies configuration file(s)"
def create_cluster(self, obj_attr_list) :
    '''
    Create a managed DigitalOcean k8s cluster for this VMC: destroy any
    stale cluster with the same name, create a new one, wait until it leaves
    "provisioning", fetch its kubeconfig, open the CloudBench port range in
    the cluster's worker firewall, and record the worker droplets.
    Returns (kuuid, kubeconfig) on success, (False, False) on failure.
    '''
    kuuid = False
    kubeconfig = False
    try :
        s = self.get_session()
        kname = "cb-" + obj_attr_list["username"] + '-' + obj_attr_list["cloud_name"].lower() + "-" + obj_attr_list["name"]

        # First make sure there aren't any pre-existing clusters with this name.
        # If so, destroy them.
        r = s.get(self.access + "/kubernetes/clusters")
        if r.status_code == 200 :
            for cluster in r.json()["kubernetes_clusters"] :
                if cluster["name"] == kname :
                    self.destroy_cluster(obj_attr_list, cluster["id"])
        else :
            cbdebug("Cluster cleanup failed: " + str(r.status_code), True)
            raise CldOpsException("Could not cleanup old clusters.", 470)

        cbdebug("Creating cluster for: " + obj_attr_list["name"], True)
        create = {
            "name": kname,
            "region": obj_attr_list["region"],
            "version" : obj_attr_list["k8s_version"],
            "node_pools": [
                {
                    "size" : obj_attr_list["k8s_worker_size"],
                    "count" : int(obj_attr_list["nb_workers"]),
                    "name" : kname + "-pool"
                }
            ]
        }
        cbdebug("Requesting JSON: " + str(create))

        # need tolerate errors, obviously
        r = s.post(self.access + "/kubernetes/clusters", json = create)
        if r.status_code == 201 :
            j = r.json()
            kuuid = j["kubernetes_cluster"]["id"]
            cbdebug("Waiting for ready status uuid: " + kuuid)
        else :
            cbdebug("Create failed: " + str(r.status_code), True)
            raise CldOpsException("No k8s for you. Code: " + str(r.status_code), 459)

        # Poll until the cluster leaves the "provisioning" state.
        while True :
            r = s.get(self.access + "/kubernetes/clusters/" + kuuid)
            if r.status_code == 200 :
                if r.json()["kubernetes_cluster"]["status"]["state"] != "provisioning" :
                    cbdebug("Done.")
                    break
            sleep(5)

        kubeconfig = self.get_kubeconfig(kuuid)

        fwname = "k8s-" + kuuid + "-worker"
        cbdebug("Modifying firewall " + fwname + " for this cluster...")
        if r.status_code == 200 :
            r = s.get(self.access + "/firewalls")
        else :
            cbdebug("Failed to get firewall " + fwname + ": " + str(r.status_code), True)
            raise CldOpsException("No k8s for you.", 461)

        firewalls = r.json()
        fwuuid = False
        for fw in firewalls["firewalls"] :
            if fw['name'] == fwname :
                fwuuid = fw['id']
                cbdebug("Firewall found: " + fwuuid)

        # Unfortunately, the DO api has a bug in it where they return that the k8s
        # cluster has been created, when in fact they are still performing operations
        # on it. In this case, the firewall rules needed by k8s itself are not yet
        # installed, and if we install our rules too fast, they get deleted.
        # So, first wait until we "see" that their rules have been installed first
        # before we proceed. (Not more than a few seconds).
        found = False
        while not found :
            r = s.get(self.access + "/firewalls/" + fwuuid)
            cbdebug("Checking for ready firewall rules...")
            if r.status_code == 200 :
                rules = r.json()
                for rule in rules["firewall"]["inbound_rules"] :
                    if str(rule["ports"]).count("30000-32767") :
                        cbdebug("Found rule: " + str(rule))
                        found = True
                        break
            else :
                cbdebug("Error " + str(r.status_code) + " checking on rule update.")
            sleep(5)

        cbdebug("Rules ready. Adding our rule to firewall " + fwuuid)
        vm_defaults = self.osci.get_object(obj_attr_list["cloud_name"], "GLOBAL", False, "vm_defaults", False)
        ports_base = int(vm_defaults["ports_base"])
        ports_range = int(vm_defaults["ports_range"])
        ports_end = ports_base + ports_range

        # Open the CloudBench port range plus ssh to the worker nodes.
        rule = {
            "inbound_rules": [
                {
                    "protocol": "tcp",
                    "ports": str(ports_base) + "-" + str(ports_end),
                    "sources": {"addresses": ["0.0.0.0/0", "::/0"]}
                },
                {
                    "protocol": "tcp",
                    "ports": "22",
                    "sources": {"addresses": ["0.0.0.0/0", "::/0"]}
                }
            ]
        }

        r = s.post(self.access + "/firewalls/" + fwuuid + "/rules", json = rule)
        if r.status_code == 204 :
            cbdebug("Successfully added firewall rule to " + fwuuid)
        else :
            cbdebug("Firewall rule add failed: " + str(r.status_code), True)
            raise CldOpsException("No k8s for you.", 462)

        r = s.get(self.access + "/droplets?tag_name=k8s:" + kuuid)
        if r.status_code == 200 :
            droplets = r.json()["droplets"]
            obj_attr_list["droplets"] = dumps(droplets)
            for droplet in droplets :
                cbdebug("Droplet ID: " + str(droplet["id"]), True)
                for network in droplet["networks"]["v4"] :
                    cbdebug(" ==> " + network["type"] + " = " + network["ip_address"], True)
        else :
            cberr("Failed to list droplet IDs: " + str(r.status_code), True)
            raise CldOpsException("No k8s for you.", 463)

        # FIX: the captured code never returned on success, so the caller's
        # two-value unpack of create_cluster() would have failed.
        return kuuid, kubeconfig
    except Exception as e :
        # FIX: was Python-2-only "except Exception, e" syntax.
        for line in traceback.format_exc().splitlines() :
            cbwarn(line, True)
        cberr("Failure to create k8s cluster: " + str(e), True)
        if kuuid :
            self.destroy_cluster(obj_attr_list, kuuid)
        return False, False
def deps_file_parser(depsdict, username, options, hostname, process_manager = False) :
    '''
    Load every applicable "dependencies.txt" configuration file and populate
    depsdict with the "key = value" pairs found in them (later files override
    earlier ones). Exits the process with code 4 on a read/parse error.

    FIX: uses "with open(...)" so the file descriptor is closed even when
    readlines() raises, and replaces the Python-2-only "except Exception, e"
    with "except Exception as e" for consistency with the other variant of
    this parser in the file.
    '''
    _file_name_list = []
    _file_name_list.append(options.defdir + "/PUBLIC_dependencies.txt")

    _cleanup_repos = False
    if len(options.wks) > 1 :
        _workloads_list = options.wks.split(',')
        for _workload in _workloads_list :
            _file_name_list.append(options.wksdir + '/' + _workload + "/dependencies.txt")
        _cleanup_repos = True

    _file_name_list.append(options.defdir + "/IBM_dependencies.txt")
    _file_name_list.append(options.defdir + "/SPEC_dependencies.txt")

    if len(options.custom) :
        _file_name_list.append(options.cusdir + '/' + options.custom)

    print('\n')

    for _file in _file_name_list :
        if os.access(_file, os.F_OK) :
            try :
                # "with" guarantees the descriptor is released on error too
                with open(_file, 'r') as _fd :
                    _fc = _fd.readlines()

                _msg = "##### File \"" + _file + "\" opened and loaded...."
                cbinfo(_msg)

                for _line in _fc :
                    _line = _line.strip()

                    # A '#' within the first two characters marks a comment;
                    # very short lines cannot hold a "k=v" pair. The widest
                    # separator (" = ") is tried first so the narrower ones
                    # don't split the line in the wrong place.
                    if _line.count("#", 0, 2) :
                        _sstr = None
                    elif len(_line) < 3 :
                        _sstr = None
                    elif _line.count(" = ") :
                        _sstr = " = "
                    elif _line.count(" =") :
                        _sstr = " ="
                    elif _line.count("= ") :
                        _sstr = "= "
                    elif _line.count("=") :
                        _sstr = "="
                    else :
                        _sstr = None

                    if _sstr :
                        _key, _value = _line.split(_sstr)
                        _key = _key.strip()
                        depsdict[_key] = _value

            except Exception as e :
                _msg = "##### Error reading file \"" + _file + "\":" + str(e)
                cberr(_msg)
                exit(4)
        else :
            _msg = "##### File \"" + _file + "\" IGNORED...."
            cbwarn(_msg)
_status = 0 else : msg = "Volume creation failed. Aborting VM creation..." cbdebug(msg, True) raise CldOpsException(msg, _status) else : obj_attr_list["cloud_vv_uuid"] = "none" except CldOpsException, obj : _status = obj.status _fmsg = str(obj.msg) cbwarn("Error during reservation creation: " + _fmsg) except Exception, e : for line in traceback.format_exc().splitlines() : cbwarn(line, True) _status = 23 _fmsg = str(e) cbwarn("Error reaching digitalocean: " + _fmsg) finally : if "instance_obj" in obj_attr_list : del obj_attr_list["instance_obj"] if _status : _msg = "VM " + obj_attr_list["uuid"] + " could not be created " _msg += "on DigitalOcean \"" + obj_attr_list["cloud_name"] + "\" : " _msg += _fmsg + " (The VM creation will be rolled back)" cberr(_msg) if "cloud_vm_uuid" in obj_attr_list :
def create_cluster(self, obj_attr_list) :
    '''
    Create a DigitalOcean managed Kubernetes cluster for this VMC, wait for
    it to leave the "provisioning" state, open the CB port range on the
    cluster's worker firewall and record the worker droplets.

    Returns (cluster uuid, kubeconfig) on success, (False, False) on failure
    (the half-created cluster is destroyed on failure).

    FIXES: Python-3-compatible "except ... as e"; explicit failure when the
    worker firewall cannot be found (the original fell through with
    fwuuid == False and issued a request against ".../firewalls/False");
    explicit success return (the caller, vmcregister, unpacks two values).
    '''
    kuuid = False
    kubeconfig = False

    try :
        s = self.get_session()
        kname = "cb-" + obj_attr_list["username"] + '-' + obj_attr_list["cloud_name"].lower() + "-" + obj_attr_list["name"]

        # First make sure there aren't any pre-existing clusters with this
        # name. If so, destroy them.
        r = s.get(self.access + "/kubernetes/clusters")
        if r.status_code == 200 :
            for cluster in r.json()["kubernetes_clusters"] :
                if cluster["name"] == kname :
                    self.destroy_cluster(obj_attr_list, cluster["id"])
        else :
            cbdebug("Cluster cleanup failed: " + str(r.status_code), True)
            raise CldOpsException("Could not cleanup old clusters.", 470)

        cbdebug("Creating cluster for: " + obj_attr_list["name"], True)

        create = {
            "name": kname,
            "region": obj_attr_list["region"],
            "version" : obj_attr_list["k8s_version"],
            "node_pools": [
                {
                    "size" : obj_attr_list["k8s_worker_size"],
                    "count" : int(obj_attr_list["nb_workers"]),
                    "name" : kname + "-pool"
                }
            ]
        }

        cbdebug("Requesting JSON: " + str(create))

        # need tolerate errors, obviously
        r = s.post(self.access + "/kubernetes/clusters", json = create)

        if r.status_code == 201 :
            j = r.json()
            kuuid = j["kubernetes_cluster"]["id"]
            cbdebug("Waiting for ready status uuid: " + kuuid)
        else :
            cbdebug("Create failed: " + str(r.status_code), True)
            raise CldOpsException("No k8s for you. Code: " + str(r.status_code), 459)

        # Poll until the control plane is no longer "provisioning".
        while True :
            r = s.get(self.access + "/kubernetes/clusters/" + kuuid)
            if r.status_code == 200 :
                if r.json()["kubernetes_cluster"]["status"]["state"] != "provisioning" :
                    cbdebug("Done.")
                    break
            sleep(5)

        kubeconfig = self.get_kubeconfig(kuuid)

        # DO creates one firewall per cluster named "k8s-<uuid>-worker".
        fwname = "k8s-" + kuuid + "-worker"

        cbdebug("Modifying firewall " + fwname + " for this cluster...")

        if r.status_code == 200 :
            r = s.get(self.access + "/firewalls")
        else :
            cbdebug("Failed to get firewall " + fwname + ": " + str(r.status_code), True)
            raise CldOpsException("No k8s for you.", 461)

        firewalls = r.json()
        fwuuid = False
        for fw in firewalls["firewalls"] :
            if fw['name'] == fwname :
                fwuuid = fw['id']
                cbdebug("Firewall found: " + fwuuid)

        # BUGFIX: fail loudly if the worker firewall was never found instead
        # of concatenating/requesting with fwuuid == False below.
        if not fwuuid :
            cbdebug("Firewall " + fwname + " not found.", True)
            raise CldOpsException("No k8s for you.", 461)

        cbdebug("Adding rule to firewall " + fwuuid)

        # Open the CloudBench port range plus ssh to the workers.
        vm_defaults = self.osci.get_object(obj_attr_list["cloud_name"], "GLOBAL", False, "vm_defaults", False)
        ports_base = int(vm_defaults["ports_base"])
        ports_range = int(vm_defaults["ports_range"])
        ports_end = ports_base + ports_range

        rule = {
            "inbound_rules": [
                {
                    "protocol": "tcp",
                    "ports": str(ports_base) + "-" + str(ports_end),
                    "sources": { "addresses": [ "0.0.0.0/0", "::/0" ] }
                },
                {
                    "protocol": "tcp",
                    "ports": "22",
                    "sources": { "addresses": [ "0.0.0.0/0", "::/0" ] }
                }
            ]
        }

        r = s.post(self.access + "/firewalls/" + fwuuid + "/rules", json = rule)

        if r.status_code == 204 :
            cbdebug("Successfully added firewall rule to " + fwuuid)
        else :
            cbdebug("Firewall rule add failed: " + str(r.status_code), True)
            raise CldOpsException("No k8s for you.", 462)

        # Record the worker droplets (tagged "k8s:<uuid>") on the VMC.
        r = s.get(self.access + "/droplets?tag_name=k8s:" + kuuid)

        if r.status_code == 200 :
            droplets = r.json()["droplets"]
            obj_attr_list["droplets"] = dumps(droplets)
            for droplet in droplets :
                cbdebug("Droplet ID: " + str(droplet["id"]), True)
                for network in droplet["networks"]["v4"] :
                    cbdebug(" ==> " + network["type"] + " = " + network["ip_address"], True)
        else :
            cberr("Failed to list droplet IDs: " + str(r.status_code), True)
            raise CldOpsException("No k8s for you.", 463)

        # BUGFIX: explicit success return; the caller unpacks two values.
        return kuuid, kubeconfig

    except Exception as e :
        for line in traceback.format_exc().splitlines() :
            cbwarn(line, True)
        cberr("Failure to create k8s cluster: " + str(e), True)
        # Roll back the half-created cluster so resources are not leaked.
        if kuuid :
            self.destroy_cluster(obj_attr_list, kuuid)
        return False, False
def report_app_metrics(metriclist, sla_targets_list, ms_conn = "auto", \
                       os_conn = "auto", reset_syslog = True, force_conversion = None):
    '''
    Parse a space-separated list of "name:value:units" application metrics,
    check each one against its "sla_runtime_target_<name>" entry (stored as
    "<value>-<condition>"), update the VM object's SLA/error attributes and
    views on the Object Store, and persist the sample on the Metric Store.

    FIXES: removed the duplicated self-assignment on _metrics_dict["time"],
    replaced the no-op "True" statements in the SLA comparison branches with
    inverted conditions (same violation logic), and modernized the Python-2
    "except ..., obj" clause.
    '''
    if ms_conn == "auto":
        _msci, _my_uuid, _expid, _username = get_ms_conn()
    else:
        _msci = ms_conn[0]
        _my_uuid = ms_conn[1]
        _expid = ms_conn[2]
        _username = ms_conn[3]

    if os_conn == "auto":
        _osci, _my_uuid, _cloud_name = get_os_conn()
    else:
        _osci = os_conn[0]
        _my_uuid = os_conn[1]
        _cloud_name = os_conn[2]

    if reset_syslog:
        setup_syslog(1)

    try:
        _metrics_dict = {}
        _sla_targets_dict = {}
        _reported_metrics_dict = {}

        _msg = "SLA violation verification"
        cbdebug(_msg)

        # Targets arrive as "sla_runtime_target_<metric>:<value>-<cond>".
        for _sla_target in sla_targets_list.split():
            _sla_target = _sla_target.split(':')
            if len(_sla_target) == 2:
                if len(_sla_target[1]):
                    _key = _sla_target[0].replace('sla_runtime_target_', '')
                    _sla_targets_dict[_key] = _sla_target[1]

        _sla_status = "ok"

        for _metric in metriclist.split():
            _metric = _metric.split(':')
            if len(_metric[1]):
                _metric[2], _metric[1] = unit_convert(force_conversion,
                                                      _metric[2], _metric[1])
                _metrics_dict["app_" + _metric[0]] = {}
                _metrics_dict["app_" + _metric[0]]["val"] = _metric[1]
                _metrics_dict["app_" + _metric[0]]["units"] = _metric[2]

                if _metric[0] in _sla_targets_dict:
                    _sla_target, _condition = _sla_targets_dict[
                        _metric[0]].split('-')

                    if len(str(_metric[1])):
                        _metrics_dict["app_sla_runtime"] = {}
                        _metrics_dict["app_sla_runtime"]["units"] = ' '

                        if _condition == "gt":
                            if not float(_metric[1]) >= float(_sla_target):
                                _sla_status = "violated"
                                cbwarn("SLA VIOLATION!!!!!")

                        if _condition == "lt":
                            if not float(_metric[1]) <= float(_sla_target):
                                _sla_status = "violated"
                                cbwarn("SLA VIOLATION!!!!!")

        if "app_sla_runtime" in _metrics_dict:
            _metrics_dict["app_sla_runtime"]["val"] = _sla_status

        _metrics_dict["time"] = int(time())
        _metrics_dict["time_cbtool"] = _osci.get_remote_time()[0]
        _metrics_dict["time_h"] = makeTimestamp()
        _metrics_dict["time_cbtool_h"] = makeTimestamp(
            _metrics_dict["time_cbtool"])
        _metrics_dict["expid"] = _expid
        _metrics_dict["uuid"] = _my_uuid

        obj_attr_list = False

        _msg = "SLA violation status update"
        cbdebug(_msg)

        for _m in ["sla_runtime", "errors"]:
            if "app_" + _m in _metrics_dict:
                obj_attr_list = _osci.get_object(_cloud_name, "VM", False,
                                                 _my_uuid, False)

                # A "sticky_" attribute, once set, overrides new samples.
                if "sticky_" + _m in obj_attr_list:
                    _previous_m = obj_attr_list["sticky_" + _m]
                    _current_m = _previous_m
                else:
                    _previous_m = obj_attr_list[_m]
                    _current_m = _metrics_dict["app_" + _m]["val"]

                # Normalize numeric error counts into yes/no flags.
                if is_number(_previous_m):
                    if float(_previous_m) > 0:
                        _previous_m = "yes"
                    else:
                        _previous_m = "no"

                if is_number(_current_m):
                    if float(_current_m) > 0:
                        _current_m = "yes"
                    else:
                        _current_m = "no"

                _username = obj_attr_list["username"]

                _xmsg = '.'
                if str(obj_attr_list["sticky_app_status"]).lower() == "true":
                    if _current_m == "violated" or _current_m == "yes":
                        _xmsg = " (Due to \"application status stickyness)."
                        _osci.update_object_attribute(_cloud_name, \
                                                      "VM", \
                                                      _my_uuid, \
                                                      False, \
                                                      "sticky_" + _m, \
                                                      _current_m)

                if _previous_m == _current_m:
                    _msg = "Previous " + _m + " (\"" + _previous_m
                    _msg += "\") and New (\"" + _current_m + "\")"
                    _msg += " are the same. No updates needed" + _xmsg
                    cbdebug(_msg)
                else:
                    _msg = "Previous " + _m + " status (\"" + _previous_m
                    _msg += "\") and New (\"" + _current_m + "\")"
                    _msg += " are different. Updating attributes and views on the"
                    _msg += " Metric Store"
                    cbdebug(_msg)

                    _osci.update_object_attribute(_cloud_name, \
                                                  "VM", \
                                                  _my_uuid, \
                                                  False, \
                                                  _m, \
                                                  _current_m)

                    # Move the VM between "BY<METRIC>" views: drop it from
                    # the old bucket, add it to the new one.
                    obj_attr_list[_m] = _previous_m
                    _osci.remove_from_view(_cloud_name, "VM", obj_attr_list,
                                           "BY" + _m.upper())
                    obj_attr_list[_m] = _current_m
                    _osci.add_to_view(_cloud_name, "VM", obj_attr_list,
                                      "BY" + _m.upper(), "arrival")

        _msg = "Determine average,min,max"
        cbdebug(_msg)
        update_avg_acc_max_min(_metrics_dict, _my_uuid, _username)

        _msg = "Report metrics"
        cbdebug(_msg)

        # On the first sample of a load profile, (re)build the collection
        # that records which metric names were reported.
        if "app_load_id" in _metrics_dict and _metrics_dict["app_load_id"][
                "val"] == "1":
            _new_reported_metrics_dict = {}
            for _key in _metrics_dict.keys():
                if not _key.count("time") and not _key.count(
                        "uuid") and not _key.count("time_h"):
                    _new_reported_metrics_dict[_key] = "1"
            _new_reported_metrics_dict["expid"] = _expid
            _new_reported_metrics_dict["_id"] = b64encode(
                sha1(_expid).digest())

            _reported_metrics_dict = \
            _msci.find_document("reported_runtime_app_VM_metric_names_" + \
                                _username, {"_id" : _new_reported_metrics_dict["_id"]})

            if not _reported_metrics_dict:
                _reported_metrics_dict = {}

            _reported_metrics_dict.update(_new_reported_metrics_dict)

        _msci.add_document("runtime_app_VM_" + _username, _metrics_dict)

        _msg = "Application Metrics reported successfully. Data package sent was: \""
        _msg += str(_metrics_dict) + "\""
        cbdebug(_msg)

        _metrics_dict["_id"] = _metrics_dict["uuid"]

        _msci.update_document("latest_runtime_app_VM_" + _username,
                              _metrics_dict)

        _msg = "Latest app performance data updated successfully"
        cbdebug(_msg)

        if len(_reported_metrics_dict):
            _msci.update_document(
                "reported_runtime_app_VM_metric_names_" + _username,
                _reported_metrics_dict)

            _msg = "Reported runtime application metric names collection "
            _msg += "updated successfully. Data package sent was: \""
            _msg += str(_reported_metrics_dict) + "\""
            cbdebug(_msg)

        if str(obj_attr_list["notification"]).lower() != "false":
            if obj_attr_list["notification_channel"].lower() == "auto":
                _channel = "APPLICATION"
            else:
                _channel = obj_attr_list["notification_channel"]

            _message = "VM object " + _my_uuid + " (" + obj_attr_list["name"]
            _message += ") submitted a new set of application metrics"

            _osci.publish_message(obj_attr_list["cloud_name"], "VM", _channel, _message, \
                                  1, \
                                  float(obj_attr_list["timeout"]))

        _status = 0

    except _msci.MetricStoreMgdConnException as obj:
        _status = obj.status
        _fmsg = str(obj.msg)
except Exception, e : _status = 23 _xfmsg = str(e) finally : if _status : _fmsg = "(While getting instance(s) through API call \"" + _call + "\") " + _xfmsg if identifier not in self.api_error_counter : self.api_error_counter[identifier] = 0 self.api_error_counter[identifier] += 1 if self.api_error_counter[identifier] > self.max_api_errors : raise CldOpsException(_fmsg, _status) else : cbwarn(_fmsg) return [] else : return _instances @trace def get_images(self, obj_attr_list) : ''' TBD ''' try : _status = 100 _hyper = '' _fmsg = "An error has occurred, but no error message was captured"
except Exception, e: _status = 23 _xfmsg = str(e) finally: if _status: _fmsg = "(While getting instance(s) through API call \"" + _call + "\") " + _xfmsg if identifier not in self.api_error_counter: self.api_error_counter[identifier] = 0 self.api_error_counter[identifier] += 1 if self.api_error_counter[identifier] > self.max_api_errors: raise CldOpsException(_fmsg, _status) else: cbwarn(_fmsg) return [] else: return _instances @trace def get_images(self, obj_attr_list): ''' TBD ''' try: _status = 100 _hyper = '' _fmsg = "An error has occurred, but no error message was captured"
else : obj_attr_list["last_known_state"] = "vm creation failed" _fmsg = "Failed to obtain instance's (cloud-assigned) uuid. The " _fmsg += "instance creation failed for some unknown reason." cberr(_fmsg) _status = 100 except CldOpsException, obj : _status = obj.status _fmsg = str(obj.msg) cbwarn("Error during reservation creation: " + _fmsg) except Exception, e : _status = 23 _fmsg = str(e) cbwarn("Error reaching digitalocean: " + _fmsg) finally : if "instance_obj" in obj_attr_list : del obj_attr_list["instance_obj"] if _status : _msg = "VM " + obj_attr_list["uuid"] + " could not be created " _msg += "on DigitalOcean \"" + obj_attr_list["cloud_name"] + "\" : " _msg += _fmsg + " (The VM creation will be rolled back)" cberr(_msg) if "cloud_vm_uuid" in obj_attr_list : obj_attr_list["mgt_deprovisioning_request_originated"] = int(time()) self.vmdestroy(obj_attr_list) else :
def report_app_metrics(metriclist, sla_targets_list, ms_conn = "auto", \
                       os_conn = "auto", reset_syslog = True, force_conversion = None) :
    '''
    Parse a space-separated list of "name:value:units" application metrics,
    check each against its "sla_runtime_target_<name>" entry (stored as
    "<value>-<condition>"), update the VM object's SLA/error attributes and
    views on the Object Store, and persist the sample on the Metric Store.

    FIXES: removed the duplicated self-assignment on _metrics_dict["time"],
    replaced the no-op "True" statements in the SLA comparison branches with
    inverted conditions (same violation logic), and modernized the Python-2
    "except ..., obj" clause.
    '''
    if ms_conn == "auto" :
        _msci, _my_uuid, _expid, _username = get_ms_conn()
    else :
        _msci = ms_conn[0]
        _my_uuid = ms_conn[1]
        _expid = ms_conn[2]
        _username = ms_conn[3]

    if os_conn == "auto" :
        _osci, _my_uuid, _cloud_name = get_os_conn()
    else :
        _osci = os_conn[0]
        _my_uuid = os_conn[1]
        _cloud_name = os_conn[2]

    if reset_syslog :
        setup_syslog(1)

    try :
        _metrics_dict = {}
        _sla_targets_dict = {}
        _reported_metrics_dict = {}

        _msg = "SLA violation verification"
        cbdebug(_msg)

        # Targets arrive as "sla_runtime_target_<metric>:<value>-<cond>".
        for _sla_target in sla_targets_list.split() :
            _sla_target = _sla_target.split(':')
            if len(_sla_target) == 2 :
                if len(_sla_target[1]) :
                    _key = _sla_target[0].replace('sla_runtime_target_','')
                    _sla_targets_dict[_key] = _sla_target[1]

        _sla_status = "ok"

        for _metric in metriclist.split() :
            _metric = _metric.split(':')
            if len(_metric[1]) :
                _metric[2], _metric[1] = unit_convert(force_conversion, _metric[2], _metric[1])
                _metrics_dict["app_" + _metric[0]] = {}
                _metrics_dict["app_" + _metric[0]]["val"] = _metric[1]
                _metrics_dict["app_" + _metric[0]]["units"] = _metric[2]

                if _metric[0] in _sla_targets_dict :
                    _sla_target, _condition = _sla_targets_dict[_metric[0]].split('-')

                    if len(str(_metric[1])) :
                        _metrics_dict["app_sla_runtime"] = {}
                        _metrics_dict["app_sla_runtime"]["units"] = ' '

                        if _condition == "gt" :
                            if not float(_metric[1]) >= float(_sla_target) :
                                _sla_status = "violated"
                                cbwarn("SLA VIOLATION!!!!!")

                        if _condition == "lt" :
                            if not float(_metric[1]) <= float(_sla_target) :
                                _sla_status = "violated"
                                cbwarn("SLA VIOLATION!!!!!")

        if "app_sla_runtime" in _metrics_dict :
            _metrics_dict["app_sla_runtime"]["val"] = _sla_status

        _metrics_dict["time"] = int(time())
        _metrics_dict["time_cbtool"] = _osci.get_remote_time()[0]
        _metrics_dict["time_h"] = makeTimestamp()
        _metrics_dict["time_cbtool_h"] = makeTimestamp(_metrics_dict["time_cbtool"])
        _metrics_dict["expid"] = _expid
        _metrics_dict["uuid"] = _my_uuid

        obj_attr_list = False

        _msg = "SLA violation status update"
        cbdebug(_msg)

        for _m in [ "sla_runtime", "errors" ] :
            if "app_" + _m in _metrics_dict :
                obj_attr_list = _osci.get_object(_cloud_name, "VM", False, _my_uuid, False)

                # A "sticky_" attribute, once set, overrides new samples.
                if "sticky_" + _m in obj_attr_list :
                    _previous_m = obj_attr_list["sticky_" + _m]
                    _current_m = _previous_m
                else :
                    _previous_m = obj_attr_list[_m]
                    _current_m = _metrics_dict["app_" + _m]["val"]

                # Normalize a positive numeric error count into a "yes" flag.
                if is_number(_current_m) and float(_current_m) > 0 :
                    _current_m = "yes"

                _username = obj_attr_list["username"]

                _xmsg = '.'
                if str(obj_attr_list["sticky_app_status"]).lower() == "true" :
                    if _current_m == "violated" or _current_m == "yes" :
                        _xmsg = " (Due to \"application status stickyness)."
                        _osci.update_object_attribute(_cloud_name, \
                                                      "VM", \
                                                      _my_uuid, \
                                                      False, \
                                                      "sticky_" + _m, \
                                                      _current_m)

                if _previous_m == _current_m :
                    _msg = "Previous " + _m + " (\"" + _previous_m
                    _msg += "\") and New (\"" + _current_m + "\")"
                    _msg += " are the same. No updates needed" + _xmsg
                    cbdebug(_msg)
                else :
                    _msg = "Previous " + _m + " status (\"" + _previous_m
                    _msg += "\") and New (\"" + _current_m + "\")"
                    _msg += " are different. Updating attributes and views on the"
                    _msg += " Metric Store"
                    cbdebug(_msg)

                    _osci.update_object_attribute(_cloud_name, \
                                                  "VM", \
                                                  _my_uuid, \
                                                  False, \
                                                  _m, \
                                                  _current_m)

                    # Move the VM between "BY<METRIC>" views: drop it from
                    # the old bucket, add it to the new one.
                    obj_attr_list[_m] = _previous_m
                    _osci.remove_from_view(_cloud_name, "VM", obj_attr_list, "BY" + _m.upper())
                    obj_attr_list[_m] = _current_m
                    _osci.add_to_view(_cloud_name, "VM", obj_attr_list, "BY" + _m.upper(), "arrival")

        _msg = "Determine average,min,max"
        cbdebug(_msg)
        update_avg_acc_max_min(_metrics_dict, _my_uuid, _username)

        _msg = "Report metrics"
        cbdebug(_msg)

        # On the first sample of a load profile, (re)build the collection
        # that records which metric names were reported.
        if "app_load_id" in _metrics_dict and _metrics_dict["app_load_id"]["val"] == "1" :
            _new_reported_metrics_dict = {}
            for _key in _metrics_dict.keys() :
                if not _key.count("time") and not _key.count("uuid") and not _key.count("time_h") :
                    _new_reported_metrics_dict[_key] = "1"
            _new_reported_metrics_dict["expid"] = _expid
            _new_reported_metrics_dict["_id"] = b64encode(sha1(_expid).digest())

            _reported_metrics_dict = \
            _msci.find_document("reported_runtime_app_VM_metric_names_" + \
                                _username, {"_id" : _new_reported_metrics_dict["_id"]})

            if not _reported_metrics_dict :
                _reported_metrics_dict = {}

            _reported_metrics_dict.update(_new_reported_metrics_dict)

        _msci.add_document("runtime_app_VM_" + _username, _metrics_dict)

        _msg = "Application Metrics reported successfully. Data package sent was: \""
        _msg += str(_metrics_dict) + "\""
        cbdebug(_msg)

        _metrics_dict["_id"] = _metrics_dict["uuid"]

        _msci.update_document("latest_runtime_app_VM_" + _username, _metrics_dict)

        _msg = "Latest app performance data updated successfully"
        cbdebug(_msg)

        if len(_reported_metrics_dict) :
            _msci.update_document("reported_runtime_app_VM_metric_names_" + _username, _reported_metrics_dict)

            _msg = "Reported runtime application metric names collection "
            _msg += "updated successfully. Data package sent was: \""
            _msg += str(_reported_metrics_dict) + "\""
            cbdebug(_msg)

        if str(obj_attr_list["notification"]).lower() != "false" :
            if obj_attr_list["notification_channel"].lower() == "auto" :
                _channel = "APPLICATION"
            else :
                _channel = obj_attr_list["notification_channel"]

            _message = "VM object " + _my_uuid + " (" + obj_attr_list["name"]
            _message += ") submitted a new set of application metrics"

            _osci.publish_message(obj_attr_list["cloud_name"], "VM", _channel, _message, \
                                  1, \
                                  float(obj_attr_list["timeout"]))

        _status = 0

    except _msci.MetricStoreMgdConnException as obj :
        _status = obj.status
        _fmsg = str(obj.msg)
def build_repository_file_contents(depsdict, repo_name) :
    '''
    Build the contents of a package-repository definition file (apt "deb"
    lines on Ubuntu, a yum/dnf ".repo" section otherwise) for repo_name,
    after selecting a reachable URL (local mirror first, then the original).

    Returns the file contents string, or False when no candidate URL is
    reachable (the repository is then skipped by the caller).

    FIXES: _actual_url now always gets a value even when the "local-url"
    key is absent (previously a NameError); the empty no-op "True" branch
    on gpgcheck == "0" was replaced by an inverted condition.
    '''
    _msg = "Configuring repository \"" + repo_name + "\"..."
    cbinfo(_msg)

    _file_contents = ""

    _repo = depsdict["repo_contents"][repo_name]

    # Prefer the local mirror URL unless it needs a REPO_ADDR substitution
    # that we cannot perform (no repo_addr configured).
    if "local-url" in _repo and len(_repo["local-url"]) :
        if not depsdict["repo_addr"] and _repo["local-url"].count("REPO_ADDR") :
            _actual_url = _repo["original-url"]
        else :
            _actual_url = _repo["local-url"]
    else :
        # BUGFIX: fall back to the original URL when no local-url exists.
        _actual_url = _repo["original-url"]

    if depsdict["repo_addr"] :
        _actual_url = _actual_url.replace("REPO_ADDR", depsdict["repo_addr"])

    _actual_url = _actual_url.replace("REPO_RELEASE", depsdict["cdistver"])
    _actual_url = _actual_url.replace("REPO_MAJOR_RELEASE", depsdict["cdistmajorver"])
    _actual_url = _actual_url.replace("REPO_ARCH", depsdict["carch"])

    if not check_url(_actual_url, "ARCH", depsdict["carch"]) :
        _tested_urls = _actual_url
        # Candidate failed; retry with the unmodified original URL.
        _actual_url = _repo["original-url"]

        if not check_url(_actual_url, "ARCH", depsdict["carch"]) :
            if not _tested_urls.count(_actual_url) :
                _tested_urls += ',' + _actual_url
            _actual_url = False

    if _actual_url :
        _msg = "Valid URL found: " + _actual_url + "."
        cbinfo(_msg)
    else :
        _msg = "No URLs available for repository \"" + repo_name
        _msg += "\" (" + _tested_urls + ")." + " Will ignore this repository"
        _msg += ", but this might cause installation errors due to a lacking on certain dependencies"
        cbwarn(_msg)
        return False

    if depsdict["cdistkind"] == "ubuntu" :
        # One "deb" line per (dist, component) pair.
        for _dist in _repo["dists"].split(',') :
            for _component in _repo["components"].split(',') :
                _file_contents += "deb " + _actual_url + ' ' + _dist + ' ' + _component + "\n"
    else :
        _file_contents += "[" + repo_name + "]\n"
        _file_contents += "name = " + repo_name + "\n"
        _file_contents += "baseurl = " + _actual_url + "\n"

        for _attr in [ "enabled", "skip_if_unavailable", "priority", "gpgcheck" ] :
            _file_contents += _attr + " = " + _repo[_attr] + "\n"

        # gpgkey is only needed when gpg checking is enabled
        # (was an empty "True" branch in the original).
        if _repo["gpgcheck"] != "0" :
            _file_contents += "gpgkey = " + _repo["gpgkey"] + "\n"

    return _file_contents
else: obj_attr_list["last_known_state"] = "vm creation failed" _fmsg = "Failed to obtain instance's (cloud-assigned) uuid. The " _fmsg += "instance creation failed for some unknown reason." cberr(_fmsg) _status = 100 except CldOpsException, obj: _status = obj.status _fmsg = str(obj.msg) cbwarn("Error during reservation creation: " + _fmsg) except Exception, e: _status = 23 _fmsg = str(e) cbwarn("Error reaching digitalocean: " + _fmsg) finally: if "instance_obj" in obj_attr_list: del obj_attr_list["instance_obj"] if _status: _msg = "VM " + obj_attr_list["uuid"] + " could not be created " _msg += "on DigitalOcean \"" + obj_attr_list[ "cloud_name"] + "\" : " _msg += _fmsg + " (The VM creation will be rolled back)" cberr(_msg) if "cloud_vm_uuid" in obj_attr_list: obj_attr_list[ "mgt_deprovisioning_request_originated"] = int(time())
def deps_file_parser(depsdict, username, options, hostname, process_manager=False):
    '''
    Load every applicable "dependencies.txt" configuration file and populate
    depsdict with the "key = value" pairs found in them (later files override
    earlier ones). Exits the process with code 4 on a read/parse error.

    FIX: uses "with open(...)" so the file descriptor is closed even when
    readlines() raises, and replaces the Python-2-only "except Exception, e"
    with "except Exception as e".
    '''
    _file_name_list = []
    _file_name_list.append(options.defdir + "/PUBLIC_dependencies.txt")

    _cleanup_repos = False
    if len(options.wks) > 1:
        _workloads_list = options.wks.split(',')
        for _workload in _workloads_list:
            _file_name_list.append(options.wksdir + '/' + _workload +
                                   "/dependencies.txt")
        _cleanup_repos = True

    _file_name_list.append(options.defdir + "/IBM_dependencies.txt")
    _file_name_list.append(options.defdir + "/SPEC_dependencies.txt")

    if len(options.custom):
        _file_name_list.append(options.cusdir + '/' + options.custom)

    print('\n')

    for _file in _file_name_list:
        if os.access(_file, os.F_OK):
            try:
                # "with" guarantees the descriptor is released on error too
                with open(_file, 'r') as _fd:
                    _fc = _fd.readlines()

                _msg = "##### File \"" + _file + "\" opened and loaded...."
                cbinfo(_msg)

                for _line in _fc:
                    _line = _line.strip()

                    # A '#' within the first two characters marks a comment;
                    # very short lines cannot hold a "k=v" pair. The widest
                    # separator (" = ") is tried first so the narrower ones
                    # don't split the line in the wrong place.
                    if _line.count("#", 0, 2):
                        _sstr = None
                    elif len(_line) < 3:
                        _sstr = None
                    elif _line.count(" = "):
                        _sstr = " = "
                    elif _line.count(" ="):
                        _sstr = " ="
                    elif _line.count("= "):
                        _sstr = "= "
                    elif _line.count("="):
                        _sstr = "="
                    else:
                        _sstr = None

                    if _sstr:
                        _key, _value = _line.split(_sstr)
                        _key = _key.strip()
                        depsdict[_key] = _value

            except Exception as e:
                _msg = "##### Error reading file \"" + _file + "\":" + str(e)
                cberr(_msg)
                exit(4)
        else:
            _msg = "##### File \"" + _file + "\" IGNORED...."
            cbwarn(_msg)
def deps_file_parser(depsdict, username, options, hostname, process_manager = False) :
    '''
    Load every applicable "dependencies.txt" configuration file and populate
    depsdict with the "key = value" pairs found in them (later files override
    earlier ones). Exits with code 4 on a read/parse error and with code 9
    when no file yielded any configuration. When workload-specific files
    were parsed, also clears the repo-update marker on the target host.

    FIX: uses "with open(...)" so the file descriptor is closed even when
    readlines() raises (previously the descriptor leaked on error).
    '''
    _file_name_list = []
    _file_name_list.append(options.defdir + "/PUBLIC_dependencies.txt")

    _cleanup_repos = False
    if len(options.wks) > 1 :
        _workloads_list = options.wks.split(',')
        for _workload in _workloads_list :
            _file_name_list.append(options.wksdir + '/' + _workload + "/dependencies.txt")
        _cleanup_repos = True

    _file_name_list.append(options.defdir + "/IBM_dependencies.txt")
    _file_name_list.append(options.defdir + "/SPEC_dependencies.txt")

    if len(options.custom) :
        _file_name_list.append(options.cusdir + '/' + options.custom)

    print('\n')

    for _file in _file_name_list :
        if os.access(_file, os.F_OK) :
            try:
                # "with" guarantees the descriptor is released on error too
                with open(_file, 'r') as _fd :
                    _fc = _fd.readlines()

                _msg = "##### File \"" + _file + "\" opened and loaded...."
                cbinfo(_msg)

                for _line in _fc :
                    _line = _line.strip()

                    # A '#' within the first two characters marks a comment;
                    # very short lines cannot hold a "k=v" pair. The widest
                    # separator (" = ") is tried first so the narrower ones
                    # don't split the line in the wrong place.
                    if _line.count("#",0,2) :
                        _sstr = None
                    elif len(_line) < 3 :
                        _sstr = None
                    elif _line.count(" = ") :
                        _sstr = " = "
                    elif _line.count(" =") :
                        _sstr = " ="
                    elif _line.count("= ") :
                        _sstr = "= "
                    elif _line.count("=") :
                        _sstr = "="
                    else :
                        _sstr = None

                    if _sstr :
                        _key, _value = _line.split(_sstr)
                        _key = _key.strip()
                        depsdict[_key] = _value

            except Exception as e :
                _msg = "##### Error reading file \"" + _file + "\":" + str(e)
                cberr(_msg)
                exit(4)
        else :
            _msg = "##### File \"" + _file + "\" IGNORED...."
            cbwarn(_msg)

    if not len(depsdict) :
        _msg = "##### None of the files on the list \"" + str(_file_name_list)
        _msg += "\" contained configuration statements"
        cberr(_msg)
        exit(9)

    if _cleanup_repos :
        if not process_manager :
            process_manager = ProcessManagement(hostname)
        # Force the repository definitions to be rebuilt on the next run.
        process_manager.run_os_command("sudo rm -rf /tmp/repoupdated", False)

    return True
def get_cmdline(depkey, depsdict, operation, process_manager = False, exception_if_no_url = False) :
    '''
    Resolve the command line(s) used to run <operation> (e.g. "install" or
    "configure") for dependency <depkey>, looking up URL and command keys in
    depsdict from most specific (distro name + arch) to most generic
    ("common"). Returns (command line keys, command lines).

    FIXES: the trimmed key string is now consistently kept in
    _actual_cmdline_keys — the original assigned the trimmed value to a
    misspelled variable (_actual_commandline_keys) and returned that name,
    raising NameError whenever the string did not end with a comma; the
    leading/trailing-separator trims are also guarded against empty strings
    (previously a potential IndexError).
    '''
    if operation != "configure" :
        if depsdict[depkey + '-' + operation] == "man" :
            # Arch-qualified URL keys, most specific first.
            _urls_key = depsdict["cdistnam"] + '-' + depkey + '-' + depsdict["carch"] + "-urls-" + depsdict[depkey + '-' + operation]
            if _urls_key not in depsdict :
                _urls_key = depsdict["cdistkind"] + '-' + depkey + '-' + depsdict["carch"] + "-urls-" + depsdict[depkey + '-' + operation]
                if _urls_key not in depsdict :
                    _urls_key = "common-" + depkey + '-' + depsdict["carch"] + "-urls-" + depsdict[depkey + '-' + operation]
                    if _urls_key not in depsdict :
                        _urls_key = False
        else :
            # Non-arch-qualified URL keys, same specificity order.
            _urls_key = depsdict["cdistnam"] + '-' + depkey + "-urls-" + depsdict[depkey + '-' + operation]
            if _urls_key not in depsdict :
                _urls_key = depsdict["cdistkind"] + '-' + depkey + "-urls-" + depsdict[depkey + '-' + operation]
                if _urls_key not in depsdict :
                    _urls_key = "common-" + depkey + "-urls-" + depsdict[depkey + '-' + operation]
                    if _urls_key not in depsdict :
                        _urls_key = False
    else :
        _urls_key = False

    if _urls_key and len(depsdict[_urls_key]) > 7 :
        _tested_urls = ''
        _actual_url = False

        # Try each candidate URL (after placeholder substitution) until one
        # answers; remember the failures for the error message.
        for _url in depsdict[_urls_key].split(',') :
            if depsdict["repo_addr"] :
                _url = _url.replace("REPO_ADDR", depsdict["repo_addr"])
            _url = _url.replace("REPO_RELEASE", depsdict["cdistver"])
            _url = _url.replace("REPO_MAJOR_RELEASE", depsdict["cdistmajorver"])
            _url = _url.replace("REPO_ARCH", depsdict["carch"])
            _url = _url.replace("ARCH", depsdict["carch"].strip())
            _url = _url.replace("DISTRO", depsdict["cdistkind"].strip())
            _url = _url.replace("USERNAME", depsdict["username"].strip())

            if check_url(_url, "ARCH", depsdict["carch"]) :
                _actual_url = _url
                break
            else :
                if not _tested_urls.count(_url) :
                    _tested_urls += _url + ','

        if not _actual_url :
            _msg = "##### None of the urls indicated to install \"" + depkey + "\" ("
            _msg += _tested_urls + ") seem to be functional."
            if exception_if_no_url :
                raise Exception(_msg)
            else :
                cbwarn(_msg)
    else :
        _actual_url = False

    _actual_cmdline = ""
    _actual_cmdline_keys = ''

    if operation == "install" :
        # Installation is a three-phase sequence; each phase resolves its
        # own key, again from most specific to most generic.
        for _sub_step in [ "preinstall", "install", "postinstall" ] :
            _commandline_keys = []
            _commandline_keys.append(depsdict["cdistnam"] + '-' + depkey + '-' + _sub_step + '-' + depsdict[depkey + '-' + operation])
            _commandline_keys.append(depsdict["cdistkind"] + '-' + depkey + '-' + _sub_step + '-' + depsdict[depkey + '-' + operation])
            _commandline_keys.append("common-" + depkey + '-' + _sub_step + '-' + depsdict[depkey + '-' + operation])

            _x, _y = get_actual_cmdline(_commandline_keys, depsdict, _actual_url)
            _actual_cmdline_keys += _x + ','
            _actual_cmdline += _y + ';'
    else :
        _commandline_keys = [ depkey + '-' + operation ]
        _x, _y = get_actual_cmdline(_commandline_keys, depsdict, _actual_url)
        _actual_cmdline_keys += _x + ','
        _actual_cmdline += _y + ';'

    # Strip stray leading/trailing separators (BUGFIX: always into
    # _actual_cmdline_keys, and only when the string is non-empty).
    if len(_actual_cmdline_keys) and _actual_cmdline_keys[0] == ',' :
        _actual_cmdline_keys = _actual_cmdline_keys[1:]
    if len(_actual_cmdline_keys) and _actual_cmdline_keys[-1] == ',' :
        _actual_cmdline_keys = _actual_cmdline_keys[0:-1]

    if len(_actual_cmdline) and _actual_cmdline[0] == ';' :
        _actual_cmdline = _actual_cmdline[1:]
    if len(_actual_cmdline) and _actual_cmdline[-1] == ';' :
        _actual_cmdline = _actual_cmdline[0:-1]

    _actual_cmdline = expand_command(_actual_cmdline, depsdict, process_manager)

    # Collapse duplicate separators and restore escaped '=' characters.
    _actual_cmdline_keys = _actual_cmdline_keys.replace(",,", ',')
    _actual_cmdline_keys = _actual_cmdline_keys.replace(",,", ',')
    _actual_cmdline_keys = _actual_cmdline_keys.replace("_equal_", '=')

    _actual_cmdline = _actual_cmdline.replace(";;", ';')
    _actual_cmdline = _actual_cmdline.replace(";;", ';')
    _actual_cmdline = _actual_cmdline.replace(";;", ';')
    _actual_cmdline = _actual_cmdline.replace("_equal_", '=')

    return _actual_cmdline_keys, _actual_cmdline
def vmcreate(self, obj_attr_list) :
    '''
    Provision a new DigitalOcean Droplet for the VM described by
    obj_attr_list, optionally creating and attaching a block-storage
    volume ("cloud_vv") to it.

    Flow: build the cloud-side VM name, obtain an API credential pair
    (reusing the owning AI's pair, or rotating to the next token),
    resolve the configured SSH keys and image against the cached
    catalogs (refreshing each cache once on a miss), issue
    create_node(), wait for the instance to be ready and booted, then
    optionally create and attach the volume. Provisioning timestamps
    are recorded in obj_attr_list ("mgt_*" / "do_015_*" keys).

    NOTE(review): the visible chunk ends inside the CldOpsException
    handler; the rest of the method's error handling is outside this view.
    '''
    try :
        # Pessimistic defaults: assume failure until proven otherwise.
        _status = 100
        _fmsg = "An error has occurred when creating new Droplet, but no error message was captured"
        obj_attr_list["cloud_vm_uuid"] = "NA"
        _instance = False
        volume = False

        # Cloud-visible name: cb-<user>-vm<counter>-<role>[-<ai name>],
        # with underscores normalized to dashes.
        obj_attr_list["cloud_vm_name"] = "cb-" + obj_attr_list["username"]
        obj_attr_list["cloud_vm_name"] += '-' + "vm" + obj_attr_list["name"].split("_")[1]
        obj_attr_list["cloud_vm_name"] += '-' + obj_attr_list["role"]

        if obj_attr_list["ai"] != "none" :
            obj_attr_list["cloud_vm_name"] += '-' + obj_attr_list["ai_name"]

        obj_attr_list["cloud_vm_name"] = obj_attr_list["cloud_vm_name"].replace("_", "-")

        obj_attr_list["last_known_state"] = "about to connect to DigitalOcean"

        self.take_action_if_requested("VM", obj_attr_list, "provision_originated")

        # VMs that belong to an AI must share the AI's credential pair;
        # standalone VMs rotate to the next available API token.
        if obj_attr_list["ai"] != "none" :
            credential_pair = self.osci.pending_object_get(obj_attr_list["cloud_name"], "AI", obj_attr_list["ai"], "credential_pair")
        else :
            credential_pair = self.rotate_token(obj_attr_list["cloud_name"])

        # credential_pair is "<tenant>:<token>".
        obj_attr_list["tenant"] = credential_pair.split(":")[0]
        obj_attr_list["credential"] = credential_pair.split(":")[1]
        obj_attr_list["credential_pair"] = credential_pair
        cbdebug("Connecting to DigitalOcean...")
        self.connect(credential_pair)

        obj_attr_list["last_known_state"] = "about to send create request"

        _msg = "Attempting to create a Droplet "
        _msg += obj_attr_list["imageid1"]
        _msg += " on DigitalOcean, creating a vm named "
        _msg += obj_attr_list["cloud_vm_name"] + " (" + obj_attr_list["tenant"] + ")"
        cbdebug(_msg, True)

        _msg = "Looking for an existing image named "
        _msg += obj_attr_list["imageid1"]
        cbdebug(_msg, True)

        keys = []
        image = False
        tmp_keys = obj_attr_list["key_name"].split(",")

        # Resolve each configured SSH key (by name or id) against the cached
        # key list; on a miss, refresh the cache once and retry.
        for dontcare in range(0, 2) :
            for tmp_key in tmp_keys :
                for key in self.keys[credential_pair] :
                    if tmp_key in [key.name, key.extra["id"]] and key.extra["id"] not in keys :
                        keys.append(key.extra["id"])
            if len(keys) == len(tmp_keys) :
                break
            cbdebug("Only found " + str(len(keys)) + " keys. Refreshing key list...", True)
            self.keys[credential_pair] = catalogs.digitalocean[credential_pair].list_key_pairs()

        if len(keys) != len(tmp_keys) :
            raise CldOpsException("Not all SSH keys exist. Check your configuration: " + obj_attr_list["key_name"], _status)

        # Same refresh-once-and-retry pattern for the image catalog.
        for dontcare in range(0, 2) :
            for x in self.images :
                if x.name == obj_attr_list["imageid1"] or x.id == obj_attr_list["imageid1"] :
                    image = x
                    break
            if image :
                break
            cbdebug("Image is missing. Refreshing image list...", True)
            self.images = catalogs.digitalocean[credential_pair].list_images()

        if not image :
            raise CldOpsException("Image doesn't exist at DigitalOcean. Check your configuration: " + obj_attr_list["imageid1"], _status)

        cbdebug("Launching new Droplet with hostname " + obj_attr_list["cloud_vm_name"], True)

        # libcloud create_node(): size/location are matched by id against the
        # cached catalogs; cloud-init user data comes from
        # populate_cloudconfig().
        _reservation = catalogs.digitalocean[credential_pair].create_node(
                image = image,
                name = obj_attr_list["cloud_vm_name"],
                size = [x for x in self.sizes if x.id == obj_attr_list["size"]][0],
                location = [x for x in self.locations if x.id == obj_attr_list["vmc_name"]][0],
                ex_user_data = self.populate_cloudconfig(obj_attr_list),
                ex_create_attr={ "ssh_keys": keys, "private_networking" : True }
        )

        _time_mark_prs = int(time())
        obj_attr_list["mgt_002_provisioning_request_sent"] = _time_mark_prs - int(obj_attr_list["mgt_001_provisioning_request_originated"])
        obj_attr_list["last_known_state"] = "sent create request"
        cbdebug("Sent command to create node, waiting for creation...", True)

        if _reservation :
            obj_attr_list["last_known_state"] = "vm created"
            sleep(int(obj_attr_list["update_frequency"]))
            obj_attr_list["cloud_vm_uuid"] = _reservation.uuid
            cbdebug("Success. New instance UUID is " + _reservation.uuid, True)
            self.take_action_if_requested("VM", obj_attr_list, "provision_started")
            # Block until the droplet reports ready, then until it has
            # actually booted; both helpers record mgt_* timestamps.
            _time_mark_prc = self.wait_for_instance_ready(obj_attr_list, _time_mark_prs)
            self.wait_for_instance_boot(obj_attr_list, _time_mark_prc)
            obj_attr_list["host_name"] = "unknown"
            # Drop the raw libcloud node object before the attribute list is
            # persisted.
            if "instance_obj" in obj_attr_list :
                del obj_attr_list["instance_obj"]
            _status = 0
        else :
            obj_attr_list["last_known_state"] = "vm creation failed"
            _fmsg = "Failed to obtain instance's (cloud-assigned) uuid. The "
            _fmsg += "instance creation failed for some unknown reason."
            cberr(_fmsg)
            _status = 100

        # Optional block-storage volume: create it, then attach it to the
        # just-created droplet.
        if "cloud_vv" in obj_attr_list :
            _status = 101
            obj_attr_list["last_known_state"] = "about to send volume create request"

            # Volume name mirrors the VM naming scheme: cb-<user>-vv<counter>-<role>[-<ai name>].
            obj_attr_list["cloud_vv_name"] = "cb-" + obj_attr_list["username"]
            obj_attr_list["cloud_vv_name"] += '-' + "vv"
            obj_attr_list["cloud_vv_name"] += obj_attr_list["name"].split("_")[1]
            obj_attr_list["cloud_vv_name"] += '-' + obj_attr_list["role"]

            if obj_attr_list["ai"] != "none" :
                obj_attr_list["cloud_vv_name"] += '-' + obj_attr_list["ai_name"]

            obj_attr_list["cloud_vv_name"] = obj_attr_list["cloud_vv_name"].replace("_", "-")

            _msg = "Creating a volume, with size "
            _msg += obj_attr_list["cloud_vv"] + " GB, on VMC \""
            _msg += obj_attr_list["vmc_name"] + "\" with name " + obj_attr_list["cloud_vv_name"] + "..."
            cbdebug(_msg, True)

            _mark1 = int(time())
            volume = catalogs.digitalocean[credential_pair].create_volume(int(obj_attr_list["cloud_vv"]), obj_attr_list["cloud_vv_name"], location = [x for x in self.locations if x.id == obj_attr_list["vmc_name"]][0])
            sleep(int(obj_attr_list["update_frequency"]))
            obj_attr_list["cloud_vv_uuid"] = volume.id
            _mark2 = int(time())
            obj_attr_list["do_015_create_volume_time"] = _mark2 - _mark1

            if volume :
                _mark3 = int(time())
                _msg = "Attaching the newly created Volume \""
                _msg += obj_attr_list["cloud_vv_name"] + "\" (cloud-assigned uuid \""
                _msg += obj_attr_list["cloud_vv_uuid"] + "\") to instance \""
                _msg += obj_attr_list["cloud_vm_name"] + "\" (cloud-assigned uuid \""
                _msg += obj_attr_list["cloud_vm_uuid"] + "\")"
                cbdebug(_msg)

                # A failed attach destroys the just-created volume so it is
                # not leaked, then aborts the whole VM creation.
                if not volume.attach(_reservation) :
                    msg = "Volume attach failed. Aborting VM creation..."
                    cbdebug(msg, True)
                    volume.destroy()
                    raise CldOpsException(msg, _status)

                cbdebug("Volume attach success.", True)
                _mark4 = int(time())
                obj_attr_list["do_015_create_volume_time"] += (_mark4 - _mark3)
                _status = 0
            else :
                msg = "Volume creation failed. Aborting VM creation..."
                cbdebug(msg, True)
                raise CldOpsException(msg, _status)
        else :
            obj_attr_list["cloud_vv_uuid"] = "none"

    except CldOpsException, obj :
        _status = obj.status
        _fmsg = str(obj.msg)
        cbwarn("Error during reservation creation: " + _fmsg)
def get_cmdline(depkey, depsdict, operation, process_manager = False, exception_if_no_url = False) :
    '''
    Assemble the command line(s) used to "install" or "configure" the
    dependency `depkey` from the keys in `depsdict`.

    Non-"configure" operations first resolve a download-URL list by trying
    increasingly generic keys (distro name -> distro kind -> "common", with
    an architecture component when the install method is "man"), substitute
    the REPO_*/ARCH/DISTRO/USERNAME placeholders, and keep the first URL
    that passes check_url().

    Returns the (command line keys, command line) string pair, expanded via
    expand_command(). Raises Exception when no URL works and
    exception_if_no_url is True.
    '''
    if operation != "configure" :
        if depsdict[depkey + '-' + operation] == "man" :
            # Manual installs carry the architecture in the key name.
            _urls_key = depsdict["cdistnam"] + '-' + depkey + '-' + depsdict["carch"] + "-urls-" + depsdict[depkey + '-' + operation]
            if _urls_key not in depsdict :
                _urls_key = depsdict["cdistkind"] + '-' + depkey + '-' + depsdict["carch"] + "-urls-" + depsdict[depkey + '-' + operation]
            if _urls_key not in depsdict :
                _urls_key = "common-" + depkey + '-' + depsdict["carch"] + "-urls-" + depsdict[depkey + '-' + operation]
            if _urls_key not in depsdict :
                _urls_key = False
        else :
            _urls_key = depsdict["cdistnam"] + '-' + depkey + "-urls-" + depsdict[depkey + '-' + operation]
            if _urls_key not in depsdict :
                _urls_key = depsdict["cdistkind"] + '-' + depkey + "-urls-" + depsdict[depkey + '-' + operation]
            if _urls_key not in depsdict :
                _urls_key = "common-" + depkey + "-urls-" + depsdict[depkey + '-' + operation]
            if _urls_key not in depsdict :
                _urls_key = False
    else :
        _urls_key = False

    if _urls_key :
        # Shorter than "http://" (7 chars) cannot be a real URL list.
        if len(depsdict[_urls_key]) > 7 :
            _tested_urls = ''
            _actual_url = False
            for _url in depsdict[_urls_key].split(',') :
                if depsdict["repo_addr"] :
                    _url = _url.replace("REPO_ADDR", depsdict["repo_addr"])
                _url = _url.replace("REPO_RELEASE", depsdict["cdistver"])
                _url = _url.replace("REPO_MAJOR_RELEASE", depsdict["cdistmajorver"])
                _url = _url.replace("REPO_ARCH", depsdict["carch"])
                _url = _url.replace("ARCH", depsdict["carch"].strip())
                _url = _url.replace("DISTRO", depsdict["cdistkind"].strip())
                _url = _url.replace("USERNAME", depsdict["username"].strip())

                if check_url(_url, "ARCH", depsdict["carch"]) :
                    _actual_url = _url
                    break
                else :
                    if not _tested_urls.count(_url) :
                        _tested_urls += _url + ','

            if not _actual_url :
                _msg = "##### None of the urls indicated to install \"" + depkey + "\" ("
                _msg += _tested_urls + ") seem to be functional."
                if exception_if_no_url :
                    raise Exception(_msg)
                else :
                    cbwarn(_msg)
        else :
            _actual_url = False
    else :
        _actual_url = False

    _actual_cmdline = ""
    _actual_cmdline_keys = ''

    if operation == "install" :
        # Three-phase install: each phase has its own key resolution,
        # most-specific scope first.
        for _sub_step in [ "preinstall", "install", "postinstall"] :
            _commandline_keys = []
            _commandline_keys.append(depsdict["cdistnam"] + '-' + depkey + '-' + _sub_step + '-' + depsdict[depkey + '-' + operation])
            _commandline_keys.append(depsdict["cdistkind"] + '-' + depkey + '-' + _sub_step + '-' + depsdict[depkey + '-' + operation])
            _commandline_keys.append("common-" + depkey + '-' + _sub_step + '-' + depsdict[depkey + '-' + operation])
            _x, _y = get_actual_cmdline(_commandline_keys, depsdict, _actual_url)
            _actual_cmdline_keys += _x + ','
            _actual_cmdline += _y + ';'
    else :
        _commandline_keys = [ depkey + '-' + operation ]
        _x, _y = get_actual_cmdline(_commandline_keys, depsdict, _actual_url)
        _actual_cmdline_keys += _x + ','
        _actual_cmdline += _y + ';'

    # Strip stray leading/trailing separators.
    if _actual_cmdline_keys[0] == ',' :
        _actual_cmdline_keys = _actual_cmdline_keys[1:]
    # FIX: the trimmed value previously went into a misspelled
    # "_actual_commandline_keys" name which was returned as-is, so the
    # ",,"/"_equal_" cleanups below never reached the caller.
    if _actual_cmdline_keys[-1] == ',' :
        _actual_cmdline_keys = _actual_cmdline_keys[0:-1]

    if _actual_cmdline[0] == ';' :
        _actual_cmdline = _actual_cmdline[1:]
    if _actual_cmdline[-1] == ';' :
        _actual_cmdline = _actual_cmdline[0:-1]

    _actual_cmdline = expand_command(_actual_cmdline, depsdict, process_manager)

    _actual_cmdline_keys = _actual_cmdline_keys.replace(",,",',')
    _actual_cmdline_keys = _actual_cmdline_keys.replace(",,",',')
    _actual_cmdline_keys = _actual_cmdline_keys.replace("_equal_",'=')

    _actual_cmdline = _actual_cmdline.replace(";;",';')
    _actual_cmdline = _actual_cmdline.replace(";;",';')
    _actual_cmdline = _actual_cmdline.replace(";;",';')
    _actual_cmdline = _actual_cmdline.replace("_equal_",'=')

    return _actual_cmdline_keys, _actual_cmdline
def build_repository_file_contents(depsdict, repo_name) :
    '''
    Render the package-repository definition file for repo_name: APT
    "deb ..." lines on ubuntu, a YUM/DNF ".repo" stanza otherwise.

    The URL is taken from the locally-mirrored "local-url" when usable,
    falling back to "original-url"; REPO_* placeholders are substituted and
    the result is probed with check_url().

    Returns the file contents as a string, or False when no reachable URL
    could be found (the repository is then skipped by the caller).
    '''
    _msg = "Configuring repository \"" + repo_name + "\"..."
    cbinfo(_msg)

    _file_contents = ""

    # Prefer the locally-mirrored URL when one is defined and usable; fall
    # back to the upstream URL otherwise.
    # NOTE(review): when the "local-url" key is absent entirely, _actual_url
    # is never initialized before the replace() calls below — presumably
    # every repo entry carries the key; confirm against the parser.
    if "local-url" in depsdict["repo_contents"][repo_name] :
        if len(depsdict["repo_contents"][repo_name]["local-url"]) :
            if not depsdict["repo_addr"] and \
            depsdict["repo_contents"][repo_name]["local-url"].count("REPO_ADDR") :
                # The local URL needs a REPO_ADDR substitution but no
                # repository address was configured: unusable.
                _actual_url = depsdict["repo_contents"][repo_name]["original-url"]
            else :
                _actual_url = depsdict["repo_contents"][repo_name]["local-url"]
        else :
            _actual_url = depsdict["repo_contents"][repo_name]["original-url"]

    if depsdict["repo_addr"] :
        _actual_url = _actual_url.replace("REPO_ADDR", depsdict["repo_addr"])

    _actual_url = _actual_url.replace("REPO_RELEASE", depsdict["cdistver"])
    _actual_url = _actual_url.replace("REPO_MAJOR_RELEASE", depsdict["cdistmajorver"])
    _actual_url = _actual_url.replace("REPO_ARCH", depsdict["carch"])

    if not check_url(_actual_url, "ARCH", depsdict["carch"]) :
        _tested_urls = _actual_url
        # The chosen URL is unreachable; retry with the upstream URL before
        # giving up.
        _actual_url = depsdict["repo_contents"][repo_name]["original-url"]
        if not check_url(_actual_url, "ARCH", depsdict["carch"]) :
            if not _tested_urls.count(_actual_url) :
                _tested_urls += ',' + _actual_url
            _actual_url = False

    if _actual_url :
        _msg = "Valid URL found: " + _actual_url + "."
        cbinfo(_msg)
    else :
        _msg = "No URLs available for repository \"" + repo_name
        _msg += "\" (" + _tested_urls + ")." + " Will ignore this repository"
        _msg += ", but this might cause installation errors due to a lacking on certain dependencies"
        cbwarn(_msg)
        return False

    if depsdict["cdistkind"] == "ubuntu" :
        # APT format: one "deb <url> <dist> <component>" line per pair.
        for _dist in depsdict["repo_contents"][repo_name]["dists"].split(',') :
            for _component in depsdict["repo_contents"][repo_name]["components"].split(',') :
                _file_contents += "deb " + _actual_url + ' ' + _dist + ' ' + _component + "\n"
    else :
        # YUM/DNF .repo stanza.
        _file_contents += "[" + repo_name + "]\n"
        _file_contents += "name = " + repo_name + "\n"
        _file_contents += "baseurl = " + _actual_url + "\n"
        for _attr in [ "enabled", "skip_if_unavailable", "priority", "gpgcheck" ] :
            _file_contents += _attr + " = " + depsdict["repo_contents"][repo_name][_attr] + "\n"
        # FIX: the original had a no-op bare "True" statement as the
        # gpgcheck == "0" branch; only emit gpgkey when gpg checking is
        # enabled (behavior unchanged).
        if depsdict["repo_contents"][repo_name]["gpgcheck"] != "0" :
            _file_contents += "gpgkey = " + depsdict["repo_contents"][repo_name]["gpgkey"] + "\n"

    return _file_contents
def dependency_checker_installer(hostname, depsdict, username, operation, options) : ''' TBD ''' try : _status = 100 _dep_missing = -1 _fmsg = "An error has occurred, but no error message was captured" if len(options.wks) > 1 : if options.wks.count("_ycsb") : options.wks += ",ycsb" if options.wks.count(",ycsb") : options.wks += ",mongo_ycsb,cassandra_ycsb,redis_ycsb" if options.wks.count(",acmeair") : options.wks += ",mongo_acmeair" deps_file_parser(depsdict, username, options, "127.0.0.1") docker_file_parser(depsdict, username, options, "127.0.0.1") preparation_file_parser(depsdict, username, options, "127.0.0.1") if "Filestore_ip" not in depsdict : depsdict["Filestore_ip"], depsdict["Filestore_port"], depsdict["Filestore_username"] = options.filestore.split('-') depsdict["cdistkind"], depsdict["cdistver"], depsdict["cdistmajorver"], depsdict["cdistnam"], depsdict["carch"] = get_linux_distro() depsdict["3rdpartydir"] = options.tpdir depsdict["scriptsdir"] = options.wksdir depsdict["credentialsdir"] = options.creddir depsdict["username"] = username if options.addr : depsdict["repo-addr1"] = options.addr depsdict["pip-addr1"] = options.addr if depsdict["carch"] == "x86_64" : depsdict["carch1"] = "x86_64" depsdict["carch2"] = "x86-64" depsdict["carch3"] = "amd64" elif depsdict["carch"] == "ppc64le" : depsdict["carch1"] = "ppc64le" depsdict["carch2"] = "ppc64" depsdict["carch3"] = "ppc64" else: depsdict["carch1"] = "aarch64" depsdict["carch2"] = "aarch64" depsdict["carch3"] = "aarch64" _missing_dep = [] _dep_list = [0] * 5000 if str(options.addr) != "bypass" : select_url("repo", depsdict) select_url("pip", depsdict) _raise_exception = True else : depsdict["pip_addr"] = None depsdict["repo_addr"] = None _raise_exception = False for _key in depsdict.keys() : if _key.count("-order") : _dependency = _key.replace("-order",'') _order = int(depsdict[_key]) * 20 _dep_list.insert(_order, _dependency) _dep_list = [x for x in _dep_list if x != 0] print '\n' if 
options.role.count("workload") : options.tag = "base," + options.role _msg = "##### This node will be used to play a role in the Virtual Applications" _msg += " (AIs) \"" + str(options.wks) + "\". Only a subset of the depedencies" _msg += " will be " + operation + "ed. This node cannot be used as an Orchestrator Node\n" _msg += "\n" cbinfo(_msg) else : options.tag = "base," + options.role + ',' + options.clouds _msg = "##### This node will be prepared as an Orchestration Node." _msg += " The full set of dependencies will be " + operation + "ed. " _msg += "\n" cbinfo(_msg) options.tag = options.tag.split(',') _selected_dep_list = [] for _dep in _dep_list : for _tag in options.tag : if _dep + "-tag" in depsdict : _dep_tag_list = depsdict[_dep + "-tag"].split(',') else : _dep_tag_list = [ "workload" ] if _tag in _dep_tag_list : if _dep not in _selected_dep_list : _selected_dep_list.append(_dep) _dep_list = _selected_dep_list _process_manager = ProcessManagement(hostname) _status, _std_out, _y = _process_manager.run_os_command("sudo cat /proc/1/cgroup | grep -c docker", raise_exception = False) if _status : depsdict["indocker"] = False else : if str(_std_out.replace("\n",'')) == '0' : depsdict["indocker"] = False else : depsdict["indocker"] = True _msg = "##### DETECTED OPERATING SYSTEM KIND: " + depsdict["cdistkind"] cbinfo(_msg) _msg = "##### DETECTED OPERATING SYSTEM VERSION: " + depsdict["cdistver"] + " (" + depsdict["cdistmajorver"] + ')' cbinfo(_msg) _msg = "##### DETECTED OPERATING SYSTEM NAME: " + depsdict["cdistnam"] cbinfo(_msg) _msg = "##### DETECTED ARCHITECTURE: " + depsdict["carch"] cbinfo(_msg) _msg = "##### DETECTED RUNNING INSIDE DOCKER: " + str(depsdict["indocker"]) cbinfo(_msg) print '\n' if operation == "configure" : if "repo" in _dep_list : _dep_list.remove("repo") if depsdict["cdistkind"] == "AMI" : _msg = "This node runs the \"" + depsdict["cdistkind"] + "\" Linux " _msg += "distribution. 
Will treat it as \"rhel\", but will disable" _msg += " the repository manipulation." cbinfo(_msg) depsdict["cdistkind"] = "rhel" if "repo" in _dep_list : _dep_list.remove("repo") if depsdict["carch"].count("ppc") and "mongdob" in _dep_list : _msg = "##### The processors on this node have a \"Power\" architecture." _msg += "Removing MongoDB and Chef (client) from the dependency list" cbwarn(_msg) _dep_list.remove("mongodb") _dep_list.remove("chef-client") if "java" in _dep_list and "oraclejava" in _dep_list : _msg = "Since both \"java\" and \"oraclejava\" are listed as dependencies" _msg += ", only \"oraclejava\" will be used" cbinfo(_msg) _dep_list.remove("java") _dep_list.remove("java-home") _fmsg = "" _dep_missing = 0 for _dep in _dep_list : _status, _msg = execute_command("configure", _dep, depsdict, \ hostname = "127.0.0.1", \ username = username, \ venv = options.venv, \ raise_exception = _raise_exception) if _status : _dep_missing += 1 _missing_dep.append(_dep) cberr(_msg) if operation == "install" : _status, _msg = execute_command("install", _dep, depsdict, \ hostname = "127.0.0.1", \ username = username, \ venv = options.venv, \ raise_exception = _raise_exception) if not _status : _dep_missing -= 1 _missing_dep.remove(_dep) cbinfo(_msg) else : cberr(_msg) else : cbinfo(_msg) _status = _dep_missing _fmsg += ','.join(_missing_dep) except KeyError, e: _status = 22 _fmsg = "Unable to find entry " + str(e) + " in dependencies dictionary. Check you dependencies configuration file(s)"
def vmcreate(self, obj_attr_list) :
    '''
    Provision a new DigitalOcean Droplet for the VM described by
    obj_attr_list. Single-token variant: authenticates with the one
    access token in "credentials" and passes the configured SSH key
    names straight through to create_node().

    NOTE(review): the visible chunk ends inside the CldOpsException
    handler; the rest of the method's error handling is outside this view.
    '''
    try :
        # Pessimistic defaults: assume failure until proven otherwise.
        _status = 100
        _fmsg = "An error has occurred when creating new Droplet, but no error message was captured"
        obj_attr_list["cloud_vm_uuid"] = "NA"
        _instance = False

        # Cloud-visible name: cb-<user>-vm<counter>-<role>[-<ai name>],
        # with underscores normalized to dashes.
        obj_attr_list["cloud_vm_name"] = "cb-" + obj_attr_list["username"]
        obj_attr_list["cloud_vm_name"] += '-' + "vm" + obj_attr_list["name"].split("_")[1]
        obj_attr_list["cloud_vm_name"] += '-' + obj_attr_list["role"]

        if obj_attr_list["ai"] != "none" :
            obj_attr_list["cloud_vm_name"] += '-' + obj_attr_list["ai_name"]

        obj_attr_list["cloud_vm_name"] = obj_attr_list["cloud_vm_name"].replace("_", "-")

        obj_attr_list["last_known_state"] = "about to connect to DigitalOcean"

        access_token = obj_attr_list["credentials"]
        cbdebug("Connecting to DigitalOcean...")
        self.connect(access_token)

        _time_mark_prs = int(time())
        obj_attr_list["mgt_002_provisioning_request_sent"] = _time_mark_prs - int(obj_attr_list["mgt_001_provisioning_request_originated"])

        obj_attr_list["last_known_state"] = "about to send create request"

        _msg = "Attempting to create a Droplet "
        _msg += obj_attr_list["imageid1"]
        _msg += " on DigitalOcean, creating a vm named "
        _msg += obj_attr_list["cloud_vm_name"]
        cbdebug(_msg, True)

        _msg = "Looking for an existing image named "
        _msg += obj_attr_list["imageid1"]
        cbdebug(_msg, True)

        # Match the configured image (by name or id) against the cached
        # image catalog; refresh the cache once on a miss.
        image = False
        for x in self.images :
            if x.name == obj_attr_list["imageid1"] or x.id == obj_attr_list["imageid1"] :
                image = x
                break

        if not image :
            cbdebug("Image is missing. Refreshing image list...", True)
            self.images = catalogs.digitalocean.list_images()
            for x in self.images :
                if x.name == obj_attr_list["imageid1"] or x.id == obj_attr_list["imageid1"] :
                    image = x
                    break

        if not image :
            raise CldOpsException("Image doesn't exist at DigitalOcean. Check your configuration: " + obj_attr_list["imageid1"], _status)

        cbdebug("Launching new Droplet with hostname " + obj_attr_list["cloud_vm_name"], True)

        # libcloud create_node(): size/location are matched by id against the
        # cached catalogs; cloud-init user data comes from
        # populate_cloudconfig().
        _reservation = catalogs.digitalocean.create_node(
                image = image,
                name = obj_attr_list["cloud_vm_name"],
                size = [x for x in self.sizes if x.id == obj_attr_list["size"]][0],
                location = [x for x in self.locations if x.id == obj_attr_list["vmc_name"]][0],
                ex_user_data = self.populate_cloudconfig(obj_attr_list),
                ex_create_attr={ "ssh_keys": obj_attr_list["key_name"].split(","), "private_networking" : True }
        )

        obj_attr_list["last_known_state"] = "sent create request"
        cbdebug("Sent command to create node, waiting for creation...", True)

        if _reservation :
            obj_attr_list["last_known_state"] = "vm created"
            sleep(int(obj_attr_list["update_frequency"]))
            obj_attr_list["cloud_vm_uuid"] = _reservation.uuid
            cbdebug("Success. New instance UUID is " + _reservation.uuid, True)
            self.take_action_if_requested("VM", obj_attr_list, "provision_started")
            # Block until the droplet reports ready, then until it has
            # actually booted; both helpers record mgt_* timestamps.
            _time_mark_prc = self.wait_for_instance_ready(obj_attr_list, _time_mark_prs)
            self.wait_for_instance_boot(obj_attr_list, _time_mark_prc)
            obj_attr_list["host_name"] = "unknown"
            # Drop the raw libcloud node object before the attribute list is
            # persisted.
            if "instance_obj" in obj_attr_list :
                del obj_attr_list["instance_obj"]
            _status = 0
        else :
            obj_attr_list["last_known_state"] = "vm creation failed"
            _fmsg = "Failed to obtain instance's (cloud-assigned) uuid. The "
            _fmsg += "instance creation failed for some unknown reason."
            cberr(_fmsg)
            _status = 100

    except CldOpsException, obj :
        _status = obj.status
        _fmsg = str(obj.msg)
        cbwarn("Error during reservation creation: " + _fmsg)
_dep_list.remove("repo") if depsdict["cdistkind"] == "AMI" : _msg = "This node runs the \"" + depsdict["cdistkind"] + "\" Linux " _msg += "distribution. Will treat it as \"rhel\", but will disable" _msg += " the repository manipulation." cbinfo(_msg) depsdict["cdistkind"] = "rhel" if "repo" in _dep_list : _dep_list.remove("repo") if depsdict["carch"].count("ppc") and "mongdob" in _dep_list : _msg = "##### The processors on this node have a \"Power\" architecture." _msg += "Removing MongoDB and Chef (client) from the dependency list" cbwarn(_msg) _dep_list.remove("mongodb") _dep_list.remove("chef-client") if "java" in _dep_list and "oraclejava" in _dep_list : _msg = "Since both \"java\" and \"oraclejava\" are listed as dependencies" _msg += ", only \"oraclejava\" will be used" cbinfo(_msg) _dep_list.remove("java") _dep_list.remove("java-home") _fmsg = "" _dep_missing = 0 for _dep in _dep_list :