def test_getitem_server(self):
    """Check that cloudmesh_server.yaml exposes the mongo db and port keys."""
    HEADING()
    filename = config_file("/cloudmesh_server.yaml")
    config = ConfigDict(filename=filename)
    print(config)
    existing = config.get("cloudmesh.server.mongo.db")
    test1 = existing is not None
    print("QUERY", existing)
    print("Port", config.get("cloudmesh.server.mongo.port"))
    # BUG FIX: test1 was computed but never asserted, so this test could
    # never fail even when the key was missing from the yaml file.
    assert test1
def test_server(self):
    """Verify an existing mongo key resolves and a missing key raises."""
    HEADING()
    filename = config_file("/cloudmesh_server.yaml")
    config = ConfigDict(filename=filename)
    # print config
    existing = config.get("cloudmesh.server.mongo.db")
    test1 = existing is not None
    print("mongo.db =", existing)
    try:
        none_existing = config.get("cloudmesh.server.mongo.xyz")
        test2 = False
    except Exception:
        # narrowed from a bare except so KeyboardInterrupt/SystemExit
        # are not swallowed while the test runs
        print("Error")
        test2 = True
    assert test1 and test2
def test_launcher(self):
    """Existing launcher recipies resolve; a missing sub-key raises."""
    HEADING()
    filename = config_file("/cloudmesh_launcher.yaml")
    config = ConfigDict(filename=filename)
    print(config)
    existing = config.get("cloudmesh.launcher.recipies")
    test1 = existing is not None
    print(existing)
    try:
        none_existing = config.get("cloudmesh.launcher.recipies.xyz")
        test2 = False
    except Exception:
        # narrowed from a bare except so KeyboardInterrupt/SystemExit
        # are not swallowed while the test runs
        print("Error")
        test2 = True
    assert test1 and test2
def readRackConfig(self, name, dir_yaml, dir_diag):
    """Load rack configuration for *name* from cloudmesh_rack.yaml.

    Sets cluster_name, diag filenames and rack_count on self.

    :param name: rack/cluster name; "all" aggregates every cluster
    :param dir_yaml: directory containing the rack yaml file
    :param dir_diag: directory containing the diag files
    """
    rack_config = ConfigDict(
        filename=dir_yaml + "/" + self.default_rack_yaml)
    self.dict_rack_config = rack_config.get("cloudmesh.rack")
    lname = name.lower()
    # the rack name in cloudmesh_rack.yaml MUST be valid/predefined in
    # clouldmesh_cluster.yaml
    self.cluster_name = (lname if lname in self.cluster_name_list
                         else self.cluster_name_unknown)
    # diag filename and temporary diag filename
    cluster = self.dict_rack_config["cluster"]
    self.filename_diag = dir_diag + "/" + cluster[self.cluster_name]["diag"]
    self.rack_count = cluster[self.cluster_name]["count"]
    self.filename_diag_temp = self.filename_diag + ".temp"
    # additional process for 'all': its count equals the sum of all other
    # clusters excluding 'all' and 'unknown'
    if name == "all":
        all_set = set(cluster.keys()) - {"all", "unknown"}
        # IDIOM: manual accumulation loop replaced with sum()
        self.rack_count = sum(cluster[rack]["count"] for rack in all_set)
def create_project_from_file(cls, file_path):
    """Create a Project document in the database from a yaml file.

    :param file_path: path to the yaml file; the project data must live
                      under 'cloudmesh' -> 'project'
    """
    # removed dead commented-out placeholder code (# implement() # return)
    try:
        filename = path_expand(file_path)
        file_config = ConfigDict(filename=filename)
    except Exception:
        # narrowed from a bare except so system-exit signals propagate
        Console.error("Could not load file, please check filename and its path")
        return
    try:
        project_config = file_config.get("cloudmesh", "project")
        project = Project()
        project_id = uuid.uuid4()
        project_config.update({'project_id': project_id})
        update_document(project, project_config)
    except Exception:
        Console.error("Could not get project information from yaml file, "
                      "please check you yaml file, users information must be "
                      "under 'cloudmesh' -> 'project' -> project1..." +
                      str(sys.exc_info()[0]))
        return
    try:
        cls.add(project)
        Console.info("Project created in the database.")
    except Exception:
        Console.error("Project creation in database failed, " +
                      str(sys.exc_info()))
        return
def create_user_from_file(cls, file_path):
    """Create a User document in the database from a yaml file.

    :param file_path: path to the yaml file; the user data must live
                      under 'cloudmesh' -> 'user'
    """
    try:
        filename = path_expand(file_path)
        file_config = ConfigDict(filename=filename)
    except Exception:
        # narrowed from a bare except so system-exit signals propagate
        Console.error("Could not load file, please check filename and its path")
        return
    try:
        user_config = file_config.get("cloudmesh", "user")
        user_name = user_config['username']
        user = User()
        update_document(user, user_config)
    except Exception:
        Console.error("Could not get user information from yaml file, "
                      "please check you yaml file, users information must be "
                      "under 'cloudmesh' -> 'users' -> user1...")
        return
    try:
        if cls.check_exists(user_name) is False:
            cls.add(user)
            Console.info("User created in the database.")
        else:
            Console.error("User with user name " + user_name +
                          " already exists.")
            return
    except Exception:
        Console.error("User creation in database failed, " +
                      str(sys.exc_info()))
        return
def readRackConfig(self, name, dir_yaml, dir_diag):
    """Load rack configuration for *name* from cloudmesh_rack.yaml.

    Sets cluster_name, diag filenames and rack_count on self.

    :param name: rack/cluster name; "all" aggregates every cluster
    :param dir_yaml: directory containing the rack yaml file
    :param dir_diag: directory containing the diag files
    """
    rack_config = ConfigDict(filename=dir_yaml + "/" + self.default_rack_yaml)
    self.dict_rack_config = rack_config.get("cloudmesh.rack")
    lname = name.lower()
    # the rack name in cloudmesh_rack.yaml MUST be valid/predefined in
    # clouldmesh_cluster.yaml
    self.cluster_name = (lname if lname in self.cluster_name_list
                         else self.cluster_name_unknown)
    # diag filename and temporary diag filename
    cluster = self.dict_rack_config["cluster"]
    self.filename_diag = dir_diag + "/" + cluster[self.cluster_name]["diag"]
    self.rack_count = cluster[self.cluster_name]["count"]
    self.filename_diag_temp = self.filename_diag + ".temp"
    # additional process for 'all': its count equals the sum of all other
    # clusters excluding 'all' and 'unknown'
    if name == "all":
        all_set = set(cluster.keys()) - {"all", "unknown"}
        # IDIOM: manual accumulation loop replaced with sum()
        self.rack_count = sum(cluster[rack]["count"] for rack in all_set)
def user_apply():
    """Flask view for the user application form.

    GET renders the form; POST stores the submitted user in mongo.
    """
    form = UserRegistrationForm(request.form)
    if request.method == 'POST':  # and form.validate():  (validation disabled)
        data = dict(request.form)
        action = str(data['button'][0])
        # request.form values are one-element lists; unwrap each of them
        for key in data:
            data[key] = data[key][0]
        del data['button']
        if action == 'save':
            users = Users()
            user = MongoUser()
            # 'confirm' (password confirmation field) is not persisted
            del data['confirm']
            for d in data:
                user[d] = data[d]
            users.add(user)
        # NOTE(review): original indentation was ambiguous; flash/redirect
        # are placed at POST level so 'cancel' also redirects — confirm.
        flash('Thanks for registering')
        return redirect('/')
    user_config = ConfigDict(filename=config_file("/cloudmesh_user_intf.yaml"))
    user_fields = user_config.get("cloudmesh.user")
    return render_template('management/user_apply.html',
                           title="User Application",
                           states=['save', 'cancel'],
                           fields=user_fields,
                           countries_list=[c for c in country_list()],
                           roles_list=roles)
def __init__(self, username, filename, Credential, style=2.0, password=None):
    """Build per-cloud credentials for *username* from a cloudmesh yaml file.

    :param username: the cloudmesh user name
    :param filename: path to the yaml file listing cloudmesh.clouds
    :param Credential: factory/class used to build one credential per cloud
    :param style: credential style version passed through to Credential
    :param password: optional password used for encryption
    """
    config = ConfigDict(filename=filename)
    self.password = password
    self[username] = {}
    for cloud in config.get("cloudmesh.clouds").keys():
        self[username][cloud] = Credential(username, cloud, filename,
                                           style=style,
                                           password=self.password)
        # NOTE(review): encrypt placed inside the loop so every cloud's
        # credential is encrypted; source formatting was ambiguous — if it
        # was outside the loop only the last cloud would be encrypted.
        self.encrypt(username, cloud, style)
def view(link=""):
    """Open the cloudmesh web UI in the configured browser.

    :param link: path appended to the server's base URL
    """
    from cloudmesh_base.ConfigDict import ConfigDict
    config = ConfigDict(filename=config_file("/cloudmesh_server.yaml"))
    webui_host = config.get("cloudmesh.server.webui.host")
    webui_port = config.get("cloudmesh.server.webui.port")
    target = "http://{0}:{1}/{2}".format(webui_host, webui_port, link)
    local("%s %s" % (web_browser, target))
def get_ldap_user_from_yaml():
    """Build an LDAP user dict from the local me.yaml profile."""
    me = ConfigDict(filename=config_file("/me.yaml"))
    profile_fields = ["firstname", "lastname", "email", "phone", "address"]
    d = {field: me.get("profile.{0}".format(field))
         for field in profile_fields}
    d["cm_user_id"] = me.get("portalname")
    # numeric ids default to 0 unless explicitly present in the yaml
    d["gidNumber"] = me.get("gidNumber") if "gidNumber" in me.keys() else 0
    d["uidNumber"] = me.get("uidNumber") if "uidNumber" in me.keys() else 0
    d["projects"] = me.get("projects")
    #
    # copy the keys
    #
    d['keys'] = me.get("keys.keylist")
    return d
def project_apply():
    """Flask view for the project application form (Python 2).

    GET renders the form; POST normalizes the submitted fields and stores
    the project in mongo.
    """
    form = ProjectRegistrationForm(request.form)
    if request.method == 'POST' and form.validate():
        data = dict(request.form)
        action = str(data['button'][0])
        print "Project Data"
        print data
        for key in data:
            # y/n agreement checkboxes are converted to booleans
            if key in [
                'agreement_use', 'agreement_slides', 'agreement_support',
                'agreement_software', 'agreement_documentation',
                'join_open', 'join_notification'
            ]:
                if str(data[key][0]) == 'y':
                    data[key] = True
                elif str(data[key][0]) == 'n':
                    data[key] = False
            elif key in [
                'category', 'managers', 'resources_clusters', 'alumni',
                'resources_provision', 'resources_services', 'members',
                'keywords'
            ]:
                # multi-valued fields are left as lists
                print data[key][0]
            else:
                # single-valued fields: unwrap the one-element list
                data[key] = data[key][0]
            print str(key) + " - " + str(data[key]) + "\n"
        del data['button']
        if action == 'save':
            projects = Projects()
            project = MongoProject()
            for d in data:
                project[d] = data[d]
            print project
            projects.add(project)
        # NOTE(review): original indentation was ambiguous; flash/redirect
        # are placed at POST level so 'cancel' also redirects — confirm.
        flash('Thanks for registering')
        return redirect('/')
    project_config = ConfigDict(
        filename=config_file("/cloudmesh_project_intf.yaml"))
    project_fields = project_config.get("cloudmesh.project")
    return render_template('management/project_apply.html',
                           title="Project Application",
                           states=['save', 'cancel'],
                           fields=project_fields,
                           countries_list=[c for c in country_list()],
                           states_list=[c for c in states_list()],
                           disciplines_list=[c for c in disciplines_list()])
def display_launch_table(): """ To satisfy Pylint. Will update with proper comments """ if request.method == 'POST': print "HHHHHH", request.form.keys() for key in request.form.keys(): print key, ":", request.form[key] else: print "HEY JUDE" launcher_config = ConfigDict( filename=config_file("/cloudmesh_launcher.yaml")) launch_recipies = launcher_config.get("cloudmesh.launcher.recipies") return render_template('mesh/mesh_launch.html', recipies=launch_recipies)
def shell_command_open_web(arguments):
    """
    ::

        Usage:
            web [--fg|--cm] [LINK]

        Arguments:

            LINK    the link on the localhost cm server is opened.

        Options:

            -v       verbose mode
            --fg     opens a link on the FG portal
            --cm     opens a link on the CM portal

        Description:
            Opens a web page with the specified link
    """
    # NOTE(review): the docstring above is a docopt usage spec and is parsed
    # at runtime; its wording must not be changed casually.
    link = arguments["LINK"]
    if link is None or link == "/":
        link = ""
    web_browser = "firefox"
    if sys.platform == 'darwin':
        web_browser = "open"
    if arguments["--fg"]:
        location = "https://portal.futuregrid.org"
    elif arguments["--cm"]:
        location = "https://cloudmesh.futuregrid.org"
    else:
        try:
            filename = config_file("/cloudmesh_server.yaml")
            server_config = ConfigDict(filename=filename)
            host = server_config.get("cloudmesh.server.webui.host")
            port = server_config.get("cloudmesh.server.webui.port")
            location = "http://{0}:{1}".format(host, port)
        except Exception, e:
            print("ERROR: some error reading from the config file")
            print(e)
            return
    # NOTE(review): web_browser and location are computed but nothing in the
    # visible code actually opens the browser — confirm the function is not
    # truncated in this copy.
def project_apply():
    """Flask view for the project application form (Python 2).

    GET renders the form; POST normalizes the submitted fields and stores
    the project in mongo.
    """
    form = ProjectRegistrationForm(request.form)
    if request.method == 'POST' and form.validate():
        data = dict(request.form)
        action = str(data['button'][0])
        print "Project Data"
        print data
        for key in data:
            # y/n agreement checkboxes are converted to booleans
            if key in ['agreement_use', 'agreement_slides',
                       'agreement_support', 'agreement_software',
                       'agreement_documentation', 'join_open',
                       'join_notification']:
                if str(data[key][0]) == 'y':
                    data[key] = True
                elif str(data[key][0]) == 'n':
                    data[key] = False
            elif key in ['category', 'managers', 'resources_clusters',
                         'alumni', 'resources_provision',
                         'resources_services', 'members', 'keywords']:
                # multi-valued fields are left as lists
                print data[key][0]
            else:
                # single-valued fields: unwrap the one-element list
                data[key] = data[key][0]
            print str(key) + " - " + str(data[key]) + "\n"
        del data['button']
        if action == 'save':
            projects = Projects()
            project = MongoProject()
            for d in data:
                project[d] = data[d]
            print project
            projects.add(project)
        # NOTE(review): original indentation was ambiguous; flash/redirect
        # are placed at POST level so 'cancel' also redirects — confirm.
        flash('Thanks for registering')
        return redirect('/')
    project_config = ConfigDict(filename=config_file("/cloudmesh_project_intf.yaml"))
    project_fields = project_config.get("cloudmesh.project")
    return render_template('management/project_apply.html',
                           title="Project Application",
                           states=['save', 'cancel'],
                           fields=project_fields,
                           countries_list=[c for c in country_list()],
                           states_list=[c for c in states_list()],
                           disciplines_list=[c for c in disciplines_list()])
def connect(self, **kwargs):
    """
    The method is used to get a connection to a specified database.
    The hostname and port is being read from the cloudmesh_server.yaml
    file. If the username and password is not mentioned as part of the
    command line arguments, the details are read from the
    cloudmesh_server.yaml file. If the details are not available in the
    yaml file, it tried to connect without the credentials.

    :param kwargs: Can contain credentials for authentication to
                   the database.
    :return: Connection object to the specified server, or None on failure.
    """
    self.DB_CONFIG = {}
    config = ConfigDict(filename=config_file("/cloudmesh_server.yaml"))
    mongo_config = config.get("cloudmesh", "server", "mongo")
    #
    self.DB_CONFIG["host"] = mongo_config["host"]
    self.DB_CONFIG["port"] = int(mongo_config["port"])
    self.DB_CONFIG["username"] = mongo_config["username"]
    self.DB_CONFIG["password"] = mongo_config["password"]
    #
    # command-line supplied credentials override the yaml values
    for key, value in kwargs.iteritems():
        if key == "user_name":
            if value:
                self.DB_CONFIG["username"] = value
        elif key == "pwd":
            if value:
                self.DB_CONFIG["password"] = value
    if self.DB_CONFIG["username"] and self.DB_CONFIG["password"]:
        uri = "mongodb://{0}:{1}@{2}:{3}".format(
            self.DB_CONFIG["username"], self.DB_CONFIG["password"],
            self.DB_CONFIG["host"], self.DB_CONFIG["port"])
    else:
        uri = "mongodb://{0}:{1}".format(self.DB_CONFIG["host"],
                                         self.DB_CONFIG["port"])
    try:
        return MongoClient(uri)
    except Exception:
        # narrowed from a bare except; deliberately best-effort — logs the
        # failure and falls through, returning None
        Console.error(
            "Failed to connect to Mongoclient DB. May be an authentication issue.\n\t "
        )
def connect(self, **kwargs):
    """
    The method is used to get a connection to a specified database.
    The hostname and port is being read from the cloudmesh_server.yaml
    file. If the username and password is not mentioned as part of the
    command line arguments, the details are read from the
    cloudmesh_server.yaml file. If the details are not available in the
    yaml file, it tried to connect without the credentials.

    :param kwargs: Can contain credentials for authentication to
                   the database.
    :return: Connection object to the specified server, or None on failure.
    """
    self.DB_CONFIG = {}
    config = ConfigDict(filename=config_file("/cloudmesh_server.yaml"))
    mongo_config = config.get("cloudmesh", "server", "mongo")
    #
    self.DB_CONFIG["host"] = mongo_config["host"]
    self.DB_CONFIG["port"] = int(mongo_config["port"])
    self.DB_CONFIG["username"] = mongo_config["username"]
    self.DB_CONFIG["password"] = mongo_config["password"]
    #
    # command-line supplied credentials override the yaml values
    for key, value in kwargs.iteritems():
        if key == "user_name":
            if value:
                self.DB_CONFIG["username"] = value
        elif key == "pwd":
            if value:
                self.DB_CONFIG["password"] = value
    if self.DB_CONFIG["username"] and self.DB_CONFIG["password"]:
        uri = "mongodb://{0}:{1}@{2}:{3}".format(self.DB_CONFIG["username"],
                                                 self.DB_CONFIG["password"],
                                                 self.DB_CONFIG["host"],
                                                 self.DB_CONFIG["port"])
    else:
        uri = "mongodb://{0}:{1}".format(self.DB_CONFIG["host"],
                                         self.DB_CONFIG["port"])
    try:
        return MongoClient(uri)
    except Exception:
        # narrowed from a bare except; deliberately best-effort — logs the
        # failure and falls through, returning None
        Console.error("Failed to connect to Mongoclient DB. May be an authentication issue.\n\t ")
def password():
    """Update the india keystone password from the local cloudmesh yaml files."""
    user_config = cm_config(filename=config_file("/cloudmesh.yaml"))
    user = user_config.cloud('india')['credentials']
    server_config = ConfigDict(filename=config_file("/cloudmesh_server.yaml"))
    server = server_config.get('cloudmesh.server.keystone.india')
    # assemble the keystone arguments once; reused for the echoed command
    # line and for the actual Shell call
    args = ["--os-username", server['OS_USERNAME'],
            "--os-password", server['OS_PASSWORD'],
            "--os-tenant-name", server['OS_TENANT_NAME'],
            "--os-auth-url", server['OS_AUTH_URL'],
            "user-password-update",
            "--pass", user['OS_PASSWORD'],
            user['OS_USERNAME']]
    print(" ".join(["keystone"] + args))
    Shell.keystone(*args)
def password():
    """Update the india keystone password using the user credentials from
    cloudmesh.yaml and the keystone admin credentials from
    cloudmesh_server.yaml.
    """
    user_config = cm_config(filename=config_file("/cloudmesh.yaml"))
    user = user_config.cloud('india')['credentials']
    server_config = ConfigDict(filename=config_file("/cloudmesh_server.yaml"))
    server = server_config.get('cloudmesh.server.keystone.india')
    # echo the exact command before executing it (NOTE: prints passwords)
    print(" ".join(["keystone",
                    "--os-username", server['OS_USERNAME'],
                    "--os-password", server['OS_PASSWORD'],
                    "--os-tenant-name", server['OS_TENANT_NAME'],
                    "--os-auth-url", server['OS_AUTH_URL'],
                    "user-password-update",
                    "--pass", user['OS_PASSWORD'],
                    user['OS_USERNAME']]))
    Shell.keystone("--os-username", server['OS_USERNAME'],
                   "--os-password", server['OS_PASSWORD'],
                   "--os-tenant-name", server['OS_TENANT_NAME'],
                   "--os-auth-url", server['OS_AUTH_URL'],
                   "user-password-update",
                   "--pass", user['OS_PASSWORD'],
                   user['OS_USERNAME'])
import hostlist import progress import clean import mq PROGRESS.set('Cloudmesh Services', 50) __all__ = [ 'start', 'stop', 'list', 'clean', 'gui', 'monitor', 'kill', 'ls', 'lspbs', 'flower_server' ] celery_config = ConfigDict(filename=config_file("/cloudmesh_celery.yaml"), kind="worker") workers = celery_config.get("cloudmesh.workers") debug = True try: debug = cm_config_server().get("cloudmesh.server.debug") except: pass if debug: progress.off() else: progress.on() """ for worker in workers: workers[worker] = {"app":"cloudmesh.launcher{0}.queue", "hostlist":hostlist.expand_hostlist("l[1-{0}]".format(workers[worker])),
class OpenPBS(object):
    """Interface to PBS/Torque batch queues on multiple hosts via ssh.

    Configuration comes from a cloudmesh_pbs.yaml file; a persistent job
    counter is kept in a shared id file under the cloudmesh config directory.
    """

    # #################################################################
    # INITIALIZATION
    # #################################################################

    def __init__(self, deploy=True, yaml_filename="/cloudmesh_pbs.yaml"):
        """
        Creates an object instance of communication with pbs batch queues
        running on multiple hosts. It also is used to create some
        configuration parameters if deploy is set to True.

        it creates in the CLOUDMESH deploy directory the directory pbs and
        puts the dababse file pbs.db and the file that is used to store the
        current job number. The job number is shared among all resources and
        supposed to be unique.

        :param deploy: If True, creates the configuration files
        :param yaml_filename: The cloudmesh pbs yaml file. Defaults to
                              cloudmesh_pbs.yaml
        :return: an object instance of OpenPBS
        """
        self.yaml_filename = config_file(yaml_filename)
        self.pbs_dir = config_file("/pbs")
        self.id_file = config_file("/pbs/id.txt")
        self.db_file = config_file("/pbs/pbs.db")
        if deploy:
            self.deploy()
        self.load()
        # prime self.id from the persistent job counter file
        self.id = self.jobid
        # cache for pbsnodes output; filled lazily by nodes()
        self.pbs_nodes_data = None

    def info(self):
        """
        Prints some elementary information about the configuration of the
        OpenPBS instance.

        :return:
        """
        print("{:>20} = {:}".format("Config Dir", self.pbs_dir))
        print("{:>20} = {:}".format("Job ID file", self.id_file))
        print("{:>20} = {:}".format("Db file", self.db_file))

    def load(self, yaml_filename=None):
        """
        Loads the cloudmesh pbs yaml file.

        :param yaml_filename: The filename of the yaml file
        """
        log.debug("PBS yaml filename: {0}".format(self.yaml_filename))
        if yaml_filename is None:
            yaml_filename = self.yaml_filename
        else:
            self.yaml_filename = config_file(yaml_filename)
        self.data = ConfigDict(filename=self.yaml_filename)
        # host definitions come from the user's ~/.ssh/config
        self.hosts = ssh_config()

    def deploy(self, force=True):
        """
        Copies the yaml file from etc in the distribution to the
        .cloudmesh directory. If the file exits it will not be copied and a
        warning is thrown. If the file is the same as in etc no warning is
        thrown.

        :param force: Forgot what this does, please document.
        """
        # setup ~/.cloudmesh/pbs
        log.debug(self.pbs_dir)
        if not os.path.isdir(self.pbs_dir):
            os.makedirs(self.pbs_dir)
        self._load_jobid()
        xcopy("../etc/", config_file(""), "*.yaml", force=force)

    # #################################################################
    # JOB ID COUNTER
    # #################################################################

    def _load_jobid(self):
        """
        internal method that loads the job id from the job id file.

        :return: the string of the job id
        """
        try:
            with open(self.id_file, "r") as f:
                content = f.read()
                self.id = content.strip()
        except:
            # NOTE(review): if the read fails before self.id was ever set,
            # the return below raises AttributeError — confirm intended.
            self.jobid = 0
        return self.id

    def _write_jobid(self, id):
        """
        Internal method that overwrites the job id to the specified id.

        :param id: the job id
        :return: the string of the id
        """
        log.debug("CCC:" + self.id_file)
        if not os.path.isfile(self.id_file):
            # NOTE(review): this creates a file literally named 'file' in
            # the cwd; it probably should be self.id_file — confirm.
            open('file', 'w').close()
        with open(self.id_file, "w+") as text_file:
            text_file.write('%s' % id)
        return id

    @property
    def jobid(self):
        """
        The job id

        :return: The string of the job id
        """
        return self._load_jobid()

    @jobid.setter
    def jobid(self, value):
        """
        sets the job id to the given value

        :param value: The value of the jobid
        """
        self._write_jobid(value)

    def jobid_incr(self):
        """
        increments the job id by 1
        """
        id = self.jobid
        id = int(id) + 1
        self.jobid = id

    # ###################
    # GET DATA
    # ###################

    def __str__(self):
        """
        Returns the json object of the dict as string

        NOTE: should probably use json.dumps

        :return: the string representation of teh job data
        """
        return self.data.json()

    def servers(self):
        """
        List of the servers as defined in the .ssh/config file as dict

        :return: the dict representing the servers
        """
        return self.data["cloudmesh"]["pbs"].keys()

    def queues(self, server):
        """
        List the queues dict of the given server

        :param server:
        :return:
        """
        # NOTE(review): references the module-level name `pbs` instead of
        # self.data — likely a bug; confirm against the original module.
        server = pbs.data["cloudmesh"]["pbs"][server]
        if "queues" in server:
            return server["queues"]
        else:
            return None

    #
    # QSTAT
    #

    @classmethod
    def qstat_xml_to_dict(cls, xmldata):
        """
        Internal method that converst a qsta xml representation to a dict.

        :param xmldata: The xml data from qstat -x
        :return: a dict representation of the data
        """
        info = {}
        # NOTE(review): the two try blocks below are byte-identical
        # duplicates (copy/paste); the second pass re-parses the same xml
        # and rewrites the same entries — confirm and deduplicate.
        try:
            xmldoc = minidom.parseString(xmldata)
            itemlist = xmldoc.getElementsByTagName('Job')
            for item in itemlist:
                job = {}
                for attribute in item.childNodes:
                    if len(attribute.childNodes) == 1:
                        job[attribute.nodeName] = attribute.firstChild.nodeValue
                    else:
                        job[attribute.nodeName] = {}
                        for subchild in attribute.childNodes:
                            job[attribute.nodeName][
                                subchild.nodeName] = subchild.firstChild.nodeValue
                    if attribute.nodeName in ['Job_Owner']:
                        (name, host) = job[attribute.nodeName].split('@')
                        job[u'cm_user'] = name
                        job[u'cm_host'] = host
                info[job['Job_Id']] = job
        except:
            pass
        try:
            xmldoc = minidom.parseString(xmldata)
            itemlist = xmldoc.getElementsByTagName('Job')
            for item in itemlist:
                job = {}
                for attribute in item.childNodes:
                    if len(attribute.childNodes) == 1:
                        job[attribute.nodeName] = attribute.firstChild.nodeValue
                    else:
                        job[attribute.nodeName] = {}
                        for subchild in attribute.childNodes:
                            job[attribute.nodeName][
                                subchild.nodeName] = subchild.firstChild.nodeValue
                    if attribute.nodeName in ['Job_Owner']:
                        (name, host) = job[attribute.nodeName].split('@')
                        job[u'cm_user'] = name
                        job[u'cm_host'] = host
                info[job['Job_Id']] = job
        except:
            pass
        return info

    def qstat(self, host, user=True, format='dict'):
        """
        executes the qstat command on a particular host and returns the
        information as dict.

        :param host: The host as specified in ~/.ssh/config
        :param user: If True, only retirns information for the user
                     If False, all jobs for all users are returned
        :param format:
        :return:
        """
        data = None
        username = self.username(host)
        manager_host = self.manager(host)
        xml_data = Shell.ssh(manager_host, "qstat", "-x").rstrip()
        if format == 'dict':
            data = OpenPBS.qstat_xml_to_dict(xml_data)
            # keep only the current user's jobs unless user is falsy
            selected_data = {}
            for jobid in data:
                (owner, cm_host) = data[jobid]['Job_Owner'].split('@')
                if not user:
                    selected_data[unicode(jobid)] = data[unicode(jobid)]
                elif owner == username:
                    selected_data[unicode(jobid)] = data[unicode(jobid)]
            data = selected_data
            for jobid in data:
                data[unicode(jobid)][u"cm_jobid"] = jobid
                if "Variable_list" in data[unicode(jobid)]:
                    data[unicode(jobid)][u"cm_Variable_list"] = self.variable_list(data, jobid)
        elif format == "xml":
            if user is not None:
                print("WARNING: "
                      "restrictiong xml data for a user not supported.")
            data = xml_data
        return dict(data)

    def username(self, host):
        """
        The username of the host as specified in ~/.ssh/conf

        :param host: The name of the host
        :return: The username
        """
        return self.hosts.username(host)

    def manager(self, host):
        """
        In some cases a queue of another machine is hosted through a
        management node different from that machine. The manager command
        allows us to specify on which machine the qstat command is issued.

        :param host: The name of the host
        :return: The name of the management host
        """
        try:
            m = self.data.get("cloudmesh", "pbs", host, "manager")
        except:
            # no manager configured for this host: fall back to the host
            print("WARNING: Manager not defined for", host)
            print(" Using the host")
            m = host
        return m

    def database_filename(self):
        """
        The name of the cloudmesh_job file

        :return:
        """
        return path_expand(self.data.get("cloudmesh", "pbsdatabase", "filename"))

    def _write_to_file(self, script, filename):
        """
        Internal function to write a pbs script to a file

        :param script: content of the script
        :param filename: filename
        """
        with open(filename, "w") as text_file:
            text_file.write('%s' % script)

    def db_jobstatus(self, host, jobid, kind='dict'):
        """This method is not yet implemented"""
        return {}

    def jobstatus(self, host, jobid, kind='dict'):
        """
        The status of a specific job

        :param host: The host on which the job is running
        :param jobid: The jobid as specified by the queing system
        :param kind: The output can be returned as dict, xml, and yaml
        :return:
        """
        manager_host = self.manager(host)
        qstat_xml_data = Shell.ssh(manager_host, "qstat", "-x", jobid).rstrip()
        if kind == 'xml':
            r = qstat_xml_data
        else:
            r = self.qstat_xml_to_dict(qstat_xml_data)
            r[unicode(jobid)][u"cm_jobid"] = self.jobid
            r[unicode(jobid)]["cm_Variable_list"] = self.variable_list(r)
            if kind == 'yaml':
                r = yaml.dump(r, default_flow_style=False)
        return r

    @classmethod
    def list(cls, data, attributes):
        """
        Internal function to lists the information in the data dict

        :param data: The data directory
        :param attributes: the attribute to return
        :return: the content found for he specified attribute
        """
        content = {}
        for jobid in data:
            content[jobid] = {}
            for attribute in attributes:
                try:
                    content[jobid][attribute] = data[jobid][attribute]
                except:
                    # missing attribute: record the string "None"
                    content[jobid][attribute] = "None"
        return content

    def qsub(self, name, host, script, template=None, kind="dict"):
        """
        Executes the qsub command on a given host.

        NOTE this method may not yet be fully implemented

        :param name: name of the script
        :param host: host on which the script is to be run
        :param script: The name of the script
        :param template: The script is wrapped into a template
        :param kind: The return is passed as dict, yaml, xml
        :return:
        """
        self.jobid_incr()
        jobscript = self.create_script(name, script, template)
        # copy the script to the remote host
        self._write_to_file(jobscript, name)
        # copy script to remote host
        remote_path = self.data.get("cloudmesh", "pbs", host, "scripts")
        print(remote_path)
        xmkdir(host, remote_path)
        manager_host = self.manager(host)
        # call qsub on the remot host
        r = Shell.scp(name, manager_host + ":" + remote_path)
        jobid = Shell.ssh(manager_host,
                          "qsub {0}/{1}".format(remote_path, name)).rstrip()
        return self.jobstatus(host, jobid, kind=kind)

    def getid(self, data):
        # returns the first key of the data dict (Python 2: keys() is a list)
        key = data.keys()[0]
        return key

    @classmethod
    def variable_list(cls, data, id=None):
        """
        Internal function to list the variables of a qstat job which is
        'Variable_List'

        :param data: The input data value
        :param id: The id of the job
        :return:
        """
        if id is None:
            key = data.keys()[0]
        else:
            key = id
        var_list = data[key]['Variable_List'].split(',')
        d = {}
        for element in var_list:
            (attribute, value) = element.split('=')
            d[attribute] = value
        return d

    def create_script(self, name, script, template=None):
        # BUG
        # NOTE(review): when template is None, template_script is assigned
        # but never used and template.format below raises AttributeError —
        # the intent was probably `template = "{script}"` or similar.
        if template is None:
            template_script = script
        data = {'script': script, 'name': name}
        result = template.format(**data)
        return result

    def read_script(self, filename, user=None, host='localhost'):
        # reads a script file from the local host only; remote read is TODO
        if host in ['localhost'] and user is None:
            with file(filename) as f:
                content = f.read()
        else:
            # TODO: copy file from remote host
            print("ERROR: not implemented")
            pass
        return content

    def anodes(self, host, refresh=True):
        pass

    def nodes(self, host, refresh=True):
        """
        returns the information from the command pbsnodes in a dict.

        :param host: the name of the host as specified in the .ssh/config
                     file
        :param refresh: if False, reads returns a cached value
                        if True, issues a new command and refreshes the cach
        :return: information of the pbsnodes command in a dict
        """
        manager_host = self.manager(host)
        if self.pbs_nodes_data is None or refresh:
            try:
                result = Shell.ssh(manager_host, "pbsnodes", "-a")
            except:
                raise RuntimeError(
                    "can not execute pbs nodes on host {0}".format(manager_host))
            pbsinfo = {}
            # pbsnodes -a separates nodes with blank lines
            nodes = result.split("\n\n")
            for node in nodes:
                pbs_data = node.split("\n")
                pbs_data = [e.strip() for e in pbs_data]
                name = pbs_data[0]
                if name != "":
                    pbsinfo[name] = {u'name': name}
                    for element in pbs_data[1:]:
                        try:
                            (attribute, value) = element.split(" = ")
                            if attribute == 'status':
                                # status is itself a comma-separated k=v list
                                status_elements = value.split(",")
                                pbsinfo[name][attribute] = {}
                                for e in status_elements:
                                    (a, v) = e.split("=")
                                    pbsinfo[name][attribute][a] = v
                            elif attribute == 'jobs':
                                pbsinfo[name][attribute] = value.split(',')
                            elif attribute == 'note' and (
                                    value.strip().startswith("{") or
                                    value.strip().startswith("[")):
                                # notes containing dict/list literals are
                                # parsed safely with ast.literal_eval
                                pbsinfo[name][attribute] = literal_eval(value)
                            else:
                                pbsinfo[name][attribute] = value
                        except:
                            pass
            self.pbs_nodes_data = pbsinfo
        return self.pbs_nodes_data

    def nodes_sum(self, host):
        # total node count across the distribution returned below
        sum = 0
        distribution = self.nodes_distribution(host)
        for key in distribution:
            i = int(distribution[key])
            sum = sum + i
        return sum

    def nodes_distribution(self, host):
        """prints the distribution of services"""
        manager_host = self.manager(host)

        def pbsnodes_data(manager_host):
            # strip the trailing newline from the ssh output
            result = str(
                Shell.ssh(manager_host, "pbsnodes", "-l", "-n"))[:-1]
            return result

        empty = ["", "", ""]
        x = [x.split() for x in pbsnodes_data(manager_host).split("\n")]
        # Fill missing values
        r = []
        for line in x:
            new = ["unkown", "unkown", "unkown"]
            for i in range(0, len(line)):
                try:
                    new[i] = line[i]
                except:
                    pass
            r.append(new)
        # just taking column 2
        x = [x[2] for x in r]
        # print "GFKHFJH ", x
        cnt = Counter(x)
        # print "COUNT",
        result = dict(cnt)
        return result
def do_launcher(self, args, arguments):
    """
    ::

        Usage:
            launcher start MENU
            launcher stop STACK_NAME
            launcher list
            launcher show STACK_NAME
            launcher menu [--column=COLUMN] [--format=FORMAT]
            launcher import [FILEPATH] [--force]
            launcher export FILEPATH
            launcher help | -h

        An orchestration tool with Chef Cookbooks

        Arguments:

            MENU         Name of a cookbook
            STACK_NAME   Name of a launcher
            FILEPATH     Filepath
            COLUMN       column name to display
            FORMAT       display format (json, table)
            help         Prints this message

        Options:

            -v       verbose mode

    """
    log.info(arguments)
    # fresh handles to mongo, config and user DB for every invocation
    self.cm_mongo = cm_mongo()
    self.cm_config = cm_config()
    self.cm_user = cm_user()
    # mongo collection type used by refresh/stacks
    self._id = "t_stacks"

    if arguments["help"] or arguments["-h"]:
        print(self.do_launcher.__doc__)

    elif arguments['show'] and arguments['STACK_NAME']:
        print("NOT IMPLEMENTED")
        return

    elif arguments['menu']:
        # list the launchers stored in mongo for this user
        userid = self.cm_config.username()
        launchers = self.cm_mongo.launcher_get(userid)
        if launchers.count() == 0:
            Console.warning(
                "no launcher in database, please import launcher first"
                "(launcher import [FILEPATH] [--force])")
            return
        else:
            d = {}
            for launcher in launchers:
                d[launcher['cm_launcher']] = launcher
                # drop the mongo ObjectId, it is not printable/serializable
                if "_id" in d[launcher['cm_launcher']]:
                    del d[launcher['cm_launcher']]['_id']
        columns = None
        if arguments['--column']:
            if arguments['--column'] != "all":
                columns = [x.strip()
                           for x in arguments['--column'].split(',')]
        else:
            columns = ['name', 'description']
        if arguments['--format']:
            if arguments['--format'] not in ['table', 'json', 'csv']:
                Console.error("please select printing format ",
                              "among table, json and csv")
                return
            else:
                p_format = arguments['--format']
        else:
            p_format = None
        shell_commands_dict_output(userid,
                                   d,
                                   print_format=p_format,
                                   firstheader="launcher",
                                   header=columns
                                   # vertical_table=True
                                   )

    elif arguments['list']:
        # list the RUNNING launcher stacks (refreshed from the cloud)
        userid = self.cm_config.username()
        self.cm_mongo.activate(userid)
        self.cm_mongo.refresh(cm_user_id=userid, types=[self._id])
        stacks = self.cm_mongo.stacks(cm_user_id=userid)
        # keep only heat stacks whose name contains 'launcher'
        launchers = self.filter_launcher(
            stacks,
            {"search": "contain",
             "key": "stack_name",
             "value": "launcher"})
        log.debug(launchers)
        d = {}
        # flatten cloud -> stack-id -> stack into stack-id -> stack
        for k0, v0 in launchers.iteritems():
            for k1, v1 in launchers[k0].iteritems():
                d[v1['id']] = v1
        columns = ['stack_name', 'description', 'stack_status',
                   'creation_time', 'cm_cloud']
        if arguments['--column'] and arguments['--column'] != "all":
            columns = [x.strip()
                       for x in arguments['--column'].split(',')]
        if arguments['--format']:
            if arguments['--format'] not in ['table', 'json', 'csv']:
                Console.error(
                    "please select printing format among table, json and csv")
                return
            else:
                p_format = arguments['--format']
        else:
            p_format = None
        shell_commands_dict_output(userid,
                                   d,
                                   print_format=p_format,
                                   firstheader="launcher_id",
                                   header=columns
                                   # vertical_table=True
                                   )

    elif arguments['start'] and arguments['MENU']:
        # launch a new heat stack for the selected cookbook
        userid = self.cm_config.username()
        def_cloud = self.get_cloud_name(userid)
        self.cm_mongo.activate(userid)
        config_launcher = cm_config_launcher()
        userinfo = self.cm_user.info(userid)
        # pick the user's default key, else the first registered key
        # NOTE(review): if the user has neither, ``key`` stays unbound and
        # the ``if key:`` below raises NameError — TODO confirm/initialize
        if "key" in userinfo["defaults"]:
            key = userinfo["defaults"]["key"]
        elif len(userinfo["keys"]["keylist"].keys()) > 0:
            key = userinfo["keys"]["keylist"].keys()[0]
        if key:
            keycontent = userinfo["keys"]["keylist"][key]
            # strip a leading 'key ' marker from the stored key material
            if keycontent.startswith('key '):
                keycontent = keycontent[4:]
            cm_keys_mongo(userid).check_register_key(
                userid, def_cloud, key, keycontent)
            keynamenew = _keyname_sanitation(userid, key)
        else:
            Console.warning("No sshkey found. Please Upload one")
        cookbook = arguments['MENU']
        # unique stack name: launcher-<user>-<cookbook>-<random>
        s_name = "launcher-{0}-{1}-{2}".format(userid, cookbook,
                                               get_rand_string())
        dummy = "123456789"  # doing nothing. just for test
        try:
            t_url = \
                config_launcher['cloudmesh']['launcher']['default']['template']
        except:
            # If key is missing (KeyError), new cloudmesh_launcher.yaml
            # needs to be copied to ~/.cloudmesh
            t_url = \
                "https://raw.githubusercontent.com/cloudmesh/cloudmesh/master/heat-templates/centos6/launcher/launcher.yaml"
        param = {'KeyName': keynamenew,
                 'Cookbook': cookbook,
                 'dummy': dummy}
        # test for openmpi, hadoop
        if cookbook[:6] == "hadoop" or cookbook[:7] == "openmpi":
            # cluster cookbooks get their own generated keypair so the
            # nodes can reach each other
            privatekey, publickey = generate_keypair()
            try:
                t_url = \
                    config_launcher['cloudmesh']['launcher']['recipes'][cookbook]['template']
            except:
                # If key is missing (KeyError), new cloudmesh_launcher.yaml
                # needs to be copied to ~/.cloudmesh
                t_url = \
                    ("https://raw.githubusercontent.com/cloudmesh/cloudmesh/master/heat-templates/ubuntu-14.04/" +
                     str(cookbook) + "-cluster/" + str(cookbook) +
                     "-cluster.yaml")
            param = {'KeyName': keynamenew,
                     'PublicKeyString': publickey,
                     'PrivateKeyString': privatekey}
            if cookbook[:9] == "hadoop2.7":
                param["UserName"] = userid
        log.debug(def_cloud, userid, s_name, t_url, param)
        res = self.cm_mongo.stack_create(cloud=def_cloud,
                                         cm_user_id=userid,
                                         servername=s_name,
                                         template_url=t_url,
                                         parameters=param)
        log.debug(res)
        if 'error' in res:
            print(res['error']['message'])
        return res

    elif arguments['stop'] and arguments['STACK_NAME']:
        # delete the named heat stack
        userid = self.cm_config.username()
        def_cloud = self.get_cloud_name(userid)
        s_id = arguments['STACK_NAME']
        self.cm_mongo.activate(userid)
        res = self.cm_mongo.stack_delete(cloud=def_cloud,
                                         cm_user_id=userid,
                                         server=s_id)
        log.debug(res)
        return res

    elif arguments['import']:
        # import launcher recipes from a yaml file into mongo
        filepath = config_file("/cloudmesh_launcher.yaml")
        if arguments['FILEPATH']:
            filepath = arguments['FILEPATH']
        try:
            filename = path_expand(filepath)
            fileconfig = ConfigDict(filename=filename)
        except Exception, err:  # Python 2 only syntax
            Console.error(
                "error while loading '{0}', please check".format(filepath))
            print(traceback.format_exc())
            print(sys.exc_info()[0])
            return
        # NOTE(review): if this get() fails, recipes_dict stays unbound and
        # the loop below raises NameError — a ``return`` after the error
        # message is probably intended; TODO confirm
        try:
            recipes_dict = fileconfig.get("cloudmesh", "launcher",
                                          "recipies")
        except:
            Console.error("error while loading recipies from the file")
        # print recipes_dict
        userid = self.cm_config.username()
        launcher_names = []
        launchers = self.cm_mongo.launcher_get(userid)
        for launcher in launchers:
            launcher_names.append(launcher['cm_launcher'].encode("ascii"))
        for key in recipes_dict:
            if key in launcher_names:
                if arguments['--force']:
                    # overwrite: remove the stored launcher then re-import
                    self.cm_mongo.launcher_remove(userid, key)
                    self.cm_mongo.launcher_import(
                        recipes_dict[key], key, userid)
                    print("launcher '{0}' overwritten.".format(key))
                else:
                    print("ERROR: launcher '{0}' exists, "
                          "please remove it first, or use "
                          "'--force' when adding".format(key))
            else:
                self.cm_mongo.launcher_import(recipes_dict[key], key, userid)
                print("launcher '{0}' added.".format(key))
def readClustersConfig(self, dir_yaml): clusters_config = ConfigDict( filename=dir_yaml + "/" + self.default_clusters_yaml) self.dict_clusters_config = clusters_config.get("cloudmesh.inventory") # get all possible cluster names from dict_clusters_config self.cluster_name_list += self.dict_clusters_config.keys()
class CredentialFromYaml(CredentialBaseClass): password = None def __init__(self, username, cloud, datasource=None, yaml_version=2.0, style=2.0, password=None): """datasource is afilename""" CredentialBaseClass.__init__(self, username, cloud, datasource) self.password = password if datasource is not None: self.filename = datasource else: self.filename = "~/.cloudmesh/cloudmesh.yaml" self.config = ConfigDict(filename=self.filename) self.read(username, cloud, style=style) def read(self, username, cloud, style=2.0): self.style = style self['cm'] = {} self['cm']['source'] = 'yaml' self['cm']['filename'] = self.filename self['cm']['kind'] = self.config.get("meta.kind") self['cm']['yaml_version'] = self.config.get("meta.yaml_version") kind = self['cm']['kind'] if kind == "clouds": self['cm']['filename'] = "~/.cloudmesh/cloudmesh.yaml" self.update(self.config.get("cloudmesh.clouds.{0}".format(cloud))) elif kind == "server": self['cm']['filename'] = "~/.cloudmesh/cloudmesh_server.yaml" self.update( self.config.get("cloudmesh.server.keystone.{0}".format(cloud))) else: log.error("kind wrong {0}".format(kind)) self['cm']['username'] = username self['cm']['cloud'] = cloud self.clean_cm() self.transform_cm(self['cm']['yaml_version'], style) # self.remove_default() def clean_cm(self): '''temporary so we do not have to modify yaml files for now''' for key in self.keys(): if key.startswith('cm_'): new_key = key.replace('cm_', '') self['cm'][new_key] = self[key] del self[key] """ def remove_default(self): if 'default' in self.keys(): del self['default'] """ def transform_cm(self, yaml_version, style): if yaml_version <= 2.0 and style == 2.0: for key in self['cm']: new_key = 'cm_' + key self[new_key] = self['cm'][key] del self['cm']
class Test_cloudmesh:
    """Nose-style test class exercising cm_template, cm_user and the
    LDAP/mongo backed user inventory. Many tests only print; they rely on
    external services (LDAP, mongo, fabric) being available."""

    # template values used by test_variables / test_replace_incomplete
    d = {
        "portalname": "gvonlasz"
    }
    # template file under test
    filename = "etc/cloudmesh.yaml"

    def setup(self):
        # nose calls this before each test
        self.t = cm_template(path_expand(self.filename))
        self.user = cm_user()
        try:
            self.setup_inventory()
        except:
            print("=" * 40)
            print("setup_inventory() failed. ldap test will not be performed")
            print("=" * 40)

    def tearDown(self):
        # NOTE(review): nose expects lowercase 'teardown' for function-style
        # fixtures; confirm this camelCase spelling is actually invoked
        os.system("fab test.info:user")
        pass

    def test_variables(self):
        HEADING()
        print(self.t.variables())
        assert "portalname" in self.t.variables()

    def test_replace_incomplete(self):
        # replacing with an incomplete value dict may raise; that is accepted
        try:
            print(self.t.replace(self.d, format="dict"))
        except:
            pass
        assert True

    def test_user(self):
        d = {
            "portalname": "gvonlasz",
            "sierra_openstack_password": "******",
            "project_default": "fg82",
            "india_openstack_password": "******",
            "projects": "82, 83",  # this is still wrong
        }
        print(self.t.replace(d=d))
        # self.t.complete does not exist in cm_template?
        # assert self.t.complete
        # if not t.complete():
        #     print "ERROR: undefined variables"
        #     print t.variables()

    def test_userinfo(self):
        # smoke test: info() for existing, nonexistent and service users
        HEADING()
        print(self.user.info("fuwang"))
        print(self.user.info("gvonlasz"))
        print(self.user.info("nonexistuser"))
        print(self.user.info("nova"))
        print(self.user.info("fuwang", ["sierra"]))
        print(self.user.info("fuwang", ["cloud-non-exist"]))
        print("============================")
        pprint(self.user["gvonlasz"])
        print(self.user.get_name('gvonlasz'))

    def setup_inventory(self):
        """Build self.config from me.yaml + the template, then enrich it
        with projects/keys fetched from LDAP."""
        banner("Read Dicts")
        self.sample_user = ConfigDict(filename=config_file("/me.yaml"))
        self.portalname = self.sample_user.get("portalname")
        print("PORTALNAME", self.portalname)
        print("SAMPLE USER", self.sample_user)
        banner("create user from template, duplicates cm init generate me")
        t = cm_template(config_file("/etc/cloudmesh.yaml"))
        pprint(set(t.variables()))
        self.config = t.replace(kind="dict", values=self.sample_user)
        print(type(self.config))
        print(self.config)
        #
        # BUG?
        #
        self.idp = cm_userLDAP()
        self.idp.connect("fg-ldap", "ldap")
        self.idp.refresh()
        ldap_info = self.idp.get(self.portalname)
        print(ldap_info)
        print(type(self.config))
        self.config['cloudmesh']['projects'] = ldap_info['projects']
        self.config['cloudmesh']['keys'] = ldap_info['keys']
        try:
            # NOTE(review): 'deafult' is a typo for 'default' in a runtime
            # key — kept as-is here since changing it alters behavior;
            # confirm what readers of this key expect
            self.config['cloudmesh']['projects'][
                'deafult'] = ldap_info['projects']['active'][0]
        except:
            print("ERROR: you have no projects")

    def test_print(self):
        pprint(self.config)

    def test_projects(self):
        projects = dict()
        keys = dict()
        # read the yaml
        # read projects info from ldap
        # write out new dict/json file
        pass

    def test_keys(self):
        # read the yaml
        # read KEYS from ldap
        # write out new dict/json file
        pass

    def test_gregor(self):
        banner("ME")
        # NOTE: shadows the builtin ``id`` (local to this test only)
        id = ConfigDict(filename=config_file("/me.yaml")).get("portalname")
        user = cm_user()
        result = user.info(id)
        pprint(result)
        pass

    def test_list(self):
        user = cm_user()
        list_of_users = user.list_users()
        pprint(list_of_users)
        print()
        print("=========================")
        num = len(list_of_users)
        print(str(num) + " users listed")

    def test_mongo_credential(self):
        # round-trip a credential through the mongo-backed user store
        banner("USER")
        pprint(self.user.info("gvonlasz"))
        username = "******"
        cloudname = "dummy"
        password = "******"
        tennant = "fg1"
        name = username
        self.user.set_credential(username, cloudname,
                                 {"OS_USERNAME": name,
                                  "OS_PASSWORD": password,
                                  "OS_TENANT_NAME": tennant,
                                  "CM_CLOUD_TYPE": "openstack"}
                                 )
        cred = self.user.get_credential(username, cloudname)
        banner("credential")
        print(cred)
        banner("credentials")
        pprint(self.user.get_credentials(username))
        banner("find")
        result = self.user.userdb_passwd.find({})
        for r in result:
            pprint(r)
from fabric.api import task, local, hide, settings import hostlist import progress import clean import mq PROGRESS.set('Cloudmesh Services', 50) __all__ = ['start', 'stop', 'list', 'clean', 'gui', 'monitor', 'kill', 'ls', 'lspbs', 'flower_server'] celery_config = ConfigDict( filename=config_file("/cloudmesh_celery.yaml"), kind="worker") workers = celery_config.get("cloudmesh.workers") debug = True try: debug = cm_config_server().get("cloudmesh.server.debug") except: pass if debug: progress.off() else: progress.on() """ for worker in workers: workers[worker] = {"app":"cloudmesh.launcher{0}.queue",
class compute(ComputeBaseType): # : the type of the cloud. It is "openstack" type = "openstack" # global var # : a dict with the images __images__ = {} # global var # : a dict with the flavors __flavors__ = {} # global var # : a dict with the servers __servers__ = {} # global var @property def servers(self): """TODO: this does not work""" print("UUUUUUU") if len(self.__servers__.keys()) == 0: print("OOOOO") self.__servers__ = self.get_servers() return self.__servers__ # : a dict with the users # users = {} # global var # : a dict containing the credentionls read with cm_config # credential = None # global var credential = None # global var user_token = None # : a unique label for the clous label = None # global var # cm_type = "openstack" # name = "undefined" # : This is the cloud, should be internal though with _ cloud = None # internal var for the cloud client in openstack keystone = None # : The user id user_id = None # internal var # _nova = nova # # initialize # # possibly make connext seperate def __init__(self, label, credential=None, service_url_type='publicURL'): """ initializes the openstack cloud from a file located at cloudmesh.yaml. However if a credential dict is used it is used instead """ self.clear() self.label = label self.credential = credential self.service_url_type = service_url_type if credential is None: try: self.compute_config = ConfigDict(filename="~/.cloudmesh/cloudmesh.yaml") self.credential = self.compute_config.get("cloudmesh.clouds.{:}.credentials".format(label)) except: log.error(str( lineno()) + "No user credentail found! Please check your cloudmesh.yaml file.") # sys.exit(1) self.connect() def clear(self): """ clears the data of this openstack instance, a new connection including reading the credentials and a refresh needs to be called to obtain again data. 
""" # Todo: we may just use the name of the class instead as the type self._clear() self.user_token = None self.credential = None self.type = "openstack" def connect(self): """ creates tokens for a connection """ log.info(str(lineno()) + "Loading User Credentials") if self.credential is None: log.error( str(lineno()) + "Error connecting to openstack compute, credential is None") elif not self.user_token: self.user_token = self.get_token(self.credential) def DEBUG(self, msg, line_number=None): if line_number is None: line_number = "" if msg == "credential": debug_dict = dict(self.credential) pprint(debug_dict) password = debug_dict['OS_PASSWORD'] content = str(debug_dict) content = content.replace(password, "********") log.debug( "{1} - GET CRED {0}".format(content, str(line_number))) else: password = self.credential['OS_PASSWORD'] log.debug("{0} - {1}".format(str(line_number), str(msg.replace(password, "********")))) def auth(self): # DEBUG try: _args = locals() if 'self' in _args: del (_args['self']) self.DEBUG("[{0}()] called with [{1}]".format(sys._getframe().f_code.co_name, str(_args)), lineno()) self.DEBUG("user_token:{0}".format(str(self.user_token)), lineno()) except: pass return 'access' in self.user_token def get_token(self, credential=None): # DEBUG try: import sys _args = locals() if 'self' in _args: del (_args['self']) self.DEBUG("[{0}()] called with [{1}]".format(sys._getframe().f_code.co_name, str(_args)), lineno()) except: pass if credential is None: credential = self.credential self.DEBUG("credential", lineno()) param = None if 'OS_TENANT_NAME' in credential: param = {"auth": {"passwordCredentials": { "username": credential['OS_USERNAME'], "password": credential['OS_PASSWORD'], }, "tenantName": credential['OS_TENANT_NAME'] } } elif 'OS_TENANT_ID' in credential: param = {"auth": {"passwordCredentials": { "username": credential['OS_USERNAME'], "password": credential['OS_PASSWORD'], }, "tenantId": credential['OS_TENANT_ID'] } } url = 
"{0}/tokens".format(credential['OS_AUTH_URL']) self.DEBUG("URL {0}".format(url), lineno()) headers = {'content-type': 'application/json'} verify = self._get_cacert(credential) print_param = copy.deepcopy(param) print_param["auth"]["passwordCredentials"]["password"] = "******" self.DEBUG("PARAM {0}".format(json.dumps(print_param)), lineno()) self.DEBUG("HEADER {0}".format(headers), lineno()) self.DEBUG("VERIFY {0}".format(verify), lineno()) r = requests.post(url, data=json.dumps(param), headers=headers, verify=verify) # pprint (r.json()) try: sanitized_r = copy.deepcopy(r.json()) if 'access' in sanitized_r: if 'token' in sanitized_r['access']: if 'id' in sanitized_r['access']['token']: sanitized_r['access']['token']['id'] = '******' log.debug("{0}".format(str(sanitized_r))) except: pass return r.json() # # FIND USER ID # def find_user_id(self, force=False): """ this method returns the user id and stores it for later use. """ if not force: try: self.user_id = self.credential['OS_USER_ID'] return self.user_id except: self.user_id = None log.error("OS_USER_ID not set") self.user_token = self.get_token() self.user_id = self.user_token['access']['user']['id'] return self.user_id def _get_service(self, type="compute", kind="user"): token = self.user_token for service in token['access']['serviceCatalog']: if service['type'] == type: break return service def _get_compute_service(self, token=None): return self._get_service("compute") def _get_cacert(self, credential=None): if credential is None: credential = self.credential verify = False if 'OS_CACERT' in credential: if credential['OS_CACERT'] is not None and \ credential['OS_CACERT'] != "None" and \ os.path.isfile(credential['OS_CACERT']): verify = credential['OS_CACERT'] return verify def _post(self, posturl, params=None, credential=None): # print posturl # print self.config if credential is None: credential = self.credential conf = self._get_service_endpoint("compute") headers = {'content-type': 'application/json', 
'X-Auth-Token': '%s' % conf['token']} # print headers # print self._get_cacert(credential) r = requests.post(posturl, headers=headers, data=json.dumps(params), verify=self._get_cacert(credential)) ret = {"msg": "success"} if r.text: try: ret = r.json() except: pass return ret def _put(self, posturl, credential=None, params=None): # print self.config if credential is None: credential = self.credential conf = self._get_service_endpoint("compute") headers = {'content-type': 'application/json', 'X-Auth-Token': '%s' % conf['token']} # print headers r = requests.put(posturl, headers=headers, data=json.dumps(params), verify=self._get_cacert(credential)) ret = {"msg": "success"} if r.text: try: ret = r.json() except: pass return ret # def ks_get_extensions(self): pass # conf = self._get_service_endpoint("identity") def keypair_list(self): apiurl = "os-keypairs" return self._get(msg=apiurl, urltype=self.service_url_type) def keypair_add(self, keyname, keycontent): self.DEBUG(":adding a keypair in cm_compute...", lineno()) # keysnow = self.keypair_list() url = self._get_service_endpoint("compute")[self.service_url_type] posturl = "%s/os-keypairs" % url params = {"keypair": {"name": "%s" % keyname, "public_key": "%s" % keycontent } } # print params return self._post(posturl, params) def keypair_remove(self, keyname): self.DEBUG(":removing a keypair in cm_compute...", lineno()) conf = self._get_service_endpoint("compute") url = conf[self.service_url_type] url = "%s/os-keypairs/%s" % (url, keyname) headers = {'content-type': 'application/json', 'X-Auth-Token': '%s' % conf['token']} r = requests.delete(url, headers=headers, verify=self._get_cacert()) ret = {"msg": "success"} if r.text: try: ret = r.json() except: pass return ret def vm_create(self, name, flavor_name, image_id, security_groups=None, key_name=None, meta={}, userdata=None): """ start a vm via rest api call """ # # TODO: add logic for getting default image # if image_id is None: # get image id from profile 
information (default image for that cloud) # TODO: add logic for getting label of machine # # if flavor_name is None: # get flavorname from profile information (ther is a get label function # ...) # if keyname is None: # get the default key from the profile information url = self._get_service_endpoint("compute")[self.service_url_type] posturl = "%s/servers" % url # print posturl # keycontent = base64.b64encode(key_name) secgroups = [] if security_groups: for secgroup in security_groups: secgroups.append({"name": secgroup}) else: secgroups = [{"name": "default"}] params = { "server": { "name": "%s" % name, "imageRef": "%s" % image_id, "flavorRef": "%s" % flavor_name, # max_count is the number of instances to launch # If 3 specified, three vm instances will be launched # "max_count": 1, # "min_count": 1, "security_groups": secgroups, "metadata": meta, } } if key_name: params["server"]["key_name"] = key_name if userdata: # # TODO: strutils not defined # # safe_userdata = strutils.safe_encode(userdata) # params["server"]["user_data"] = base64.b64encode(safe_userdata) safe_userdata = None self.DEBUG(":POST PARAMS {0}".format(params), lineno()) return self._post(posturl, params) def vm_delete(self, id): """ delete a single vm and returns the id """ conf = self._get_service_endpoint("compute") url = conf[self.service_url_type] url = "%s/servers/%s" % (url, id) headers = {'content-type': 'application/json', 'X-Auth-Token': '%s' % conf['token']} # print headers # no return from http delete via rest api r = requests.delete(url, headers=headers, verify=self._get_cacert()) ret = {"msg": "success"} if r.text: try: ret = r.json() except: pass return ret def stack_create(self, name, template_url, parameters, timeout_mins=60): """ Create a stack by OpenStack Heat Orchestration ref: http://developer.openstack.org/api-ref-orchestration-v1.html """ url = self._get_service_endpoint("orchestration")[self.service_url_type] posturl = "%s/stacks" % url try: param = 
ast.literal_eval(parameters) except ValueError: param = parameters params = { "stack_name": "%s" % name, "template_url": "%s" % template_url, "parameters": param, "timeout_mins": "%s" % timeout_mins } self.DEBUG(":POST PARAMS {0}".format(params), lineno()) return self._post(posturl, params) def stack_delete(self, stack_name): """ delete a specified stack and returns the id ref: http://developer.openstack.org/api-ref-orchestration-v1.html """ conf = self._get_service_endpoint("orchestration") url = conf[self.service_url_type] headers = {'content-type': 'application/json', 'X-Auth-Token': '%s' % conf['token']} # Find stacks msg = "stacks/%s" % stack_name service = "orchestration" r1 = self._get(msg, service=service, urltype=self.service_url_type) try: stack_id = r1['stack']['id'] except KeyError: log.warning("stack does not exist ({0})".format(stack_name)) ret = {"msg": "failed"} return ret url = "%s/stacks/%s/%s" % (url, stack_name, stack_id) # no return from http delete via rest api r = requests.delete(url, headers=headers, verify=self._get_cacert()) ret = {"msg": "success"} if r.text: try: ret = r.json() except: pass return ret # possibly for future use in network management via Neuron # currently is not being used def get_network_id(self): """ Obtaining router/expertnal gateway info via the rest api call """ ret = {"msg": "failed"} r = self._get('v2.0/routers', service='network', urltype=self.service_url_type) if "floating_ip" in r: ret = r["floating_ip"]["ip"] return r def get_public_ip(self): """ Obtaining a floating ip from the pool via the rest api call """ url = self._get_service_endpoint("compute")[self.service_url_type] posturl = "%s/os-floating-ips" % url ret = {"msg": "failed"} # Default to the default pool, possibly 'nova' # Before the juno deployment, this always worked r = self._post(posturl) # Since Juno deployment, the pool name was changed if 'itemNotFound' in r: if 'message' in r['itemNotFound'] and r['itemNotFound']['message'] == 'Floating ip 
pool not found.': # get floating ip pool name first r = self._get('os-floating-ip-pools') if 'floating_ip_pools' in r: # use the first pool pool = r['floating_ip_pools'][0]['name'] params = {'pool': pool} # reissue the request with returned pool name r = self._post(posturl, params) if "floating_ip" in r: ret = r["floating_ip"]["ip"] # # currently not being used # Nureon related operations # else: # gatewayinfo = self.get_network_id() # url = self._get_service_endpoint("network")[self.service_url_type] # posturl = '%s/v2.0/floatingips' % url # tenant_id = self.user_token['access']['token']['tenant']['id'] # params = {"floatingip":{"floating_network_id":<UUID from gatewayinfo>}} # r = self._post(posturl) # #r = self._get('/v2.0/floatingips',service='network') # print (r) return ret def assign_public_ip(self, serverid, ip): """ assigning public ip to an instance """ url = self._get_service_endpoint("compute")[self.service_url_type] posturl = "%s/servers/%s/action" % (url, serverid) params = {"addFloatingIp": { "address": "%s" % ip } } self.DEBUG("POST PARAMS {0}".format(params), lineno()) return self._post(posturl, params) def delete_public_ip(self, idofip): """ delete a public ip that is assigned but not currently being used """ conf = self._get_service_endpoint("compute") url = conf[self.service_url_type] url = "%s/os-floating-ips/%s" % (url, idofip) headers = {'content-type': 'application/json', 'X-Auth-Token': '%s' % conf['token']} r = requests.delete(url, headers=headers, verify=self._get_cacert()) ret = {"msg": "success"} if r.text: try: ret = r.json() except: pass return ret def list_allocated_ips(self): """ return list of ips allocated to current account """ conf = self._get_service_endpoint("compute") url = conf[self.service_url_type] url = "%s/os-floating-ips" % url headers = {'content-type': 'application/json', 'X-Auth-Token': '%s' % conf['token']} r = requests.get(url, headers=headers, verify=self._get_cacert()) return r.json()["floating_ips"] def 
release_unused_public_ips(self): ips = self.list_allocated_ips() ips_id_to_instance = {} for ip in ips: ips_id_to_instance[ip['id']] = ip['instance_id'] for id, instance in ips_id_to_instance.iteritems(): if instance is None: self.delete_public_ip(id) return True def _get(self, msg, kind="user", service="compute", urltype="publicURL", payload=None, json=True): # kind = "user" # service = "publicURL, adminURL" # service= "compute", "identity", .... # token=None, url=None, kind=None, urltype=None, json=True): credential = self.credential token = self.user_token conf = self._get_service_endpoint(service) url = conf[urltype] url = "{0}/{1}".format(url, msg) self.DEBUG("AUTH URL {0}".format(url), lineno()) headers = {'X-Auth-Token': token['access']['token']['id']} r = requests.get( url, headers=headers, verify=self._get_cacert(credential), params=payload) self.DEBUG("Response {0}".format(r), lineno()) if json: return r.json() else: return r # http def _get_service_endpoint(self, type=None): """what example %/servers""" if type is None: type = "compute" compute_service = self._get_service(type) # pprint(compute_service) credential = self.credential # print credential conf = {} credential = self.credential conf['publicURL'] = str(compute_service['endpoints'][0]['publicURL']) # some cloud does not have this, e.g. 
HP cloud if 'internalURL' in compute_service['endpoints'][0]: conf['internalURL'] = str(compute_service['endpoints'][0]['internalURL']) if 'OS_REGION' in credential: for endpoint in compute_service['endpoints']: if endpoint['region'] == credential['OS_REGION']: conf['publicURL'] = endpoint['publicURL'] break conf['token'] = str(self.user_token['access']['token']['id']) return conf # new def _now(self): return datetime.now().strftime('%Y-%m-%dT%H-%M-%SZ') # new def _list_to_dict(self, list, id, type, time_stamp): d = {} # cm_type_version = self.compute_config.get('cloudmesh.clouds.{0}.cm_type_version'.format(self.label)) # log.debug ("CM TYPE VERSION {0}".format(cm_type_version)) for element in list: element['cm_type'] = type element['cm_cloud'] = self.label element['cm_cloud_type'] = self.type # element['cm_cloud_version'] = cm_type_version element['cm_refresh'] = time_stamp d[str(element[id])] = dict(element) return d # new def get_extensions(self): time_stamp = self._now() msg = "extensons" # list = self._get(msg)['extensions'] result = self._get(msg, urltype=self.service_url_type, json=False) if result.status_code == 404: log.error("extensions not available") return {} else: list = result.json() return self._list_to_dict(list, 'name', "extensions", time_stamp) def get_limits(self): """Gets absolute and rate limit information, including information on currently used absolute limits.""" time_stamp = self._now() msg = "limits" _dict = self._get(msg, urltype=self.service_url_type)['limits'] return _dict def get_absolute_limits(self, view="original"): """Gets absolute limit information Args: view (str) : two types of output available * original - returns integer value in a key and value pair * fraction - returns xx / xx fraction value """ limits = self.get_limits() if view == "fraction": new_limits = {"Cores": None, "Instances": None, "RAM": None, "SecurityGroups": None, "FloatingIps": None} new_limits['Cores'] = str(limits['absolute']['totalCoresUsed']) + \ " / " + 
str(limits['absolute']['maxTotalCores']) new_limits['Instances'] = \ str(limits['absolute']['totalInstancesUsed']) + " / " + \ str(limits['absolute']['maxTotalInstances']) new_limits['RAM'] = str(limits['absolute']['totalRAMUsed']) + \ " / " + str(limits['absolute']['maxTotalRAMSize']) new_limits['SecurityGroups'] = \ str(limits['absolute']['totalSecurityGroupsUsed']) + " / " + \ str(limits['absolute']['maxSecurityGroups']) new_limits['FloatingIps'] = \ str(limits['absolute']['totalFloatingIpsUsed']) + " / " + \ str(limits['absolute']['maxTotalFloatingIps']) return new_limits else: return limits['absolute'] # new def get_servers(self): time_stamp = self._now() msg = "servers/detail" list = self._get(msg, urltype=self.service_url_type)['servers'] self.__servers__ = self._list_to_dict(list, 'id', "server", time_stamp) # # hack for the hp cloud west # for server in self.__servers__: self.__servers__[server]['id'] = str(self.__servers__[server]['id']) return self.__servers__ # new def get_flavors(self): time_stamp = self._now() msg = "flavors/detail" list = self._get(msg, urltype=self.service_url_type)['flavors'] self.__flavors__ = self._list_to_dict(list, 'name', "flavor", time_stamp) # # hack for the hp cloud west # for flavor in self.__flavors__: self.__flavors__[flavor]['id'] = str(self.__flavors__[flavor]['id']) return self.__flavors__ def flavorid(self, name): for key in self.__flavors__: if self.__flavors__[key]['name'] == name: return key def flavor(self, id_or_name): keys = self.__flavors__.keys() if id_or_name not in keys: key = self.flavorid(id_or_name) return self.__flavors__[key] # new def get_images(self): """List images""" time_stamp = self._now() msg = "images/detail" list = self._get(msg, urltype=self.service_url_type)['images'] self.__images__ = self._list_to_dict(list, 'id', "image", time_stamp) return self.__images__ def get_security_groups(self): """Lists security groups. 
""" time_stamp = self._now() list = self.list_security_groups()['security_groups'] self.security_groups = self._list_to_dict(list, 'id', 'security_group', time_stamp) return self.security_groups def get_stacks(self): """Lists active stacks.""" time_stamp = self._now() msg = "stacks" service = "orchestration" list = self._get(msg, service=service, urltype=self.service_url_type)['stacks'] self.stacks = self._list_to_dict(list, 'id', 'stacks', time_stamp) return self.stacks def get_usage(self): """Report list statistics on compute and storage resources.""" time_stamp = self._now() tenant_id = self.user_token['access']['token']['tenant']['id'] msg = "os-simple-tenant-list/{0}".format(tenant_id) param = {"start": datetime.now() - timedelta(hours=24), "end": datetime.now()} _dict = self._get(msg, urltype=self.service_url_type, payload=param)['tenant_usage'] self.DEBUG(_dict, lineno()) self.usage = _dict return _dict def get_quota(self): """ View quotas for a tenant (project). Administrators only, depending on policy settings. 
""" time_stamp = self._now() tenant_id = self.user_token['access']['token']['tenant']['id'] msg = "os-quota-sets/{0}".format(tenant_id) _dict = self._get(msg, urltype=self.service_url_type)['quota_set'] self.DEBUG(_dict, lineno()) return _dict def get_meta(self, id): """get the metadata dict for the vm with the given id""" msg = "/servers/%s/metadata" % id return self._get(msg, urltype=self.service_url_type) def set_meta(self, id, metadata, replace=False): """set the metadata for the given vm with the id""" conf = self._get_service_endpoint() conf['serverid'] = id if replace: conf['set'] = "PUT" else: conf['set'] = "POST" apiurlt = urlparse(conf[self.service_url_type]) url2 = apiurlt[1] params2 = '{"metadata":' + str(metadata).replace("'", '"') + '}' headers2 = {"X-Auth-Token": conf[ 'token'], "Accept": "application/json", "Content-type": "application/json"} print("%%%%%%%%%%%%%%%%%%") pprint(conf) print("%%%%%%%%%%%%%%%%%%") print("PARAMS", params2) print("HEADERS", headers2) print("API2", apiurlt[2]) print("API1", apiurlt[1]) print("ACTIVITY", conf['set']) print("ID", conf['serverid']) print("####################") conn2 = httplib.HTTPConnection(url2) conn2.request(conf['set'], "%s/servers/%s/metadata" % (apiurlt[2], conf['serverid']), params2, headers2) response2 = conn2.getresponse() data2 = response2.read() dd2 = json.loads(data2) conn2.close() return dd2 # # refresh # # identity management moved to its dedicated class """ def _get_users_dict(self): result = self.get_users() return result def _get_tenants_dict(self): result = self.get_tenants() return result """ def _get_images_dict(self): result = self.get_images() return result def _get_flavors_dict(self): try: result = self.get_flavors_from_yaml() except: result = None if not result: return self.get_flavors() self.__flavors__ = result return self.__flavors__ def get_flavors_from_yaml(self): obj = cm_config_flavor() flavors = obj.get('cloudmesh.flavor') return flavors.get(self.label) def 
_get_servers_dict(self): result = self.get_servers() return result def _get_security_groups_dict(self): result = self.get_security_groups() return result def _get_stacks_dict(self): result = self.get_stacks() return result def _get_usage_dict(self): result = self.get_usage() return result def limits(self): """ returns the limits of a tenant""" limit_list = [] info = self.get_limits() for rate in info['rate']: limit_set = rate['limit'] for limit in limit_set: limit_list.append(limit) return limit_list # return the security groups for the current authenticated tenant, in dict # format def list_security_groups(self): apiurl = "os-security-groups" return self._get(apiurl, urltype=self.service_url_type) # return the security group id given a name, if it's defined in the current tenant # The id is used to identify a group when adding more rules to it def find_security_groupid_by_name(self, name): groupid = None secgroups = self.list_security_groups() for secgroup in secgroups["security_groups"]: if secgroup["name"] == name: groupid = secgroup["id"] break return groupid # creating a security group, and optionally add rules to it # for the current TENANT that it authenticated as # This implementation is based on the rest api def create_security_group(self, secgroup, rules=[]): url = self._get_service_endpoint("compute")[self.service_url_type] posturl = "%s/os-security-groups" % url params = {"security_group": { "name": secgroup.name, "description": secgroup.description } } # log.debug ("POST PARAMS {0}".format(params)) ret = self._post(posturl, params) groupid = None # upon successful, it returns a dict keyed by 'security_group', # otherwide may have failed due to some reason if "security_group" in ret: groupid = ret["security_group"]["id"] # if the security group object has rules included, add them first if len(secgroup.rules) > 0: self.add_security_group_rules(groupid, secgroup.rules) # only trying to add the additional rules if the empty group has been # created 
successfully if not groupid: log.error( "Failed to create security group. Error message: '%s'" % ret) else: self.add_security_group_rules(groupid, rules) # return the groupid of the newly created group, or None if failed return groupid # add rules to an existing security group def add_security_group_rules(self, groupid, rules): url = self._get_service_endpoint("compute")[self.service_url_type] posturl = "%s/os-security-group-rules" % url ret = None for rule in rules: params = {"security_group_rule": { "ip_protocol": rule.ip_protocol, "from_port": rule.from_port, "to_port": rule.to_port, "cidr": rule.cidr, "parent_group_id": groupid } } # log.debug ("POST PARAMS {0}".format(params)) ret = self._post(posturl, params) if "security_group_rule" not in ret: if 'badRequest' in ret and ret['badRequest']['message'].startswith('This rule already exists'): log.warning("The rule already exists") else: log.error( "Failed to create security group rule(s). Error message: '%s'" % ret) break return ret # # security Groups of VMS # # GVL: review # how does this look for azure and euca? Should there be a general framework for this in the BaseCloud class # based on that analysis? 
    #
    # comments of what these things do and how they work are missing
    #
    # The following section up to the closing ''' is dead, commented-out
    # Python 2 code kept for reference only; it is never executed.
    '''
    def createSecurityGroup(self, default_security_group,
                            description="no-description"):
        """ comment is missing """
        protocol = ""
        ipaddress = ""
        max_port = ""
        min_port = ""
        default_security_group_id = self.cloud.security_groups.create(
            default_security_group, description)
        default_security_group_id = default_security_group_id.id
        config_security = cm_config()
        yamlFile = config_security.get()
        ruleNames = yamlFile['security'][
            'security_groups'][default_security_group]
        for ruleName in ruleNames:
            rules = yamlFile['security']['rules'][ruleName]
            for key, value in rules.iteritems():
                if 'protocol' in key:
                    protocol = value
                elif 'max_port' in key:
                    max_port = value
                elif 'min_port' in key:
                    min_port = value
                else:
                    ip_address = value
            self.cloud.security_group_rules.create(
                default_security_group_id, protocol, min_port,
                max_port, ip_address)
        return default_security_group

    # GVL: review
    # how does this look for azure and euca? Should there be a general framework for this in the BaseCloud class
    # based on that analysis?
    #
    # comments of wht these things do and how they work are missing

    def checkSecurityGroups(self):
        """ TODO: comment is missing """
        config_security = cm_config()
        names = {}
        securityGroups = self.cloud.security_groups.list()
        for securityGroup in securityGroups:
            names[securityGroup.name] = securityGroup.id
        yamlFile = config_security.get()
        if yamlFile.has_key('security'):
            default_security_group = yamlFile['security']['default']
        else:
            return None
        # default_security_group_id=names[default_security_group]
        if default_security_group in names:
            return default_security_group
        else:
            return self.createSecurityGroup(default_security_group)

    # GVL: review
    # how does this look for azure and euca? Should there be a general framework for this in the BaseCloud class
    # based on that analysis?
    #
    # comments of wht these things do and how they work are missing
    #
    def get_public_ip(self):
        """ TODO: comment is missing """
        return self.cloud.floating_ips.create()

    # GVL: review
    # how does this look for azure and euca? Should there be a general framework for this in the BaseCloud class
    # based on that analysis?
    #
    # comments of wht these things do and how they work are missing
    #
    def assign_public_ip(self, serverid, ip):
        """ comment is missing """
        self.cloud.servers.add_floating_ip(serverid, ip)

    #
    # set vm meta
    #
    def vm_set_meta(self, vm_id, metadata):
        """an experimental class to set the metadata"""
        print metadata
        is_set = 0
        # serverid = self.servers[id]['manager']
        while not is_set:
            try:
                print "set ", vm_id, "to set", metadata
                result = self.cloud.servers.set_meta(vm_id, metadata)
                # body = {'metadata': metadata}
                # print body
                # result = self.cloud.servers._create("/servers/%s/metadata" %
                # vm_id, body, "metadata")
                print result
                is_set = 1
            except Exception as e:
                print "ERROR", e
                time.sleep(2)
        print result

    #
    # create a vm
    #
    def vm_create(self, name=None,
                  flavor_name=None,
                  image_id=None,
                  security_groups=None,
                  key_name=None,
                  meta=None):
        """ create a vm with the given parameters """
        if not key_name is None:
            if not self.check_key_pairs(key_name):
                config = cm_config()
                dict_t = config.get()
                key = dict_t['keys']['keylist'][key_name]
                if not 'ssh-rsa' in key and not 'ssh-dss' in key:
                    key = open(key, "r").read()
                self.upload_key_pair(key, key_name)
        config = cm_config()
        if flavor_name is None:
            flavor_name = config.default(self.label)['flavor']
        if image_id is None:
            image_id = config.default(self.label)['image']
        # print "CREATE>>>>>>>>>>>>>>>>"
        # print image_id
        # print flavor_name
        vm_flavor = self.cloud.images.find(name=flavor_name)
        vm_image = self.cloud.images.find(id=image_id)
        if key_name is None:
            vm = self.cloud.servers.create(name,
                                           flavor=vm_flavor,
                                           image=vm_image,
                                           security_groups=security_groups,
                                           meta=meta
                                           )
        else:
            # bug would passing None just work?
            vm = self.cloud.servers.create(name,
                                           flavor=vm_flavor,
                                           image=vm_image,
                                           key_name=key_name,
                                           security_groups=security_groups,
                                           meta=meta
                                           )
        delay = vm.user_id  # trick to hopefully get all fields
        data = vm.__dict__
        del data['manager']
        # data['cm_name'] = name
        # data['cm_flavor'] = flavor_name
        # data['cm_image'] = image_id
        # return {str(data['id']): data}
        # should probably just be
        return data

    #
    # delete vm(s)
    #
    def vm_delete(self, id):
        """ delete a single vm and returns the id """
        vm = self.cloud.servers.delete(id)
        # return just the id or None if its deleted
        return vm

    @donotchange
    def vms_delete(self, ids):
        """ delete many vms by id. ids is an array """
        for id in ids:
            print "Deleting %s" % self.servers[id]['name']
            vm = self.vm_delete(id)
        return ids

    #
    # list user images
    #
    '''

    @donotchange
    def vms_user(self, refresh=False):
        """ find my vms

        Returns the dict of cached servers whose user_id matches this user.
        """
        user_id = self.find_user_id()
        time_stamp = datetime.now().strftime('%Y-%m-%dT%H-%M-%SZ')
        if refresh:
            self.refresh("servers")
        result = {}
        for (key, vm) in self.__servers__.items():
            if vm['user_id'] == self.user_id:
                result[key] = vm
        return result

    #
    # list project vms
    #
    def vms_project(self, refresh=False):
        """ find my vms that are in this project.
        this method was needed for openstack essex deployment on fg
        """
        # NOTE(review): user_id/time_stamp are computed but unused, and
        # refresh refreshes "images" although servers are returned — verify.
        user_id = self.find_user_id()
        time_stamp = datetime.now().strftime('%Y-%m-%dT%H-%M-%SZ')
        if refresh:
            self.refresh("images")
        result = {}
        for (key, vm) in self.__servers__.items():
            result[key] = vm
        return result

    #
    # delete images from a user
    #
    @donotchange
    def vms_delete_user(self):
        """ find my vms and delete them """
        user_id = self.find_user_id()
        vms = self.find('user_id', user_id)
        self.vms_delete(vms)
        return

    #
    # find
    #
    @donotchange
    def find(self, key, value=None):
        """find my vms

        Returns the list of server ids (as str) whose attribute ``key``
        equals ``value``; defaults to matching the current user_id.
        """
        ids = []
        if key == 'user_id' and value is None:
            value = self.user_id
        for (id, vm) in self.__servers__.items():
            if vm[str(key)] == value:
                ids.append(str(vm['id']))
        return ids

    #
    # rename
    #
    # Dead, commented-out Python 2 code kept for reference; never executed.
    '''
    def rename(self, old, new, id=None):
        """rename the vm with the given name old to new. If more than one
           exist with the same name only the first one will be renamed.
           consider moving the code to the baseclass."""
        all = self.find('name', old)
        print all
        if len(all) > 0:
            id = all[0]
            vm = self.cloud.servers.update(id, new)
        return

    # TODO: BUG WHY ARE TGERE TWO REINDEX FUNCTIONS?

    @donotchange
    def reindex(self, prefixold, prefix, index_format):
        all = self.find('user_id')
        counter = 1
        for id in all:
            old = self.servers[id]['name']
            new = prefix + index_format % counter
            print "Rename %s -> %s, %s" % (old, new, self.servers[id]['key_name'])
            if old != new:
                vm = self.cloud.servers.update(id, new)
            counter += 1
    '''

    #
    # TODO
    #
    """
    refresh just a specific VM
    delete all images that follow a regualr expression in name
    look into sort of images, images, vms
    """

    #
    # EXTRA
    #

    # will be moved into one class
    @donotchange
    def table_col_to_dict(self, body):
        """converts a given list of rows to a dict"""
        result = {}
        for element in body:
            key = element[0]
            value = element[1]
            result[key] = value
        return result

    @donotchange
    def table_matrix(self, text, format=None):
        """converts a given pretty table to a list of rows or a dict.
           The format can be specified with 'dict' to return a dict.
           otherwise it returns an array"""
        # split the pretty table into header line and body lines
        lines = text.splitlines()
        headline = lines[0].split("|")
        headline = headline[1:-1]
        for i in range(0, len(headline)):
            headline[i] = str(headline[i]).strip()
        lines = lines[1:]
        body = []
        for l in lines:
            line = l.split("|")
            line = line[1:-1]
            entry = {}
            for i in range(0, len(line)):
                line[i] = str(line[i]).strip()
                if format == "dict":
                    key = headline[i]
                    entry[key] = line[i]
            if format == "dict":
                body.append(entry)
            else:
                body.append(line)
        # NOTE(review): with format='dict' this returns a LIST of row dicts,
        # not a single dict as the docstring suggests.
        if format == 'dict':
            return body
        else:
            return headline, body

    #
    # CLI call of ussage
    #
    def usage(self, tenant_id=None, serverid=None, start=None, end=None,
              format='dict'):
        """ returns the list information of the tennant

        If serverid is given, only that server's usage record is returned;
        otherwise a summary dict for the tenant is returned.
        """
        DEFAULT_STAT_DURATION = 30
        if not tenant_id:
            # derive the tenant id from the compute endpoint url
            url = self._get_service_endpoint("compute")[self.service_url_type]
            urlsplit = url.split("/")
            tenant_id = urlsplit[len(urlsplit) - 1]
        # print 70 * "-"
        # print self.cloud.certs.__dict__.get()
        # print 70 * "-"
        # tenantid = "member"  # not sure how to get that
        if not end:
            end = datetime.now()
            # end = self._now()
        if not start:
            start = end - timedelta(days=DEFAULT_STAT_DURATION)
            # start = start.strftime('%Y-%m-%dT%H-%M-%SZ')
        # iso_start = self.parse_isotime(start)
        # iso_end = self.parse_isotime(end)
        # print ">>>>>", iso_start, iso_end
        # info = self.cloud.list.get(tenantid, iso_start, iso_end)
        # print info.__dict__
        # sys.exit()
        # (start, rest) = start.split("T")  # ignore time for now
        # (end, rest) = end.split("T")  # ignore time for now
        apiurl = "os-simple-tenant-list/%s" % tenant_id
        payload = {'start': start, 'end': end}
        result = self._get(apiurl, payload=payload,
                           urltype=self.service_url_type)['tenant_usage']
        instances = result['server_usages']
        numInstances = len(instances)
        ramhours = result['total_memory_mb_usage']
        cpuhours = result['total_hours']
        vcpuhours = result['total_vcpus_usage']
        diskhours = result['total_local_gb_usage']
        # if serverid provided, only return the server specific data
        ret =
None
        if serverid:
            for instance in instances:
                if instance["instance_id"] == serverid:
                    ret = instance
                    break
        # else return tenant list info
        else:
            ret = {'tenant_id': tenant_id,
                   'start': start.strftime('%Y-%m-%dT%H-%M-%SZ'),
                   'end': end.strftime('%Y-%m-%dT%H-%M-%SZ'),
                   'instances': numInstances,
                   'cpuHours': cpuhours,
                   'vcpuHours': vcpuhours,
                   'ramMBHours': ramhours,
                   'diskGBHours': diskhours}
        return ret
        # (headline, matrix) = self.table_matrix(result)
        # headline.append("Start")
        # headline.append("End")
        # matrix[0].append(start)
        # matrix[0].append(end)
        # if format == 'dict':
        #    result = {}
        #    for i in range(0, len(headline)):
        #        result[headline[i]] = matrix[0][i]
        #    return result
        # else:
        #    return (headline, matrix[0])

    #
    # CLI call of absolute-limits
    #
    # def limits(self):
    #    conf = get_conf()
    #    return _get(conf, "%s/limits")

    # Dead, commented-out code kept for reference; never executed.
    '''
    def check_key_pairs(self, key_name):
        """simple check to see if a keyname is in the keypair list"""
        allKeys = self.cloud.keypairs.list()
        for key in allKeys:
            if key.name in key_name:
                return True
        return False

    #
    # Upload Key Pair
    #
    def upload_key_pair(self, publickey, name):
        """ Uploads key pair """
        try:
            self.cloud.keypairs.create(name, publickey)
        except Exception as e:
            return 1, e
        return (0, 'Key added successfully')

    #
    # Delete Key Pair
    #
    def delete_key(self, name):
        """ delets key pair """
        try:
            self.cloud.keypairs.delete(name)
        except Exception as e:
            return (1, e)
        return (0, 'Key deleted successfully')

    #
    # List Security Group
    #
    def sec_grp_list(self):
        """ lists all security groups """
        try:
            return self.cloud.security_groups.list()
        except Exception as e:
            print e

    states = [
        "ACTIVE",
        "ERROR",
        "BUILDING",
        "PAUSED",
        "SUSPENDED",
        "STOPPED",
        "DELETED",
        "RESCUED",
        "RESIZED",
        "SOFT_DELETED"
    ]

    def display(self, states, userid):
        """ simple or on states and check if userid. If userid is None
            all users will be marked.
            A new variable cm_display is introduced manageing if a VM
            should be printed or not"""
        for (id, vm) in self.servers.items():
            vm['cm_display'] = vm['status'] in states
            if userid is not None:
                vm['cm_display'] = vm['cm_display'] and (
                    vm['user_id'] == userid)
    '''

    def display_regex(self, state_check, userid):
        """Mark each cached server with cm_display by evaluating the given
        state expression, optionally restricted to one user.

        :param state_check: a Python expression string evaluated per vm
        :param userid: if not None, additionally require vm['user_id'] == userid
        """
        print(state_check)
        for (id, vm) in self.__servers__.items():
            # SECURITY: eval() on state_check executes arbitrary code —
            # state_check must never come from untrusted input.
            vm['cm_display'] = eval(state_check)
            # vm['cm_display'] = vm['status'] in states
            if userid is not None:
                vm['cm_display'] = vm['cm_display'] and (
                    vm['user_id'] == userid)
def do_launcher(self, args, arguments): """ :: Usage: launcher start MENU launcher stop STACK_NAME launcher list launcher show STACK_NAME launcher menu [--column=COLUMN] [--format=FORMAT] launcher import [FILEPATH] [--force] launcher export FILEPATH launcher help | -h An orchestration tool with Chef Cookbooks Arguments: MENU Name of a cookbook STACK_NAME Name of a launcher FILEPATH Filepath COLUMN column name to display FORMAT display format (json, table) help Prints this message Options: -v verbose mode """ log.info(arguments) self.cm_mongo = cm_mongo() self.cm_config = cm_config() self.cm_user = cm_user() self._id = "t_stacks" if arguments["help"] or arguments["-h"]: print(self.do_launcher.__doc__) elif arguments["show"] and arguments["STACK_NAME"]: print("NOT IMPLEMENTED") return elif arguments["menu"]: userid = self.cm_config.username() launchers = self.cm_mongo.launcher_get(userid) if launchers.count() == 0: Console.warning( "no launcher in database, please import launcher first" "(launcher import [FILEPATH] [--force])" ) return else: d = {} for launcher in launchers: d[launcher["cm_launcher"]] = launcher if "_id" in d[launcher["cm_launcher"]]: del d[launcher["cm_launcher"]]["_id"] columns = None if arguments["--column"]: if arguments["--column"] != "all": columns = [x.strip() for x in arguments["--column"].split(",")] else: columns = ["name", "description"] if arguments["--format"]: if arguments["--format"] not in ["table", "json", "csv"]: Console.error("please select printing format ", "among table, json and csv") return else: p_format = arguments["--format"] else: p_format = None shell_commands_dict_output( userid, d, print_format=p_format, firstheader="launcher", header=columns # vertical_table=True ) elif arguments["list"]: userid = self.cm_config.username() self.cm_mongo.activate(userid) self.cm_mongo.refresh(cm_user_id=userid, types=[self._id]) stacks = self.cm_mongo.stacks(cm_user_id=userid) launchers = self.filter_launcher(stacks, {"search": 
"contain", "key": "stack_name", "value": "launcher"}) log.debug(launchers) d = {} for k0, v0 in launchers.iteritems(): for k1, v1 in launchers[k0].iteritems(): d[v1["id"]] = v1 columns = ["stack_name", "description", "stack_status", "creation_time", "cm_cloud"] if arguments["--column"] and arguments["--column"] != "all": columns = [x.strip() for x in arguments["--column"].split(",")] if arguments["--format"]: if arguments["--format"] not in ["table", "json", "csv"]: Console.error("please select printing format among table, json and csv") return else: p_format = arguments["--format"] else: p_format = None shell_commands_dict_output( userid, d, print_format=p_format, firstheader="launcher_id", header=columns # vertical_table=True ) elif arguments["start"] and arguments["MENU"]: userid = self.cm_config.username() def_cloud = self.get_cloud_name(userid) self.cm_mongo.activate(userid) config_launcher = cm_config_launcher() userinfo = self.cm_user.info(userid) if "key" in userinfo["defaults"]: key = userinfo["defaults"]["key"] elif len(userinfo["keys"]["keylist"].keys()) > 0: key = userinfo["keys"]["keylist"].keys()[0] if key: keycontent = userinfo["keys"]["keylist"][key] if keycontent.startswith("key "): keycontent = keycontent[4:] cm_keys_mongo(userid).check_register_key(userid, def_cloud, key, keycontent) keynamenew = _keyname_sanitation(userid, key) else: Console.warning("No sshkey found. Please Upload one") cookbook = arguments["MENU"] s_name = "launcher-{0}-{1}-{2}".format(userid, cookbook, get_rand_string()) dummy = "123456789" # doing nothing. 
just for test try: t_url = config_launcher["cloudmesh"]["launcher"]["default"]["template"] except: # If key is missing (KeyError), new cloudmesh_launcher.yaml # needs to be copied to ~/.cloudmesh t_url = "https://raw.githubusercontent.com/cloudmesh/cloudmesh/master/heat-templates/centos6/launcher/launcher.yaml" param = {"KeyName": keynamenew, "Cookbook": cookbook, "dummy": dummy} # test for openmpi, hadoop if cookbook[:6] == "hadoop" or cookbook[:7] == "openmpi": privatekey, publickey = generate_keypair() try: t_url = config_launcher["cloudmesh"]["launcher"]["recipes"][cookbook]["template"] except: # If key is missing (KeyError), new cloudmesh_launcher.yaml # needs to be copied to ~/.cloudmesh t_url = ( "https://raw.githubusercontent.com/cloudmesh/cloudmesh/master/heat-templates/ubuntu-14.04/" + str(cookbook) + "-cluster/" + str(cookbook) + "-cluster.yaml" ) param = {"KeyName": keynamenew, "PublicKeyString": publickey, "PrivateKeyString": privatekey} if cookbook[:9] == "hadoop2.7": param["UserName"] = userid log.debug(def_cloud, userid, s_name, t_url, param) res = self.cm_mongo.stack_create( cloud=def_cloud, cm_user_id=userid, servername=s_name, template_url=t_url, parameters=param ) log.debug(res) if "error" in res: print(res["error"]["message"]) return res elif arguments["stop"] and arguments["STACK_NAME"]: userid = self.cm_config.username() def_cloud = self.get_cloud_name(userid) s_id = arguments["STACK_NAME"] self.cm_mongo.activate(userid) res = self.cm_mongo.stack_delete(cloud=def_cloud, cm_user_id=userid, server=s_id) log.debug(res) return res elif arguments["import"]: filepath = config_file("/cloudmesh_launcher.yaml") if arguments["FILEPATH"]: filepath = arguments["FILEPATH"] try: filename = path_expand(filepath) fileconfig = ConfigDict(filename=filename) except Exception, err: Console.error("error while loading '{0}', please check".format(filepath)) print(traceback.format_exc()) print(sys.exc_info()[0]) return try: recipes_dict = fileconfig.get("cloudmesh", 
"launcher", "recipies") except: Console.error("error while loading recipies from the file") # print recipes_dict userid = self.cm_config.username() launcher_names = [] launchers = self.cm_mongo.launcher_get(userid) for launcher in launchers: launcher_names.append(launcher["cm_launcher"].encode("ascii")) for key in recipes_dict: if key in launcher_names: if arguments["--force"]: self.cm_mongo.launcher_remove(userid, key) self.cm_mongo.launcher_import(recipes_dict[key], key, userid) print("launcher '{0}' overwritten.".format(key)) else: print( "ERROR: launcher '{0}' exists, " "please remove it first, or use " "'--force' when adding".format(key) ) else: self.cm_mongo.launcher_import(recipes_dict[key], key, userid) print("launcher '{0}' added.".format(key))
# Flask REST service setup: request parsers, config and helpers.
app = Flask(__name__)
api = restful.Api(app)

# parser for queue statistics requests
queue_stat_parser = reqparse.RequestParser()
queue_stat_parser.add_argument('resource', type=str, required=True)
# queue_stat_parser.add_argument('user', type=str)
queue_stat_parser.add_argument('id', type=str)

# parser for queue info requests
queue_info_parser = reqparse.RequestParser()
queue_info_parser.add_argument('resource', type=str, required=True)
# queue_info_parser.add_argument('user', type=str)
queue_info_parser.add_argument('queue', type=str)

config = ConfigDict(
    prefix="cloudmesh",
    filename=config_file("/cloudmesh.yaml"))
user = config.get("cloudmesh.profile.username")


def versioned_url(url):
    # prefix the url with the API version (module-level 'version')
    return "/" + version + url


def simple_error(kind, attribute, help="does not exist"):
    # build a uniform error payload; NOTE: 'help' shadows the builtin
    msg = {}
    msg["error:"] = "{0} {1} {2}".format(kind, attribute, help)
    return msg


class rest_queue_stat(restful.Resource):

    def get(self):
# NOTE(review): near-duplicate of the Flask setup above — the two chunks
# appear to come from two separate service modules; confirm before merging.
app = Flask(__name__)
api = restful.Api(app)

queue_stat_parser = reqparse.RequestParser()
queue_stat_parser.add_argument('resource', type=str, required=True)
# queue_stat_parser.add_argument('user', type=str)
queue_stat_parser.add_argument('id', type=str)

queue_info_parser = reqparse.RequestParser()
queue_info_parser.add_argument('resource', type=str, required=True)
# queue_info_parser.add_argument('user', type=str)
queue_info_parser.add_argument('queue', type=str)

config = ConfigDict(prefix="cloudmesh",
                    filename=config_file("/cloudmesh.yaml"))
user = config.get("cloudmesh.profile.username")


def versioned_url(url):
    # prefix the url with the API version (module-level 'version')
    return "/" + version + url


def simple_error(kind, attribute, help="does not exist"):
    # build a uniform error payload; NOTE: 'help' shadows the builtin
    msg = {}
    msg["error:"] = "{0} {1} {2}".format(kind, attribute, help)
    return msg


class rest_queue_stat(restful.Resource):

    def get(self):
        args = queue_stat_parser.parse_args()
    def parse(cls, class_type=None):
        """
        The method is used to parse the contents of the yaml file and
        generate the python code for the same. At the moment the method
        tries to read the file from ~/.cloudmesh. Depending on the
        class_type, it looks for the file "cloudmesh_user.yaml" or
        "cloudmesh_project.yaml"
        :param class_type: class_type can be either user or project. If a
        collection name is not specified, uses the name of the file as the
        collection name.
        :return: Writes the contents to a file under the etc directory. if
        an "etc" directory does not exist, it creates one. The file name
        would be either "base_user.py" or "base_project.py" depending on
        the class_type
        """
        # NOTE(review): the "user" and "project" branches below duplicate a
        # lot of code-generation boilerplate; candidates for extraction.
        code = CodeGenerator()
        file_path = "~/.cloudmesh/{0}/cloudmesh_{1}.yaml".format(
            "accounts", class_type)
        # file_path = "~/.cloudmesh/cloudmesh_" + class_type + ".yaml"
        try:
            filename = path_expand(file_path)
            file_config = ConfigDict(filename=filename)
        except:
            # NOTE(review): bare except hides the real failure reason
            Console.error(
                "Could not load file, please check filepath: {0}".format(
                    file_path))
            return
        try:
            if class_type == "user":
                # emit imports and class header for the generated User model
                code.set_headers(
                    "from cloudmesh_database.dbconn import get_mongo_dbname_from_collection"
                )
                code.set_headers(
                    "from cloudmesh_management.cloudmeshobject import CloudmeshObject"
                )
                code.set_headers("from mongoengine import *")
                code.newline()
                code.newline()
                code.write("class " + class_type.title() +
                           "(CloudmeshObject):")
                code.newline()
                code.indent()
                code.write(
                    "db_name = get_mongo_dbname_from_collection(\"manage\")")
                code.newline()
                code.write("if db_name:")
                code.newline()
                code.indent()
                code.write("meta = {'db_alias': db_name}")
                code.newline()
                code.dedent()
                code.newline()
                code.write("\"\"\"")
                code.newline()
                code.write("Hidden Fields")
                code.newline()
                code.write("\"\"\"")
                code.newline()
                code.write(
                    "status = StringField(required=True, default='pending')")
                code.newline()
                code.write("userid = UUIDField()")
                code.newline()
                # code.write("\n")
                code.write("\"\"\"")
                code.newline()
                code.write("User Fields")
                code.newline()
                code.write("\"\"\"")
                code.newline()
                #
                # map each yaml-declared field to a mongoengine field type
                config = file_config.get("cloudmesh", "user", "fields")
                for _item in config:
                    for key in _item:
                        field = key
                        field_type = ""
                        is_required = _item[key].get('required')
                        if _item[key].get('type') in [
                            'text', 'textarea', 'dropdown', 'password'
                        ]:
                            field_type = "StringField"
                            if is_required:
                                field_type += "(required=True)"
                            else:
                                field_type += "()"
                        elif _item[key].get('type') == 'checkbox':
                            if len(_item[key].get('options')) > 1:
                                options = str(_item[key].get('options'))
                                code.set_options(key, options)
                                if is_required:
                                    field_type = "ListField(StringField(choices=" + key.upper(
                                    ) + "), required=True)"
                            else:
                                field_type = "BooleanField(required=True)"
                        elif _item[key].get('type') == 'email':
                            field_type = "EmailField"
                            if is_required:
                                field_type += "(required=True)"
                        line = field + " = " + field_type
                        code.write(line)
                        code.newline()
                code.write("projects = ListField(ReferenceField('Project'))")
                # print code.end()
                if not code.write_to_file(code.end(), "base_" + class_type):
                    Console.error("Error while trying to write to file.")
                    pass
            elif class_type == "project":
                # emit imports and class header for the generated Project model
                code.set_headers(
                    "from cloudmesh_database.dbconn import get_mongo_dbname_from_collection"
                )
                code.set_headers(
                    "from cloudmesh_management.cloudmeshobject import CloudmeshObject"
                )
                code.set_headers("from mongoengine import *")
                code.newline()
                code.newline()
                code.write("class " + class_type.title() +
                           "(CloudmeshObject):")
                code.newline()
                code.indent()
                code.write(
                    "db_name = get_mongo_dbname_from_collection(\"manage\")")
                code.newline()
                code.write("if db_name:")
                code.newline()
                code.indent()
                code.write("meta = {'db_alias': db_name}")
                code.newline()
                code.dedent()
                code.newline()
                code.write("\"\"\"")
                code.newline()
                code.write("Hidden Fields")
                code.newline()
                code.write("\"\"\"")
                code.newline()
                code.write(
                    "status = StringField(required=True, default='pending')")
                code.newline()
                code.write("project_id = UUIDField()")
                code.newline()
                # code.write("\n")
                code.write("\"\"\"")
                code.newline()
                code.write("Project Fields")
                code.newline()
                code.write("\"\"\"")
                code.newline()
                #
                # map each yaml-declared field to a mongoengine field type
                config = file_config.get("cloudmesh", "project", "fields")
                for _item in config:
                    for key in _item:
                        field = key
                        field_type = ""
                        is_required = _item[key].get('required')
                        if _item[key].get('type') in [
                            'text', 'textarea', 'dropdown', 'password'
                        ]:
                            if _item[key].get('reference'):
                                field_type = "ListField(ReferenceField('" + str(
                                    _item[key].get(
                                        'reference')).title() + "'))"
                            elif _item[key].get('options'):
                                if len(_item[key].get('options')) > 1:
                                    # options = str(_item[key].get('options'))
                                    code.set_options(
                                        key,
                                        str(tuple(_item[key].get('options'))))
                                    field_type = "ListField(StringField(choices=" + key.upper(
                                    ) + "), required=True)"
                            else:
                                field_type = "StringField"
                                if is_required:
                                    field_type += "(required=True)"
                                else:
                                    field_type += "()"
                        elif _item[key].get('type') in ['list']:
                            if _item[key].get('reference'):
                                field_type = "ListField(ReferenceField('" + str(
                                    _item[key].get(
                                        'reference')).title() + "'))"
                            elif _item[key].get('options'):
                                if len(_item[key].get('options')) > 1:
                                    # options = str(_item[key].get('options'))
                                    code.set_options(
                                        key,
                                        str(tuple(_item[key].get('options'))))
                                    field_type = "ListField(StringField(choices=" + key.upper(
                                    ) + "), required=True)"
                            else:
                                field_type = "ListField"
                                if is_required:
                                    field_type += "(required=True)"
                                else:
                                    field_type += "()"
                        elif _item[key].get('type') == 'checkbox':
                            if len(_item[key].get('options')) > 1:
                                if len(_item[key].get('options')) > 1:
                                    # options = str(_item[key].get('options'))
                                    code.set_options(
                                        key,
                                        str(tuple(_item[key].get('options'))))
                                    field_type = "ListField(StringField(choices=" + key.upper(
                                    ) + "), required=True)"
                            else:
                                field_type = "BooleanField(required=True)"
                        elif _item[key].get('type') == 'email':
                            field_type = "EmailField"
                            if is_required:
                                field_type += "(required=True)"
                        elif _item[key].get('type') == 'url':
                            field_type = "URLField"
                            if is_required:
                                field_type += "(required=True)"
                            else:
                                field_type += "()"
                        line = field + " = " + field_type
                        code.write(line)
                        code.newline()
                # print code.end()
                if not code.write_to_file(code.end(), "base_" + class_type):
                    Console.error("Error while trying to write to file.")
                    pass
        except:
            # NOTE(review): bare except hides the real failure reason
            Console.error("Error while reading file.")
            pass
def readClustersConfig(self, dir_yaml): clusters_config = ConfigDict(filename=dir_yaml + "/" + self.default_clusters_yaml) self.dict_clusters_config = clusters_config.get("cloudmesh.inventory") # get all possible cluster names from dict_clusters_config self.cluster_name_list += self.dict_clusters_config.keys()
class OpenPBS(object):

    # #################################################################
    # INITIALIZATION
    # #################################################################

    def __init__(self, deploy=True, yaml_filename="/cloudmesh_pbs.yaml"):
        """
        Creates an object instance of communication with pbs batch
        queues running on multiple hosts. It also is used to create some
        configuration parameters if deploy is set to True.

        It creates in the CLOUDMESH deploy directory the directory pbs
        and puts the database file pbs.db and the file that is used to
        store the current job number. The job number is shared among all
        resources and supposed to be unique.

        :param deploy: If True, creates the configuration files
        :param yaml_filename: The cloudmesh pbs yaml file. Defaults to
                              cloudmesh_pbs.yaml
        :return: an object instance of OpenPBS
        """
        self.yaml_filename = config_file(yaml_filename)
        self.pbs_dir = config_file("/pbs")
        self.id_file = config_file("/pbs/id.txt")
        self.db_file = config_file("/pbs/pbs.db")
        if deploy:
            self.deploy()
        self.load()
        self.id = self.jobid
        # cache for the result of nodes(); filled lazily
        self.pbs_nodes_data = None

    def info(self):
        """
        Prints some elementary information about the configuration of
        the OpenPBS instance.
        """
        print("{:>20} = {:}".format("Config Dir", self.pbs_dir))
        print("{:>20} = {:}".format("Job ID file", self.id_file))
        print("{:>20} = {:}".format("Db file", self.db_file))

    def load(self, yaml_filename=None):
        """
        Loads the cloudmesh pbs yaml file.

        :param yaml_filename: The filename of the yaml file; if None the
                              filename given at construction time is used
        """
        log.debug("PBS yaml filename: {0}".format(self.yaml_filename))
        if yaml_filename is None:
            yaml_filename = self.yaml_filename
        else:
            self.yaml_filename = config_file(yaml_filename)
        self.data = ConfigDict(filename=self.yaml_filename)
        self.hosts = ssh_config()

    def deploy(self, force=True):
        """
        Copies the yaml file from etc in the distribution to the
        .cloudmesh directory. If the file exists it will not be copied
        and a warning is thrown. If the file is the same as in etc no
        warning is thrown.

        :param force: passed through to xcopy; presumably overwrites
                      existing yaml files when True — TODO confirm
                      against xcopy's documentation
        """
        # setup ~/.cloudmesh/pbs
        log.debug(self.pbs_dir)
        if not os.path.isdir(self.pbs_dir):
            os.makedirs(self.pbs_dir)
        self._load_jobid()
        xcopy("../etc/", config_file(""), "*.yaml", force=force)

    # #################################################################
    # JOB ID COUNTER
    # #################################################################

    def _load_jobid(self):
        """
        Internal method that loads the job id from the job id file.

        :return: the job id as read from the file, or 0 if the file
                 could not be read (in which case 0 is also persisted)
        """
        try:
            with open(self.id_file, "r") as f:
                self.id = f.read().strip()
        except IOError:
            # persist a fresh counter via the jobid setter
            self.jobid = 0
            # BUGFIX: the original never assigned self.id in this branch,
            # so the return below raised AttributeError on a fresh install
            self.id = 0
        return self.id

    def _write_jobid(self, id):
        """
        Internal method that overwrites the job id file with the
        specified id.

        :param id: the job id
        :return: the id that was written
        """
        log.debug("CCC:" + self.id_file)
        if not os.path.isfile(self.id_file):
            # BUGFIX: the original created a stray file literally named
            # 'file'; touch the real id file instead
            open(self.id_file, 'w').close()
        with open(self.id_file, "w+") as text_file:
            text_file.write('%s' % id)
        return id

    @property
    def jobid(self):
        """
        The job id as stored in the job id file.

        :return: the job id
        """
        return self._load_jobid()

    @jobid.setter
    def jobid(self, value):
        """
        Sets the job id to the given value and persists it to the id
        file.

        :param value: The value of the jobid
        """
        self._write_jobid(value)

    def jobid_incr(self):
        """Increments the persisted job id by 1."""
        self.jobid = int(self.jobid) + 1

    # ###################
    # GET DATA
    # ###################

    def __str__(self):
        """
        Returns the json object of the config dict as string.

        :return: the string representation of the configuration data
        """
        return self.data.json()

    def servers(self):
        """
        List of the servers defined in the cloudmesh.pbs section of the
        yaml file.

        :return: the server names (keys of cloudmesh.pbs)
        """
        return self.data["cloudmesh"]["pbs"].keys()

    def queues(self, server):
        """
        The queues dict of the given server, or None if no queues are
        configured for it.

        :param server: the name of the server
        :return: the queues dict or None
        """
        # BUGFIX: the original referenced the undefined global 'pbs'
        # instead of self, raising NameError on every call
        server_data = self.data["cloudmesh"]["pbs"][server]
        if "queues" in server_data:
            return server_data["queues"]
        else:
            return None

    #
    # QSTAT
    #

    @classmethod
    def qstat_xml_to_dict(cls, xmldata):
        """
        Internal method that converts a qstat xml representation to a
        dict.

        :param xmldata: The xml data from qstat -x
        :return: a dict representation of the data keyed by Job_Id;
                 empty if the xml could not be parsed
        """
        info = {}
        # BUGFIX: the original ran this identical parse block twice in a
        # row; parsing once is sufficient.
        try:
            xmldoc = minidom.parseString(xmldata)
            for item in xmldoc.getElementsByTagName('Job'):
                job = {}
                for attribute in item.childNodes:
                    if len(attribute.childNodes) == 1:
                        job[attribute.nodeName] = \
                            attribute.firstChild.nodeValue
                    else:
                        job[attribute.nodeName] = {}
                        for subchild in attribute.childNodes:
                            job[attribute.nodeName][subchild.nodeName] = \
                                subchild.firstChild.nodeValue
                    if attribute.nodeName in ['Job_Owner']:
                        # Job_Owner has the form user@host
                        (name, host) = job[attribute.nodeName].split('@')
                        job[u'cm_user'] = name
                        job[u'cm_host'] = host
                info[job['Job_Id']] = job
        except Exception:
            # best effort: unparsable xml yields an empty dict, matching
            # the original silent-failure contract
            pass
        return info

    def qstat(self, host, user=True, format='dict'):
        """
        Executes the qstat command on a particular host and returns the
        information as dict.

        :param host: The host as specified in ~/.ssh/config
        :param user: If True, only returns information for the user.
                     If False, all jobs for all users are returned.
        :param format: 'dict' or 'xml'
        :return: the job information
        """
        data = None
        username = self.username(host)
        manager_host = self.manager(host)
        xml_data = Shell.ssh(manager_host, "qstat", "-x").rstrip()
        if format == 'dict':
            data = OpenPBS.qstat_xml_to_dict(xml_data)
            selected_data = {}
            for jobid in data:
                (owner, cm_host) = data[jobid]['Job_Owner'].split('@')
                if not user:
                    selected_data[unicode(jobid)] = data[unicode(jobid)]
                elif owner == username:
                    selected_data[unicode(jobid)] = data[unicode(jobid)]
            data = selected_data
            for jobid in data:
                data[unicode(jobid)][u"cm_jobid"] = jobid
                if "Variable_list" in data[unicode(jobid)]:
                    data[unicode(jobid)][u"cm_Variable_list"] = \
                        self.variable_list(data, jobid)
        elif format == "xml":
            if user is not None:
                print("WARNING: "
                      "restrictiong xml data for a user not supported.")
            data = xml_data
        return dict(data)

    def username(self, host):
        """
        The username of the host as specified in ~/.ssh/config.

        :param host: The name of the host
        :return: The username
        """
        return self.hosts.username(host)

    def manager(self, host):
        """
        In some cases a queue of another machine is hosted through a
        management node different from that machine. The manager command
        allows us to specify on which machine the qstat command is
        issued.

        :param host: The name of the host
        :return: The name of the management host; falls back to the host
                 itself when no manager is configured
        """
        try:
            m = self.data.get("cloudmesh", "pbs", host, "manager")
        except Exception:
            print("WARNING: Manager not defined for", host)
            print("         Using the host")
            m = host
        return m

    def database_filename(self):
        """
        The expanded path of the pbs database file as configured under
        cloudmesh.pbsdatabase.filename.

        :return: the filename
        """
        return path_expand(
            self.data.get("cloudmesh", "pbsdatabase", "filename"))

    def _write_to_file(self, script, filename):
        """
        Internal function to write a pbs script to a file.

        :param script: content of the script
        :param filename: filename
        """
        with open(filename, "w") as text_file:
            text_file.write('%s' % script)

    def db_jobstatus(self, host, jobid, kind='dict'):
        """This method is not yet implemented."""
        return {}

    def jobstatus(self, host, jobid, kind='dict'):
        """
        The status of a specific job.

        :param host: The host on which the job is running
        :param jobid: The jobid as specified by the queuing system
        :param kind: The output can be returned as dict, xml, and yaml
        :return: the job status in the requested format
        """
        manager_host = self.manager(host)
        qstat_xml_data = Shell.ssh(manager_host, "qstat", "-x",
                                   jobid).rstrip()
        if kind == 'xml':
            r = qstat_xml_data
        else:
            r = self.qstat_xml_to_dict(qstat_xml_data)
            r[unicode(jobid)][u"cm_jobid"] = self.jobid
            r[unicode(jobid)]["cm_Variable_list"] = self.variable_list(r)
            if kind == 'yaml':
                r = yaml.dump(r, default_flow_style=False)
        return r

    @classmethod
    def list(cls, data, attributes):
        """
        Internal function to list selected attributes of the jobs in the
        data dict.

        :param data: The data dict keyed by jobid
        :param attributes: the attributes to return
        :return: dict keyed by jobid with the requested attributes;
                 missing attributes are reported as the string "None"
        """
        content = {}
        for jobid in data:
            content[jobid] = {}
            for attribute in attributes:
                try:
                    content[jobid][attribute] = data[jobid][attribute]
                except KeyError:
                    content[jobid][attribute] = "None"
        return content

    def qsub(self, name, host, script, template=None, kind="dict"):
        """
        Executes the qsub command on a given host.

        NOTE this method may not yet be fully implemented

        :param name: name of the script
        :param host: host on which the script is to be run
        :param script: The content of the script
        :param template: The script is wrapped into a template
        :param kind: The return is passed as dict, yaml, xml
        :return: the job status of the submitted job
        """
        self.jobid_incr()
        jobscript = self.create_script(name, script, template)

        # write the script locally
        self._write_to_file(jobscript, name)

        # copy the script to the remote host
        remote_path = self.data.get("cloudmesh", "pbs", host, "scripts")
        print(remote_path)
        xmkdir(host, remote_path)
        manager_host = self.manager(host)

        # call qsub on the remote host
        r = Shell.scp(name, manager_host + ":" + remote_path)
        jobid = Shell.ssh(
            manager_host,
            "qsub {0}/{1}".format(remote_path, name)).rstrip()
        return self.jobstatus(host, jobid, kind=kind)

    def getid(self, data):
        """
        Returns the first key (job id) of the given data dict.

        :param data: the qstat data dict
        :return: the first job id
        """
        # list(...) keeps this working on python 3 too, where
        # dict.keys() is not indexable
        return list(data.keys())[0]

    @classmethod
    def variable_list(cls, data, id=None):
        """
        Internal function to parse the 'Variable_List' attribute of a
        qstat job into a dict.

        :param data: The qstat data dict keyed by job id
        :param id: The id of the job; defaults to the first job in data
        :return: dict mapping variable names to their values
        """
        key = list(data.keys())[0] if id is None else id
        d = {}
        for element in data[key]['Variable_List'].split(','):
            (attribute, value) = element.split('=')
            d[attribute] = value
        return d

    def create_script(self, name, script, template=None):
        """
        Wraps the script into the given template; if no template is
        given, the script is returned unchanged.

        :param name: the name substituted into the template
        :param script: the script content substituted into the template
        :param template: format string with {name} and {script} slots
        :return: the rendered script
        """
        # BUGFIX: the original (marked # BUG) assigned an unused local
        # and then called .format on None, raising AttributeError
        # whenever template was omitted
        if template is None:
            return script
        data = {'script': script, 'name': name}
        return template.format(**data)

    def read_script(self, filename, user=None, host='localhost'):
        """
        Reads a script file from the local host; remote reads are not
        yet implemented.

        :param filename: the file to read
        :param user: unused for localhost
        :param host: only 'localhost' is supported
        :return: the file content, or None when the host is remote
        """
        content = None
        if host in ['localhost'] and user is None:
            # open() replaces the python-2-only file() builtin
            with open(filename) as f:
                content = f.read()
        else:
            # TODO: copy file from remote host
            print("ERROR: not implemented")
        return content

    def anodes(self, host, refresh=True):
        pass

    def nodes(self, host, refresh=True):
        """
        Returns the information from the command pbsnodes in a dict.

        :param host: the name of the host as specified in the
                     .ssh/config file
        :param refresh: if False, returns a cached value;
                        if True, issues a new command and refreshes the
                        cache
        :return: information of the pbsnodes command in a dict
        """
        manager_host = self.manager(host)
        if self.pbs_nodes_data is None or refresh:
            try:
                result = Shell.ssh(manager_host, "pbsnodes", "-a")
            except Exception:
                raise RuntimeError(
                    "can not execute pbs nodes on host {0}".format(
                        manager_host))
            pbsinfo = {}
            # pbsnodes -a separates nodes by blank lines
            for node in result.split("\n\n"):
                pbs_data = [e.strip() for e in node.split("\n")]
                name = pbs_data[0]
                if name != "":
                    pbsinfo[name] = {u'name': name}
                    for element in pbs_data[1:]:
                        try:
                            (attribute, value) = element.split(" = ")
                            if attribute == 'status':
                                # status is a comma separated list of
                                # key=value pairs
                                pbsinfo[name][attribute] = {}
                                for e in value.split(","):
                                    (a, v) = e.split("=")
                                    pbsinfo[name][attribute][a] = v
                            elif attribute == 'jobs':
                                pbsinfo[name][attribute] = value.split(',')
                            elif attribute == 'note' and (
                                    value.strip().startswith("{") or
                                    value.strip().startswith("[")):
                                # note may carry a python literal
                                pbsinfo[name][attribute] = \
                                    literal_eval(value)
                            else:
                                pbsinfo[name][attribute] = value
                        except Exception:
                            # best effort: skip malformed lines, matching
                            # the original silent-failure contract
                            pass
            self.pbs_nodes_data = pbsinfo
        return self.pbs_nodes_data

    def nodes_sum(self, host):
        """
        The total number of nodes over all states reported by
        nodes_distribution.

        :param host: the name of the host
        :return: the total count
        """
        distribution = self.nodes_distribution(host)
        return sum(int(v) for v in distribution.values())

    def nodes_distribution(self, host):
        """
        Returns the distribution of node states as reported by
        pbsnodes -l -n.

        :param host: the name of the host
        :return: dict mapping state name to count
        """
        manager_host = self.manager(host)

        def pbsnodes_data(manager_host):
            # drop the trailing newline of the command output
            return str(Shell.ssh(manager_host, "pbsnodes", "-l", "-n"))[:-1]

        rows = [line.split()
                for line in pbsnodes_data(manager_host).split("\n")]

        # fill missing columns so every row has three entries
        padded = []
        for line in rows:
            new = ["unkown", "unkown", "unkown"]
            for i in range(0, len(line)):
                try:
                    new[i] = line[i]
                except IndexError:
                    pass
            padded.append(new)

        # column 2 holds the node state; count occurrences per state
        states = [row[2] for row in padded]
        return dict(Counter(states))
def parse(cls, class_type=None):
    """
    Parses the contents of the yaml file and generates the python code
    for it. The method reads the file from ~/.cloudmesh/accounts.
    Depending on the class_type, it looks for the file
    "cloudmesh_user.yaml" or "cloudmesh_project.yaml".

    :param class_type: either "user" or "project"
    :return: Writes the generated code to "base_user.py" or
             "base_project.py" depending on the class_type.
    """
    code = CodeGenerator()
    file_path = "~/.cloudmesh/{0}/cloudmesh_{1}.yaml".format("accounts",
                                                             class_type)
    try:
        filename = path_expand(file_path)
        file_config = ConfigDict(filename=filename)
    except Exception:
        Console.error(
            "Could not load file, please check filepath: {0}".format(
                file_path))
        return

    def _write_class_header(id_field, section_title):
        # emits the imports, class statement and hidden fields that are
        # identical for both class types (was duplicated verbatim in the
        # original user/project branches)
        code.set_headers("from cloudmesh_database.dbconn import get_mongo_dbname_from_collection")
        code.set_headers("from cloudmesh_management.cloudmeshobject import CloudmeshObject")
        code.set_headers("from mongoengine import *")
        code.newline()
        code.newline()
        code.write("class " + class_type.title() + "(CloudmeshObject):")
        code.newline()
        code.indent()
        code.write("db_name = get_mongo_dbname_from_collection(\"manage\")")
        code.newline()
        code.write("if db_name:")
        code.newline()
        code.indent()
        code.write("meta = {'db_alias': db_name}")
        code.newline()
        code.dedent()
        code.newline()
        code.write("\"\"\"")
        code.newline()
        code.write("Hidden Fields")
        code.newline()
        code.write("\"\"\"")
        code.newline()
        code.write("status = StringField(required=True, default='pending')")
        code.newline()
        code.write(id_field + " = UUIDField()")
        code.newline()
        code.write("\"\"\"")
        code.newline()
        code.write(section_title)
        code.newline()
        code.write("\"\"\"")
        code.newline()

    def _user_field_type(key, spec):
        # maps one user yaml field spec to a mongoengine declaration;
        # an unmapped spec yields "" just as the original did
        is_required = spec.get('required')
        ftype = ""
        if spec.get('type') in ['text', 'textarea', 'dropdown',
                                'password']:
            ftype = "StringField" + ("(required=True)" if is_required
                                     else "()")
        elif spec.get('type') == 'checkbox':
            if len(spec.get('options')) > 1:
                code.set_options(key, str(spec.get('options')))
                if is_required:
                    ftype = ("ListField(StringField(choices=" +
                             key.upper() + "), required=True)")
            else:
                ftype = "BooleanField(required=True)"
        elif spec.get('type') == 'email':
            ftype = "EmailField"
            if is_required:
                ftype += "(required=True)"
        return ftype

    def _project_field_type(key, spec):
        # maps one project yaml field spec to a mongoengine declaration
        is_required = spec.get('required')

        def _reference_options_or(base):
            # reference fields and multi-option fields override the base
            # type; this logic was duplicated for 'text'-like and 'list'
            # types in the original
            if spec.get('reference'):
                return ("ListField(ReferenceField('" +
                        str(spec.get('reference')).title() + "'))")
            if spec.get('options'):
                if len(spec.get('options')) > 1:
                    code.set_options(key, str(tuple(spec.get('options'))))
                    return ("ListField(StringField(choices=" +
                            key.upper() + "), required=True)")
                # single-option specs produced an empty type originally
                return ""
            return base + ("(required=True)" if is_required else "()")

        ftype = ""
        if spec.get('type') in ['text', 'textarea', 'dropdown',
                                'password']:
            ftype = _reference_options_or("StringField")
        elif spec.get('type') in ['list']:
            ftype = _reference_options_or("ListField")
        elif spec.get('type') == 'checkbox':
            # the original tested len(options) > 1 twice in a row;
            # once is equivalent
            if len(spec.get('options')) > 1:
                code.set_options(key, str(tuple(spec.get('options'))))
                ftype = ("ListField(StringField(choices=" + key.upper() +
                         "), required=True)")
            else:
                ftype = "BooleanField(required=True)"
        elif spec.get('type') == 'email':
            ftype = "EmailField"
            if is_required:
                ftype += "(required=True)"
        elif spec.get('type') == 'url':
            ftype = "URLField" + ("(required=True)" if is_required
                                  else "()")
        return ftype

    def _write_fields(config, mapper):
        # emits one "<field> = <FieldType>" line per yaml field entry
        for _item in config:
            for key in _item:
                code.write(key + " = " + mapper(key, _item[key]))
                code.newline()

    try:
        if class_type == "user":
            _write_class_header("userid", "User Fields")
            # NOTE(review): this lookup appeared commented out in the
            # original, which would leave 'config' undefined; it must
            # run for the field loop to work
            config = file_config.get("cloudmesh", "user", "fields")
            _write_fields(config, _user_field_type)
            code.write("projects = ListField(ReferenceField('Project'))")
            # print(code.end())
            if not code.write_to_file(code.end(), "base_" + class_type):
                Console.error("Error while trying to write to file.")
        elif class_type == "project":
            _write_class_header("project_id", "Project Fields")
            config = file_config.get("cloudmesh", "project", "fields")
            _write_fields(config, _project_field_type)
            # print(code.end())
            if not code.write_to_file(code.end(), "base_" + class_type):
                Console.error("Error while trying to write to file.")
    except Exception:
        Console.error("Error while reading file.")