def list_quotas(cls, cloud, format):
    """
    Show the nova quotas of a cloud.

    :param cloud: name of the cloud whose OS environment variables are set
    :param format: output format handed through to dict_printer
    :return: the quotas rendered by dict_printer with columns Quota, Limit
    """
    # activate the OS_* environment for the requested cloud
    Quota.set_os_environment(cloud)
    raw = Shell.execute("nova", "quota-show")
    quotas = Quota.convert_to_dict(raw)
    return dict_printer(quotas,
                        order=['Quota', 'Limit'],
                        output=format)
def list(cls, cloud, start=None, end=None, tenant=None, format="table"):
    """
    List the nova usage of a cloud over an optional date range.

    :param cloud: name of the cloud whose OS environment variables are set
    :param start: optional start date passed to ``nova usage --start``
    :param end: optional end date passed to ``nova usage --end``
    :param tenant: optional tenant passed to ``nova usage --tenant``
    :param format: output format handed through to dict_printer
    :return: the usage rendered by dict_printer, or the exception on failure
    """
    # set the environment variables
    set_os_environ(cloud)
    try:
        # build and execute the nova usage command
        args = ["usage"]
        if start is not None:
            args.extend(["--start", start])
        if end is not None:
            args.extend(["--end", end])
        if tenant is not None:
            args.extend(["--tenant", tenant])
        result = Shell.execute("nova", args)
        result = Nova.remove_subjectAltName_warning(result)

        lines = result.splitlines()
        dates = lines[0]
        # nova may emit extra warning lines (e.g. SecurityWarning);
        # drop them from the table body.
        # IDIOM FIX: the original removed entries from `lines` while
        # iterating over a slice of it and used l.__contains__(...);
        # build a filtered list with `in` instead.
        body = [l for l in lines[1:] if "SecurityWarning" not in l]
        table = '\n'.join(body)

        # "Usage from A to B:" -> ["A", "B"]
        dates = dates.replace("Usage from ", "").replace("to", "") \
            .replace(" +", " ")[:-1].split()
        #
        # TODO: for some reason the nova command has returned not the
        # first + char, so we could not ignore the line we may set - as
        # additional comment char, but that did not work
        #
        d = TableParser.convert(table, comment_chars="+#")
        d["0"]["start"] = dates[0]
        d["0"]["end"] = dates[1]

        return dict_printer(d,
                            order=["start",
                                   "end",
                                   "servers",
                                   "cpu hours",
                                   "ram mb-hours",
                                   "disk gb-hours"],
                            output=format)
    except Exception as e:
        # MODERNIZATION: `except Exception, e` is Python-2-only syntax;
        # `as` works on Python 2.6+ and Python 3.
        return e
def find_tunnel():
    """
    Return the pid of a running nucleus/comet ssh tunnel process, or
    None if no such process is found in the ``ps -ax`` listing.
    """
    listing = Shell.execute("ps", ["-ax"]).split("\n")
    pid = None
    info = None
    for line in listing:
        # BUG FIX: in the original, `and` bound tighter than `or`, so the
        # "not a status process" exclusion only applied to the comet/tunnel
        # branch. Presumably it should guard both patterns -- parenthesized
        # accordingly (TODO confirm intended matching with the author).
        is_nucleus = "localhost" in line and "nucleus" in line
        is_comet = "comet" in line and "tunnel" in line
        if (is_nucleus or is_comet) and 'status' not in line:
            info = line.strip()
            break
    if info:
        # first whitespace-separated column of ps output is the pid
        pid = int(info.split(" ", 1)[0])
    return pid
def list_limits(cls, cloud, format, tenant=" "):
    """
    Show the nova limits of a cloud for a tenant.

    :param cloud: name of the cloud whose OS environment variables are set
    :param format: output format handed through to dict_printer
    :param tenant: tenant passed to ``nova limits --tenant``
    :return: the raw output when it contains ERROR, otherwise the limits
             rendered by dict_printer with columns Name, Used, Max
    """
    # activate the OS_* environment for the requested cloud
    Quota.set_os_environment(cloud)
    result = Shell.execute("nova", ["limits", "--tenant", tenant])
    # pass the raw text through unchanged if nova reported an error
    if "ERROR" in result:
        return result
    limits = Limits.convert_to_dict(result)
    return dict_printer(limits,
                        order=["Name", "Used", "Max"],
                        output=format)
def delete(cls, name=None, cloud="juno"):
    """
    Method to delete a group from the cloudmesh database, deleting the
    group's VMs from the cloud first.

    :param name: the name of the group
    :param cloud: the cloud the group belongs to
    :return: "Delete Success" on success, None when no group was found
    """
    try:
        query = {}
        if name is not None:
            query["name"] = name
        if cloud is not None:
            query["cloud"] = cloud
        group = cls.cm.find("group", output="object", **query).first()
        if not group:
            return None
        # the group value holds a comma-separated list of vm ids;
        # delete each VM from the cloud before dropping the group record
        for vm_id in group.value.split(","):
            try:
                # TODO: this is a bug as we should use VM class
                result = Shell.execute("nova", ["delete", vm_id])
                print(Nova.remove_subjectAltName_warning(result))
            except Exception as e:
                Console.error("Failed to delete VM {}, error: {}"
                              .format(vm_id, e))
                continue
        # delete the group record in the local db
        cls.cm.delete(group)
        return "Delete Success"
    except Exception as ex:
        Console.error(ex.message, ex)
def vm_destroy(*args):
    """Run ``vagrant destroy`` with the given extra arguments.

    :param args: additional command line arguments for vagrant
    :return: the output of Shell.execute
    """
    # BUG FIX: *args is a tuple; list + tuple raises TypeError, so
    # convert the tuple to a list before concatenating.
    a = ['destroy'] + list(args)
    return Shell.execute('vagrant', a)
def vm_halt(*args):
    """Run ``vagrant halt`` with the given extra arguments.

    :param args: additional command line arguments for vagrant
    :return: the output of Shell.execute
    """
    # BUG FIX: *args is a tuple; list + tuple raises TypeError, so
    # convert the tuple to a list before concatenating.
    a = ['halt'] + list(args)
    return Shell.execute('vagrant', a)
def vm_suspend(*args):
    """Run ``vagrant suspend`` with the given extra arguments.

    :param args: additional command line arguments for vagrant
    :return: the output of Shell.execute
    """
    # BUG FIX: *args is a tuple; list + tuple raises TypeError, so
    # convert the tuple to a list before concatenating.
    a = ['suspend'] + list(args)
    return Shell.execute('vagrant', a)
def vbox_list(*args):
    """Run ``VBoxManage list vms -l`` with the given extra arguments.

    :param args: additional command line arguments for VBoxManage
    :return: the output of Shell.execute
    """
    # BUG FIX: *args is a tuple; list + tuple raises TypeError, so
    # convert the tuple to a list before concatenating.
    a = ["list", "vms", "-l"] + list(args)
    return Shell.execute('VBoxManage', a)
def vbox_controlvm(*args):
    """Run ``VBoxManage controlvm`` with the given extra arguments.

    :param args: additional command line arguments for VBoxManage
    :return: the output of Shell.execute
    """
    # BUG FIX: *args is a tuple; list + tuple raises TypeError, so
    # convert the tuple to a list before concatenating.
    a = ["controlvm"] + list(args)
    return Shell.execute('VBoxManage', a)
def do_nova(self, args, arguments):
    """
    ::

        Usage:
            nova set CLOUD
            nova info [CLOUD] [--password]
            nova help
            nova [--group=GROUP] ARGUMENTS...

        A simple wrapper for the openstack nova command

        Arguments:
            GROUP           The group to add vms to
            ARGUMENTS       The arguments passed to nova

            help            Prints the nova manual
            set             reads the information from the current cloud
                            and updates the environment variables if
                            the cloud is an openstack cloud
            info            the environment values for OS

        Options:
            --group=GROUP   Add VM to GROUP group
            --password      Prints the password
            -v              verbose mode
    """
    # pprint(arguments)
    cloud = arguments['CLOUD'] or Default.get_cloud()
    if not cloud:
        Console.error("Default cloud not set!")
        return ""

    group = arguments["--group"] or Default.get("group", cloud=cloud)
    if not group:
        Console.error("Default group not set!")
        return ""

    if arguments["help"]:
        os.system("nova help")
        return ""
    elif arguments["info"]:
        set_os_environ(cloud)
        d = {}
        #
        # TODO: this naturally does not work as clouds will have
        # different parameters. Also it does not unset previous
        # parameters from other clouds. See register
        #
        for attribute in ['OS_USERNAME',
                          'OS_TENANT_NAME',
                          'OS_AUTH_URL',
                          'OS_CACERT',
                          'OS_PASSWORD',
                          'OS_REGION']:
            try:
                d[attribute] = os.environ[attribute]
            except KeyError:
                # ROBUSTNESS FIX: the original used a bare `except:`;
                # os.environ[...] only raises KeyError here.
                Console.warning("OS environment variable {:} not found"
                                .format(attribute))
                d[attribute] = None
        # mask the password unless it was explicitly requested
        if not arguments["--password"]:
            d['OS_PASSWORD'] = "******"
        print(row_table(d, order=None, labels=["Variable", "Value"]))
        msg = "info. OK."
        Console.ok(msg)
        return ""
    elif arguments["set"]:
        if cloud:
            set_os_environ(cloud)
            msg = "{0} is set".format(cloud)
            Console.ok(msg)
        else:
            Console.error("CLOUD is required")
    else:  # nova ARGUMENTS...
        print("Cloud = {0}".format(cloud))
        try:
            set_os_environ(cloud)
            args = arguments["ARGUMENTS"]
            # arguments may contain multiple optional arguments
            if len(args) == 1:
                args = args[0].split()
            result = Shell.execute("nova", args)
            print(Nova.remove_subjectAltName_warning(result))
            # On "nova boot", add the vm to the group specified,
            # or else add to the default group.
            if "boot" in args:
                # find the ID of the new VM in the result table
                fields = [field.strip() for field in result.split("|")]
                index = fields.index('id') + 1
                vm_id = fields[index]
                Group.add(name=group, type="vm", id=vm_id, cloud=cloud)
        except Exception as ex:
            # MODERNIZATION: `except Exception, ex` is Python-2-only
            # syntax; `as` works on Python 2.6+ and Python 3.
            Console.error("Error executing Nova command: {}".format(ex))
    return ""
def vm_init(*args):
    """Run ``vagrant init`` with the given extra arguments.

    :param args: additional command line arguments for vagrant
    :return: the output of Shell.execute
    """
    # BUG FIX: *args is a tuple; list + tuple raises TypeError, so
    # convert the tuple to a list before concatenating.
    a = ['init'] + list(args)
    return Shell.execute('vagrant', a)
def vbox_startcm(*args):
    """Run ``VBoxManage startvm`` with the given extra arguments.

    :param args: additional command line arguments for VBoxManage
    :return: the output of Shell.execute
    """
    # BUG FIX: *args is a tuple; list + tuple raises TypeError, so
    # convert the tuple to a list before concatenating.
    a = ["startvm"] + list(args)
    return Shell.execute('VBoxManage', a)
def vm_up(*args):
    """Run ``vagrant up`` with the given extra arguments.

    NOTE(review): a second vm_up definition exists later in this file and
    shadows this one at import time -- the two should be merged.

    :param args: additional command line arguments for vagrant
    :return: the output of Shell.execute
    """
    # BUG FIX: *args is a tuple; list + tuple raises TypeError, so
    # convert the tuple to a list before concatenating.
    a = ['up'] + list(args)
    return Shell.execute('vagrant', a)
def vm_up(*args, **kwargs):
    """Run ``vagrant up --provider PROVIDER`` with the given extra arguments.

    NOTE(review): this definition shadows the earlier vm_up in this file.

    :param args: additional command line arguments for vagrant
    :param kwargs: accepts ``provider`` (defaults to vagrant's standard
                   "virtualbox" provider)
    :return: the output of Shell.execute
    """
    # BUG FIX: the original referenced a bare name `provider` that is not
    # defined in this function (NameError at call time) -- presumably a
    # missing parameter or module-level global; TODO confirm. Accept it as
    # an optional keyword argument instead.
    provider = kwargs.get('provider', 'virtualbox')
    # BUG FIX: *args is a tuple; list + tuple raises TypeError, so
    # convert the tuple to a list before concatenating.
    a = ['up', "--provider", provider] + list(args)
    return Shell.execute('vagrant', a)
def vm_ssh(*args):
    """Run ``vagrant ssh`` with the given extra arguments.

    :param args: additional command line arguments for vagrant
    :return: the output of Shell.execute
    """
    # BUG FIX: *args is a tuple; list + tuple raises TypeError, so
    # convert the tuple to a list before concatenating.
    a = ['ssh'] + list(args)
    return Shell.execute('vagrant', a)
def run(cls, cluster, group, cmd, **kwargs):
    """
    Submit *cmd* as a SLURM batch job on *cluster* and record it in the db.

    Builds a shell script with #SBATCH options and CLOUDMESH status
    markers, copies it to the cluster, submits it with sbatch, and parses
    the job id from the sbatch output.

    :param cluster: name of the cluster as configured in cloudmesh.yaml
    :param group: experiment group recorded with the job
    :param cmd: the command to run; if it names a local file, that script
                is copied to the cluster and executed there
    :param kwargs: overrides for the sbatch options -t, -N, -p, -o, -D, -e
    :return: a dict describing the submitted job, or an error message
             string when no username is configured
    """
    # TODO: the script count is variable in the data base; we should test
    # whether the file exists and increase the counter until one does not,
    # then place the new counter in the db -- e.g. via a helper
    # get_script_name(directory, prefix, counter). There may be a similar
    # thing already in the old cloudmesh.
    config = cls.read_config(cluster)
    if config["credentials"]["username"] == 'TBD':
        return "Please enter username in cloudmesh.yaml for cluster {}".format(cluster)

    cls.incr()
    data = {
        "cluster": cluster,
        "count": cls.counter(),
        "username": config["credentials"]["username"],
        "remote_experiment_dir": config["default"]["experiment_dir"],
        "queue": config["default"]["queue"],
        "id": None,
        "nodes": 1,
        "tasks_per_node": 1,
    }
    data["script_base_name"] = "{username}-{count}".format(**data)
    data["script_name"] = "{username}-{count}.sh".format(**data)
    data["script_output"] = "{username}-{count}.out".format(**data)
    data["script_error"] = "{username}-{count}.err".format(**data)
    # NOTE(review): double .format -- presumably the configured
    # experiment_dir may itself contain {...} placeholders; TODO confirm.
    data["remote_experiment_dir"] = \
        "{remote_experiment_dir}/{count}".format(**data).format(**data)
    data["group"] = group

    # defaults for the sbatch options; kwargs overwrite them
    option_mapping = {'-t': '{tasks_per_node}'.format(**data),
                      '-N': '{nodes}'.format(**data),
                      '-p': '{queue}'.format(**data),
                      '-o': '{script_output}'.format(**data),
                      '-D': '{remote_experiment_dir}'.format(**data),
                      '-e': '{script_error}'.format(**data)}
    # IDIOM FIX: the original used map() with a Python-2-only
    # tuple-unpacking lambda purely for its side effect; a plain loop
    # is clearer and also valid on Python 3.
    for opt in option_mapping:
        option_mapping[opt] = kwargs.get(opt) or option_mapping[opt]

    config = cls.read_config(cluster)
    project = None
    try:
        project = config["credentials"]["project"]
        if project.lower() not in ["tbd", "none"]:
            option_mapping["-A"] = project
    except Exception:
        # the project entry is optional in the config; best effort
        pass

    for key in option_mapping:
        data[key] = option_mapping[key]

    # create the #SBATCH option lines for the script
    options = ""
    for key, value in option_mapping.items():
        options += '#SBATCH {} {}\n'.format(key, value)

    cls.create_remote_dir(cluster, data["remote_experiment_dir"])

    # if the command is a local script, copy the script to the cluster
    if os.path.isfile(Config.path_expand(cmd)):
        _from = Config.path_expand(cmd)
        _to = '{cluster}:{remote_experiment_dir}'.format(**data)
        local_file_name = cmd.split('/')[-1]
        Shell.execute("rsync", [_from, _to])
        data["command"] = '{remote_experiment_dir}/{local_file_name}'.format(
            local_file_name=local_file_name, **data)
    else:
        data["command"] = cmd
    data["options"] = options

    script = textwrap.dedent(
        """
        #! /bin/sh
        {options}
        echo '#CLOUDMESH: BATCH ENVIRONMENT'
        echo 'BASIL_RESERVATION_ID:' $BASIL_RESERVATION_ID
        echo 'SLURM_CPU_BIND:' $SLURM_CPU_BIND
        echo 'SLURM_JOB_ID:' $SLURM_JOB_ID
        echo 'SLURM_JOB_CPUS_PER_NODE:' $SLURM_JOB_CPUS_PER_NODE
        echo 'SLURM_JOB_DEPENDENCY:' $SLURM_JOB_DEPENDENCY
        echo 'SLURM_JOB_NAME:' $SLURM_JOB_NAME
        echo 'SLURM_JOB_NODELIST:' $SLURM_JOB_NODELIST
        echo 'SLURM_JOB_NUM_NODES:' $SLURM_JOB_NUM_NODES
        echo 'SLURM_MEM_BIND:' $SLURM_MEM_BIND
        echo 'SLURM_TASKS_PER_NODE:' $SLURM_TASKS_PER_NODE
        echo 'MPIRUN_NOALLOCATE:' $MPIRUN_NOALLOCATE
        echo 'MPIRUN_NOFREE:' $MPIRUN_NOFREE
        echo 'SLURM_NTASKS_PER_CORE:' $SLURM_NTASKS_PER_CORE
        echo 'SLURM_NTASKS_PER_NODE:' $SLURM_NTASKS_PER_NODE
        echo 'SLURM_NTASKS_PER_SOCKET:' $SLURM_NTASKS_PER_SOCKET
        echo 'SLURM_RESTART_COUNT:' $SLURM_RESTART_COUNT
        echo 'SLURM_SUBMIT_DIR:' $SLURM_SUBMIT_DIR
        echo 'MPIRUN_PARTITION:' $MPIRUN_PARTITION
        d=$(date)
        echo \"#CLOUDMESH: status, start, $d\"
        srun -l echo \"#CLOUDMESH: status, start, $d\"
        srun -l {command}
        d=$(date)
        srun -l echo \"#CLOUDMESH: status, finished, $d\"
        d=$(date)
        echo \"#CLOUDMESH: status, finished, $d\"
        """
    ).format(**data).replace("\r\n", "\n").strip()

    _from = Config.path_expand('~/.cloudmesh/{script_name}'.format(**data))
    _to = '{cluster}:{remote_experiment_dir}'.format(**data)
    data["from"] = _from
    data["to"] = _to
    data["script"] = script

    # write the script locally, then copy it to the cluster
    with open(_from, 'w') as local_file:
        local_file.write(script)
    Shell.scp(_from, _to)

    # submit the job with sbatch
    cmd = 'sbatch {remote_experiment_dir}/{script_name}'.format(**data)
    data["cmd"] = cmd
    result = Shell.ssh(cluster, cmd)
    data["output"] = result

    # parse the job id out of a line "Submitted batch job NNN"
    for line in result.split("\n"):
        if "Submitted batch job" in line:
            data["job_id"] = int(
                line.replace("Submitted batch job ", "").strip())
            break

    # TODO: we may also want to store selected values from the .out file
    # (SLURM_JOB_ID, SLURM_JOB_NODELIST, ...) under names that mirror
    # those environment variables.
    #
    # add data to the database: remove the single-dash option keys first,
    # as they are not valid column names
    for key in ['-t', '-N', '-p', '-o', '-D', '-e']:
        if key in data:
            print(key, data[key])
            del data[key]
    data['status'] = 'started'
    cls.add_db(**data)
    return data
def vbox_vminfo(*args):
    """Run ``VBoxManage showvminfo`` with the given extra arguments.

    :param args: additional command line arguments for VBoxManage
    :return: the output of Shell.execute
    """
    # BUG FIX: *args is a tuple; list + tuple raises TypeError, so
    # convert the tuple to a list before concatenating.
    a = ["showvminfo"] + list(args)
    return Shell.execute('VBoxManage', a)
def run(command):
    """Split *command* into program and arguments and execute it via Shell.

    :param command: a whitespace-separated command string, e.g. "nova list"
    :return: the output of Shell.execute
    :raises ValueError: when *command* is empty or only whitespace
    """
    # BUG FIX: the original split on a single space, so repeated spaces
    # produced empty-string arguments; split() handles any whitespace.
    parts = command.split()
    if not parts:
        raise ValueError("empty command")
    program = parts[0]
    args = parts[1:]
    return Shell.execute(program, args)