def comet_ll(request):
    """Render the Comet cluster list view.

    Fetches the cluster list as JSON, turns each compute node into an
    HTML console link stored under the "terminal" key, and delegates
    table rendering to comet_dict_table.
    """
    # noinspection PyUnusedLocal
    c = comet_logon(request)
    data = json.loads(Cluster.simple_list(format="json"))
    pprint(data)
    # data["terminal"] = Parameter.expand(data.keys())
    for key in data:
        cluster = data[key]
        links = []
        for node in Parameter.expand(cluster["computes"]):
            links.append(
                "<a href=\"console/{}/{}\">{}</a>".format(
                    cluster['name'], node, node))
        cluster["terminal"] = '<br>'.join(links)
    # pprint(type(data), data)

    # Column order and display headers for the rendered table.
    order = [
        "name",
        "project",
        "nodes",
        "computes",
        "terminal",
        "frontend name",
        "frontend state",
        "frontend type",
        "description",
    ]
    header = [
        "Name",
        "Project",
        "Count",
        "Nodes",
        "Terminal",
        "Frontend (Fe)",
        "State (Fe)",
        "Type (Fe)",
        "Description",
    ]
    return comet_dict_table(request,
                            title="Comet List",
                            data=data,
                            header=header,
                            order=order)
def get_layout(id, user):
    """Resolve the swarm cluster for *user* and expand its node list.

    If *id* is None, all job ids for the user are looked up; otherwise
    only the given id is considered. Errors out (without returning) when
    more than one cluster is running and no explicit id was given.

    NOTE(review): `all_nodes` is computed but neither returned nor used
    here — the function may be truncated in this view; confirm against
    the full file.
    """
    if id is None:
        myjobs = ids(user)
    else:
        myjobs = [id]
    if len(myjobs) > 1:
        # FIX: was `print(jobs, len(myjobs))` — `jobs` is undefined and
        # raised a NameError before the error message could be shown.
        print(myjobs, len(myjobs))
        Console.error("More than one swarm cluster running, please specify ID")
    # Take the first node expression of the first (or only) job.
    n = nodes(user, myjobs[0])[0]
    all_nodes = Parameter.expand(n)
def info(self, what=None, kind=None):
    """Print information about the database tables.

    :param what: comma-separated info kinds to show ("table", "count");
                 defaults to both when None
    :param kind: table names to restrict to (hostlist-expandable);
                 defaults to all tables when None
    :return: dict mapping table name -> row count, plus key 'sum' with
             the total row count (only populated when "count" is shown)
    """
    count_result = {}
    if kind is None:
        kinds = tablenames()
    else:
        kinds = Parameter.expand(kind)
    if what is None:
        # NOTE: "table" in "table,count" is a substring test, which is
        # what the original relied on for the default case.
        infos = "table,count"
    else:
        infos = Parameter.expand(what)
    # FIX: banner said "Databse" — corrected typo in user-facing output.
    banner("Database table information", c="-")
    inspector = inspect(self.db.engine)
    if "table" in infos:
        # Show the schema (column name and type) of each selected table.
        for table_name in inspector.get_table_names():
            if table_name in kinds:
                print(table_name + ":")
                for column in inspector.get_columns(table_name):
                    print(" ", column['name'], column['type'])
    counter = 0
    if "count" in infos:
        # Count rows per selected table and accumulate the grand total.
        for table_name in inspector.get_table_names():
            if table_name in kinds:
                t = table(table_name)
                rows = self.session.query(t).count()
                count_result[table_name] = rows
                print("Count {:}: {:}".format(table_name, rows))
                counter = counter + rows
        count_result['sum'] = counter
    return count_result
def _get_vm_names():
    """Resolve the VM names addressed by the NAMES argument.

    Supports glob patterns (via search) and hostlist expansion; falls
    back to the default VM when NAMES is 'last' or missing.

    :return: tuple (vm_list, names) of all VM records and the resolved
             name list
    """
    vm_list = cm.find(kind="vm")
    vms = [vm["name"] for vm in vm_list]
    names = pattern = arguments["NAMES"]
    if pattern is not None:
        if "*" in pattern:
            names = search(vms, pattern)
        else:
            names = Parameter.expand(names)
    if names == ['last'] or names is None:
        # FIX: was `names == [Default.vm]` — a no-op comparison instead
        # of an assignment, so the fallback to the default vm never
        # actually happened.
        names = [Default.vm]
    return vm_list, names
def comet_ll(request):
    """Display the Comet cluster listing.

    Loads the cluster inventory from the Comet service, replaces each
    cluster's compute nodes with console hyperlinks in the "terminal"
    field, and renders everything through comet_dict_table.
    """
    # noinspection PyUnusedLocal
    c = comet_logon(request)
    data = json.loads(Cluster.simple_list(format="json"))
    pprint(data)
    # data["terminal"] = Parameter.expand(data.keys())
    for entry in data:
        record = data[entry]
        anchors = [
            "<a href=\"console/{}/{}\">{}</a>".format(record['name'],
                                                      hostname,
                                                      hostname)
            for hostname in Parameter.expand(record["computes"])
        ]
        record["terminal"] = '<br>'.join(anchors)
    # pprint(type(data), data)

    order = ["name", "project", "nodes", "computes", "terminal",
             "frontend name", "frontend state", "frontend type",
             "description"]
    header = ["Name", "Project", "Count", "Nodes", "Terminal",
              "Frontend (Fe)", "State (Fe)", "Type (Fe)", "Description"]
    return comet_dict_table(request,
                            title="Comet List",
                            data=data,
                            header=header,
                            order=order)
def do_vm(self, args, arguments):
    """
    ::

        Usage:
            vm default [--cloud=CLOUD][--format=FORMAT]
            vm refresh [all][--cloud=CLOUD]
            vm boot [--name=NAME] [--cloud=CLOUD] [--username=USERNAME] [--image=IMAGE] [--flavor=FLAVOR] [--group=GROUP] [--public] [--secgroup=SECGROUP] [--key=KEY] [--dryrun]
            vm boot [--n=COUNT] [--cloud=CLOUD] [--username=USERNAME] [--image=IMAGE] [--flavor=FLAVOR] [--group=GROUP] [--public] [--secgroup=SECGROUP] [--key=KEY] [--dryrun]
            vm ping [NAME] [N]
            vm console [NAME] [--group=GROUP] [--cloud=CLOUD] [--force]
            vm start [NAMES] [--group=GROUP] [--cloud=CLOUD] [--force]
            vm stop [NAMES] [--group=GROUP] [--cloud=CLOUD] [--force]
            vm terminate [NAMES] [--group=GROUP] [--cloud=CLOUD] [--force]
            vm delete [NAMES] [--group=GROUP] [--cloud=CLOUD] [--keep] [--dryrun]
            vm ip assign [NAMES] [--cloud=CLOUD]
            vm ip show [NAMES] [--group=GROUP] [--cloud=CLOUD] [--format=FORMAT] [--refresh]
            vm ip inventory [NAMES] [--header=HEADER] [--file=FILE]
            vm ssh [NAME] [--username=USER] [--quiet] [--ip=IP] [--cloud=CLOUD] [--key=KEY] [--command=COMMAND]
            vm rename [OLDNAMES] [NEWNAMES] [--force] [--dryrun]
            vm list [NAMES] [--cloud=CLOUDS|--active] [--group=GROUP] [--format=FORMAT] [--refresh]
            vm status [NAMES]
            vm wait [--cloud=CLOUD] [--interval=SECONDS]
            vm info [--cloud=CLOUD] [--format=FORMAT]
            vm check NAME
            vm username USERNAME [NAMES] [--cloud=CLOUD]

        Arguments:
            COMMAND        positional arguments, the commands you want to
                           execute on the server(e.g. ls -a) separated by ';',
                           you will get a return of executing result instead of login to
                           the server, note that type in -- is suggested before
                           you input the commands
            NAME           server name. By default it is set to the name of last vm from database.
            NAMES          server name. By default it is set to the name of last vm from database.
            KEYPAIR_NAME   Name of the openstack keypair to be used to create VM. Note this is
                           not a path to key.
            NEWNAMES       New names of the VM while renaming.
            OLDNAMES       Old names of the VM while renaming.

        Options:
            --username=USERNAME   the username to login into the vm. If not
                                  specified it will be guessed
                                  from the image name and the cloud
            --ip=IP          give the public ip of the server
            --cloud=CLOUD    give a cloud to work on, if not given, selected
                             or default cloud will be used
            --count=COUNT    give the number of servers to start
            --detail         for table print format, a brief version
                             is used as default, use this flag to print
                             detailed table
            --flavor=FLAVOR  give the name or id of the flavor
            --group=GROUP          give the group name of server
            --secgroup=SECGROUP    security group name for the server
            --image=IMAGE    give the name or id of the image
            --key=KEY        specify a key to use, input a string which
                             is the full path to the private key file
            --keypair_name=KEYPAIR_NAME   Name of the openstack keypair to
                             be used to create VM. Note this is not a path to key.
            --user=USER      give the user name of the server that you want
                             to use to login
            --name=NAME      give the name of the virtual machine
            --force          rename/ delete vms without user's confirmation
            --command=COMMAND
                             specify the commands to be executed

        Description:
            commands used to boot, start or delete servers of a cloud

            vm default [options...]
                Displays default parameters that are set for vm boot either
                on the default cloud or the specified cloud.

            vm boot [options...]
                Boots servers on a cloud, user may specify flavor, image
                .etc, otherwise default values will be used, see how to set
                default values of a cloud: cloud help

            vm start [options...]
                Starts a suspended or stopped vm instance.

            vm stop [options...]
                Stops a vm instance .

            vm delete [options...]
                Delete servers of a cloud, user may delete a server by its
                name or id, delete servers of a group or servers of a cloud,
                give prefix and/or range to find servers by their names.
                Or user may specify more options to narrow the search

            vm floating_ip_assign [options...]
                assign a public ip to a VM of a cloud

            vm ip show [options...]
                show the ips of VMs

            vm ssh [options...]
                login to a server or execute commands on it

            vm list [options...]
                same as command "list vm", please refer to it

            vm status [options...]
                Retrieves status of last VM booted on cloud and displays it.

        Tip:
            give the VM name, but in a hostlist style, which is very
            convenient when you need a range of VMs e.g. sample[1-3]
            => ['sample1', 'sample2', 'sample3']
            sample[1-3,18] => ['sample1', 'sample2', 'sample3', 'sample18']

        Quoting commands:
            cm vm login gvonlasz-004 --command=\"uname -a\"
    """
    # Dead string kept from the original as implementation notes on the
    # terminate/delete/rename semantics (never executed).
    """
    # terminate
    #  issues a termination to the cloud, keeps vm in database
    # delete
    #  issues a terminate if not already done
    #  (remember you do not have to go to cloud if state is already
    #  terminated)
    #  deletes the vm from database
    #
    # bulk rename
    #  rename abc[0-1] def[3-4]
    #  renames the abc0,abc1 -> def3,def4
    """
    cm = CloudmeshDatabase()

    def _print_dict(d, header=None, output='table'):
        # Tabular printer for vm records (id/name/status columns).
        return Printer.write(d,
                             order=["id", "name", "status"],
                             output=output,
                             sort_keys=True)

    def _print_dict_ip(d, header=None, output='table'):
        # Tabular printer for ip records (network/version/addr columns).
        return Printer.write(d,
                             order=["network", "version", "addr"],
                             output=output,
                             sort_keys=True)

    def get_vm_name(name=None, offset=0, fill=3):
        # Derive a vm name "<user>-NNN" from the default counter when no
        # explicit name was given; returns None on error.
        if name is None:
            count = Default.get_counter(name='name') + offset
            prefix = Default.user
            if prefix is None or count is None:
                Console.error("Prefix and Count could not be retrieved"
                              " correctly.", traceflag=False)
                return
            name = prefix + "-" + str(count).zfill(fill)
        return name

    def _refresh_cloud(cloud):
        # Refresh the vm inventory of one cloud, reporting success or failure.
        try:
            msg = "Refresh VMs for cloud {:}.".format(cloud)
            if Vm.refresh(cloud=cloud):
                Console.ok("{:} OK.".format(msg))
            else:
                Console.error("{:} failed".format(msg), traceflag=False)
        except Exception as e:
            Console.error("Problem running VM refresh", traceflag=False)

    def _get_vm_names():
        # Resolve the NAMES argument into concrete vm names; supports glob
        # patterns and hostlist expansion, falling back to the default vm.
        vm_list = cm.find(kind="vm")
        vms = [vm["name"] for vm in vm_list]
        names = pattern = arguments["NAMES"]
        if pattern is not None:
            if "*" in pattern:
                names = search(vms, pattern)
            else:
                names = Parameter.expand(names)
        if names == ['last'] or names is None:
            # FIX: was `names == [Default.vm]` — comparison instead of
            # assignment, so the default-vm fallback never happened.
            names = [Default.vm]
        return vm_list, names

    cloud = arguments["--cloud"] or Default.cloud
    config = ConfigDict("cloudmesh.yaml")
    active_clouds = config["cloudmesh"]["active"]

    def _refresh(cloud):
        # Refresh one cloud, or all active clouds when "all" was given.
        all = arguments["all"] or None
        if all is None:
            _refresh_cloud(cloud)
        else:
            for cloud in active_clouds:
                _refresh_cloud(cloud)

    # Collect commonly used options (with defaults) into a dotdict.
    arg = dotdict(arguments)
    arg.cloud = arguments["--cloud"] or Default.cloud
    arg.image = arguments["--image"] or Default.get(name="image",
                                                    category=arg.cloud)
    arg.flavor = arguments["--flavor"] or Default.get(name="flavor",
                                                      category=arg.cloud)
    arg.group = arguments["--group"] or Default.group
    arg.secgroup = arguments["--secgroup"] or Default.secgroup
    arg.key = arguments["--key"] or Default.key
    arg.dryrun = arguments["--dryrun"]
    arg.name = arguments["--name"]
    arg.format = arguments["--format"] or 'table'
    arg.refresh = Default.refresh or arguments["--refresh"]
    arg.count = int(arguments["--n"] or 1)
    arg.dryrun = arguments["--dryrun"]
    arg.verbose = not arguments["--quiet"]

    #
    # in many cases use NAMES
    # if arg.NAMES is not None:
    #    arg.names = Parameter.expand(arg.NAMES)
    #    gvonlasz[001-002]  gives ["gvonlasz-001", "gvonlasz-002"]
    # else:
    #    arg.names = None
    #

    if arguments["boot"]:
        arg.username = arguments["--username"] or Image.guess_username(arg.image)
        is_name_provided = arg.name is not None
        arg.user = Default.user
        # Boot arg.count machines; names are auto-generated when no
        # explicit --name was given.
        for index in range(0, arg.count):
            vm_details = dotdict({
                "cloud": arg.cloud,
                "name": get_vm_name(arg.name, index),
                "image": arg.image,
                "flavor": arg.flavor,
                "key": arg.key,
                "secgroup": arg.secgroup,
                "group": arg.group,
                "username": arg.username,
                "user": arg.user
            })
            # correct the username
            vm_details.username = Image.guess_username_from_category(
                vm_details.cloud,
                vm_details.image,
                username=arg.username)
            try:
                if arg.dryrun:
                    print(Printer.attribute(vm_details, output=arg.format))
                    msg = "dryrun info. OK."
                    Console.ok(msg)
                else:
                    vm_id = Vm.boot(**vm_details)
                    if vm_id is None:
                        msg = "info. failed."
                        Console.error(msg, traceflag=False)
                        return ""
                    # set name and counter in defaults
                    Default.set_vm(value=vm_details.name)
                    if is_name_provided is False:
                        Default.incr_counter("name")
                    # Add to group
                    if vm_id is not None:
                        Group.add(name=vm_details.group,
                                  species="vm",
                                  member=vm_details.name,
                                  category=vm_details.cloud)
                    msg = "info. OK."
                    Console.ok(msg)
            except Exception as e:
                Console.error("Problem booting instance {name}".format(
                    **vm_details), traceflag=False)

    elif arguments["username"]:
        arg.username = arguments["--username"] or Image.guess_username(arg.image)
        cloud = arg.cloud
        username = arg.USERNAME
        if arg.NAMES is None:
            names = [Default.vm]
        else:
            names = Parameter.expand(arg.NAMES)
        if len(names) == 0:
            return
        for name in names:
            arg.name = name
            Console.ok("Set username for {cloud}:{name} to "
                       "{USERNAME}".format(**arg))
            Vm.set_login_user(name=name, cloud=cloud, username=username)

    elif arguments["default"]:
        try:
            count = Default.get_counter()
            prefix = Username()
            if prefix is None or count is None:
                Console.error("Prefix and Count could not be retrieved"
                              " correctly.", traceflag=False)
                return
            vm_name = prefix + "-" + str(count).zfill(3)
            arg = {
                "name": vm_name,
                "cloud": arguments["--cloud"] or Default.cloud
            }
            for attribute in ["image", "flavor"]:
                arg[attribute] = Default.get(name=attribute, category=cloud)
            for attribute in ["key", "group", "secgroup"]:
                arg[attribute] = Default.get(name=attribute,
                                             category='general')
            output = arguments["--format"] or "table"
            print(Printer.attribute(arg, output=output))
            msg = "info. OK."
            Console.ok(msg)
            # NOTE(review): this ValueError is constructed but never
            # raised in the original — kept as-is since raising it would
            # change the visible behavior of this branch.
            ValueError("default command not implemented properly. Upon "
                       "first install the defaults should be read from yaml.")
        except Exception as e:
            # Error.traceback(e)
            Console.error("Problem listing defaults", traceflag=False)

    elif arguments["ping"]:
        try:
            # NAME may carry the count when it is numeric (e.g. "vm ping 3").
            if arguments["NAME"] is None and arguments["N"] is None:
                name = arguments["NAME"] or Default.vm
                n = arguments["N"] or 1
            elif arguments["NAME"].isdigit():
                n = arguments["NAME"]
                name = Default.vm
            else:
                name = arguments["NAME"] or Default.vm
                n = arguments["N"] or 1
            print("Ping:", name, str(n))
            vm = dotdict(Vm.list(name=name,
                                 category=cloud,
                                 output="dict")["dict"])
            ip = vm.floating_ip
            result = Shell.ping(host=ip, count=n)
            print(result)
        except Exception as e:
            Console.error(e.message, traceflag=False)

    elif arguments["console"]:
        try:
            name = arguments["NAME"] or Default.vm
            vm = dotdict(Vm.list(name=name,
                                 category=cloud,
                                 output="dict")["dict"])
            cloud_provider = CloudProvider(cloud).provider
            vm_list = cloud_provider.list_console(vm.uuid)
            print(vm_list)
            msg = "info. OK."
            Console.ok(msg)
        except Exception as e:
            # Error.traceback(e)
            Console.error("Problem retrieving status of the VM",
                          traceflag=False)

    elif arguments["status"]:
        try:
            cloud_provider = CloudProvider(cloud).provider
            vm_list = cloud_provider.list_vm(cloud)
            vms = [vm_list[i]["name"] for i in vm_list]
            print("V", vms)
            pattern = arguments["NAMES"]
            if pattern is not None:
                if "*" in pattern:
                    print("search")
                    names = search(vms, pattern)
                else:
                    # FIX: was `Parameter.expand()` with no argument,
                    # which raised a TypeError that the broad except
                    # silently converted into a generic error message.
                    names = Parameter.expand(pattern)
                for i in vm_list:
                    if vm_list[i]["name"] in names:
                        print("{} {}".format(vm_list[i]["status"],
                                             vm_list[i]["name"]))
            else:
                print("{} {}".format(vm_list[0]["status"],
                                     vm_list[0]["name"]))
        except Exception as e:
            # Error.traceback(e)
            Console.error("Problem retrieving status of the VM",
                          traceflag=True)

    elif arguments["wait"]:
        interval = arguments["--interval"] or 5
        try:
            cloud_provider = CloudProvider(cloud).provider
            # Poll up to 9 times for the first vm to become ACTIVE.
            for i in range(1, 10):
                vm_list = cloud_provider.list_vm(cloud)
                time.sleep(float(1))
                d = {}
                for id in vm_list:
                    vm = vm_list[id]
                    d[vm["name"]] = vm["status"]
                print(d)
                print("{} {}".format(vm_list[0]["status"],
                                     vm_list[0]["name"]))
                if vm_list[0]["status"] in ['ACTIVE']:
                    return
        except Exception as e:
            # Error.traceback(e)
            Console.error("Problem retrieving status of the VM",
                          traceflag=True)

    elif arguments["info"]:
        try:
            cloud_provider = CloudProvider(cloud).provider
            vms = cloud_provider.list_vm(cloud)
            vm = vms[0]
            output_format = arguments["--format"] or "table"
            print(Printer.attribute(vm, output=output_format))
            msg = "info. OK."
            Console.ok(msg)
        except Exception as e:
            # Error.traceback(e)
            Console.error("Problem retrieving status of the VM",
                          traceflag=False)

    elif arguments["check"]:
        test = {}
        try:
            names = Parameter.expand(arguments["NAME"])
            id = 0
            for name in names:
                print("Not implemented: {}".format(name))
                # TODO: check the status of the vms
                status = "active"
                # TODO: check if they have a floating ip
                # TODO: get ip
                floating_ip = "127.0.0.1"
                ip = True
                # ping
                # TODO: ping the machine with the shell command
                ping = True
                # check if one can login and run a command
                check = False
                try:
                    r = Shell.execute("uname", "-a")
                    # do a real check
                    check = True
                except:
                    check = False
                test[name] = {
                    "id": id,
                    "name": name,
                    "status": status,
                    "ip": ip,
                    "ping": ping,
                    "login": check
                }
                id += 1
            pprint(test)
            print(Printer.write(test,
                                order=["id", "name", "status", "ip",
                                       "ping", "login"],
                                output="table",
                                sort_keys=True))
            msg = "not yet implemented. failed."
            Console.error(msg, traceflag=False)
        except Exception as e:
            # Error.traceback(e)
            Console.error("Problem retrieving status of the VM",
                          traceflag=False)

    elif arguments["start"]:
        try:
            servers = Parameter.expand(arguments["NAMES"])
            # If names not provided, take the last vm from DB.
            # FIX: also guard against servers is None (Parameter.expand of
            # a missing NAMES), matching the check used in the stop branch;
            # len(None) raised a TypeError here.
            if servers is None or len(servers) == 0:
                last_vm = Default.vm
                if last_vm is None:
                    Console.error("No VM records in database. Please run"
                                  " vm refresh.", traceflag=False)
                    return ""
                name = last_vm["name"]
                # print(name)
                servers = list()
                servers.append(name)
            group = arguments["--group"]
            force = arguments["--force"]
            # if default cloud not set, return error
            if not cloud:
                Console.error("Default cloud not set.", traceflag=False)
                return ""
            Vm.start(cloud=cloud, servers=servers)
            msg = "info. OK."
            Console.ok(msg)
        except Exception as e:
            # Error.traceback(e)
            Console.error("Problem starting instances", traceflag=False)

    elif arguments["stop"]:
        try:
            servers = Parameter.expand(arguments["NAMES"])
            # If names not provided, take the last vm from DB.
            if servers is None or len(servers) == 0:
                last_vm = Default.vm
                if last_vm is None:
                    Console.error("No VM records in database. Please run"
                                  " vm refresh.", traceflag=False)
                    return ""
                name = last_vm["name"]
                # print(name)
                servers = list()
                servers.append(name)
            group = arguments["--group"]
            force = arguments["--force"]
            # if default cloud not set, return error
            if not cloud:
                Console.error("Default cloud not set.", traceflag=False)
                return ""
            Vm.stop(cloud=cloud, servers=servers)
            msg = "info. OK."
            Console.ok(msg)
        except Exception as e:
            # Error.traceback(e)
            Console.error("Problem stopping instances", traceflag=False)

    elif arguments["refresh"]:
        _refresh(cloud)

    elif arguments["delete"]:
        dryrun = arguments["--dryrun"]
        group = arguments["--group"]
        force = not arguments["--keep"]
        cloud = arguments["--cloud"]
        vms, servers = _get_vm_names()
        if servers in [None, []]:
            Console.error("No vms found.", traceflag=False)
            return ""
        for server in servers:
            if dryrun:
                Console.ok("Dryrun: delete {}".format(server))
            else:
                Vm.delete(servers=[server], force=force)
        return ""

    elif arguments["ip"] and arguments["assign"]:
        if arguments["NAMES"] is None:
            names = [Default.vm]
        else:
            names = Parameter.expand(arguments["NAMES"])
        for name in names:
            # ip = Network.get_floatingip(....)
            vm = dotdict(Vm.list(name=name,
                                 category=cloud,
                                 output="dict")["dict"])
            if vm.floating_ip is None:
                Console.ok("Assign IP to {}".format(name))
                try:
                    floating_ip = Network.find_assign_floating_ip(
                        cloudname=cloud,
                        instance_id=name)
                    Vm.refresh(cloud=cloud)
                    if floating_ip is not None:
                        print(
                            "Floating IP assigned to {:} is {:}".format(
                                name, floating_ip))
                        msg = "info. OK."
                        Console.ok(msg)
                except Exception as e:
                    Console.error("Problem assigning floating ips.",
                                  traceflag=False)
            else:
                Console.error(
                    "VM {} already has a floating ip: {}".format(
                        name, vm.floating_ip), traceflag=False)

    elif arguments["ip"] and arguments["inventory"]:
        vms, names = _get_vm_names()
        if names in [None, []]:
            if str(Default.vm) in ['None', None]:
                Console.error("The default vm is not set.", traceflag=False)
                return ""
            else:
                names = [Default.vm]
        header = arguments["--header"] or "[servers]"
        filename = arguments["--file"] or "inventory.txt"
        try:
            # Collect the floating ips of the selected vms and write an
            # ansible-style inventory file.
            vm_ips = []
            for vm in vms:
                if vm["name"] in names:
                    print(vm["name"])
                    vm_ips.append(vm["floating_ip"])
            result = header + "\n"
            result += '\n'.join(vm_ips)
            Console.ok("Creating inventory file: {}".format(filename))
            Console.ok(result)
            with open(filename, 'w') as f:
                f.write(result)
        except Exception as e:
            Console.error("Problem getting ip addresses for instance",
                          traceflag=True)

    elif arguments["ip"] and arguments["show"]:
        if arguments["NAMES"] is None:
            if str(Default.vm) in ['None', None]:
                Console.error("The default vm is not set.", traceflag=False)
                return ""
            else:
                names = [Default.vm]
        else:
            names = Parameter.expand(arguments["NAMES"])
        group = arguments["--group"]
        output_format = arguments["--format"] or "table"
        refresh = arguments["--refresh"]
        try:
            ips = Ip.list(cloud=arg.cloud, output=output_format, names=names)
            print(ips)
        except Exception as e:
            Console.error("Problem getting ip addresses for instance",
                          traceflag=False)

    elif arguments["ssh"]:

        def _print(msg):
            # Quiet-mode-aware message printer.
            if arg.verbose:
                Console.msg(msg)

        chameleon = "chameleon" in ConfigDict(
            filename="cloudmesh.yaml")["cloudmesh"]["clouds"][arg.cloud][
            "cm_host"]
        if chameleon:
            arg.username = "******"
        elif arg.cloud == "azure":
            arg.username = ConfigDict(
                filename="cloudmesh.yaml")["cloudmesh"]["clouds"]["azure"][
                "default"]["username"]
        else:
            if arg.username is None:
                Console.error("Could not guess the username of the vm",
                              traceflag=False)
                return
            arg.username = arguments["--username"] or Image.guess_username(
                arg.image)
        arg.command = arguments["--command"]
        data = dotdict({
            'name': arguments["NAME"] or Default.vm,
            'username': arg.username,
            'cloud': arg.cloud,
            'command': arg.command
        })
        _print("login {cloud}:{username}@{name}".format(**data))
        vm = Vm.get(data.name, category=data.cloud)
        Vm.set_login_user(name=data.name, cloud=data.cloud,
                          username=data.username)
        data.floating_ip = vm.floating_ip
        data.key = arguments["--key"] or Default.key
        _print(Printer.attribute(data))
        '''
        if vm.username is None:
            user_from_db = Vm.get_login_user(vm.name, vm.cloud)
            user_suggest = user_from_db or Default.user
            username = input("Username (Default: {}):".format(user_suggest)) or user_suggest
            Vm.set_login_user(name=data.name, cloud=cloud, username=data.username)
        '''
        ip = arguments["--ip"]
        commands = arguments["--command"]
        ip_addresses = []
        cloud_provider = CloudProvider(cloud).provider
        ip_addr = cloud_provider.get_ips(vm.name)
        ipaddr_dict = Vm.construct_ip_dict(ip_addr, cloud)
        for entry in ipaddr_dict:
            ip_addresses.append(ipaddr_dict[entry]["addr"])
        if len(ip_addresses) > 0:
            if ip is not None:
                if ip not in ip_addresses:
                    Console.error("IP Address specified does not match"
                                  " with the host.", traceflag=False)
                    return ""
            else:
                _print("Determining IP Address to use with a ping test.")
                # This part assumes that the ping is allowed to the machine.
                for ipadd in ip_addresses:
                    _print("Checking {:}...".format(ipadd))
                    try:
                        # Evading ping test, as ping is not enabled for
                        # VMs on Azure cloud
                        # socket.gethostbyaddr(ipadd)
                        # ip will be set if above command is successful.
                        ip = ipadd
                    except socket.herror:
                        _print("Cannot reach {:}.".format(ipadd))
            if ip is None:
                _print("Unable to connect to the machine")
                return ""
            else:
                _print("IP to be used is: {:}".format(ip))
            #
            # TODO: is this correctly implemented
            #
            if not cloud == 'azure':
                SecGroup.enable_ssh(cloud=cloud)
            if arg.verbose:
                Console.info("Connecting to Instance at IP:" + format(ip))
            # Constructing the ssh command to connect to the machine.
            sshcommand = "ssh"
            if arg.key is not None:
                sshcommand += " -i {:}".format(arg.key)
            sshcommand += " -o StrictHostKeyChecking=no"
            sshcommand += " {:}@{:}".format(data.username, ip)
            if commands is not None:
                sshcommand += " \"{:}\"".format(commands)
            # print(sshcommand)
            os.system(sshcommand)
        else:
            Console.error("No Public IPs found for the instance",
                          traceflag=False)

    elif arguments["list"]:
        # groups = Group.list(output="dict")
        arg = dotdict(arguments)
        arg.names = arguments["NAMES"]
        arg.group = arguments["--group"]
        if arg.group is None:
            arg.group = []
        else:
            arg.group = Parameter.expand(arguments["--group"])
        arg.refresh = arguments["--refresh"] or Default.refresh
        if arg.NAMES is not None:
            arg.names = Parameter.expand(arguments["NAMES"])
        else:
            arg.names = ["all"]
        _format = arguments["--format"] or "table"
        if arguments["--active"]:
            clouds = active_clouds
        else:
            if arguments["--cloud"]:
                clouds = Parameter.expand(arguments["--cloud"])
            else:
                clouds = [Default.cloud]
        try:
            d = ConfigDict("cloudmesh.yaml")
            for cloud in clouds:
                if arg.refresh:
                    _refresh(cloud)
                Console.ok("Listing VMs on Cloud: {:}".format(cloud))
                vms = Vm.list(category=cloud, output="raw")
                # print ("XXX", type(vms), vms)
                if vms is None:
                    break
                result = []
                if "all" in arg.names:
                    # `result` was just initialized to [], so the original
                    # `if result is None` branch was dead code — all vms
                    # are selected here.
                    result = vms
                elif arg.group is not None and len(arg.group) > 0:
                    for vm in vms:
                        if vm["group"] in arg.group:
                            result.append(vm)
                elif arg.names is not None and len(arg.names) > 0:
                    for vm in vms:
                        if vm["name"] in arg.names:
                            result.append(vm)
                if len(result) > 0:
                    # print(result)
                    (order, header) = CloudProvider(cloud).get_attributes("vm")
                    print(Printer.write(result,
                                        order=order,
                                        output=_format))
                else:
                    Console.error("No data found with requested parameters.",
                                  traceflag=False)
        except Exception as e:
            # Error.traceback(e)
            Console.error("Problem listing all instances", traceflag=False)

    elif arguments["rename"]:
        try:
            oldnames = Parameter.expand(arguments["OLDNAMES"])
            newnames = Parameter.expand(arguments["NEWNAMES"])
            force = arguments["--force"]
            if oldnames is None or newnames is None:
                Console.error("Wrong VMs specified for rename",
                              traceflag=False)
            elif len(oldnames) != len(newnames):
                # FIX: was `traceflat=False` — a misspelled keyword that
                # raised a TypeError instead of printing the error.
                Console.error("The number of VMs to be renamed is wrong",
                              traceflag=False)
            else:
                for i in range(0, len(oldnames)):
                    oldname = oldnames[i]
                    newname = newnames[i]
                    if arguments["--dryrun"]:
                        Console.ok("Rename {} to {}".format(oldname, newname))
                    else:
                        Vm.rename(cloud=cloud,
                                  oldname=oldname,
                                  newname=newname,
                                  force=force)
                msg = "info. OK."
                Console.ok(msg)
        except Exception as e:
            # Error.traceback(e)
            # FIX: message said "Problem deleting instances" in the
            # rename branch.
            Console.error("Problem renaming instances", traceflag=False)
    return ""
def do_comet(self, args, arguments):
    """
    ::

        Usage:
           comet init
           comet active [ENDPOINT]
           comet ll [CLUSTERID] [--format=FORMAT] [--endpoint=ENDPOINT]
           comet cluster [CLUSTERID] [--format=FORMAT]
                 [--sort=SORTKEY] [--endpoint=ENDPOINT]
           comet computeset [COMPUTESETID]
                 [--allocation=ALLOCATION]
                 [--cluster=CLUSTERID]
                 [--state=COMPUTESESTATE]
                 [--endpoint=ENDPOINT]
           comet start CLUSTERID [--count=NUMNODES] [COMPUTENODEIDS]
                 [--allocation=ALLOCATION] [--walltime=WALLTIME]
                 [--endpoint=ENDPOINT]
           comet terminate COMPUTESETID [--endpoint=ENDPOINT]
           comet power (on|off|reboot|reset|shutdown) CLUSTERID [NODESPARAM]
                 [--endpoint=ENDPOINT]
           comet console [--link] CLUSTERID [COMPUTENODEID]
                 [--endpoint=ENDPOINT]
           comet node info CLUSTERID [COMPUTENODEID] [--format=FORMAT]
                 [--endpoint=ENDPOINT]
           comet node rename CLUSTERID OLDNAMES NEWNAMES
                 [--endpoint=ENDPOINT]
           comet iso list [--endpoint=ENDPOINT]
           comet iso upload [--isoname=ISONAME] PATHISOFILE
                 [--endpoint=ENDPOINT]
           comet iso attach ISOIDNAME CLUSTERID [COMPUTENODEIDS]
                 [--endpoint=ENDPOINT]
           comet iso detach CLUSTERID [COMPUTENODEIDS]
                 [--endpoint=ENDPOINT]

        Options:
            --endpoint=ENDPOINT     Specify the comet nucleus service
                                    endpoint to work with, e.g., dev
                                    or production
            --format=FORMAT         Format is either table, json, yaml,
                                    csv, rest
                                    [default: table]
            --sort=SORTKEY          Sorting key for the table view
            --count=NUMNODES        Number of nodes to be powered on.
                                    When this option is used, the comet system
                                    will find a NUMNODES number of arbitrary
                                    nodes that are available to boot as a
                                    computeset
            --allocation=ALLOCATION Allocation to charge when power on
                                    node(s)
            --walltime=WALLTIME     Walltime requested for the node(s).
                                    Walltime could be an integer value followed
                                    by a unit (m, h, d, w, for minute, hour,
                                    day, and week, respectively). E.g., 3h, 2d
            --isoname=ISONAME       Name of the iso image after being stored
                                    remotely. If not specified, use the
                                    original filename
            --state=COMPUTESESTATE  List only computeset with the specified
                                    state. The state could be submitted,
                                    running, completed
            --link                  Whether to open the console url or just
                                    show the link

        Arguments:
            ENDPOINT        Service endpoint based on the yaml config file.
                            By default it's either dev or production.
            CLUSTERID       The assigned name of a cluster, e.g. vc1
            COMPUTESETID    An integer identifier assigned to a computeset
            COMPUTENODEID   A compute node name, e.g., vm-vc1-0
                            If not provided, the requested action will be taken
                            on the frontend node of the specified cluster
            COMPUTENODEIDS  A set of compute node names in hostlist format,
                            e.g., vm-vc1-[0-3]
                            One single node is also acceptable: vm-vc1-0
                            If not provided, the requested action will be taken
                            on the frontend node of the specified cluster
            NODESPARAM      Specifying the node/nodes/computeset to act on.
                            In case of integer, will be intepreted as a
                            computesetid; in case of a hostlist format, e.g.,
                            vm-vc1-[0-3], a group of nodes; or a single host
                            is also acceptable, e.g., vm-vc1-0
            ISONAME         Name of an iso image at remote server
            ISOIDNAME       Index or name of an iso image at the remote
                            server. The index is based on the list from
                            'comet iso list'.
            PATHISOFILE     The full path to the iso image file to be uploaded
            OLDNAMES        The list of current node names to be renamed, in
                            hostlist format. A single host is also acceptable.
            NEWNAMES        The list of new names to rename to, in hostlist
                            format. A single host is also acceptable.
    """
    # back up of all the proposed commands/options
    # NOTE: the two bare triple-quoted strings below are intentionally dead
    # code — no-op string-literal statements kept as a reference of the old
    # command set and the old dispatch implementation. They are never parsed
    # by docopt and have no runtime effect.
    """
    comet status
    comet tunnel start
    comet tunnel stop
    comet tunnel status
    comet logon
    comet logoff
    comet ll [CLUSTERID] [--format=FORMAT]
    comet docs
    comet info [--user=USER]
               [--project=PROJECT]
               [--format=FORMAT]
    comet cluster [CLUSTERID][--name=NAMES]
               [--user=USER]
               [--project=PROJECT]
               [--hosts=HOSTS]
               [--start=TIME_START]
               [--end=TIME_END]
               [--hosts=HOSTS]
               [--format=FORMAT]
    comet computeset [COMPUTESETID]
    comet start ID
    comet stop ID
    comet power on CLUSTERID [NODESPARAM]
               [--allocation=ALLOCATION]
               [--walltime=WALLTIME]
    comet power (off|reboot|reset|shutdown) CLUSTERID [NODESPARAM]
    comet console CLUSTERID [COMPUTENODEID]
    comet delete [all]
                 [--user=USER]
                 [--project=PROJECT]
                 [--name=NAMES]
                 [--hosts=HOSTS]
                 [--start=TIME_START]
                 [--end=TIME_END]
                 [--host=HOST]
    comet delete --file=FILE
    comet update [--name=NAMES]
                 [--hosts=HOSTS]
                 [--start=TIME_START]
                 [--end=TIME_END]
    comet add [--user=USER]
              [--project=PROJECT]
              [--host=HOST]
              [--description=DESCRIPTION]
              [--start=TIME_START]
              [--end=TIME_END]
              NAME
    comet add --file=FILENAME

    Options:
        --user=USER          user name
        --name=NAMES         Names of the vcluster
        --start=TIME_START   Start time of the vcluster, in
                             YYYY/MM/DD HH:MM:SS format.
                             [default: 1901-01-01]
        --end=TIME_END       End time of the vcluster, in YYYY/MM/DD
                             HH:MM:SS format. In addition a duratio
                             can be specified if the + sign is the
                             first sig
                             The duration will than be added to
                             the start time.
                             [default: 2100-12-31]
        --project=PROJECT    project id
        --host=HOST          host name
        --description=DESCRIPTION  description summary of the vcluster
        --file=FILE          Adding multiple vclusters from one file
        --format=FORMAT      Format is either table, json, yaml,
                             csv, rest
                             [default: table]
        --allocation=ALLOCATION   Allocation to charge when power on
                             node(s)
        --walltime=WALLTIME  Walltime requested for the node(s)

    Arguments:
       FILENAME  the file to open in the cwd if . is
                 specified. If file in in cwd
                 you must specify it with ./FILENAME

    Opens the given URL in a browser window.
    """
    """
    if not arguments["tunnel"] and Comet.tunnelled and not Comet.is_tunnel():
        Console.error("Please establish a tunnel first with:")
        print
        print ("    comet tunnel start")
        print
        return ""

    try:

        if not arguments["tunnel"]:
            logon = Comet.logon()
            if logon is False:
                Console.error("Could not logon")
                return ""
    except:
        Console.error("Could not logon")
    # pprint (arguments)
    output_format = arguments["--format"] or "table"

    if arguments["status"]:
        Comet.state()
    elif arguments["tunnel"] and arguments["start"]:
        Comet.tunnel(True)
    elif arguments["tunnel"] and arguments["stop"]:
        Comet.tunnel(False)
    elif arguments["tunnel"] and arguments["status"]:
        Comet.state()
    elif arguments["logon"]:
        if self.context.comet_token is None:
            if Comet.logon():
                Console.ok("logging on")
                self.context.comet_token = Comet.token
            else:
                Console.error("could not logon")
        else:
            Console.error("already logged on")
    elif arguments["logoff"]:
        if self.context.comet_token is None:
            Console.error("not logged in")
        else:
            if Comet.logoff():
                Console.ok("Logging off")
                self.context.comet_token = None
            else:
                Console.error(
                    "some issue while logging off. Maybe comet not reachable")
    elif arguments["docs"]:
        Comet.docs()
    elif arguments["info"]:
        Console.error("not yet implemented")
    elif arguments["add"]:
        print ("add the cluster")
    elif arguments["start"]:
        cluster_id = arguments["ID"]
        print("start", cluster_id)
        Cluster.start(cluster_id)
    elif arguments["stop"]:
        cluster_id = arguments["ID"]
        print("stop", cluster_id)
        Cluster.stop(cluster_id)
    elif arguments["ll"]:
    """
    # ------------------------------------------------------------------
    # comet init: interactively populate the comet section of
    # cloudmesh.yaml (default user, active endpoint, base url, api
    # version) and obtain an apikey.  Returns early — init never falls
    # through to the command dispatch below.
    # ------------------------------------------------------------------
    if arguments["init"]:
        print ("Initializing the comet configuration file...")
        config = ConfigDict("cloudmesh.yaml")
        # for unit testing only.
        cometConf = config["cloudmesh.comet"]
        endpoints = []
        # print (cometConf.keys())
        if "endpoints" in cometConf.keys():
            endpoints = cometConf["endpoints"].keys()
            if len(endpoints) < 1:
                Console.error("No service endpoints available. "
                              "Please check the config template",
                              traceflag=False)
                return ""
        if "username" in cometConf.keys():
            default_username = cometConf['username']
            # print (default_username)
            # 'TBD' is the template placeholder — prompt only then
            if 'TBD' == default_username:
                set_default_user = \
                    input("Set a default username (RETURN to skip): ")
                if set_default_user:
                    config.data["cloudmesh"]["comet"]["username"] = \
                        set_default_user
                    config.save()
                    Console.ok("Comet default username set!")
        if "active" in cometConf.keys():
            active_endpoint = cometConf['active']
            set_active_endpoint = \
                input("Set the active service endpoint to use. "
                      "The availalbe endpoints are - %s [%s]: "
                      % ("/".join(endpoints),
                         active_endpoint)
                      )
            if set_active_endpoint:
                if set_active_endpoint in endpoints:
                    config.data["cloudmesh"]["comet"]["active"] = \
                        set_active_endpoint
                    config.save()
                    Console.ok("Comet active service endpoint set!")
                else:
                    Console.error("The provided endpoint does not match "
                                  "any available service endpoints. Try %s"
                                  % "/".join(endpoints),
                                  traceflag=False)
        # only prompt for url/api version when the active endpoint is valid
        if cometConf['active'] in endpoints:
            endpoint_url = cometConf["endpoints"] \
                [cometConf['active']]["nucleus_base_url"]
            api_version = cometConf["endpoints"] \
                [cometConf['active']]["api_version"]
            set_endpoint_url = \
                input("Set the base url for the nucleus %s service [%s]: "
                      % (cometConf['active'],
                         endpoint_url)
                      )
            if set_endpoint_url:
                if set_endpoint_url != endpoint_url:
                    config.data["cloudmesh"]["comet"]["endpoints"] \
                        [cometConf['active']]["nucleus_base_url"] \
                        = set_endpoint_url
                    config.save()
                    Console.ok("Service base url set!")
            set_api_version = \
                input("Set the api version for the nucleus %s service [%s]: "
                      % (cometConf['active'],
                         api_version)
                      )
            if set_api_version:
                if set_api_version != api_version:
                    config.data["cloudmesh"]["comet"]["endpoints"] \
                        [cometConf['active']]["api_version"] \
                        = set_api_version
                    config.save()
                    Console.ok("Service api version set!")
        print("Authenticating to the nucleus %s "
              "service and obtaining the apikey..."
              % cometConf['active'])
        Comet.get_apikey(cometConf['active'])
        return ''
        # Comet.get_apikey()
    # ------------------------------------------------------------------
    # comet active: show or change the active endpoint in cloudmesh.yaml.
    # NOTE(review): this branch does not return, so execution continues
    # into the logon block below — presumably intentional so the change
    # takes effect immediately; confirm.
    # ------------------------------------------------------------------
    if arguments["active"]:
        config = ConfigDict("cloudmesh.yaml")
        cometConf = config["cloudmesh.comet"]
        endpoint = arguments["ENDPOINT"] or None
        # parameter specified, intended to change
        if endpoint:
            if "endpoints" in cometConf.keys():
                endpoints = cometConf["endpoints"].keys()
                if endpoint in endpoints:
                    config.data["cloudmesh"] \
                        ["comet"] \
                        ["active"] = endpoint
                    config.save()
                    Console.ok("Comet active service endpoint set"
                               " to: %s" % endpoint)
                else:
                    Console.error("The provided endpoint does not match "
                                  "any available service endpoints. Try %s."
                                  % "/".join(endpoints),
                                  traceflag = False)
            else:
                Console.error("No available endpoint to set. "
                              "Check config file!",
                              traceflag=False)
        else:
            # no ENDPOINT argument: just report the current active endpoint
            if "active" in cometConf.keys():
                active_endpoint = cometConf['active']
                Console.ok("Current active service endpoint is: %s"
                           % active_endpoint)
            else:
                Console.error("Cannot set active endpoint. "
                              "Check config file!",
                              traceflag = False)
    # ------------------------------------------------------------------
    # Every remaining sub-command requires a logon to the nucleus service;
    # --endpoint overrides the configured active endpoint for this call.
    # ------------------------------------------------------------------
    try:
        endpoint = None
        config = ConfigDict("cloudmesh.yaml")
        cometConf = config["cloudmesh.comet"]
        if arguments["--endpoint"]:
            endpoint = arguments["--endpoint"]
            if "endpoints" in cometConf.keys():
                endpoints = cometConf["endpoints"].keys()
                if endpoint not in endpoints:
                    Console.error("The provided endpoint does not match "
                                  "any available service endpoints. Try %s."
                                  % "/".join(endpoints),
                                  traceflag = False)
                    return ''
        logon = Comet.logon(endpoint=endpoint)
        if logon is False:
            Console.error("Could not logon. Please try first:\n"
                          "cm comet init",
                          traceflag = False)
            return ""
    except:
        Console.error("Could not logon",
                      traceflag = False)

    output_format = arguments["--format"] or "table"

    # ------------------------------------------------------------------
    # Command dispatch: exactly one branch runs per invocation.
    # ------------------------------------------------------------------
    if arguments["ll"]:
        cluster_id = arguments["CLUSTERID"] or None
        print(Cluster.simple_list(cluster_id, format=output_format))
    elif arguments["cluster"]:
        cluster_id = arguments["CLUSTERID"]
        sortkey = arguments["--sort"]
        print(Cluster.list(cluster_id,
                           format=output_format,
                           sort=sortkey))
    elif arguments["computeset"]:
        computeset_id = arguments["COMPUTESETID"] or None
        cluster = arguments["--cluster"] or None
        state = arguments["--state"] or None
        allocation = arguments["--allocation"] or None
        cluster = arguments["--cluster"] or None
        print (Cluster.computeset(computeset_id, cluster, state, allocation))
    elif arguments["start"]:
        clusterid = arguments["CLUSTERID"]
        numnodes = arguments["--count"] or None
        computenodeids = arguments["COMPUTENODEIDS"] or None

        # check allocation information for the cluster
        cluster = Cluster.list(clusterid, format='rest')
        try:
            allocations = cluster[0]['allocations']
        except:
            # print (cluster)
            Console.error("No allocation available for the specified cluster."
                          "Please check with the comet help team",
                          traceflag=False)
            return ""

        # checking whether the computesetids is in valid hostlist format
        if computenodeids:
            try:
                hosts_param = hostlist.expand_hostlist(computenodeids)
            except hostlist.BadHostlist:
                Console.error("Invalid hosts list specified!",
                              traceflag=False)
                return ""
        elif numnodes:
            try:
                param = int(numnodes)
            except ValueError:
                Console.error("Invalid count value specified!",
                              traceflag=False)
                return ""
            if param <= 0:
                Console.error("count value has to be greather than zero",
                              traceflag=False)
                return ""
            numnodes = param
        else:
            Console.error("You have to specify either the count of nodes, "
                          "or the names of nodes in hostlist format",
                          traceflag=False)
            return ""

        walltime = arguments["--walltime"] or None
        allocation = arguments["--allocation"] or None

        # validating walltime and allocation parameters
        walltime = Cluster.convert_to_mins(walltime)
        if not walltime:
            print("No valid walltime specified. "
                  "Using system default (2 days)")
        if not allocation:
            # single allocation is used implicitly; otherwise ask the user
            if len(allocations) == 1:
                allocation = allocations[0]
            else:
                allocation = Cluster.display_get_allocation(allocations)

        # issuing call to start a computeset with specified parameters
        print(Cluster.computeset_start(clusterid,
                                       computenodeids,
                                       numnodes,
                                       allocation,
                                       walltime)
              )
    elif arguments["terminate"]:
        computesetid = arguments["COMPUTESETID"]
        print(Cluster.computeset_terminate(computesetid))
    elif arguments["power"]:
        clusterid = arguments["CLUSTERID"] or None
        fuzzyparam = arguments["NODESPARAM"] or None

        # parsing nodesparam for proper action:
        # int -> computeset id; hostlist -> named nodes; absent -> frontend
        if fuzzyparam:
            try:
                param = int(fuzzyparam)
                subject = 'COMPUTESET'
            except ValueError:
                param = fuzzyparam
                try:
                    hosts_param = hostlist.expand_hostlist(fuzzyparam)
                    subject = 'HOSTS'
                except hostlist.BadHostlist:
                    Console.error("Invalid hosts list specified!",
                                  traceflag=False)
                    return ""
        else:
            subject = 'FE'
            param = None

        if arguments["on"]:
            action = "on"
        elif arguments["off"]:
            action = "off"
        elif arguments["reboot"]:
            action = "reboot"
        elif arguments["reset"]:
            action = "reset"
        elif arguments["shutdown"]:
            action = "shutdown"
        else:
            action = None
        print (Cluster.power(clusterid,
                             subject,
                             param,
                             action)
               )
    elif arguments["console"]:
        clusterid = arguments["CLUSTERID"]
        linkonly = False
        if arguments["--link"]:
            linkonly = True
        nodeid = None
        if 'COMPUTENODEID' in arguments:
            nodeid = arguments["COMPUTENODEID"]
        Comet.console(clusterid, nodeid, linkonly)
    elif arguments["iso"]:
        if arguments["list"]:
            isos = (Comet.list_iso())
            idx = 0
            for iso in isos:
                # strip the "public/" prefix for display
                if iso.startswith("public/"):
                    iso = iso.split("/")[1]
                idx += 1
                print ("{}: {}".format(idx, iso))
        if arguments["upload"]:
            isofile = arguments["PATHISOFILE"]
            isofile = os.path.abspath(isofile)
            if os.path.isfile(isofile):
                if arguments["--isoname"]:
                    filename = arguments["--isoname"]
                else:
                    filename = os.path.basename(isofile)
            else:
                print ("File does not exist - {}" \
                       .format(arguments["PATHISOFILE"]))
                return ""
            print(Comet.upload_iso(filename, isofile))
        elif arguments["attach"]:
            isoidname = arguments["ISOIDNAME"]
            clusterid = arguments["CLUSTERID"]
            computenodeids = arguments["COMPUTENODEIDS"] or None
            print(Cluster.attach_iso(isoidname, clusterid, computenodeids))
        elif arguments["detach"]:
            clusterid = arguments["CLUSTERID"]
            computenodeids = arguments["COMPUTENODEIDS"] or None
            print(Cluster.detach_iso(clusterid, computenodeids))
    elif arguments["node"]:
        if arguments["info"]:
            clusterid = arguments["CLUSTERID"]
            nodeid = arguments["COMPUTENODEID"]
            print (Cluster.node_info(clusterid, nodeid=nodeid, format=output_format))
        elif arguments["rename"]:
            clusterid = arguments["CLUSTERID"]
            oldnames = Parameter.expand(arguments["OLDNAMES"])
            newnames = Parameter.expand(arguments["NEWNAMES"])
            if len(oldnames) != len(newnames):
                Console.error("Length of OLDNAMES and NEWNAMES have to be the same",
                              traceflag=False)
                return ""
            else:
                for newname in newnames:
                    if newname.strip() == "":
                        Console.error("Newname cannot be empty string",
                                      traceflag=False)
                        return ""
                cluster_data = Cluster.list(clusterid, format="rest")
                if len(cluster_data) > 0:
                    computes = cluster_data[0]["computes"]
                    nodenames = [x["name"] for x in computes]
                else:
                    Console.error("Error obtaining the cluster information",
                                  traceflag=False)
                    return ""
                # check if new names ar not already taken
                # to be implemented
                # print (oldnames)
                # print (newnames)
                # print (nodenames)
                oldset = set(oldnames)
                newset = set(newnames)
                currentset = set(nodenames)
                # at least one OLDNAME does not exist
                if not oldset <= currentset:
                    Console.error("Not all OLDNAMES are valid",
                                  traceflag=False)
                    return ""
                else:
                    # those unchanged nodes
                    keptset = currentset - oldset
                    # duplication between name of unchanged nodes and
                    # the requested NEWNAMES
                    if keptset & newset != set():
                        Console.error("Not proceeding as otherwise introducing "
                                      "duplicated names",
                                      traceflag=False)
                    else:
                        # preview the renames, then ask for one confirmation
                        # for the whole batch
                        for i in range(0,len(oldnames)):
                            oldname = oldnames[i]
                            newname = newnames[i]
                            print ("%s -> %s" % (oldname, newname))
                        confirm = input("Confirm batch renaming (Y/y to confirm, "
                                        "any other key to abort):")
                        if confirm.lower() == 'y':
                            print ("Conducting batch renaming")
                            for i in range(0,len(oldnames)):
                                oldname = oldnames[i]
                                newname = newnames[i]
                                print (Cluster.rename_node(clusterid,
                                                           oldname,
                                                           newname))
                        else:
                            print ("Action aborted!")
    return ""
def do_workflow(self, args, arguments):
    """
    ::

      Usage:
          workflow refresh [--cloud=CLOUD] [-v]
          workflow list [ID] [NAME] [--cloud=CLOUD] [--format=FORMAT] [--refresh] [-v]
          workflow add NAME LOCATION
          workflow delete ID
          workflow status [NAMES]
          workflow show ID
          workflow save NAME WORKFLOWSTR
          workflow run NAME
          workflow service start
          workflow service stop

          This lists out the workflows present for a cloud

      Options:
         --format=FORMAT the output format [default: table]
         --cloud=CLOUD the cloud name
         --refresh refreshes the data before displaying it
                   from the cloud

      Examples:
          cm workflow refresh
          cm workflow list
          cm workflow list --format=csv
          cm workflow show 58c9552c-8d93-42c0-9dea-5f48d90a3188 --refresh
          cm workflow run workflow1
    """
    arg = dotdict(arguments)
    # NAMES is only present for some sub-commands; expand the hostlist-style
    # pattern when given, otherwise leave names unset.
    if arg.NAMES is not None:
        arg.names = Parameter.expand(arg.NAMES)
    else:
        arg.names = None
    pprint(arg)

    # resolve the cloud: explicit --cloud wins, else the configured default
    cloud = arguments["--cloud"] or Default.cloud
    if cloud is None:
        Console.error("Default cloud doesn't exist")
        return

    if arguments["-v"]:
        print("Cloud: {}".format(cloud))

    # "workflow refresh" (or a refresh-by-default configuration) re-reads
    # workflows from the cloud and returns without dispatching further
    if arguments["refresh"] or Default.refresh:
        msg = "Refresh workflow for cloud {:}.".format(cloud)
        if Workflow.refresh(cloud):
            Console.ok("{:} ok".format(msg))
        else:
            Console.error("{:} failed".format(msg))
        return ""

    if arguments["list"]:
        # no ID: list all workflows; with ID: show details for one
        id = arguments['ID']
        live = arguments['--refresh']
        output_format = arguments["--format"]
        result = None
        if id is None:
            result = Workflow.list(cloud, output_format)
        else:
            result = Workflow.details(cloud, id, live, output_format)

        if result is None:
            Console.error("No workflow(s) found. Failed.")
        else:
            print(result)
        return ""
    elif arguments["show"]:
        workflow_id = arguments["ID"]
        Console.ok("I executed show")
        # Console.msg(workflow_id)
        if workflow_id is None:
            Console.msg("Please enter a Workflow Id to execute workflow")
        else:
            # NOTE(review): "show" fetches via Workflow.run — looks like it
            # only retrieves the stored workflow record here; confirm intent.
            result = Workflow.run(cloud, workflow_id)
            # print (result)
            if result is None:
                Console.msg("Use workflow list to view existing workflows or do workflow save to save a new one")
            else:
                # Console.msg(result)
                # Console.msg(result[0])
                Console.msg(result[0]['workflow_str'])
                # entry_point(result[0]['workflow_str'])
                Console.msg("All Set to execute")
    elif arguments["save"]:
        workflow_name = arguments["NAME"]
        workflow_str = arguments["WORKFLOWSTR"]
        result = Workflow.save(cloud=cloud,
                               name=workflow_name,
                               str=workflow_str)
        if result is not None:
            Console.ok(result)
        else:
            Console.error("Failed to save workflow!")
    elif arguments["run"]:
        Console.msg("Execute Run")
        workflow_name = arguments["NAME"]
        # cm_id = arguments["ID"]
        result = Workflow.run(cloud, name=workflow_name)
    elif arguments["delete"]:
        workflow_id = arguments["ID"]
        result = Workflow.delete(cloud, workflow_id)
        if result is not None:
            Console.msg(result)
        else:
            # BUGFIX: previously reported "Failed to save workflow!"
            # (copy-paste from the save branch); report the delete failure.
            Console.error("Failed to delete workflow!")
    elif arguments["service"] and arguments["start"]:
        # Console.msg()
        # mkdir -pf ~/.cloudmesh/workflow
        pass
def do_workflow(self, args, arguments):
    """
    ::

      Usage:
          workflow refresh [--cloud=CLOUD] [-v]
          workflow list [ID] [--cloud=CLOUD] [--format=FORMAT] [--refresh] [-v]
          workflow add NAME LOCATION
          workflow delete NAMES
          workflow status [NAMES]
          workflow show NAMES

          This lists out the workflows present for a cloud

      Options:
         --format=FORMAT the output format [default: table]
         --cloud=CLOUD the cloud name
         --refresh refreshes the data before displaying it
                   from the cloud

      Examples:
          cm workflow refresh
          cm workflow list
          cm workflow list --format=csv
          cm workflow show 58c9552c-8d93-42c0-9dea-5f48d90a3188 --refresh
    """
    arg = dotdict(arguments)
    # NAMES is only set for some sub-commands; expand when given
    if arg.NAMES is not None:
        arg.names = Parameter.expand(arg.NAMES)
    else:
        arg.names = None
    pprint (arg)

    # explicit --cloud wins over the configured default cloud
    cloud = arguments["--cloud"] or Default.cloud
    if cloud is None:
        Console.error("Default cloud doesn't exist")
        return

    if arguments["-v"]:
        print("Cloud: {}".format(cloud))

    # explicit refresh (or refresh-by-default) re-reads from the cloud
    # and returns without dispatching any further sub-command
    if arguments["refresh"] or Default.refresh:
        msg = "Refresh workflow for cloud {:}.".format(cloud)
        if Workflow.refresh(cloud):
            Console.ok("{:} ok".format(msg))
        else:
            Console.error("{:} failed".format(msg))
        return ""

    if arguments["list"]:
        id = arguments['ID']
        live = arguments['--refresh']
        output_format = arguments["--format"]
        counter = 0
        result = None
        # Two-pass fetch: if the first lookup comes back empty, refresh the
        # workflow cache and try once more.
        # NOTE(review): there is no break, so the second fetch happens even
        # when the first one already succeeded — confirm this is intended.
        while counter < 2:
            if id is None:
                result = Workflow.list(cloud, output_format)
            else:
                result = Workflow.details(cloud, id, live, output_format)
            if counter == 0 and result is None:
                if not Workflow.refresh(cloud):
                    msg = "Refresh workflow for cloud {:}.".format(cloud)
                    Console.error("{:} failed.".format(msg))
            counter += 1

        if result is None:
            Console.error("No workflow(s) found. Failed.")
        else:
            print(result)
        return ""
    elif arguments["show"]:
        # placeholder: "show" is not implemented beyond this message
        Console.ok("I executed show")
from pprint import pprint

# matplotlib can emit noisy warnings at import time; import it with
# warnings suppressed so script output stays clean
import warnings;

with warnings.catch_warnings():
    warnings.simplefilter("ignore");
    import matplotlib.pyplot as plt

if __name__ == '__main__':
    # parse the CLI according to the module docstring (docopt format)
    arguments = docopt(__doc__)
    print(arguments)

    kind = arguments['--kind']
    # --host and --os accept hostlist-style patterns; expand to plain lists
    hosts = Parameter.expand(arguments["--host"])
    oses = Parameter.expand(arguments["--os"])
    print(hosts)
    print(oses)

    #
    # CLEAN
    #
    # build one record per (host, os) combination
    for host in hosts:
        for osystem in oses:
            data = {
                "os": osystem,
                "host": host,
                "kind": kind
            }
def do_network(self, args, arguments):
    """
    ::

        Usage:
            network get fixed [ip] [--cloud=CLOUD] FIXED_IP
            network get floating [ip] [--cloud=CLOUD] FLOATING_IP_ID
            network reserve fixed [ip] [--cloud=CLOUD] FIXED_IP
            network unreserve fixed [ip] [--cloud=CLOUD] FIXED_IP
            network associate floating [ip] [--cloud=CLOUD] [--group=GROUP]
                [--instance=INS_ID_OR_NAME] [FLOATING_IP]
            network disassociate floating [ip] [--cloud=CLOUD] [--group=GROUP]
                [--instance=INS_ID_OR_NAME] [FLOATING_IP]
            network create floating [ip] [--cloud=CLOUD] [--pool=FLOATING_IP_POOL]
            network delete floating [ip] [--cloud=CLOUD] [--unused] [FLOATING_IP]
            network list floating pool [--cloud=CLOUD]
            network list floating [ip] [--cloud=CLOUD] [--unused]
                [--instance=INS_ID_OR_NAME] [IP_OR_ID]
            network create cluster --group=demo_group
            network -h | --help

        Options:
            -h                         help message
            --unused                   unused floating ips
            --cloud=CLOUD              Name of the IaaS cloud e.g. india_openstack_grizzly.
            --group=GROUP              Name of the group in Cloudmesh
            --pool=FLOATING_IP_POOL    Name of Floating IP Pool
            --instance=INS_ID_OR_NAME  ID or Name of the vm instance

        Arguments:
            IP_OR_ID        IP Address or ID of IP Address
            FIXED_IP        Fixed IP Address, e.g. 10.1.5.2
            FLOATING_IP     Floating IP Address, e.g. 192.1.66.8
            FLOATING_IP_ID  ID associated with Floating IP,
                            e.g. 185c5195-e824-4e7b-8581-703abec4bc01

        Examples:
            network get fixed ip --cloud=india 10.1.2.5
            network get fixed --cloud=india 10.1.2.5
            network get floating ip --cloud=india 185c5195-e824-4e7b-8581-703abec4bc01
            network get floating --cloud=india 185c5195-e824-4e7b-8581-703abec4bc01
            network reserve fixed ip --cloud=india 10.1.2.5
            network reserve fixed --cloud=india 10.1.2.5
            network unreserve fixed ip --cloud=india 10.1.2.5
            network unreserve fixed --cloud=india 10.1.2.5
            network associate floating ip --cloud=india --instance=albert-001 192.1.66.8
            network associate floating --cloud=india --instance=albert-001
            network associate floating --cloud=india --group=albert_group
            network disassociate floating ip --cloud=india --instance=albert-001 192.1.66.8
            network disassociate floating --cloud=india --instance=albert-001 192.1.66.8
            network create floating ip --cloud=india --pool=albert-f01
            network create floating --cloud=india --pool=albert-f01
            network delete floating ip --cloud=india 192.1.66.8 192.1.66.9
            network delete floating --cloud=india 192.1.66.8 192.1.66.9
            network list floating ip --cloud=india
            network list floating --cloud=india
            network list floating --cloud=india --unused
            network list floating --cloud=india 192.1.66.8
            network list floating --cloud=india --instance=323c5195-7yy34-4e7b-8581-703abec4b
            network list floating pool --cloud=india
            network create cluster --group=demo_group
    """
    # pprint(arguments)

    # Get the cloud parameter OR read default
    cloudname = arguments["--cloud"] or Default.cloud
    if cloudname is None:
        Console.error("Default cloud has not been set!"
                      "Please use the following to set it:\n"
                      "cm default cloud=CLOUDNAME\n"
                      "or provide it via the --cloud=CLOUDNAME argument.")
        return ""

    # Fixed IP info
    if arguments["get"] \
            and arguments["fixed"]:
        fixed_ip = arguments["FIXED_IP"]
        result = Network.get_fixed_ip(cloudname,
                                      fixed_ip_addr=fixed_ip)
        Console.msg(result)

    # Floating IP info
    elif arguments["get"] \
            and arguments["floating"]:
        floating_ip_id = arguments["FLOATING_IP_ID"]
        result = Network.get_floating_ip(cloudname,
                                         floating_ip_or_id=floating_ip_id)
        Console.msg(result)

    # Reserve a fixed ip
    elif arguments["reserve"] \
            and arguments["fixed"]:
        fixed_ip = arguments["FIXED_IP"]
        result = Network.reserve_fixed_ip(cloudname=cloudname,
                                          fixed_ip_addr=fixed_ip)
        if result is not None:
            Console.ok("Reserve fixed ip address {} complete.".format(fixed_ip))

    # Un-Reserve a fixed ip
    elif arguments["unreserve"] \
            and arguments["fixed"]:
        fixed_ip = arguments["FIXED_IP"]
        result = Network.unreserve_fixed_ip(cloudname=cloudname,
                                            fixed_ip_addr=fixed_ip)
        if result is not None:
            Console.ok("Un-Reserve fixed ip address {} complete.".format(fixed_ip))

    # Associate floating IP
    elif arguments["associate"] \
            and arguments["floating"]:
        # Get all command-line arguments
        group_name = arguments["--group"]
        instance_id = arguments["--instance"]
        floating_ip = arguments["FLOATING_IP"]

        # group supplied
        if group_name is not None:
            """
            Group name has been provided.
            Assign floating IPs to all vms in the group
            and return
            """
            # Get the group information
            group = Group.get_info(name=group_name,
                                   category=cloudname,
                                   output="json")
            if group is not None:
                # Convert from str to json
                group = json.loads(group)

                # For each vm in the group
                # Create and assign a floating IP
                for item in group:
                    instance_id = group[item]["value"]
                    floating_ip = Network.find_assign_floating_ip(cloudname=cloudname,
                                                                  instance_id=instance_id)
                    if floating_ip is not None:
                        Console.ok("Created and assigned Floating IP {} to instance {}."
                                   .format(floating_ip, instance_id))

                        # Refresh VM in db
                        self.refresh_vm(cloudname)
            else:
                Console.error("No group {} in the Cloudmesh database."
                              .format(group_name))
                return ""

        # floating-ip not supplied, instance-id supplied
        elif not floating_ip and instance_id is not None:
            """
            Floating IP has not been provided, instance-id provided.
            Generate one from the pool, and assign to vm
            and return
            """
            floating_ip = Network.find_assign_floating_ip(cloudname=cloudname,
                                                          instance_id=instance_id)
            if floating_ip is not None:
                Console.ok("Associated floating IP {} to instance {}."
                           .format(floating_ip, instance_id))

        # instance-id & floating-ip supplied
        elif instance_id is not None:
            """
            Floating IP & Instance ID have been provided
            Associate the IP to the instance
            and return
            """
            Network.find_assign_floating_ip(cloudname=cloudname,
                                            instance_id=instance_id,
                                            floating_ip=floating_ip[0])

        # Invalid parameters
        else:
            Console.error("Please provide at least one of [--group] OR [--instance] parameters.\n"
                          "You can also provide [FLOATING_IP] AND [--instance] parameters.\n"
                          "See 'cm network --help' for more info.")
            return ""

        # Refresh VM in db
        self.refresh_vm(cloudname)

    elif arguments["disassociate"] \
            and arguments["floating"]:
        # Get all command-line arguments
        group_name = arguments["--group"]
        instance_id = arguments["--instance"]
        floating_ip = arguments["FLOATING_IP"]

        # group supplied
        if group_name is not None:
            """
            Group name has been provided.
            Remove floating IPs of all vms in the group
            and return
            """
            # Get the group information
            group = Group.get_info(name=group_name,
                                   category=cloudname,
                                   output="json")
            if group is not None:
                # Convert from str to json
                group = json.loads(group)

                # For each vm in the group
                # Create and assign a floating IP
                for item in group:
                    instance_id = group[item]["value"]

                    # Get the instance dict
                    instance_dict = Network.get_instance_dict(cloudname=cloudname,
                                                              instance_id=instance_id)
                    # Instance not found
                    if instance_dict is None:
                        Console.error("Instance {} not found in the cloudmesh database!"
                                      .format(instance_id))
                        return ""

                    # Get the instance name
                    instance_name = instance_dict["name"]
                    floating_ip = instance_dict["floating_ip"]

                    # Floating ip argument invalid
                    if floating_ip is None:
                        Console.error("Instance{} does not have a floating_ip."
                                      .format(instance_name))
                        return ""

                    result = Network.disassociate_floating_ip(cloudname=cloudname,
                                                              instance_name=instance_name,
                                                              floating_ip=floating_ip)
                    if result is not None:
                        Console.ok("Disassociated Floating IP {} from instance {}."
                                   .format(floating_ip, instance_name))
            else:
                Console.error("No group {} in the Cloudmesh database."
                              .format(group_name))
                return ""

        # floating-ip not supplied, instance-id supplied
        elif len(floating_ip) == 0 and instance_id is not None:
            """
            Floating IP has not been provided, instance-id provided.
            Remove floating ip allocated to vm
            and return
            """
            instance_dict = Network.get_instance_dict(cloudname=cloudname,
                                                      instance_id=instance_id)
            # Instance not found
            if instance_dict is None:
                Console.error("Instance {} not found in the cloudmesh database!"
                              .format(instance_id))
                return ""

            instance_name = instance_dict["name"]
            floating_ip = instance_dict["floating_ip"]

            # Floating ip argument invalid
            if floating_ip is None:
                Console.error("Instance{} does not have a floating_ip."
                              .format(instance_name))
                return ""

            result = Network.disassociate_floating_ip(cloudname=cloudname,
                                                      instance_name=instance_name,
                                                      floating_ip=floating_ip)
            if result is not None:
                Console.ok("Disassociated Floating IP {} from instance {}."
                           .format(floating_ip, instance_name))

        # instance-id & floating-ip supplied
        elif instance_id is not None:
            """
            Floating IP & Instance ID have been provided
            Remove the IP from the instance
            and return
            """
            instance_dict = Network.get_instance_dict(cloudname=cloudname,
                                                      instance_id=instance_id)
            floating_ip = floating_ip[0]

            # Instance not found
            if instance_dict is None:
                Console.error("Instance {} not found in the cloudmesh database!"
                              .format(instance_id))
                return ""

            instance_name = instance_dict["name"]
            _floating_ip = instance_dict["floating_ip"]

            # Floating ip argument invalid: the supplied IP must be the one
            # actually attached to the instance
            if _floating_ip != floating_ip:
                Console.error("Invalid floating_ip {} for instance {}."
                              .format(floating_ip, instance_name))
                return ""

            result = Network.disassociate_floating_ip(cloudname=cloudname,
                                                      instance_name=instance_name,
                                                      floating_ip=floating_ip)
            if result is not None:
                Console.ok("Disassociated Floating IP {} from instance {}."
                           .format(floating_ip, instance_name))

        # Invalid parameters
        else:
            Console.error("Please provide at least one of [--group] OR [--instance] parameters.\n"
                          "You can also provide [FLOATING_IP] AND [--instance] parameters.\n"
                          "See 'cm network --help' for more info.")
            return ""

        # Refresh VM in db
        self.refresh_vm(cloudname)

    # Create new floating ip under floating pool
    elif arguments["create"] \
            and arguments["floating"]:
        floating_pool = arguments["--pool"]
        result = Network.create_floating_ip(cloudname=cloudname,
                                            floating_pool=floating_pool)
        if result is not None:
            Console.ok("Created new floating IP {}".format(result))
        else:
            Console.error("Failed to create floating IP! Please check arguments.")

    # Delete a floating ip address
    elif arguments["delete"] \
            and arguments["floating"]:
        # delete all unused floating ips
        if arguments["--unused"]:
            unused_floating_ips = Network.get_unused_floating_ip_list(cloudname=cloudname)
            if unused_floating_ips:
                for floating_ip in unused_floating_ips:
                    self._delete_floating_ip(cloudname=cloudname,
                                             floating_ip=floating_ip["id"])
            else:
                Console.msg("No unused floating ips exist at this moment. Ok.")
            return ""

        # delete specified floating ips (FLOATING_IP accepts a
        # hostlist-style pattern expanding to several addresses)
        floating_ips = Parameter.expand(arguments["FLOATING_IP"])
        for floating_ip in floating_ips:
            self._delete_floating_ip(cloudname=cloudname,
                                     floating_ip=floating_ip)

    # Floating IP Pool List
    elif arguments["list"] \
            and arguments["floating"] \
            and arguments["pool"]:
        result = Network.list_floating_ip_pool(cloudname)
        Console.msg(result)

    # Floating IP list [or info]
    elif arguments["list"] \
            and arguments["floating"]:
        ip_or_id = arguments["IP_OR_ID"]
        instance_id = arguments["--instance"]

        # List unused floating addr
        if arguments["--unused"]:
            result = Network.list_unused_floating_ip(cloudname=cloudname)
            Console.msg(result)
            return ""

        # Refresh VM in db
        self.refresh_vm(cloudname)

        # If instance id is supplied
        if instance_id is not None:
            instance_dict = Network.get_instance_dict(cloudname=cloudname,
                                                      instance_id=instance_id)
            # Instance not found
            if instance_dict is None:
                Console.error("Instance {} not found in the cloudmesh database!"
                              .format(instance_id))
                return ""

            # Read the floating_ip from the dict
            ip_or_id = instance_dict["floating_ip"]

            if ip_or_id is None:
                Console.error("Instance with ID {} does not have a floating IP address!"
                              .format(instance_id))
                return ""

        # If the floating ip or associated ID is supplied
        if ip_or_id is not None:
            result = Network.get_floating_ip(cloudname,
                                             floating_ip_or_id=ip_or_id)
            if result is not None:
                Console.msg(result)
            else:
                Console.error("Floating IP not found! Please check your arguments.")
                return ""
        # Retrieve the full list
        else:
            result = Network.list_floating_ip(cloudname)
            Console.msg(result)

    # Create a virtual cluster
    elif arguments["cluster"] and \
            arguments["create"]:
        group_name = arguments["--group"] or \
            Default.get(name="group", category=cloudname)

        # Get the group information
        group = Group.get_info(name=group_name,
                               category=cloudname,
                               output="json")
        if group is not None:
            # Convert from str to json
            group = json.loads(group)

            # var contains pub key of all vms
            public_keys = ""
            login_users = []
            login_ips = []

            # For each vm in the group
            # Create and assign a floating IP
            for item in group:
                instance_id = group[item]["value"]

                # Get the instance dict
                instance_dict = Network.get_instance_dict(cloudname=cloudname,
                                                          instance_id=instance_id)
                # Instance not found
                if instance_dict is None:
                    Console.error("Instance {} not found in the cloudmesh database!"
                                  .format(instance_id))
                    return ""

                # Get the instance name
                instance_name = instance_dict["name"]
                floating_ip = instance_dict["floating_ip"]

                # If vm does not have floating ip, then create
                if floating_ip is None:
                    floating_ip = Network.create_assign_floating_ip(cloudname=cloudname,
                                                                    instance_name=instance_name)
                    if floating_ip is not None:
                        Console.ok("Created and assigned Floating IP {} to instance {}."
                                   .format(floating_ip, instance_name))
                        # Refresh VM in db
                        self.refresh_vm(cloudname)

                # Get the login user for this machine
                user = input("Enter the login user for VM {} : ".format(instance_name))
                passphrase = getpass.getpass("Enter the passphrase key on VM {} : ".format(instance_name))

                # create list for second iteration
                login_users.append(user)
                login_ips.append(floating_ip)

                login_args = [
                    user + "@" + floating_ip,
                ]
                keygen_args = [
                    "ssh-keygen -t rsa -f ~/.ssh/id_rsa -N " + passphrase
                ]
                cat_pubkey_args = [
                    "cat ~/.ssh/id_rsa.pub"
                ]

                # generate a keypair on the vm, then collect its public key
                generate_keypair = login_args + keygen_args
                result = Shell.ssh(*generate_keypair)

                # print("***** Keygen *****")
                # print(result)

                cat_public_key = login_args + cat_pubkey_args
                result = Shell.ssh(*cat_public_key)
                public_keys += "\n" + result

                # print("***** id_rsa.pub *****")
                # print(result)

            # print("***** public keys *****")
            # print(public_keys)

            # second pass: push the combined public keys to every vm so all
            # cluster members can ssh to each other
            for user, ip in zip(login_users, login_ips):
                arguments = [
                    user + "@" + ip,
                    "echo '" + public_keys + "' >> ~/.ssh/authorized_keys"
                ]
                # copy the public key contents to auth_keys
                result = Shell.ssh(*arguments)

            Console.ok("Virtual cluster creation successfull.")
        else:
            Console.error("No group {} in the Cloudmesh database."
                          .format(group_name))
            return ""

    return ""
def do_group(self, args, arguments):
    """
    ::

        Usage:
            group list [GROUPNAME] [--format=FORMAT]
            group remove NAMES [--group=GROUPNAME]
            group add NAMES [--type=TYPE] [--group=GROUPNAME]
            group delete GROUPS
            group copy FROM TO
            group merge GROUPA GROUPB MERGEDGROUP

        manage the groups

        Arguments:

            NAMES        names of object to be added
            GROUPS       names of a groups
            FROM         name of a group
            TO           name of a group
            GROUPA       name of a group
            GROUPB       name of a group
            MERGEDGROUP  name of a group

        Options:
            --format=FORMAT     the output format
            --type=TYPE         the resource type
            --name=NAME         the name of the group
            --id=IDS            the ID(s) to add to the group

        Description:

            Todo: design parameters that are useful and match
            description
            Todo: discuss and propose command

            cloudmesh can manage groups of resource related objects.
            As it would be cumbersome to for example delete many
            virtual machines or delete VMs that are in the same
            group, but are running in different clouds.

            Hence it is possible to add a virtual machine to a
            specific group. The group name to be added to can be
            set as a default. This way all subsequent commands
            use this default group. It can also be set via a
            command parameter. Another convenience function is
            that the group command can use the last used virtual
            machine. If a vm is started it will be automatically
            added to the default group if it is set.

            If finer grained deletion is needed, it can be achieved
            with the delete command that supports deletion by name

            It is also possible to remove a VM from the group using
            the remove command, by supplying the ID

        Note: The type is internally called for the group species, we
              may eliminate the species column and just use the type
              column for it

        Example:
            default group mygroup

            group add --type=vm --id=albert-[001-003]
                adds the vms with the given name using the Parameter
                see base

            group add --type=vm
                adds the last vm to the group

            group delete --name=mygroup
                deletes all objects in the group
    """
    if arguments["list"]:
        # Output format: explicit flag first, then the configured
        # default, then "table" as a final fallback.
        output = arguments["--format"] \
            or Default.get(name="format", category="general") \
            or "table"
        name = arguments["GROUPNAME"]
        if name is None:
            # No name given: list every group.
            result = Group.list(output=output)
            if result:
                print(result)
            else:
                print(
                    "No groups found other than the default group but it has no members."
                )
        else:
            # List only the named group.
            result = Group.list(name=name, output=output)
            if result:
                print(result)
            else:
                # BUG FIX: the original built a message with
                # "...{category}".format(**locals()) although no local
                # `category` existed, raising KeyError, and the
                # Console.error call was inside a dead ''' string so
                # nothing was ever reported. Report the failure
                # properly instead.
                Console.error(
                    "No group with name `{}` found.".format(name))
        return ""
    elif arguments["add"]:
        # group add NAMES [--type=TYPE] [--group=GROUP]
        # (debug prints "AAA"/"MMMM"/"DDD" and pprint removed)
        members = Parameter.expand(arguments["NAMES"])
        data = dotdict({
            "species": arguments["--type"] or "vm",
            "name": arguments["--group"] or Default.group
        })
        # Add each expanded member to the group individually.
        for member in members:
            data.member = member
            Group.add(**data)
        return ""
    elif arguments["delete"]:
        # Delete every group in the expanded GROUPS parameter.
        groups = Parameter.expand(arguments["GROUPS"])
        for group in groups:
            result = Group.delete(group)
            if result:
                Console.ok(result)
            else:
                Console.error("delete group {}. failed.".format(group))
        return ""
    elif arguments["remove"]:
        # Remove each named member from one group.
        members = Parameter.expand(arguments["NAMES"])
        group = arguments["--group"] or Default.group
        for member in members:
            result = Group.remove(group, member)
            if result:
                Console.ok(result)
            else:
                Console.error("remove {} from group {}. failed.".format(
                    group, member))
        return ""
    elif arguments["copy"]:
        # Copy the contents of group FROM into group TO.
        _from = arguments["FROM"]
        _to = arguments["TO"]
        Group.copy(_from, _to)
        return ""
    elif arguments["merge"]:
        # Merge groups A and B into a new group.
        _groupA = arguments["GROUPA"]
        _groupB = arguments["GROUPB"]
        _mergedGroup = arguments["MERGEDGROUP"]
        Group.merge(_groupA, _groupB, _mergedGroup)
        return ""
def do_workflow(self, args, arguments):
    """
    ::

        Usage:
            workflow refresh [--cloud=CLOUD] [-v]
            workflow list [ID] [NAME] [--cloud=CLOUD] [--format=FORMAT] [--refresh] [-v]
            workflow add NAME LOCATION
            workflow delete ID
            workflow status [NAMES]
            workflow show ID
            workflow save NAME WORKFLOWSTR
            workflow run NAME
            workflow service start
            workflow service stop

            This lists out the workflows present for a cloud

        Options:
            --format=FORMAT  the output format [default: table]
            --cloud=CLOUD    the cloud name
            --refresh        refreshes the data before displaying it
                             from the cloud

        Examples:
            cm workflow refresh
            cm workflow list
            cm workflow list --format=csv
            cm workflow show 58c9552c-8d93-42c0-9dea-5f48d90a3188 --refresh
            cm workflow run workflow1
    """
    arg = dotdict(arguments)
    # Expand host-list style NAMES (e.g. a[1-3]) into individual names.
    if arg.NAMES is not None:
        arg.names = Parameter.expand(arg.NAMES)
    else:
        arg.names = None

    # Resolve the cloud: explicit flag wins, otherwise the default.
    cloud = arguments["--cloud"] or Default.cloud
    if cloud is None:
        Console.error("Default cloud doesn't exist")
        return

    if arguments["-v"]:
        print("Cloud: {}".format(cloud))

    if arguments["refresh"] or Default.refresh:
        msg = "Refresh workflow for cloud {:}.".format(cloud)
        if Workflow.refresh(cloud):
            Console.ok("{:} ok".format(msg))
        else:
            Console.error("{:} failed".format(msg))
        return ""

    if arguments["list"]:
        id = arguments['ID']
        live = arguments['--refresh']
        output_format = arguments["--format"]
        # Without an ID show the whole list, otherwise the details
        # of the single workflow.
        if id is None:
            result = Workflow.list(cloud, output_format)
        else:
            result = Workflow.details(cloud, id, live, output_format)
        if result is None:
            Console.error("No workflow(s) found. Failed.")
        else:
            print(result)
        return ""
    elif arguments["show"]:
        workflow_id = arguments["ID"]
        if workflow_id is None:
            Console.msg("Please enter a Workflow Id to execute workflow")
        else:
            # NOTE(review): "show" delegates to Workflow.run here —
            # looks intentional in this codebase (it fetches the
            # stored workflow record), but verify against Workflow API.
            result = Workflow.run(cloud, workflow_id)
            if result is None:
                Console.msg(
                    "Use workflow list to view existing workflows or do workflow save to save a new one"
                )
            else:
                Console.msg(result[0]['workflow_str'])
                Console.msg("All Set to execute")
    elif arguments["save"]:
        workflow_name = arguments["NAME"]
        workflow_str = arguments["WORKFLOWSTR"]
        result = Workflow.save(cloud=cloud,
                               name=workflow_name,
                               str=workflow_str)
        if result is not None:
            Console.ok(result)
        else:
            Console.error("Failed to save workflow!")
    elif arguments["run"]:
        Console.msg("Execute Run")
        workflow_name = arguments["NAME"]
        result = Workflow.run(cloud, name=workflow_name)
    elif arguments["delete"]:
        workflow_id = arguments["ID"]
        result = Workflow.delete(cloud, workflow_id)
        if result is not None:
            Console.msg(result)
        else:
            # BUG FIX: the original printed "Failed to save workflow!"
            # here, copy-pasted from the save branch.
            Console.error("Failed to delete workflow!")
    elif arguments["service"] and arguments["start"]:
        # TODO: service management not implemented yet
        # (mkdir -p ~/.cloudmesh/workflow)
        pass
def do_network(self, args, arguments):
    """
    ::

        Usage:
            network get fixed [ip] [--cloud=CLOUD] FIXED_IP
            network get floating [ip] [--cloud=CLOUD] FLOATING_IP_ID
            network reserve fixed [ip] [--cloud=CLOUD] FIXED_IP
            network unreserve fixed [ip] [--cloud=CLOUD] FIXED_IP
            network associate floating [ip] [--cloud=CLOUD] [--group=GROUP]
                [--instance=INS_ID_OR_NAME] [FLOATING_IP]
            network disassociate floating [ip] [--cloud=CLOUD] [--group=GROUP]
                [--instance=INS_ID_OR_NAME] [FLOATING_IP]
            network create floating [ip] [--cloud=CLOUD] [--pool=FLOATING_IP_POOL]
            network delete floating [ip] [--cloud=CLOUD] [--unused] [FLOATING_IP]
            network list floating pool [--cloud=CLOUD]
            network list floating [ip] [--cloud=CLOUD] [--unused]
                [--instance=INS_ID_OR_NAME] [IP_OR_ID]
            network create cluster --group=demo_group
            network -h | --help

        Options:
            -h                         help message
            --unused                   unused floating ips
            --cloud=CLOUD              Name of the IaaS cloud e.g. india_openstack_grizzly.
            --group=GROUP              Name of the group in Cloudmesh
            --pool=FLOATING_IP_POOL    Name of Floating IP Pool
            --instance=INS_ID_OR_NAME  ID or Name of the vm instance

        Arguments:
            IP_OR_ID        IP Address or ID of IP Address
            FIXED_IP        Fixed IP Address, e.g. 10.1.5.2
            FLOATING_IP     Floating IP Address, e.g. 192.1.66.8
            FLOATING_IP_ID  ID associated with Floating IP,
                            e.g. 185c5195-e824-4e7b-8581-703abec4bc01

        Examples:
            network get fixed ip --cloud=india 10.1.2.5
            network get fixed --cloud=india 10.1.2.5
            network get floating ip --cloud=india 185c5195-e824-4e7b-8581-703abec4bc01
            network get floating --cloud=india 185c5195-e824-4e7b-8581-703abec4bc01
            network reserve fixed ip --cloud=india 10.1.2.5
            network reserve fixed --cloud=india 10.1.2.5
            network unreserve fixed ip --cloud=india 10.1.2.5
            network unreserve fixed --cloud=india 10.1.2.5
            network associate floating ip --cloud=india --instance=albert-001 192.1.66.8
            network associate floating --cloud=india --instance=albert-001
            network associate floating --cloud=india --group=albert_group
            network disassociate floating ip --cloud=india --instance=albert-001 192.1.66.8
            network disassociate floating --cloud=india --instance=albert-001 192.1.66.8
            network create floating ip --cloud=india --pool=albert-f01
            network create floating --cloud=india --pool=albert-f01
            network delete floating ip --cloud=india 192.1.66.8 192.1.66.9
            network delete floating --cloud=india 192.1.66.8 192.1.66.9
            network list floating ip --cloud=india
            network list floating --cloud=india
            network list floating --cloud=india --unused
            network list floating --cloud=india 192.1.66.8
            network list floating --cloud=india --instance=323c5195-7yy34-4e7b-8581-703abec4b
            network list floating pool --cloud=india
            network create cluster --group=demo_group
    """
    # pprint(arguments)

    # Get the cloud parameter OR read default
    cloudname = arguments["--cloud"] or Default.cloud

    if cloudname is None:
        # NOTE(review): these adjacent string literals concatenate
        # without a separator — the first line runs into the second
        # ("...set!Please use...").
        Console.error("Default cloud has not been set!"
                      "Please use the following to set it:\n"
                      "cm default cloud=CLOUDNAME\n"
                      "or provide it via the --cloud=CLOUDNAME argument.")
        return ""

    # Fixed IP info
    if arguments["get"] \
            and arguments["fixed"]:
        fixed_ip = arguments["FIXED_IP"]
        result = Network.get_fixed_ip(cloudname,
                                      fixed_ip_addr=fixed_ip)
        Console.msg(result)

    # Floating IP info
    elif arguments["get"] \
            and arguments["floating"]:
        floating_ip_id = arguments["FLOATING_IP_ID"]
        result = Network.get_floating_ip(cloudname,
                                         floating_ip_or_id=floating_ip_id)
        Console.msg(result)

    # Reserve a fixed ip
    elif arguments["reserve"] \
            and arguments["fixed"]:
        fixed_ip = arguments["FIXED_IP"]
        result = Network.reserve_fixed_ip(cloudname=cloudname,
                                          fixed_ip_addr=fixed_ip)
        if result is not None:
            Console.ok(
                "Reserve fixed ip address {} complete.".format(fixed_ip))

    # Un-Reserve a fixed ip
    elif arguments["unreserve"] \
            and arguments["fixed"]:
        fixed_ip = arguments["FIXED_IP"]
        result = Network.unreserve_fixed_ip(cloudname=cloudname,
                                            fixed_ip_addr=fixed_ip)
        if result is not None:
            Console.ok("Un-Reserve fixed ip address {} complete.".format(
                fixed_ip))

    # Associate floating IP
    elif arguments["associate"] \
            and arguments["floating"]:
        # Get all command-line arguments
        group_name = arguments["--group"]
        instance_id = arguments["--instance"]
        floating_ip = arguments["FLOATING_IP"]

        # group supplied
        if group_name is not None:
            """
            Group name has been provided.
            Assign floating IPs to all vms in the group
            and return
            """
            # Get the group information
            group = Group.get_info(name=group_name,
                                   category=cloudname,
                                   output="json")
            if group is not None:
                # Convert from str to json
                group = json.loads(group)

                # For each vm in the group
                # Create and assign a floating IP
                for item in group:
                    instance_id = group[item]["value"]
                    floating_ip = Network.find_assign_floating_ip(
                        cloudname=cloudname,
                        instance_id=instance_id)
                    if floating_ip is not None:
                        Console.ok(
                            "Created and assigned Floating IP {} to instance {}."
                            .format(floating_ip, instance_id))

                        # Refresh VM in db
                        self.refresh_vm(cloudname)
            else:
                Console.error(
                    "No group {} in the Cloudmesh database.".format(
                        group_name))
                return ""

        # floating-ip not supplied, instance-id supplied
        elif not floating_ip and instance_id is not None:
            """
            Floating IP has not been provided, instance-id provided.
            Generate one from the pool, and assign to vm
            and return
            """
            floating_ip = Network.find_assign_floating_ip(
                cloudname=cloudname,
                instance_id=instance_id)
            if floating_ip is not None:
                Console.ok(
                    "Associated floating IP {} to instance {}.".format(
                        floating_ip, instance_id))

        # instance-id & floating-ip supplied
        elif instance_id is not None:
            """
            Floating IP & Instance ID have been provided
            Associate the IP to the instance
            and return
            """
            # NOTE(review): floating_ip[0] treats FLOATING_IP as a
            # sequence; if docopt returns a plain string here this
            # passes only its first character — confirm whether
            # FLOATING_IP parses as a list in this usage.
            Network.find_assign_floating_ip(cloudname=cloudname,
                                            instance_id=instance_id,
                                            floating_ip=floating_ip[0])

        # Invalid parameters
        else:
            Console.error(
                "Please provide at least one of [--group] OR [--instance] parameters.\n"
                "You can also provide [FLOATING_IP] AND [--instance] parameters.\n"
                "See 'cm network --help' for more info.")
            return ""

        # Refresh VM in db
        self.refresh_vm(cloudname)

    elif arguments["disassociate"] \
            and arguments["floating"]:
        # Get all command-line arguments
        group_name = arguments["--group"]
        instance_id = arguments["--instance"]
        floating_ip = arguments["FLOATING_IP"]

        # group supplied
        if group_name is not None:
            """
            Group name has been provided.
            Remove floating IPs of all vms in the group
            and return
            """
            # Get the group information
            group = Group.get_info(name=group_name,
                                   category=cloudname,
                                   output="json")
            if group is not None:
                # Convert from str to json
                group = json.loads(group)

                # For each vm in the group
                # Create and assign a floating IP
                for item in group:
                    instance_id = group[item]["value"]

                    # Get the instance dict
                    instance_dict = Network.get_instance_dict(
                        cloudname=cloudname,
                        instance_id=instance_id)

                    # Instance not found
                    if instance_dict is None:
                        Console.error(
                            "Instance {} not found in the cloudmesh database!"
                            .format(instance_id))
                        return ""

                    # Get the instance name
                    instance_name = instance_dict["name"]
                    floating_ip = instance_dict["floating_ip"]

                    # Floating ip argument invalid
                    if floating_ip is None:
                        Console.error(
                            "Instance{} does not have a floating_ip.".
                            format(instance_name))
                        return ""

                    result = Network.disassociate_floating_ip(
                        cloudname=cloudname,
                        instance_name=instance_name,
                        floating_ip=floating_ip)
                    if result is not None:
                        Console.ok(
                            "Disassociated Floating IP {} from instance {}."
                            .format(floating_ip, instance_name))
            else:
                Console.error(
                    "No group {} in the Cloudmesh database.".format(
                        group_name))
                return ""

        # floating-ip not supplied, instance-id supplied
        # NOTE(review): len(floating_ip) raises TypeError when the
        # positional FLOATING_IP was omitted and docopt yields None;
        # the associate branch uses `not floating_ip` instead —
        # confirm which shape docopt actually produces here.
        elif len(floating_ip) == 0 and instance_id is not None:
            """
            Floating IP has not been provided, instance-id provided.
            Remove floating ip allocated to vm
            and return
            """
            instance_dict = Network.get_instance_dict(
                cloudname=cloudname,
                instance_id=instance_id)

            # Instance not found
            if instance_dict is None:
                Console.error(
                    "Instance {} not found in the cloudmesh database!".
                    format(instance_id))
                return ""

            instance_name = instance_dict["name"]
            floating_ip = instance_dict["floating_ip"]

            # Floating ip argument invalid
            if floating_ip is None:
                Console.error(
                    "Instance{} does not have a floating_ip.".format(
                        instance_name))
                return ""

            result = Network.disassociate_floating_ip(
                cloudname=cloudname,
                instance_name=instance_name,
                floating_ip=floating_ip)
            if result is not None:
                Console.ok(
                    "Disassociated Floating IP {} from instance {}.".
                    format(floating_ip, instance_name))

        # instance-id & floating-ip supplied
        elif instance_id is not None:
            """
            Floating IP & Instance ID have been provided
            Remove the IP from the instance
            and return
            """
            instance_dict = Network.get_instance_dict(
                cloudname=cloudname,
                instance_id=instance_id)
            # NOTE(review): same [0]-indexing concern as in the
            # associate branch above.
            floating_ip = floating_ip[0]

            # Instance not found
            if instance_dict is None:
                Console.error(
                    "Instance {} not found in the cloudmesh database!".
                    format(instance_id))
                return ""

            instance_name = instance_dict["name"]
            _floating_ip = instance_dict["floating_ip"]

            # Floating ip argument invalid
            if _floating_ip != floating_ip:
                Console.error(
                    "Invalid floating_ip {} for instance {}.".format(
                        floating_ip, instance_name))
                return ""

            result = Network.disassociate_floating_ip(
                cloudname=cloudname,
                instance_name=instance_name,
                floating_ip=floating_ip)
            if result is not None:
                Console.ok(
                    "Disassociated Floating IP {} from instance {}.".
                    format(floating_ip, instance_name))

        # Invalid parameters
        else:
            Console.error(
                "Please provide at least one of [--group] OR [--instance] parameters.\n"
                "You can also provide [FLOATING_IP] AND [--instance] parameters.\n"
                "See 'cm network --help' for more info.")
            return ""

        # Refresh VM in db
        self.refresh_vm(cloudname)

    # Create new floating ip under floating pool
    elif arguments["create"] \
            and arguments["floating"]:
        floating_pool = arguments["--pool"]
        result = Network.create_floating_ip(cloudname=cloudname,
                                            floating_pool=floating_pool)
        if result is not None:
            Console.ok("Created new floating IP {}".format(result))
        else:
            Console.error(
                "Failed to create floating IP! Please check arguments.")

    # Delete a floating ip address
    elif arguments["delete"] \
            and arguments["floating"]:
        # delete all unused floating ips
        if arguments["--unused"]:
            unused_floating_ips = Network.get_unused_floating_ip_list(
                cloudname=cloudname)
            if unused_floating_ips:
                for floating_ip in unused_floating_ips:
                    self._delete_floating_ip(cloudname=cloudname,
                                             floating_ip=floating_ip["id"])
            else:
                Console.msg(
                    "No unused floating ips exist at this moment. Ok.")
            return ""
        # delete specified floating ips
        floating_ips = Parameter.expand(arguments["FLOATING_IP"])
        for floating_ip in floating_ips:
            self._delete_floating_ip(cloudname=cloudname,
                                     floating_ip=floating_ip)

    # Floating IP Pool List
    elif arguments["list"] \
            and arguments["floating"] \
            and arguments["pool"]:
        result = Network.list_floating_ip_pool(cloudname)
        Console.msg(result)

    # Floating IP list [or info]
    elif arguments["list"] \
            and arguments["floating"]:
        ip_or_id = arguments["IP_OR_ID"]
        instance_id = arguments["--instance"]

        # List unused floating addr
        if arguments["--unused"]:
            result = Network.list_unused_floating_ip(cloudname=cloudname)
            Console.msg(result)
            return ""

        # Refresh VM in db
        self.refresh_vm(cloudname)

        # If instance id is supplied
        if instance_id is not None:
            instance_dict = Network.get_instance_dict(
                cloudname=cloudname,
                instance_id=instance_id)

            # Instance not found
            if instance_dict is None:
                Console.error(
                    "Instance {} not found in the cloudmesh database!".
                    format(instance_id))
                return ""

            # Read the floating_ip from the dict
            ip_or_id = instance_dict["floating_ip"]

            if ip_or_id is None:
                Console.error(
                    "Instance with ID {} does not have a floating IP address!"
                    .format(instance_id))
                return ""

        # If the floating ip or associated ID is supplied
        if ip_or_id is not None:
            result = Network.get_floating_ip(cloudname,
                                             floating_ip_or_id=ip_or_id)
            if result is not None:
                Console.msg(result)
            else:
                Console.error(
                    "Floating IP not found! Please check your arguments.")
                return ""
        # Retrieve the full list
        else:
            result = Network.list_floating_ip(cloudname)
            Console.msg(result)

    # Create a virtual cluster
    elif arguments["cluster"] and \
            arguments["create"]:
        group_name = arguments["--group"] or \
                     Default.get(name="group", category=cloudname)

        # Get the group information
        group = Group.get_info(name=group_name,
                               category=cloudname,
                               output="json")
        if group is not None:
            # Convert from str to json
            group = json.loads(group)

            # var contains pub key of all vms
            public_keys = ""
            login_users = []
            login_ips = []

            # For each vm in the group
            # Create and assign a floating IP
            for item in group:
                instance_id = group[item]["value"]

                # Get the instance dict
                instance_dict = Network.get_instance_dict(
                    cloudname=cloudname,
                    instance_id=instance_id)

                # Instance not found
                if instance_dict is None:
                    Console.error(
                        "Instance {} not found in the cloudmesh database!".
                        format(instance_id))
                    return ""

                # Get the instance name
                instance_name = instance_dict["name"]
                floating_ip = instance_dict["floating_ip"]

                # If vm does not have floating ip, then create
                if floating_ip is None:
                    floating_ip = Network.create_assign_floating_ip(
                        cloudname=cloudname,
                        instance_name=instance_name)
                    if floating_ip is not None:
                        Console.ok(
                            "Created and assigned Floating IP {} to instance {}."
                            .format(floating_ip, instance_name))
                        # Refresh VM in db
                        self.refresh_vm(cloudname)

                # Get the login user for this machine
                user = input("Enter the login user for VM {} : ".format(
                    instance_name))
                passphrase = getpass.getpass(
                    "Enter the passphrase key on VM {} : ".format(
                        instance_name))

                # create list for second iteration
                login_users.append(user)
                login_ips.append(floating_ip)

                login_args = [
                    user + "@" + floating_ip,
                ]
                # NOTE(review): the passphrase is interpolated into a
                # remote shell command unquoted — spaces or shell
                # metacharacters in it will break (or alter) the
                # command. Consider quoting/escaping.
                keygen_args = [
                    "ssh-keygen -t rsa -f ~/.ssh/id_rsa -N " + passphrase
                ]
                cat_pubkey_args = ["cat ~/.ssh/id_rsa.pub"]

                # Generate a keypair on the remote VM, then collect
                # its public key.
                generate_keypair = login_args + keygen_args
                result = Shell.ssh(*generate_keypair)

                cat_public_key = login_args + cat_pubkey_args
                result = Shell.ssh(*cat_public_key)
                public_keys += "\n" + result

            # Second pass: install the collected public keys on
            # every VM so all cluster members trust each other.
            # NOTE(review): this rebinds `arguments`, shadowing the
            # docopt dict for the rest of the method.
            for user, ip in zip(login_users, login_ips):
                arguments = [
                    user + "@" + ip,
                    "echo '" + public_keys + "' >> ~/.ssh/authorized_keys"
                ]
                # copy the public key contents to auth_keys
                result = Shell.ssh(*arguments)

            Console.ok("Virtual cluster creation successfull.")
        else:
            Console.error("No group {} in the Cloudmesh database.".format(
                group_name))
            return ""

    return ""
def do_inventory(self, args, arguments):
    """
    ::

      Usage:
          inventory add NAMES [--label=LABEL]
                        [--service=SERVICES]
                        [--project=PROJECT]
                        [--owners=OWNERS]
                        [--comment=COMMENT]
                        [--cluster=CLUSTER]
                        [--ip=IP]
          inventory set NAMES for ATTRIBUTE to VALUES
          inventory delete NAMES
          inventory clone NAMES from SOURCE
          inventory list [NAMES] [--format=FORMAT] [--columns=COLUMNS]
          inventory info

      Arguments:
        NAMES     Name of the resources (example i[10-20])
        FORMAT    The format of the output is either txt,
                  yaml, dict, table [default: table].
        OWNERS    a comma separated list of owners for this resource
        LABEL     a unique label for this resource
        SERVICE   a string that identifies the service
        PROJECT   a string that identifies the project
        SOURCE    a single host name to clone from
        COMMENT   a comment

      Options:
         -v       verbose mode

      Description:

            add -- adds a resource to the resource inventory
            list -- lists the resources in the given format
            delete -- deletes objects from the table
            clone -- copies the content of an existing object
                     and creates new once with it
            set -- sets for the specified objects the attribute
                   to the given value or values. If multiple values
                   are used the values are assigned to the and
                   objects in order. See examples
            map -- allows to set attibutes on a set of objects
                   with a set of values

      Examples:
        cm inventory add x[0-3] --service=openstack
            adds hosts x0, x1, x2, x3 and puts the string
            openstack into the service column
        cm lists
            lists the repository
        cm x[3-4] set temperature to 32
            sets for the resources x3, x4 the value of the
            temperature to 32
        cm x[7-8] set ip 128.0.0.[0-1]
            sets the value of x7 to 128.0.0.0
            sets the value of x8 to 128.0.0.1
        cm clone x[5-6] from x3
            clones the values for x5, x6 from x3
    """
    # NOTE(review): `filename` is unused by the code below; kept in
    # case config_file() has path-setup side effects — confirm and
    # remove if it does not. (Unused `sorted_keys` and the debug
    # print(arguments) were removed.)
    filename = config_file("/cloudmesh_inventory.yaml")

    if arguments["info"]:
        # Print summary information about the inventory.
        i = Inventory()
        i.read()
        i.info()
    elif arguments["list"]:
        # List the inventory, optionally restricted to given columns.
        i = Inventory()
        i.read()
        if arguments["--columns"]:
            order = arguments["--columns"].split(",")
        else:
            order = i.order
        print(i.list(format="table", order=order))
    elif arguments["NAMES"] is None:
        # Every remaining action needs host names.
        Console.error("Please specify a host name")
    elif arguments["set"]:
        # inventory set NAMES for ATTRIBUTE to VALUES
        hosts = Parameter.expand(arguments["NAMES"])
        values = Parameter.expand(arguments["VALUES"])
        # A single value applies to every host.
        if len(values) == 1:
            values *= len(hosts)
        attribute = arguments["ATTRIBUTE"]
        if len(hosts) != len(values):
            Console.error(
                "Number of names {:} != number of values{:}".format(
                    len(hosts), len(values)))
            # BUG FIX: the original fell through after this error and
            # indexed values[index] anyway, raising IndexError.
            return ""
        i = Inventory()
        i.read()
        for host, value in zip(hosts, values):
            host_object = {'host': host, attribute: value}
            i.add(**host_object)
        print(i.list(format="table"))
    elif arguments["add"]:
        hosts = Parameter.expand(arguments["NAMES"])
        i = Inventory()
        i.read()
        element = {}
        for attribute in i.order:
            # Not every inventory column has a matching --option;
            # skip the ones docopt did not define.
            try:
                value = arguments["--" + attribute]
                if value is not None:
                    element[attribute] = value
            except KeyError:  # was a bare except: narrow to the real case
                pass
        # NOTE(review): the unexpanded NAMES string is stored as the
        # host; Inventory.add appears to expand host lists itself —
        # confirm, otherwise this should loop over `hosts`.
        element['host'] = arguments["NAMES"]
        i.add(**element)
        print(i.list(format="table"))
    elif arguments["delete"]:
        hosts = Parameter.expand(arguments["NAMES"])
        i = Inventory()
        i.read()
        for host in hosts:
            # pop() instead of del: deleting an unknown host is a
            # no-op rather than a KeyError crash.
            i.data.pop(host, None)
        i.save()
    elif arguments["clone"]:
        # Copy the attributes of SOURCE onto every named host.
        hosts = Parameter.expand(arguments["NAMES"])
        source = arguments["SOURCE"]
        i = Inventory()
        i.read()
        if source in i.data:
            for host in hosts:
                i.data[host] = dict(i.data[source])
            i.save()
        else:
            Console.error("The source {:} does not exist".format(source))
    return ""
def do_launcher(self, args, arguments):
    """
    ::

        Usage:
            launcher repo add NAME URL
            launcher repo delete NAME
            launcher repo list
            launcher repo
            launcher list [NAMES] [--cloud=CLOUD] [--format=FORMAT] [--source=db|dir]
            launcher add NAME SOURCE
            launcher delete [NAMES] [--cloud=CLOUD]
            launcher clear
            launcher run [NAME]
            launcher resume [NAME]
            launcher suspend [NAME]
            launcher refresh
            launcher log [NAME]
            launcher status [NAME]

        Arguments:

          KEY    the name of the launcher

        Options:

           --cloud=CLOUD    the name of the cloud
           --format=FORMAT  the output format [launcher: table]
           --all            lists all the launcher values

        Description:

        Launcher is a command line tool to test the portal launch
        functionalities through command line.

        The current launcher values can by listed with --all option:(
        if you have a launcher cloud specified. You can also add a
        cloud parameter to apply the command to a specific cloud)

               launcher list

            A launcher can be deleted with

                launcher delete KEY

        Examples:
            launcher list --all
            launcher list --cloud=general
            launcher delete <KEY>
    """
    # (debug prints "AAA"/"BBB" and pprint(arg) removed)
    arg = dotdict(arguments)
    # Expand host-list style NAMES (e.g. l[1-3]) into individual names.
    if arg.NAMES is not None:
        arg.names = Parameter.expand(arg.NAMES)
    else:
        arg.names = None
    # BUG FIX: the original tested `arg.name == ["all"]` — `name` is
    # never a docopt key, so dotdict returned None and the "all"
    # sentinel was never honored.
    if arg.names == ["all"]:
        arg.names = None
    arg.cloud = arguments["--cloud"] or Default.cloud
    arg.output = arguments['--format'] or 'table'
    arg.source = arguments['--source'] or 'db'

    if arg.cloud is None:
        Console.error("Default arg.cloud not set")
        return

    result = ""

    if arguments["repo"] and arguments["list"]:
        # launcher repo list: show the repos from the yaml config.
        launchers = ConfigDict(filename="cm_launcher.yaml")["cloudmesh"]["repo"]
        d = {}
        for name in launchers:
            location = launchers[name]["location"]
            d[name] = {"name": name, "location": location}
        print(Printer.dict_table(d))
        return ""
    elif arguments["repo"] and arguments["add"]:
        # TODO: repo add is not implemented yet.
        launchers = ConfigDict(filename="cm_launcher.yaml")
        print("repo add")
        print(launchers)
        return ""
    elif arguments["repo"] and arguments["delete"]:
        # TODO: repo delete is not implemented yet.
        print("repo delete")
        return ""
    elif arguments["list"] or arguments["repo"]:
        # BUG FIX: the original condition was
        # `arguments["repo"] and not arguments["list"]`, which made
        # plain `launcher list` unreachable. This branch now serves
        # both `launcher list [NAMES]` and bare `launcher repo`
        # (which previously fell through to the same call).
        result = Launcher.list(name=arg.names, output=arg.output)
    elif arguments["add"]:
        result = Launcher.add(name=arg.NAME, source=arg.SOURCE)
    elif arguments["delete"]:
        # Delete each expanded name on the selected cloud.
        for name in arg.names:
            result = Launcher.delete(name=name, category=arg.cloud)
    elif arguments["run"]:
        result = Launcher.run()
    elif arguments["resume"]:
        # BUG FIX: these branches passed `arg.name` (always None);
        # the docopt key is NAME.
        result = Launcher.resume(name=arg.NAME)
    elif arguments["suspend"]:
        result = Launcher.suspend(name=arg.NAME)
    elif arguments.get("details"):
        # "details" is not in the usage above, so docopt never creates
        # the key; .get() avoids the KeyError the original raised when
        # evaluation reached this branch (e.g. `launcher clear`).
        result = Launcher.details(name=arg.NAME)
    elif arguments["clear"]:
        result = Launcher.clear()
    elif arguments["refresh"]:
        result = Launcher.refresh(name=arg.NAME)
    print(result)
def do_comet(self, args, arguments):
    """
    ::

       Usage:
          comet init
          comet active [ENDPOINT]
          comet ll [CLUSTERID] [--format=FORMAT] [--endpoint=ENDPOINT]
          comet cluster [--concise|--status] [CLUSTERID]
                       [--format=FORMAT]
                       [--sort=SORTKEY]
                       [--endpoint=ENDPOINT]
          comet computeset [COMPUTESETID]
                   [--allocation=ALLOCATION]
                   [--cluster=CLUSTERID]
                   [--state=COMPUTESESTATE]
                   [--endpoint=ENDPOINT]
          comet start CLUSTERID [--count=NUMNODES] [COMPUTENODEIDS]
                   [--allocation=ALLOCATION]
                   [--reservation=RESERVATION]
                   [--walltime=WALLTIME]
                   [--endpoint=ENDPOINT]
          comet terminate COMPUTESETID [--endpoint=ENDPOINT]
          comet power (on|off|reboot|reset|shutdown) CLUSTERID [NODESPARAM]
                   [--endpoint=ENDPOINT]
          comet console [--link] CLUSTERID [COMPUTENODEID]
                   [--endpoint=ENDPOINT]
          comet node info CLUSTERID [COMPUTENODEID] [--format=FORMAT]
                   [--endpoint=ENDPOINT]
          comet node rename CLUSTERID OLDNAMES NEWNAMES
                   [--endpoint=ENDPOINT]
          comet iso list [--endpoint=ENDPOINT]
          comet iso upload [--isoname=ISONAME] PATHISOFILE
                   [--endpoint=ENDPOINT]
          comet iso attach ISOIDNAME CLUSTERID [COMPUTENODEIDS]
                   [--endpoint=ENDPOINT]
          comet iso detach CLUSTERID [COMPUTENODEIDS]
                   [--endpoint=ENDPOINT]
          comet reservation (list|create|update|delete)

       Options:
          --endpoint=ENDPOINT
                        Specify the comet nucleus service
                        endpoint to work with, e.g., dev
                        or production
          --format=FORMAT
                        Format is either table, json, yaml,
                        csv, rest
                        [default: table]
          --sort=SORTKEY
                        Sorting key for the table view
          --count=NUMNODES
                        Number of nodes to be powered on.
                        When this option is used, the comet system
                        will find a NUMNODES number of arbitrary nodes
                        that are available to boot as a computeset
          --allocation=ALLOCATION
                        Allocation to charge when power on
                        node(s)
          --reservation=RESERVATION
                        Submit the request to an existing reservation
          --walltime=WALLTIME
                        Walltime requested for the node(s).
                        Walltime could be an integer value followed
                        by a unit (m, h, d, w, for minute, hour, day,
                        and week, respectively). E.g., 3h, 2d
          --isoname=ISONAME
                        Name of the iso image after being stored remotely.
                        If not specified, use the original filename
          --state=COMPUTESESTATE
                        List only computeset with the specified state.
                        The state could be submitted, running, completed
          --link
                        Whether to open the console url or just show the link
          --concise
                        Concise table view for cluster info
          --status
                        Cluster table view displays only those columns
                        showing state of nodes

       Arguments:
          ENDPOINT      Service endpoint based on the yaml config file.
                        By default it's either dev or production.
          CLUSTERID     The assigned name of a cluster, e.g. vc1
          COMPUTESETID  An integer identifier assigned to a computeset
          COMPUTENODEID A compute node name, e.g., vm-vc1-0
                        If not provided, the requested action will be taken
                        on the frontend node of the specified cluster
          COMPUTENODEIDS
                        A set of compute node names in hostlist format,
                        e.g., vm-vc1-[0-3]
                        One single node is also acceptable: vm-vc1-0
                        If not provided, the requested action will be taken
                        on the frontend node of the specified cluster
          NODESPARAM    Specifying the node/nodes/computeset to act on.
                        In case of integer, will be intepreted as a
                        computesetid; in case of a hostlist format, e.g.,
                        vm-vc1-[0-3], a group of nodes; or a single host
                        is also acceptable, e.g., vm-vc1-0
          ISONAME       Name of an iso image at remote server
          ISOIDNAME     Index or name of an iso image at the remote server.
                        The index is based on the list from 'comet iso list'.
          PATHISOFILE   The full path to the iso image file to be uploaded
          OLDNAMES      The list of current node names to be renamed, in
                        hostlist format. A single host is also acceptable.
          NEWNAMES      The list of new names to rename to, in hostlist
                        format. A single host is also acceptable.
    """
    # back up of all the proposed commands/options
    # NOTE(review): the two triple-quoted strings below are no-op string
    # statements kept as a historical record of the old CLI surface and the
    # old implementation; they are never executed.
    """
    comet status
    comet tunnel start
    comet tunnel stop
    comet tunnel status
    comet logon
    comet logoff
    comet ll [CLUSTERID] [--format=FORMAT]
    comet docs
    comet info [--user=USER]
               [--project=PROJECT]
               [--format=FORMAT]
    comet cluster [CLUSTERID][--name=NAMES]
               [--user=USER]
               [--project=PROJECT]
               [--hosts=HOSTS]
               [--start=TIME_START]
               [--end=TIME_END]
               [--hosts=HOSTS]
               [--format=FORMAT]
    comet computeset [COMPUTESETID]
    comet start ID
    comet stop ID
    comet power on CLUSTERID [NODESPARAM]
               [--allocation=ALLOCATION]
               [--walltime=WALLTIME]
    comet power (off|reboot|reset|shutdown) CLUSTERID [NODESPARAM]
    comet console CLUSTERID [COMPUTENODEID]
    comet delete [all]
                 [--user=USER]
                 [--project=PROJECT]
                 [--name=NAMES]
                 [--hosts=HOSTS]
                 [--start=TIME_START]
                 [--end=TIME_END]
                 [--host=HOST]
    comet delete --file=FILE
    comet update [--name=NAMES]
                 [--hosts=HOSTS]
                 [--start=TIME_START]
                 [--end=TIME_END]
    comet add [--user=USER]
              [--project=PROJECT]
              [--host=HOST]
              [--description=DESCRIPTION]
              [--start=TIME_START]
              [--end=TIME_END]
              NAME
    comet add --file=FILENAME

    Options:
        --user=USER          user name
        --name=NAMES         Names of the vcluster
        --start=TIME_START   Start time of the vcluster, in
                             YYYY/MM/DD HH:MM:SS format.
                             [default: 1901-01-01]
        --end=TIME_END       End time of the vcluster, in YYYY/MM/DD
                             HH:MM:SS format. In addition a duratio
                             can be specified if the + sign is the first
                             sig The duration will than be added to
                             the start time.
                             [default: 2100-12-31]
        --project=PROJECT    project id
        --host=HOST          host name
        --description=DESCRIPTION   description summary of the vcluster
        --file=FILE          Adding multiple vclusters from one file
        --format=FORMAT      Format is either table, json, yaml,
                             csv, rest
                             [default: table]
        --allocation=ALLOCATION   Allocation to charge when power on
                                  node(s)
        --walltime=WALLTIME  Walltime requested for the node(s)

    Arguments:
        FILENAME  the file to open in the cwd if . is
                  specified. If file in in cwd
                  you must specify it with ./FILENAME

    Opens the given URL in a browser window.
    """
    """
    if not arguments["tunnel"] and Comet.tunnelled and not Comet.is_tunnel():
        Console.error("Please establish a tunnel first with:")
        print
        print ("    comet tunnel start")
        print
        return ""

    try:
        if not arguments["tunnel"]:
            logon = Comet.logon()
            if logon is False:
                Console.error("Could not logon")
                return ""
    except:
        Console.error("Could not logon")
    # pprint (arguments)
    output_format = arguments["--format"] or "table"

    if arguments["status"]:
        Comet.state()
    elif arguments["tunnel"] and arguments["start"]:
        Comet.tunnel(True)
    elif arguments["tunnel"] and arguments["stop"]:
        Comet.tunnel(False)
    elif arguments["tunnel"] and arguments["status"]:
        Comet.state()
    elif arguments["logon"]:
        if self.context.comet_token is None:
            if Comet.logon():
                Console.ok("logging on")
                self.context.comet_token = Comet.token
            else:
                Console.error("could not logon")
        else:
            Console.error("already logged on")
    elif arguments["logoff"]:
        if self.context.comet_token is None:
            Console.error("not logged in")
        else:
            if Comet.logoff():
                Console.ok("Logging off")
                self.context.comet_token = None
            else:
                Console.error(
                    "some issue while logging off. Maybe comet not reachable")
    elif arguments["docs"]:
        Comet.docs()
    elif arguments["info"]:
        Console.error("not yet implemented")
    elif arguments["add"]:
        print ("add the cluster")
    elif arguments["start"]:
        cluster_id = arguments["ID"]
        print("start", cluster_id)
        Cluster.start(cluster_id)
    elif arguments["stop"]:
        cluster_id = arguments["ID"]
        print("stop", cluster_id)
        Cluster.stop(cluster_id)
    elif arguments["ll"]:
    """
    # ------------------------------------------------------------------
    # comet init: interactively build/repair the comet section of
    # cloudmesh.yaml (username, active endpoint, base url, api version)
    # and fetch an apikey for the chosen endpoint.
    # ------------------------------------------------------------------
    if arguments["init"]:
        print ("Initializing the comet configuration file...")
        config = ConfigDict("cloudmesh.yaml")
        # for unit testing only.
        cometConf = config["cloudmesh.comet"]
        endpoints = []
        # print (cometConf.keys())
        if "endpoints" in cometConf.keys():
            endpoints = cometConf["endpoints"].keys()
            if len(endpoints) < 1:
                Console.error("No service endpoints available. "
                              "Please check the config template",
                              traceflag=False)
                return ""
        if "username" in cometConf.keys():
            default_username = cometConf['username']
            # print (default_username)
            # 'TBD' is the template placeholder: prompt only when unset.
            if 'TBD' == default_username:
                set_default_user = \
                    input("Set a default username (RETURN to skip): ")
                if set_default_user:
                    config.data["cloudmesh"]["comet"]["username"] = \
                        set_default_user
                    config.save()
                    Console.ok("Comet default username set!")
        if "active" in cometConf.keys():
            active_endpoint = cometConf['active']
            set_active_endpoint = \
                input("Set the active service endpoint to use. "
                      "The availalbe endpoints are - %s [%s]: "
                      % ("/".join(endpoints),
                         active_endpoint)
                      )
            if set_active_endpoint:
                if set_active_endpoint in endpoints:
                    config.data["cloudmesh"]["comet"]["active"] = \
                        set_active_endpoint
                    config.save()
                    Console.ok("Comet active service endpoint set!")
                else:
                    Console.error("The provided endpoint does not match "
                                  "any available service endpoints. Try %s"
                                  % "/".join(endpoints),
                                  traceflag=False)
        # offer to override url/api version of the (possibly new) active
        # endpoint before authenticating
        if cometConf['active'] in endpoints:
            endpoint_url = cometConf["endpoints"] \
                [cometConf['active']]["nucleus_base_url"]
            api_version = cometConf["endpoints"] \
                [cometConf['active']]["api_version"]
            set_endpoint_url = \
                input("Set the base url for the nucleus %s service [%s]: " \
                      % (cometConf['active'],
                         endpoint_url)
                      )
            if set_endpoint_url:
                if set_endpoint_url != endpoint_url:
                    config.data["cloudmesh"]["comet"]["endpoints"] \
                        [cometConf['active']]["nucleus_base_url"] \
                        = set_endpoint_url
                    config.save()
                    Console.ok("Service base url set!")
            set_api_version = \
                input("Set the api version for the nucleus %s service [%s]: " \
                      % (cometConf['active'],
                         api_version)
                      )
            if set_api_version:
                if set_api_version != api_version:
                    config.data["cloudmesh"]["comet"]["endpoints"] \
                        [cometConf['active']]["api_version"] \
                        = set_api_version
                    config.save()
                    Console.ok("Service api version set!")
        print("Authenticating to the nucleus %s " \
              "service and obtaining the apikey..." \
              % cometConf['active'])
        Comet.get_apikey(cometConf['active'])
        return ''
        # Comet.get_apikey()
    # ------------------------------------------------------------------
    # comet active [ENDPOINT]: show or switch the active endpoint.
    # NOTE(review): this branch does not return, so execution continues
    # into the logon attempt below -- presumably intentional; confirm.
    # ------------------------------------------------------------------
    if arguments["active"]:
        config = ConfigDict("cloudmesh.yaml")
        cometConf = config["cloudmesh.comet"]
        endpoint = arguments["ENDPOINT"] or None
        # parameter specified, intended to change
        if endpoint:
            if "endpoints" in cometConf.keys():
                endpoints = cometConf["endpoints"].keys()
                if endpoint in endpoints:
                    config.data["cloudmesh"] \
                        ["comet"] \
                        ["active"] = endpoint
                    config.save()
                    Console.ok("Comet active service endpoint set"
                               " to: %s" % endpoint)
                else:
                    Console.error("The provided endpoint does not match "
                                  "any available service endpoints. Try %s."
                                  % "/".join(endpoints),
                                  traceflag = False)
            else:
                Console.error("No available endpoint to set. "
                              "Check config file!",
                              traceflag=False)
        else:
            if "active" in cometConf.keys():
                active_endpoint = cometConf['active']
                Console.ok("Current active service endpoint is: %s"
                           % active_endpoint)
            else:
                Console.error("Cannot set active endpoint. "
                              "Check config file!",
                              traceflag = False)
    # ------------------------------------------------------------------
    # Every remaining subcommand requires a logon; --endpoint, if given,
    # must name a configured endpoint.
    # ------------------------------------------------------------------
    try:
        endpoint = None
        config = ConfigDict("cloudmesh.yaml")
        cometConf = config["cloudmesh.comet"]
        if arguments["--endpoint"]:
            endpoint = arguments["--endpoint"]
            if "endpoints" in cometConf.keys():
                endpoints = cometConf["endpoints"].keys()
                if endpoint not in endpoints:
                    Console.error("The provided endpoint does not match "
                                  "any available service endpoints. Try %s."
                                  % "/".join(endpoints),
                                  traceflag = False)
                    return ''
        logon = Comet.logon(endpoint=endpoint)
        if logon is False:
            Console.error("Could not logon. Please try first:\n"
                          "cm comet init",
                          traceflag = False)
            return ""
    except:
        # NOTE(review): bare except swallows all errors (incl. KeyboardInterrupt)
        # and then falls through to the command dispatch anyway.
        Console.error("Could not logon", traceflag = False)
    output_format = arguments["--format"] or "table"
    # ------------------------------------------------------------------
    # command dispatch
    # ------------------------------------------------------------------
    if arguments["ll"]:
        # short cluster listing
        cluster_id = arguments["CLUSTERID"] or None
        print(Cluster.simple_list(cluster_id, format=output_format))
    elif arguments["cluster"]:
        # detailed cluster view; --concise/--status select reduced columns
        view = "FULL"
        if arguments["--concise"]:
            view = "CONCISE"
        if arguments["--status"]:
            view = "STATE"
        cluster_id = arguments["CLUSTERID"]
        sortkey = arguments["--sort"]
        print(Cluster.list(cluster_id,
                           format=output_format,
                           sort=sortkey,
                           view=view))
    elif arguments["computeset"]:
        computeset_id = arguments["COMPUTESETID"] or None
        cluster = arguments["--cluster"] or None
        state = arguments["--state"] or None
        allocation = arguments["--allocation"] or None
        # NOTE(review): duplicate assignment of `cluster` kept from original.
        cluster = arguments["--cluster"] or None
        print (Cluster.computeset(computeset_id, cluster, state, allocation))
    elif arguments["start"]:
        # power on a computeset: either --count arbitrary nodes or an
        # explicit hostlist of node names
        clusterid = arguments["CLUSTERID"]
        numnodes = arguments["--count"] or None
        computenodeids = arguments["COMPUTENODEIDS"] or None
        # check allocation information for the cluster
        cluster = Cluster.list(clusterid, format='rest')
        try:
            allocations = cluster[0]['allocations']
        except:
            # print (cluster)
            Console.error("No allocation available for the specified cluster."\
                          "Please check with the comet help team",
                          traceflag=False)
            return ""
        # checking whether the computesetids is in valid hostlist format
        if computenodeids:
            try:
                # hosts_param is only computed to validate the format
                hosts_param = hostlist.expand_hostlist(computenodeids)
            except hostlist.BadHostlist:
                Console.error("Invalid hosts list specified!",
                              traceflag=False)
                return ""
        elif numnodes:
            try:
                param = int(numnodes)
            except ValueError:
                Console.error("Invalid count value specified!",
                              traceflag=False)
                return ""
            if param <= 0:
                Console.error("count value has to be greather than zero",
                              traceflag=False)
                return ""
            numnodes = param
        else:
            Console.error("You have to specify either the count of nodes, " \
                          "or the names of nodes in hostlist format",
                          traceflag=False)
            return ""
        walltime = arguments["--walltime"] or None
        allocation = arguments["--allocation"] or None
        reservation = arguments["--reservation"] or None
        # validating walltime and allocation parameters
        walltime = Cluster.convert_to_mins(walltime)
        if not walltime:
            print("No valid walltime specified. " \
                  "Using system default (2 days)")
        if not allocation:
            if len(allocations) == 1:
                allocation = allocations[0]
            else:
                allocation = Cluster.display_get_allocation(allocations)
        # issuing call to start a computeset with specified parameters
        print(Cluster.computeset_start(clusterid,
                                       computenodeids,
                                       numnodes,
                                       allocation,
                                       reservation,
                                       walltime)
              )
    elif arguments["terminate"]:
        computesetid = arguments["COMPUTESETID"]
        print(Cluster.computeset_terminate(computesetid))
    elif arguments["power"]:
        clusterid = arguments["CLUSTERID"] or None
        fuzzyparam = arguments["NODESPARAM"] or None
        # parsing nodesparam for proper action: an int is a computeset id,
        # a hostlist is a set of nodes, absent means the frontend
        if fuzzyparam:
            try:
                param = int(fuzzyparam)
                subject = 'COMPUTESET'
            except ValueError:
                param = fuzzyparam
                try:
                    hosts_param = hostlist.expand_hostlist(fuzzyparam)
                    subject = 'HOSTS'
                except hostlist.BadHostlist:
                    Console.error("Invalid hosts list specified!",
                                  traceflag=False)
                    return ""
        else:
            subject = 'FE'
            param = None
        if arguments["on"]:
            action = "on"
        elif arguments["off"]:
            action = "off"
        elif arguments["reboot"]:
            action = "reboot"
        elif arguments["reset"]:
            action = "reset"
        elif arguments["shutdown"]:
            action = "shutdown"
        else:
            action = None
        print (Cluster.power(clusterid,
                             subject,
                             param,
                             action)
               )
    elif arguments["console"]:
        clusterid = arguments["CLUSTERID"]
        linkonly = False
        if arguments["--link"]:
            linkonly = True
        nodeid = None
        if 'COMPUTENODEID' in arguments:
            nodeid = arguments["COMPUTENODEID"]
        Comet.console(clusterid, nodeid, linkonly)
    elif arguments["iso"]:
        # NOTE: "list" and "upload" are deliberately plain `if`s so they are
        # evaluated independently of each other.
        if arguments["list"]:
            isos = (Comet.list_iso())
            idx = 0
            for iso in isos:
                if iso.startswith("public/"):
                    iso = iso.split("/")[1]
                idx += 1
                print ("{}: {}".format(idx, iso))
        if arguments["upload"]:
            isofile = arguments["PATHISOFILE"]
            isofile = os.path.abspath(isofile)
            if os.path.isfile(isofile):
                if arguments["--isoname"]:
                    filename = arguments["--isoname"]
                else:
                    filename = os.path.basename(isofile)
            else:
                print ("File does not exist - {}" \
                       .format(arguments["PATHISOFILE"]))
                return ""
            print(Comet.upload_iso(filename, isofile))
        elif arguments["attach"]:
            isoidname = arguments["ISOIDNAME"]
            clusterid = arguments["CLUSTERID"]
            computenodeids = arguments["COMPUTENODEIDS"] or None
            print(Cluster.attach_iso(isoidname, clusterid, computenodeids))
        elif arguments["detach"]:
            clusterid = arguments["CLUSTERID"]
            computenodeids = arguments["COMPUTENODEIDS"] or None
            print(Cluster.detach_iso(clusterid, computenodeids))
    elif arguments["node"]:
        if arguments["info"]:
            clusterid = arguments["CLUSTERID"]
            nodeid = arguments["COMPUTENODEID"]
            print (Cluster.node_info(clusterid, nodeid=nodeid, format=output_format))
        elif arguments["rename"]:
            # batch rename: OLDNAMES -> NEWNAMES pairwise, after validating
            # lengths, non-empty targets, existence and no name collisions
            clusterid = arguments["CLUSTERID"]
            oldnames = Parameter.expand(arguments["OLDNAMES"])
            newnames = Parameter.expand(arguments["NEWNAMES"])
            if len(oldnames) != len(newnames):
                Console.error("Length of OLDNAMES and NEWNAMES have to be the same",
                              traceflag=False)
                return ""
            else:
                for newname in newnames:
                    if newname.strip() == "":
                        Console.error("Newname cannot be empty string",
                                      traceflag=False)
                        return ""
                cluster_data = Cluster.list(clusterid, format="rest")
                if len(cluster_data) > 0:
                    computes = cluster_data[0]["computes"]
                    nodenames = [x["name"] for x in computes]
                else:
                    Console.error("Error obtaining the cluster information",
                                  traceflag=False)
                    return ""
                # check if new names ar not already taken
                # to be implemented
                # print (oldnames)
                # print (newnames)
                # print (nodenames)
                oldset = set(oldnames)
                newset = set(newnames)
                currentset = set(nodenames)
                # at least one OLDNAME does not exist
                if not oldset <= currentset:
                    Console.error("Not all OLDNAMES are valid",
                                  traceflag=False)
                    return ""
                else:
                    # those unchanged nodes
                    keptset = currentset - oldset
                    # duplication between name of unchanged nodes and
                    # the requested NEWNAMES
                    if keptset & newset != set():
                        Console.error("Not proceeding as otherwise introducing "\
                                      "duplicated names",
                                      traceflag=False)
                    else:
                        # preview the rename pairs, then ask for confirmation
                        for i in range(0,len(oldnames)):
                            oldname = oldnames[i]
                            newname = newnames[i]
                            print ("%s -> %s" % (oldname, newname))
                        confirm = input("Confirm batch renaming (Y/y to confirm, "\
                                        "any other key to abort):")
                        if confirm.lower() == 'y':
                            print ("Conducting batch renaming")
                            for i in range(0,len(oldnames)):
                                oldname = oldnames[i]
                                newname = newnames[i]
                                print (Cluster.rename_node(clusterid,
                                                           oldname,
                                                           newname))
                        else:
                            print ("Action aborted!")
    elif arguments["reservation"]:
        if arguments["create"] or \
                arguments["update"] or \
                arguments["delete"]:
            Console.info("Operation not supported. Please contact XSEDE helpdesk for help!")
        if arguments["list"]:
            # admin-only feature: needs an hpcinfo endpoint in the config
            if "hpcinfo" in cometConf:
                hpcinfourl = cometConf["hpcinfo"]["endpoint"]
            else:
                Console.error("Admin feature not configured for this client",
                              traceflag = False)
                return ""
            ret = requests.get("%s/reservations/%s" % (hpcinfourl,
                                                       cometConf['active'])
                               )
            jobs = ret.json()
            result = Printer.write(jobs)
            print (result)
    return ""
def do_vc(self, args, arguments):
    """
    ::

      Usage:
          vc key add KEYFILE NAMES [--username=USERNAME] [--proxy=PROXY]
          vc key distribute NAMES [--username=USERNAME] [--proxy=PROXY]
          vc key list NAMES [--usort] [--username=USERNAME] [--proxy=PROXY] [--format=FORMAT]
          vc key proxy NAMES [--username=USERNAME] [--proxy=PROXY]

      Options:
          --format=FORMAT  the output format [default: table]

      Description:
          see examples

      Examples:

          cm vc key add keys.txt gregor-[001-010]
              adds the keys in the file keys.txt to the authorized_keys
              file in the user that is registered for the vm

          cm vc key add keys.txt gregor-[001-010] --username=ubuntu
              adds the keys in the file keys.txt to the authorized_keys
              file in the user ubuntu for each of the vms

          vc key distribute gregor-[001-010]
              gathers the keys from the host gathers it into a single
              file and adds them to the authorized keys file.
              Duplicated keys will be ignored.

          vc key list gregor-[001-010] [--usort]
              creates a table with all keys in authorized_keys from all
              of the remote machines. If the parameter usort is
              specified it only lists the key once, but lists in the
              host column the list of all host on which the key is
              stored

          Proxy server

            vc key proxy NAMES

              sometimes you may not have enough floating IPs so it is
              possible to dedicate one machine as a proxy server that
              has such a floating ip. The way this is done is that you
              need to set up ssh tunnels via the proxy server in your
              .ssh/config file. The command will print a template that
              you could include in your .ssh/config file to gain easily
              access to your other machines without floating ip.

              For example it will generate the following for a given
              PROXY host, USERNAME, and vm1 is the name of the first vm
              in NAMES

              Host vm1
                   User USERNAME
                   Hostname PROXY
                   ProxyCommand ssh 10.1.1.2 nc %h %p
                   ForwardX11 yes

          Note: this is just a draft and will be improved upon
                discussion with the team
    """
    arg = dotdict(arguments)
    arg.usort = arguments["--usort"]
    arg.format = arguments["--format"]
    arg.username = arguments["--username"]
    # BUG FIX: the original assigned the --proxy option value to
    # `arg.proxy`, clobbering the docopt command flag arguments["proxy"].
    # As a result `vc key proxy NAMES` (without --proxy) never reached its
    # dispatch branch. Capture the command flag before overwriting.
    proxy_command = arguments["proxy"]
    arg.proxy = arguments["--proxy"]

    if arg.NAMES is not None:
        arg.names = Parameter.expand(arg.NAMES)
    else:
        arg.names = None

    pprint(arg)

    if arg.add:
        print("vc key add KEYFILE NAMES --username=USERNAME")
        print(arg.username)
        print(arg.names)
        print(arg.KEYFILE)
        print(arg.proxy)
        Console.TODO("not yet implemented")
        return ""
    elif arg.distribute:
        print("vc key distribute NAMES --username=USERNAME")
        print(arg.names)
        print(arg.username)
        print(arg.proxy)
        Console.TODO("not yet implemented")
        return ""
    elif arg.list:
        print("vc key list NAMES [--usort] [--format=FORMAT]")
        print(arg.names)
        print(arg.username)
        print(arg.format)
        print(arg.usort)
        print(arg.proxy)
        result = Vc.list(names=arg.names)
        # dont forget format and sort
        Console.TODO("not yet implemented")
        return ""
    elif proxy_command:
        print("vc key proxy NAMES [--username=USERNAME] [--proxy=PROXY]")
        print(arg.names)
        print(arg.username)
        print(arg.format)
        print(arg.usort)
        print(arg.proxy)
        result = Vc.list(names=arg.names)
        # dont forget format and sort
        Console.TODO("not yet implemented")
        return ""
def do_vc(self, args, arguments):
    """
    ::

      Usage:
          vc key add KEYFILE NAMES [--username=USERNAME] [--proxy=PROXY]
          vc key distribute NAMES [--username=USERNAME] [--proxy=PROXY]
          vc key list NAMES [--usort] [--username=USERNAME] [--proxy=PROXY] [--format=FORMAT]
          vc key proxy NAMES [--username=USERNAME] [--proxy=PROXY]

      Options:
          --format=FORMAT  the output format [default: table]

      Description:
          see examples

      Examples:

          cm vc key add keys.txt gregor-[001-010]
              adds the keys in the file keys.txt to the authorized_keys
              file in the user that is registered for the vm

          cm vc key add keys.txt gregor-[001-010] --username=ubuntu
              adds the keys in the file keys.txt to the authorized_keys
              file in the user ubuntu for each of the vms

          vc key distribute gregor-[001-010]
              gathers the keys from the host gathers it into a single
              file and adds them to the authorized keys file.
              Duplicated keys will be ignored.

          vc key list gregor-[001-010] [--usort]
              creates a table with all keys in authorized_keys from all
              of the remote machines. If the parameter usort is
              specified it only lists the key once, but lists in the
              host column the list of all host on which the key is
              stored

          Proxy server

            vc key proxy NAMES

              sometimes you may not have enough floating IPs so it is
              possible to dedicate one machine as a proxy server that
              has such a floating ip. The way this is done is that you
              need to set up ssh tunnels via the proxy server in your
              .ssh/config file. The command will print a template that
              you could include in your .ssh/config file to gain easily
              access to your other machines without floating ip.

              For example it will generate the following for a given
              PROXY host, USERNAME, and vm1 is the name of the first vm
              in NAMES

              Host vm1
                   User USERNAME
                   Hostname PROXY
                   ProxyCommand ssh 10.1.1.2 nc %h %p
                   ForwardX11 yes

          Note: this is just a draft and will be improved upon
                discussion with the team
    """
    arg = dotdict(arguments)
    arg.usort = arguments["--usort"]
    arg.format = arguments["--format"]
    arg.username = arguments["--username"]
    # BUG FIX: the original assigned the --proxy option value to
    # `arg.proxy`, clobbering the docopt command flag arguments["proxy"].
    # As a result `vc key proxy NAMES` (without --proxy) never reached its
    # dispatch branch. Capture the command flag before overwriting.
    proxy_command = arguments["proxy"]
    arg.proxy = arguments["--proxy"]

    # the --proxy option itself is still unsupported
    if arg.proxy:
        Console.error("proxy not yet supported", traceflag=False)

    if arg.NAMES is not None:
        arg.names = Parameter.expand(arg.NAMES)
    else:
        arg.names = None

    pprint(arg)

    if arg.add:
        print("vc key add KEYFILE NAMES --username=USERNAME")
        print(arg.username)
        print(arg.names)
        print(arg.KEYFILE)
        print(arg.proxy)
        Console.TODO("not yet implemented")
        return ""
    elif arg.distribute:
        print("vc key distribute NAMES --username=USERNAME")
        print(arg.names)
        print(arg.username)
        print(arg.proxy)
        Console.TODO("not yet implemented")
        return ""
    elif arg.list:
        print("vc key list NAMES [--usort] [--format=FORMAT]")
        print(arg.names)
        print(arg.username)
        print(arg.format)
        print(arg.usort)
        print(arg.proxy)
        result = Vc.list(names=arg.names)
        # dont forget format and sort
        Console.TODO("not yet implemented")
        return ""
    elif proxy_command:
        print("vc key proxy NAMES [--username=USERNAME] [--proxy=PROXY]")
        print(arg.names)
        print(arg.username)
        print(arg.format)
        print(arg.usort)
        print(arg.proxy)
        result = Vc.list(names=arg.names)
        # dont forget format and sort
        Console.TODO("not yet implemented")
        return ""
elif arguments['nodes']:
    # Show the nodes of the (single) running swarm cluster.
    # NOTE(review): fragment of a larger dispatch chain; `data`, `ids`,
    # `nodes`, `jobs` are defined outside this excerpt.
    if data.ID is None:
        myjobs = ids(data.user)
    else:
        myjobs = [data.ID]
    print(type(myjobs))  # debug output
    print('jjj', myjobs)  # debug output
    if len(myjobs) > 1:
        # NOTE(review): `jobs` is a function object here -- this print
        # likely intended `myjobs`; left unchanged.
        print(jobs, len(myjobs))
        Console.error(
            "More than one swarm cluster running, please specify ID")
    # first node of the first (only) job
    n = nodes(data.user, myjobs[0])[0]
    print(n)
    print(Parameter.expand(n))
elif arguments['queue']:
    #system("ssh india /opt/slurm/bin/squeue | grep {user} ".format(**data))
    # NOTE(review): this branch looks inverted -- a truthy `data.all`
    # selects the call WITHOUT allusers=True; confirm intent before fixing.
    if data.all:
        jobs(data.user)
    else:
        jobs(data.user, allusers=True)
    n = ids(data.user)
    print(n)
elif arguments['run']:
    # NOTE(review): body of this branch continues beyond this excerpt.
def do_launcher(self, args, arguments):
    """
    ::

      Usage:
          launcher repo add NAME URL
          launcher repo delete NAME
          launcher repo list
          launcher repo
          launcher list [NAMES] [--cloud=CLOUD] [--format=FORMAT] [--source=db|dir]
          launcher add NAME SOURCE
          launcher delete [NAMES] [--cloud=CLOUD]
          launcher clear
          launcher run [NAME]
          launcher resume [NAME]
          launcher suspend [NAME]
          launcher refresh
          launcher log [NAME]
          launcher status [NAME]

      Arguments:

          KEY    the name of the launcher

      Options:

          --cloud=CLOUD    the name of the cloud
          --format=FORMAT  the output format [launcher: table]
          --all            lists all the launcher values

      Description:

          Launcher is a command line tool to test the portal launch
          functionalities through command line.

          The current launcher values can by listed with --all option:(
          if you have a launcher cloud specified. You can also add a
          cloud parameter to apply the command to a specific cloud)

              launcher list

          A launcher can be deleted with

              launcher delete KEY

      Examples:
          launcher list --all
          launcher list --cloud=general
          launcher delete <KEY>
    """
    print("AAA")
    arg = dotdict(arguments)
    if arg.NAMES is not None:
        arg.names = Parameter.expand(arg.NAMES)
    else:
        arg.names = None
    # BUG FIX: the original compared `arg.name` -- a key that is never set
    # on this dotdict -- against ["all"], so "launcher list all" never
    # cleared the name filter. Compare the expanded name list instead.
    if arg.names == ["all"]:
        arg.names = None
    arg.cloud = arguments["--cloud"] or Default.cloud
    arg.output = arguments['--format'] or 'table'
    arg.source = arguments['--source'] or 'db'
    print("BBB")
    pprint(arg)

    if arg.cloud is None:
        Console.error("Default arg.cloud not set")
        return

    result = ""
    if arguments["repo"] and arguments["list"]:
        # list the repositories registered in cm_launcher.yaml
        print("repo list")
        launchers = ConfigDict(
            filename="cm_launcher.yaml")["cloudmesh"]["repo"]
        print("repo add")
        d = {}
        for name in launchers:
            location = launchers[name]["location"]
            d[name] = {"name": name, "location": location}
        print(Printer.dict_table(d))
        return ""
    elif arguments["repo"] and arguments["add"]:
        launchers = ConfigDict(filename="cm_launcher.yaml")
        print("repo add")
        print(launchers)
        return ""
    elif arguments["repo"] and arguments["delete"]:
        print("repo delete")
        return ""
    elif arguments["list"] and not arguments["repo"]:
        # BUG FIX: the original condition was
        # `arguments["repo"] and not arguments["list"]`, which can never be
        # true here (the repo/list combination is handled above) and left
        # plain "launcher list" with no matching branch.
        print(arg.names)
        result = Launcher.list(name=arg.names, output=arg.output)
    elif arguments["add"]:
        result = Launcher.add(name=arg.NAME, source=arg.SOURCE)
    elif arguments["delete"]:
        # delete every launcher named on the command line
        for name in arg.names:
            result = Launcher.delete(name=name, category=arg.cloud)
    elif arguments["run"]:
        result = Launcher.run()
    elif arguments["resume"]:
        # BUG FIX: the original passed `arg.name`, which is never defined;
        # the docopt argument is NAME.
        result = Launcher.resume(name=arg.NAME)
    elif arguments["suspend"]:
        result = Launcher.suspend(name=arg.NAME)
    elif arguments["details"]:
        # NOTE(review): "details" does not appear in the Usage section, so
        # docopt never produces it; branch kept for compatibility.
        result = Launcher.details(name=arg.NAME)
    elif arguments["clear"]:
        result = Launcher.clear()
    elif arguments["refresh"]:
        result = Launcher.refresh(name=arg.NAME)
    print(result)
def do_group(self, args, arguments):
    """
    ::

      Usage:
          group list [GROUPNAME] [--format=FORMAT]
          group remove NAMES [--group=GROUPNAME]
          group add NAMES [--type=TYPE] [--group=GROUPNAME]
          group delete GROUPS
          group copy FROM TO
          group merge GROUPA GROUPB MERGEDGROUP

      manage the groups

      Arguments:

          NAMES        names of object to be added
          GROUPS       names of a groups
          FROM         name of a group
          TO           name of a group
          GROUPA       name of a group
          GROUPB       name of a group
          MERGEDGROUP  name of a group

      Options:
          --format=FORMAT     the output format
          --type=TYPE         the resource type
          --name=NAME         the name of the group
          --id=IDS            the ID(s) to add to the group

      Description:

          Todo: design parameters that are useful and match description
          Todo: discuss and propose command

          cloudmesh can manage groups of resource related objects. As it
          would be cumbersome to for example delete many virtual
          machines or delete VMs that are in the same group, but are
          running in different clouds.

          Hence it is possible to add a virtual machine to a specific
          group. The group name to be added to can be set as a default.
          This way all subsequent commands use this default group. It
          can also be set via a command parameter. Another convenience
          function is that the group command can use the last used
          virtual machine. If a vm is started it will be automatically
          added to the default group if it is set.

          If finer grained deletion is needed, it can be achieved with
          the delete command that supports deletion by name

          It is also possible to remove a VM from the group using the
          remove command, by supplying the ID

          Note: The type is internally called for the group species, we
                may eliminate the species column and just use the type
                column for it,

      Example:
          default group mygroup

          group add --type=vm --id=albert-[001-003]
              adds the vms with the given name using the Parameter
              see base

          group add --type=vm
             adds the last vm to the group

          group delete --name=mygroup
              deletes all objects in the group
    """
    # pprint(arguments)

    if arguments["list"]:
        # resolution order for the output format: explicit option,
        # configured default, then "table"
        output = arguments["--format"] \
            or Default.get(name="format", category="general") \
            or "table"
        name = arguments["GROUPNAME"]
        if name is None:
            result = Group.list(output=output)
            if result:
                print(result)
            else:
                print("No groups found other than the default group but it has no members.")
        else:
            result = Group.list(name=name, output=output)
            if result:
                print(result)
            else:
                # BUG FIX: the original built a message with
                # format(**locals()) that referenced an undefined
                # `category` variable (KeyError at runtime) and then never
                # displayed it. Report the missing group explicitly.
                Console.error(
                    "No group with name `{name}` found.".format(name=name))
        return ""
    elif arguments["add"]:
        # group add NAMES [--type=TYPE] [--group=GROUPNAME]
        print("AAA", arguments["NAMES"])
        members = Parameter.expand(arguments["NAMES"])
        print("MMMM", members)
        data = dotdict({
            "species": arguments["--type"] or "vm",
            "name": arguments["--group"] or Default.group
        })
        print("DDD", data)
        # each member is added individually under the same group/species
        for member in members:
            data.member = member
            pprint(data)
            Group.add(**data)
        return ""
    elif arguments["delete"]:
        groups = Parameter.expand(arguments["GROUPS"])
        for group in groups:
            result = Group.delete(group)
            if result:
                Console.ok(result)
            else:
                Console.error(
                    "delete group {}. failed.".format(group))
        return ""
    elif arguments["remove"]:
        members = Parameter.expand(arguments["NAMES"])
        group = arguments["--group"] or Default.group
        for member in members:
            result = Group.remove(group, member)
            if result:
                Console.ok(result)
            else:
                Console.error(
                    "remove {} from group {}. failed.".format(group, member))
        return ""
    elif arguments["copy"]:
        _from = arguments["FROM"]
        _to = arguments["TO"]
        Group.copy(_from, _to)
        return ""
    elif arguments["merge"]:
        _groupA = arguments["GROUPA"]
        _groupB = arguments["GROUPB"]
        _mergedGroup = arguments["MERGEDGROUP"]
        Group.merge(_groupA, _groupB, _mergedGroup)
        return ""