def do_container(self, args, arguments):
    """
    ::

      Usage:
            container [--os=OS] [--command=COMMAND] [--shell=SHELL] [--interactive=INTERACTIVE] [--window=WINDOW]

            Starts a docker container in interactive mode in a new
            container and executes the command in it.

      Arguments:
          --command=COMMAND   the command
          --os=OS   the os [default: cloudmesh/book:latest]
          --shell=SHELL  [default: /bin/bash]
          --window=WINDOW  [default: True]
          --interactive=INTERACTIVE  [default: True]

      Options:
          -f      specify the file

      Description:
            container --os="cloudmesh/book:1.7" --command=ls
    """
    # m = Manager()

    map_parameters(arguments, 'os', 'command', 'interactive', 'shell',
                   'window')
    arguments.cwd = os.getcwd()

    # A trailing ';' lets the command be chained in front of the shell
    # in the docker invocation below; no command means an empty string.
    arguments.command = "" if arguments.command is None \
        else arguments.command + ";"

    VERBOSE(arguments, label='arguments', verbose=1)

    if arguments.os is None:
        # No image requested: run the command in a plain local terminal.
        Shell.terminal(command=arguments.command)
        return ""

    # Translate the boolean-ish --interactive value into docker's -it flag.
    wants_tty = arguments.interactive.lower() in ['true', 'on']
    arguments.interactive = "-it" if wants_tty else ""
    arguments.shell = ""
    VERBOSE(arguments, label='arguments', verbose=1)

    # Mount the current working directory into the container and run there.
    command = "cd {cwd}; docker run -v {cwd}:{cwd} -w {cwd} --rm {interactive} {os} {command}{shell}".format(
        **arguments)
    print(command)
    Shell.terminal(command=command)
    return ""
def do_storagelifecycle(self, args, arguments):
    """
    ::

      Usage:
        storagelifecycle put SERVICE STORAGE_BUCKET_NAME (--expiry_in_days=NUM_DAYS | --lifecycle_config FILE)
        storagelifecycle get SERVICE STORAGE_BUCKET_NAME
        storagelifecycle delete SERVICE STORAGE_BUCKET_NAME

      Options:
        --expiry_in_days=NUM_DAYS    Days until objects in bucket are removed
        --lifecycle_config FILE      File containing storage lifecycle rules for bucket or objects in bucket

      Arguments:
        SERVICE              Name of the cloud service provider (i.e. aws, google, azure)
        STORAGE_BUCKET_NAME  Id of the cloud service provider bucket
        NUM_DAYS             NUMBER OF DAYS

      Description:

        Manage cloud service provider objects so that they are stored
        cost-effectively throughout their lifecycle. AWS and google are
        currently supported.

        storagelifecycle put SERVICE STORAGE_BUCKET_NAME (--expiry_in_days=NUM_DAYS | --lifecycle_config FILE)
            Creates a new lifecycle configuration for the bucket or
            replaces an existing lifecycle configuration.

        storagelifecycle delete SERVICE STORAGE_BUCKET_NAME
            Removes all the lifecycle configuration rules in the
            lifecycle subresource associated with the (STORAGE_ID).

        storagelifecycle get SERVICE STORAGE_BUCKET_NAME
            Returns the lifecycle configuration information set on the
            bucket.

      Example:

        storagelifecycle put "google" "cloudmesh-bucket-001" --expiry_in_days=90
        storagelifecycle put "google" "cloudmesh-bucket-001" --lifecycle_config="C:\\mydir\\google_lifecycle_config.json"
        storagelifecycle get "google" "cloudmesh-bucket-001"
        storagelifecycle delete "google" "cloudmesh-bucket-001"
    """
    # Map parameters with -- to regular argument dicts for easier processing.
    map_parameters(arguments, "expiry_in_days", "lifecycle_config")

    # Create instance of generic Provider service
    provider = Provider(arguments.SERVICE)

    # Invoke function based on command arguments
    if arguments["put"]:
        provider.put(arguments.SERVICE, arguments.STORAGE_BUCKET_NAME,
                     arguments)
    elif arguments["get"]:
        provider.get(arguments.SERVICE, arguments.STORAGE_BUCKET_NAME)
    elif arguments["delete"]:
        provider.delete(arguments.SERVICE, arguments.STORAGE_BUCKET_NAME)
    else:
        # BUG FIX: previously `return NotImplementedError` handed the
        # exception *class* back to the caller instead of raising it.
        # Raising matches the convention used by the other commands.
        raise NotImplementedError
def test_variables_overwrite(self):
    """Verify map_parameters refuses to overwrite an existing plain key.

    The arguments dict already contains 'list', so mapping '--list'
    onto 'list' must raise.
    """
    HEADING()
    arguments = {"list": True, "--list": "hello"}
    # BUG FIX: the original wrapped `assert False` in a bare `except:`,
    # which swallowed the AssertionError itself — the test could never
    # fail. Record the outcome in a flag and assert it afterwards.
    raised = False
    try:
        map_parameters(arguments, 'list')
    except Exception:
        raised = True
    assert raised
    print(arguments)
def do_provider(self, args, arguments):
    """
    ::

      Usage:
          provider list [--output=OUTPUT]
          provider delete NAME
          provider add NAME

      Arguments:
        NAME           The name of the key.

      Options:
         --output=OUTPUT               the format of the output [default: table]

      Description:

          THIS IS NOT YET IMPLEMENTED

          Managing the providers
    """
    map_parameters(arguments, 'output')

    if arguments.list:
        # First show every ComputeProviderPlugin subclass already loaded.
        banner("Loaded Compute Providers")
        for plugin in ComputeProviderPlugin.__subclasses__():
            print(plugin.kind)
            pprint(plugin)

        # Then probe the known provider names; report lookup failures
        # instead of aborting.
        banner("Available Compute Providers")
        registry = ProviderList()
        for cloud in ["openstack", "azure"]:
            try:
                found = registry[cloud]
                print(cloud)
            except Exception as e:
                print(e)
    elif arguments.delete:
        raise NotImplementedError
    elif arguments.add:
        raise NotImplementedError
    return ""
def do_mongo(self, args, arguments):
    """
    ::

      Usage:
            mongo deploy --config FILE
            mongo deploy --ips=IPS --names=NAMES --shards=SHARDS --replicas=REPLICAS

            This command does some useful things.

      Arguments:
          FILE   a file name

      Options:
          -f      specify the file

      Description:

        > mongo deploy --config FILE
        > mongo deploy --ips=10.0.0.[1-5]
        >              --names=master,worker[2-5]
        >              --shards=3
        >              --replicas=2
    """
    # BUG FIX: "shards" was missing from map_parameters even though
    # arguments.shards is read below.
    map_parameters(arguments, "config", "ips", "names", "shards",
                   "replicas")

    if arguments.deploy and arguments.config:
        # Deploy from a configuration file.
        print(f"configure from file {arguments.FILE}")
        config = parse_config(arguments.FILE)
        valid = validate_config(config)
    # BUG FIX: this was a second independent `if`, so the --config path
    # above also fell through into the trailing `else` and printed an
    # error; `elif` keeps the two deploy modes mutually exclusive.
    elif arguments.deploy and arguments.ips:
        # Deploy from explicit, parameter-expanded name/ip lists.
        names = Parameter.expand(arguments.names)
        ips = Parameter.expand(arguments.ips)
        if len(ips) != len(names):
            Console.error("The length of ips and names do not match")
            # BUG FIX: previously the code fell through and "configured"
            # with mismatched lists; abort instead.
            return ""
        print(f"configure")
        print(ips)
        print(names)
        print(arguments.shards)
        print(arguments.replicas)
    else:
        Console.error("parameters not specified correctly")

    return ""
def test_variables_predefined(self):
    """Verify map_parameters rejects names that collide with dict methods.

    Each of these identifiers is an attribute of dict, so mapping a
    parameter onto it must raise.
    """
    HEADING()
    for argument in [
            'clear', 'copy', 'fromkeys', 'get', 'items', 'keys', 'pop',
            'popitem', 'setdefault', 'update', 'values', 'format', 'type'
    ]:
        arguments = {
            argument: True,
        }
        print(arguments)
        # BUG FIX: the original put `assert False` inside the try and
        # caught `Exception` — AssertionError is an Exception, so the
        # failure was swallowed and the test could never fail. Track the
        # outcome in a flag instead.
        raised = False
        try:
            map_parameters(arguments, argument)
        except Exception as e:
            print(e)
            raised = True
        assert raised
def do_scriptrunner(self, args, arguments):
    """
    ::

      Usage:
            scriptrunner --file=FILE --bucket=BUCKET --upload=UPLOAD
            scriptrunner --bucket=BUCKET list

            This command does some useful things.

      Arguments:
          FILE     a file name
          BUCKET   a bucket name
          UPLOAD   TRUE

      Options:
          -f      specify the file
          -b      specify the s3 bucket name
    """
    # Mirror the docopt '--' options onto attribute-style names.
    map_parameters(arguments, 'upload')
    arguments.FILE = arguments['--file'] or None
    arguments.BUCKET = arguments['--bucket'] or None
    arguments.UPLOAD = arguments['--upload'] or None

    VERBOSE(arguments)

    manager = Manager()

    if arguments.UPLOAD:
        print("option upload")
        runner = GlueRunner.GlueRunner(arguments.FILE, arguments.BUCKET)
        runner.upload()

    return ""
def do_analytics(self, args, arguments):
    """
    ::

      Usage:
        analytics help FUNCTION
        analytics manual SERVICE
        analytics codegen function FILENAME --service=NAME [--dir=DIR] [--port=PORT] [--host=HOST]
        analytics codegen sklearn MODEL --service=NAME [--port=PORT] [--dir=DIR] [--host=HOST]
        analytics server start --service=NAME [--cloud=CLOUD] [--dir=DIR] [--detached]
        analytics server stop SERVICE [--service=NAME] [--cloud=CLOUD]
        analytics file put SERVICE FILENAME [--cloud=CLOUD] [--port=PORT]
        analytics file list SERVICE [--cloud=CLOUD] [--port=PORT]
        analytics file get SERVICE FILENAME [--cloud=CLOUD] [--port=PORT]
        analytics file run SERVICE PARAMETERS... FILENAME [--cloud=CLOUD] [--port=PORT] [-v]
        analytics run SERVICE PARAMETERS... [--cloud=CLOUD] [--port=PORT] [-v]
        analytics SERVICE [--cloud=CLOUD] [--port=PORT] [-v]

      This command manages the cloudmesh analytics server on the given
      cloud. If the cloud is not spified it is run on localhost

      Options:
          --cloud=CLOUD    The name of the cloud as specified in the
                           cloudmesh.yaml file
          --dir=DIR        The directory in which the service is to be
                           placed [default=./build]
          --port=PORT      The port of the service [default=8000]
          --host=HOST      The hostname to run this server on
                           [default=127.0.0.1]
          --service=NAME   The name of the service
                           (should than not just be name?)

      Arguments:
          SERVICE   the name of the service
          PARAMETERS   the PARAMETERS to be send toy the service

      Description:

          http://127.0.0.1:8000/cloudmesh/LinearRegression/ui/

      Examples:

        cms analytics manual LinearRegression
        cms analytics help LinearRegression
        cms analytics help sklearn.linear_model.LinearRegression
    """
    # Expose the docopt '--' options as attribute-style names.
    map_parameters(arguments, 'detached', 'service', 'host', 'dir',
                   'cloud', 'port', 'v')

    # Defaults used by several branches below; individual branches may
    # recompute host/port/ip locally.
    port = arguments.port or str(8000)
    ip = f"{arguments.cloud}:{arguments.port}"

    # pprint(arguments)

    def find_server_parameters():
        """
        finds parameters from the commandline arguments. This includes
        any string with an = sign any string without.

        :return: parameters, flags
        """
        # Split the positional PARAMETERS into "key=value" strings and
        # bare flag words.
        commands = arguments.PARAMETERS
        parameters = []
        flag = []
        for command in commands:
            if '=' in command:
                parameters.append(command)
            else:
                flag.append(command)
        return parameters, flag

    if arguments.codegen and arguments.function and arguments.FILENAME:
        # codegen function: import the named function from the given file
        # and print its docstring and annotations.
        filename = arguments.FILENAME
        name = arguments.NAME
        # Convert a path like a/b/c.py into the module name a.b.c.
        module_name = filename.replace(".py", "").replace("/", ".")
        module = importlib.import_module(module_name)
        f = getattr(module, name)
        print(f"from {module_name} import {name}")
        print(f.__doc__)
        print(f.__annotations__)
        return ""
    elif arguments.help:
        # help: split "module.path.function" and delegate to the helper.
        function = arguments.FUNCTION
        module, function = function.rsplit(".", 1)
        helper.get_help(module, function)
        return ""
    elif arguments.manual:
        print(manual(arguments.SERVICE))
        return ""
    elif arguments.codegen and arguments.sklearn:
        # codegen sklearn: generate the OpenAPI server scaffolding.
        banner("Generate the Cloudmesh OpenAPI Server")
        service = arguments.service
        directory = arguments.dir
        host = arguments.host
        print(" Service: ", service)
        print(" Directory:", directory)
        print(" Host: ", host)
        print(" Port: ", port)
        print()
        cms_autoapi.main_generate(service, directory, port)
    elif arguments.server and arguments.stop:
        # server stop: find matching python processes in ps output and
        # kill them by pid.
        print('killing the server')
        service = arguments.SERVICE
        result = Shell.ps().splitlines()
        for entry in result:
            if ".py" in entry and service in entry:
                pid = int(entry.split(" ")[0])
                os.kill(pid, signal.SIGKILL)
        return ""
    elif arguments.run and arguments.SERVICE:
        # run: invoke a function on the remote analytics service.
        host = arguments.cloud or "127.0.0.1"
        port = arguments.port or 8000
        service = arguments.SERVICE
        ip = f"{host}:{port}"
        parameters, flag = find_server_parameters()
        print("Parameters", parameters)
        print("Flags", flag)
        print("Service:", service)
        print("Ip:", ip)
        # def run(service, flag, parameters, root_url):
        # NOTE(review): flag[0] raises IndexError when no bare flag word
        # was passed — confirm PARAMETERS always contains one.
        res = Request.run(service, flag[0], parameters, ip)
        print(res)
        return ""
    elif arguments.file and arguments.put:
        # file put: upload a file to the service.
        host = arguments.cloud or "127.0.0.1"
        port = arguments.port or 8000
        filename = arguments.FILENAME
        service = arguments.SERVICE
        ip = f"{host}:{port}"
        res = Request.file_put(ip, service, filename)
        print(res)
        return ""
    elif arguments.file and arguments.list:
        # file list: list files stored on the service.
        # parameters, flag = find_server_parameters()
        host = arguments.cloud or "127.0.0.1"
        port = arguments.port or 8000
        service = arguments.SERVICE
        url = f"{host}:{port}/cloudmesh/{service}"
        res = Request.file_list(url)
        print(res)
        return ""
    elif arguments.file and arguments.get:
        # file get: download a file from the service.
        # analytics file read SERVICE FILENAME [--cloud=CLOUD] [--port=PORT]
        # host = arguments.cloud or "127.0.0.1"
        # NOTE(review): `host` is never assigned in this branch (the
        # assignment above is commented out), so the f-string below will
        # raise NameError — likely a bug; confirm and restore the line.
        port = arguments.port or 8000
        filename = arguments.FILENAME
        service = arguments.SERVICE
        ip = f"{host}:{port}"
        res = Request.file_get(ip, service, filename)
        print(res)
        return ""
    elif arguments.server and arguments.start and arguments.cloud:
        # server start: launch the generated service, either detached via
        # subprocess or in the foreground via os.system.
        # pprint (arguments)
        service = arguments.service
        directory = arguments.dir
        print(" Service: ", service)
        print(" Directory:", directory)
        print(" Cloud: ", arguments.cloud)
        print()
        banner('Manaul')
        print('comamnd to issue the manual')
        print()

        if arguments.detached:
            print("DETACHED")
            command = [f"python", f"{service}_server.py"]
            pprint(command)
            directory = Path(f"{directory}/{service}").resolve()
            print(directory)
            try:
                p = subprocess.Popen(args=command,
                                     stdout=False,
                                     cwd=directory)
                banner(f"Pid: {p.pid}")
            except Exception as e:
                # NOTE(review): `if "Address already in use":` tests a
                # non-empty string literal and is therefore always true;
                # presumably it was meant to test `in str(e)` — confirm.
                if "Address already in use":
                    print()
                    Console.error("The address is already in use")
                    print()
                    name = f"{service}"
                    pid = Shell.get_pid(name)
                    print(pid, name)
                    if pid is not None:
                        Console.error(
                            f"There is also a server running on pid {pid}")

            # with open(setting_path, 'r') as settings:
            #     settings = json.load(settings)
            #
            # settings['server_id'] = p.pid
            #
            # with open(setting_path, 'w') as new_settings:
            #     json.dump(settings, new_settings)

            return ""
        else:
            settings = None
            if arguments.cloud in ['local', '127.0.0.1']:
                banner('OpenAPI Manual')
                print(' The Online manaul is available at ')
                print()
                print(f" http://127.0.0.1:{port}/cloudmesh/{service}/ui")
                print()
                banner(f'Start the Server {service}')
                which = Shell.which("python")
                version = Shell.execute("python", ["--version"])
                command = f'cd {directory}/{service}; python {service}_server.py'
                print()
                print(" Python :", version, which)
                print(" Command:", command)
                # Foreground start: blocks until the server exits.
                os.system(command)
                return ""
    elif arguments.SERVICE:
        # Bare SERVICE: call the service constructor endpoint.
        service = arguments.SERVICE
        parameters, flag = find_server_parameters()
        host = arguments.cloud or "127.0.0.1"
        port = arguments.port or 8000
        ip = f"{host}:{port}"
        res = Request.constructor(service, ip, verbose=arguments["-v"])
        print(res)
        return ""
def do_burn(self, args, arguments):
    """
    ::

        Usage:
          burn gui [--hostname=HOSTNAME]
                   [--ip=IP]
                   [--ssid=SSID]
                   [--wifipassword=PSK]
                   [--bs=BLOCKSIZE]
                   [--dryrun]
                   [--no_diagram]
          burn ubuntu NAMES [--inventory=INVENTORY] [--ssid=SSID] [-f]
              [--wifipassword=PSK] [-v] --device=DEVICE [--country=COUNTRY]
              [--upgrade]
          burn raspberry NAMES --device=DEVICE [--inventory=INVENTORY]
              [--ssid=SSID] [--wifipassword=PSK] [--country=COUNTRY]
              [--password=PASSWORD] [-v] [-f]
          burn firmware check
          burn firmware update
          burn install
          burn load --device=DEVICE
          burn format --device=DEVICE
          burn imager [TAG...]
          burn mount [--device=DEVICE] [--os=OS]
          burn unmount [--device=DEVICE] [--os=OS]
          burn network list [--ip=IP] [--used]
          burn network
          burn info [--device=DEVICE]
          burn image versions [--details] [--refresh] [--yaml]
          burn image ls
          burn image delete [--image=IMAGE]
          burn image get [--url=URL] [TAG...]
          burn backup [--device=DEVICE] [--to=DESTINATION]
          burn copy [--device=DEVICE] [--from=DESTINATION]
          burn shrink [--image=IMAGE]
          burn cluster --device=DEVICE --hostname=HOSTNAME
              [--burning=BURNING] [--ip=IP] [--ssid=SSID]
              [--wifipassword=PSK] [--bs=BLOCKSIZE] [--os=OS] [-y]
              [--imaged] [--set_passwd]
          burn create [--image=IMAGE] [--device=DEVICE] [--burning=BURNING]
              [--hostname=HOSTNAME] [--ip=IP] [--sshkey=KEY]
              [--blocksize=BLOCKSIZE] [--passwd=PASSWD] [--ssid=SSID]
              [--wifipassword=PSK] [--format] [--tag=TAG]
              [--inventory=INVENTORY] [--name=NAME] [-y]
          burn sdcard [TAG...] [--device=DEVICE] [-y]
          burn set [--hostname=HOSTNAME] [--ip=IP] [--key=KEY]
              [--keyboard=COUNTRY] [--cmdline=CMDLINE]
          burn enable ssh
          burn wifi --ssid=SSID [--passwd=PASSWD] [--country=COUNTRY]
          burn check [--device=DEVICE]
          burn mac --hostname=HOSTNAME

        Options:
          -h --help              Show this screen.
          --version              Show version.
          --image=IMAGE          The image filename,
                                 e.g. 2019-09-26-raspbian-buster.img
          --device=DEVICE        The device, e.g. /dev/sdX
          --hostname=HOSTNAME    The hostnames of the cluster
          --ip=IP                The IP addresses of the cluster
          --key=KEY              The name of the SSH key file
          --blocksize=BLOCKSIZE  The blocksise to burn [default: 4M]
          --burning=BURNING      The hosts to be burned

        Arguments:
           TAG                   Keyword tags to identify an image

        Files:
          This is not fully thought through and needs to be documented

          ~/.cloudmesh/images
            Location where the images will be stored for reuse

        Description:

          cms burn create --inventory=INVENTORY --device=DEVICE --name=NAME

            Will refer to a specified cloudmesh inventory file (see cms
            help inventory). Will search the configurations for NAME
            inside of INVENTORY and will burn to DEVICE. Supports
            parameter expansion.

          cms burn create --passwd=PASSWD

            if the passwd flag is added the default password is queried
            from the commandline and added to all SDCards

            if the flag is omitted login via the password is disabled and
            only login via the sshkey is allowed

          Network

            cms burn network list

                Lists the ip addresses that are on the same network

                 +------------+---------------+----------+-----------+
                 | Name       | IP            | Status   | Latency   |
                 |------------+---------------+----------+-----------|
                 | Router     | 192.168.1.1   | up       | 0.0092s   |
                 | iPhone     | 192.168.1.4   | up       | 0.061s    |
                 | red01      | 192.168.1.46  | up       | 0.0077s   |
                 | laptop     | 192.168.1.78  | up       | 0.058s    |
                 | unkown     | 192.168.1.126 | up       | 0.14s     |
                 | red03      | 192.168.1.158 | up       | 0.0037s   |
                 | red02      | 192.168.1.199 | up       | 0.0046s   |
                 | red        | 192.168.1.249 | up       | 0.00021s  |
                 +------------+----------------+----------+-----------+

            cms burn network list [--used]

                Lists the used ip addresses as a comma separated parameter
                list

                   192.168.50.1,192.168.50.4,...

            cms burn network address

                Lists the own network address

                 +---------+----------------+----------------+
                 | Label   | Local          | Broadcast      |
                 |---------+----------------+----------------|
                 | wlan0   | 192.168.1.12   | 192.168.1.255  |
                 +---------+----------------+----------------+

          cms burn firmware check

            Checks if the firmware on the Pi is up to date

          cms burn firmware update

            Checks and updates the firmware on the Pi

          cms burn install

            Installs a program to shrink img files. THis is useful, after
            you created a backup to make the backup smaller and allow
            faster burning in case of recovery

            This command is not supported on MacOS

          cms burn load --device=DEVICE

            Loads the sdcard into the USB drive. Thi sis similar to
            loading a cdrom drive. It s the opposite to eject

          cms burn format --device=DEVICE

            Formats the SDCard in the specified device. Be careful it is
            the correct device. cms burn info will help you to
            identifying it

          cms burn mount [--device=DEVICE] [--os=OS]

            Mounts the file systems available on the SDCard

          cms burn unmount [--device=DEVICE] [--os=OS]

            Unmounts the mounted file systems from the SDCard

          cms burn info [--device=DEVICE]

            Provides useful information about the SDCard

          cms burn image versions [--refresh] [--yaml]

            The images that you like to burn onto your SDCard can be
            cached locally with the image command. The available images
            for the PI can be found when using the --refresh option. If
            you do not specify it it reads a copy of the image list from
            our cache

          cms burn image ls

            Lists all downloaded images in our cache. You can download
            them with the cms burn image get command

          cms burn image delete [--image=IMAGE]

            Deletes the specified image. The name can be found with the
            image ls command

          cms burn image get [--url=URL] [TAG...]

            Downloads a specific image or the latest image. The tag are a
            number of words separated by a space that must occur in the
            tag that you find in the versions command

          cms burn backup [--device=DEVICE] [--to=DESTINATION]

            This command requires you to install pishrink previously with
            cms burn install

            Backs up a SDCard to the given location.

          cms burn copy [--device=DEVICE] [--from=DESTINATION]

            Copies the file form the destination on the SDCard this is
            the same as the SDCard command. we will in future remove one

          cms burn shrink [--image=IMAGE]

            Shrinks the size of a backup or image file that is on your
            local file system. It can only be used for .img files

            This command is not supported on MacOS.

          cms burn create [--image=IMAGE]
                          [--device=DEVICE]
                          [--hostname=HOSTNAME]
                          [--ip=IP]
                          [--sshkey=KEY]
                          [--blocksize=BLOCKSIZE]
                          [--passwd=PASSWD]
                          [--ssid=SSID]
                          [--wifipassword=PSK]
                          [--format]

            This command not only can format the SDCard, but also
            initializes it with specific values

          cms burn sdcard [TAG...] [--device=DEVICE]

            this burns the sd card, see also copy and create

          cms burn set [--hostname=HOSTNAME]
                       [--ip=IP]
                       [--key=KEY]
                       [--mount=MOUNTPOINT]
                       [--keyboard=COUNTRY]
                       [--cmdline=CMDLINE]

            Sets specific values on the sdcard after it has ben created
            with the create, copy or sdcard command

            a --ssh is missing from this command

          cms burn enable ssh [--mount=MOUNTPOINT]

            Enables the ssh server once it is booted

          cms burn wifi --ssid=SSID [--passwd=PASSWD] [--country=COUNTRY]

            Sets the wifi ssid and password after the card is created,
            copied, or the sdcard is used.

            The option country option expects an ISO 3166-1 two digit
            country code. The default is "US" and the option not required
            if suitable. See https://en.wikipedia.org/wiki/ISO_3166-1
            for other countries.

          cms burn check [--device=DEVICE]

            Lists the parameters that were set with the set or create
            command

        Examples: ( \\ is not shown)

          > cms burn create --image=2019-09-26-raspbian-buster-lite
          >                 --device=/dev/mmcblk0
          >                 --hostname=red[5-7]
          >                 --ip=192.168.1.[5-7]
          >                 --sshkey=id_rsa

          > cms burn image get latest

          > cms burn image get https://downloads.raspberrypi.org/
          >   raspbian_lite/images/
          >   raspbian_lite-2018-10-11/2018-10-09-raspbian-stretch-lite.zip

          > cms burn image delete 2019-09-26-raspbian-buster-lite
    """
    # Expose every '--' option as an attribute-style name on arguments.
    map_parameters(arguments, "details", "refresh", "device", "dryrun",
                   "burning", "hostname", "ip", "sshkey", "blocksize",
                   "ssid", "url", "imaged", "key", "keyboard", "passwd",
                   "wifipassword", "version", "to", "os", "country",
                   "inventory", "name", "bs", "set_passwd", "cmdline",
                   "upgrade", "no_diagram")

    # Options whose mapped names would collide with python keywords or
    # attribute names are copied onto UPPERCASE attributes instead.
    # arguments.MOUNTPOINT = arguments["--mount"]
    arguments.FORMAT = arguments["--format"]
    arguments.FROM = arguments["--from"]
    arguments.IMAGE = arguments["--image"]
    arguments.output = "table"  # hard code for now
    arguments.bs = arguments.bs or "4M"
    arguments.yes = arguments["-y"]

    if len(arguments.TAG) == 0:
        # NOTE(review): this assigns the *string* "latest" where TAG is
        # otherwise a list of tags — confirm downstream consumers accept
        # both.
        arguments.TAG = "latest"

    # VERBOSE(arguments)

    def execute(label, function):
        # Wrap a call with StopWatch bookkeeping.
        # NOTE(review): `function` is the already-evaluated result of the
        # call written at the call site (Python evaluates arguments before
        # the call), so the StopWatch brackets only the assignment, not
        # the actual work — confirm whether a callable was intended.
        StopWatch.start(label)
        result = function
        StopWatch.stop(label)
        StopWatch.status(label, True)
        return result

    burner = Burner()
    sdcard = SDCard()

    if arguments.imager:
        # burn imager: fetch an image by tag and hand it to the Imager.
        arguments.TAG = arguments.TAG or ["latest-lite"]
        Console.msg(f"Tags: {arguments.TAG}")
        try:
            file = Imager.fetch(tag=arguments.TAG)
        except:  # noqa: E722
            pass
        # NOTE(review): if fetch raised above, `file` is unbound here and
        # launch raises NameError, which the handler below reports as a
        # missing image.
        try:
            Imager.launch(file=file)
        except Exception as e:
            Console.error(
                f"could not find image with the tag {arguments.TAG}\n\n{e}\n"
            )
        return ""
    elif arguments.gui:
        # burn gui: start the graphical burn frontend.
        from cloudmesh.burn.gui import Gui
        VERBOSE(arguments)
        g = Gui(hostname=arguments.hostname,
                ip=arguments.ip,
                dryrun=arguments.dryrun,
                no_diagram=arguments.no_diagram)
        g.run()
        return ""
    elif arguments.raspberry:
        # burn raspberry: burn one or more RaspberryOS cards.
        banner(txt="RaspberryOS Burn", figlet=True)
        if arguments.inventory:
            inv_path = path_expand(f'~/.cloudmesh/{arguments.inventory}')
            try:
                burner = RaspberryBurner(
                    inventory=inv_path,
                    ssid=arguments['--ssid'],
                    wifipassword=arguments['--wifipassword'],
                    country=arguments['--country'])
            except:
                Console.error('Burner Error')
                return ""
        else:
            try:
                burner = RaspberryBurner(
                    names=arguments.NAMES,
                    ssid=arguments['--ssid'],
                    wifipassword=arguments['--wifipassword'],
                    force_inv=arguments['-f'],
                    country=arguments['--country'])
            except Exception as e:
                Console.error('Burner Error')
                raise e
                # NOTE(review): unreachable — the raise above exits first.
                return ""
        execute(
            "burn raspberry",
            burner.multi_burn(
                names=arguments.NAMES,
                devices=arguments.device,
                verbose=arguments['-v'],
                password=arguments['--password'],
            ))
        return ""
    elif arguments.ubuntu:
        # burn ubuntu: burn ubuntu cards configured via cloud-init, using
        # an inventory (given or auto-generated from NAMES).
        banner(txt="Ubuntu Burn with cloud-init", figlet=True)
        names = Parameter.expand(arguments.NAMES)
        if len(Parameter.expand(arguments.device)) > 1:
            Console.error(
                "Too many devices specified. Please only specify one")
            return ""
        if arguments.inventory:
            c = Configure(inventory=arguments.inventory,
                          debug=arguments['-v'])
            inv = Inventory(filename=arguments.inventory)
        else:
            # No inventory given: derive a cluster name from the
            # manager/worker host names and build a default inventory.
            names = Parameter.expand(arguments.NAMES)
            manager, workers = Host.get_hostnames(names)
            if workers:
                # Strip trailing digits, e.g. worker03 -> worker.
                worker_base_name = ''.join(
                    [i for i in workers[0] if not i.isdigit()])
            # NOTE(review): if there is neither a manager nor workers,
            # `worker_base_name` is unbound here — confirm NAMES always
            # yields at least one of them.
            cluster_name = manager or worker_base_name
            inventory = path_expand(
                f'~/.cloudmesh/inventory-{cluster_name}.yaml')
            if not os.path.exists(inventory) or arguments['-f']:
                if not manager:
                    Console.error("No inventory found. Can not create an "
                                  "inventory without a "
                                  "manager.")
                    return ""
                Inventory.build_default_inventory(
                    filename=inventory,
                    manager=manager,
                    workers=workers,
                    manager_image='ubuntu-20.10-64-bit',
                    worker_image='ubuntu-20.10-64-bit')
            c = Configure(inventory=inventory,
                          debug=arguments['-v'],
                          download_images=True)
            inv = Inventory(filename=inventory)

        names = Parameter.expand(arguments.NAMES)
        manager, workers = Host.get_hostnames(names)

        if manager:
            # When the manager uses wifi, make sure we have an SSID and a
            # wifi password (queried interactively if missing).
            if not arguments.ssid and 'wifi' in c.configs[manager][
                    'services']:
                arguments.ssid = get_ssid()
                if arguments.ssid == "":
                    Console.info('Could not determine SSID, skipping wifi '
                                 'config')
                    arguments.ssid = None
            if not arguments.wifipassword and arguments.ssid is not None:
                arguments.country = Shell.locale().upper()
                arguments.wifipassword = getpass(
                    f"Using --SSID="
                    f"{arguments.ssid} and "
                    f" --COUNTRY="
                    f"{arguments.country}, please "
                    f"enter wifi password:")

        # NOTE(review): one or more statements here were lost to redaction
        # in the source; `tag` must be defined before the check below —
        # reconstructed as the most conservative option. Verify against
        # the original repository.
        tag = arguments.TAG

        if 'ubuntu' not in tag:
            Console.error(
                "This command only supports burning ubuntu cards")
            return ""

        sdcard = SDCard(card_os="ubuntu")

        # Code below taken from arguments.sdcard
        try:
            USB.check_for_readers()
        except Exception as e:
            print()
            Console.error(e)
            print()
            return ""

        # determine if we are burning a manager, as this needs to be done
        # first to get the ssh public key
        # manager = False
        # for name in names:
        #     if not inv.has_host(name):
        #         Console.error(f'Could not find {name} in inventory {inv.filename}')
        #         return ""
        #     service = inv.get(name=name, attribute='service')
        #     if service == 'manager' and not manager:
        #         manager = name
        #         # make manager first in names
        #         names.remove(name)
        #         names.insert(0, name)
        #     elif service == 'manager' and manager:
        #         raise Exception('More than one manager detected in NAMES')

        for name in names:
            # Interactively confirm the right card is inserted per host.
            if not yn_choice(
                    f'Is the card to be burned for {name} inserted?'):
                if not yn_choice(
                        f"Please insert the card to be burned for {name}. "
                        "Type 'y' when done or 'n' to terminante"):
                    Console.error("Terminating: User Break")
                    return ""
            service = inv.get(name=name, attribute='service')
            # Make sure bridge is only enabled if WiFi enabled
            if service == 'manager':
                services = inv.get(name=name, attribute='services')
                if 'bridge' in services and not arguments.ssid:
                    Console.error(
                        'Service bridge can only be configured if WiFi'
                        ' is enabled with --ssid and --wifipassword')
                    return ""
                else:
                    enable_bridge = 'bridge' in services
            Console.info(f'Burning {name}')
            # Format, write the image, then mount to drop cloud-init files.
            sdcard.format_device(device=arguments.device, yes=True)
            sdcard.unmount(device=arguments.device)
            sdcard.burn_sdcard(tag=tag,
                               device=arguments.device,
                               yes=True)
            sdcard.mount(device=arguments.device, card_os="ubuntu")
            if service == 'manager':
                # Generate a private public key pair for the manager that will be persistently used
                # priv_key, pub_key = c.generate_ssh_key(name)
                # Write priv_key and pub_key to /boot/id_rsa and /boot/id_rsa.pub
                # SDCard.writefile(filename=f'{sdcard.boot_volume}/id_rsa', content=priv_key)
                # SDCard.writefile(filename=f'{sdcard.boot_volume}/id_rsa.pub', content=pub_key)
                c.build_user_data(
                    name=name,
                    country=arguments.country,
                    upgrade=arguments.upgrade,
                    with_bridge=enable_bridge).write(
                        filename=sdcard.boot_volume + '/user-data')
                c.build_network_data(name=name,
                                     ssid=arguments.ssid,
                                     password=arguments.wifipassword)\
                    .write(filename=sdcard.boot_volume + '/network-config')
            else:
                c.build_user_data(
                    name=name,
                    add_manager_key=manager,
                    upgrade=arguments.upgrade).write(
                        filename=sdcard.boot_volume + '/user-data')
                c.build_network_data(name=name).write(
                    filename=sdcard.boot_volume + '/network-config')
            time.sleep(
                1
            )  # Sleep for 1 seconds to give ample time for writing to finish
            sdcard.unmount(device=arguments.device, card_os="ubuntu")
            Console.info("Remove card")
        Console.ok(f"Burned {len(names)} card(s)")
        return ""
    elif arguments.firmware and arguments.check:
        execute("firmware check", burner.firmware(action="check"))
        return ""
    elif arguments.firmware and arguments.update:
        execute("firmware update", burner.firmware(action="update"))
        return ""

    # NOTE(review): the elif-chain is intentionally (?) broken here — a
    # fresh `if` starts a second dispatch chain.
    if arguments.check:
        execute("check", burner.check(device=arguments.device))
        return ""
    elif arguments.versions and arguments['image']:
        # burn image versions: print the (optionally refreshed) image list.
        StopWatch.start("image versions")
        result = Image.create_version_cache(refresh=arguments["--refresh"])
        output = "table"
        if arguments["--yaml"]:
            output = "yaml"
        order = ["tag", 'date', "os", "type", 'version']
        header = ["Tag", 'Date', "OS", "Type", 'Version']
        if arguments.details:
            order = ["tag", 'date', "os", "type", 'version', "url"]
            header = ["Tag", 'Date', "OS", "Type", 'Version', "Url"]
        print(
            Printer.write(result,
                          order=order,
                          header=header,
                          output=output))
        StopWatch.stop("image versions")
        StopWatch.status("image versions", True)
        return ""
    elif arguments.load:
        execute("load", sdcard.load_device(device=arguments.device))
        return ""
    elif arguments[
            "format"]:  # as format is a python word, we need to use an index
        execute(
            "format",
            sdcard.format_device(device=arguments.device, unmount=True))
        return ""
    elif arguments.network and arguments["list"]:
        # burn network list: scan the local network with nmap.
        if os_is_mac():
            Console.error("Not yet implemented on MacOS")
            return ""
        ip = arguments.ip or Network.address()[0]['local']
        details = Network.nmap(ip=ip)
        if arguments.used:
            # Only the used addresses, as a comma separated list.
            print(','.join([x['ip'] for x in details]))
        else:
            print(
                Printer.write(details,
                              order=[
                                  'name',
                                  "ip",
                                  "status",
                                  "latency",
                              ],
                              header=[
                                  'Name',
                                  "IP",
                                  "Status",
                                  "Latency",
                              ]))
        return ""
    elif arguments.network:
        # burn network: print our own addresses.
        if os_is_mac():
            Console.error("Not yet implemented on MacOS")
            return ""
        # print (Network.nmap())
        details = Network.address()
        print(
            Printer.write(details,
                          order=['label', "local", "broadcast"],
                          header=["Label", "Local", "Broadcast"]))
        return ""
    elif arguments.wifi:
        # burn wifi: write ssid/psk onto an already burned card.
        password = arguments.passwd
        ssid = arguments.ssid or get_ssid()
        country = arguments.country
        if password is None:
            password = getpass("Please enter the Wifi password or enter "
                               "for no password: ")
        # NOTE(review): the host-detection lines below were partially lost
        # to redaction in the source and are reconstructed from the
        # surviving elif arms — verify against the original repository.
        if os_is_mac():
            host = "macos"
        elif os_is_linux():
            host = "linux"
        elif os_is_pi():
            host = "raspberry"
        else:
            Console.error(
                "This command is not yet implemented for your OS")
            return ""
        burner.configure_wifi(ssid,
                              psk=password,
                              country=country,
                              host=host)
        return ""
    elif arguments.info:
        output = arguments.output or "table"
        card = SDCard()
        execute("info", card.info(output=output))
        # NOTE(review): the reader check after info() can only ever hit
        # its error path — the trailing return is reached either way.
        try:
            USB.check_for_readers()
        except Exception as e:
            print()
            Console.error(e)
            print()
            return ""
        return ""
    elif arguments.install:
        if os_is_mac():
            Console.error("Not yet implemented on MacOS")
            return ""
        execute("install", burner.install())
        return ""
    elif arguments.shrink:
        if os_is_mac():
            Console.error("Not yet implemented on MacOS")
            return ""
        execute("shrink", burner.shrink(image=arguments.IMAGE))
        return ""
    elif arguments.backup:
        try:
            USB.check_for_readers()
        except Exception as e:
            print()
            Console.error(e)
            print()
            return ""
        execute(
            "backup",
            sdcard.backup(device=arguments.device, to_file=arguments.to))
        return ""
    elif arguments[
            "copy"]:  # as copy is a reserved word we need to use the index
        USB.check_for_readers()
        execute(
            "copy",
            sdcard.copy(device=arguments.device, from_file=arguments.FROM))
        return ""
    elif arguments.sdcard:
        # burn sdcard: format, unmount, then write the tagged image.
        try:
            USB.check_for_readers()
        except Exception as e:
            print()
            Console.error(e)
            print()
            return ""
        if arguments.device is None:
            card = SDCard()
            card.info()
            Console.error("Please specify a device")
            return ""
        arguments.TAG = arguments.TAG or ["latest-lite"]
        if any("ubuntu" in tag for tag in arguments.TAG):
            sdcard = SDCard(card_os="ubuntu")
        execute(
            "format",
            sdcard.format_device(device=arguments.device, unmount=True))
        execute("unmount", sdcard.unmount(device=arguments.device))
        execute(
            "sdcard",
            sdcard.burn_sdcard(tag=arguments.TAG,
                               device=arguments.device,
                               yes=arguments.yes))
        return ""
    elif arguments.mount:
        if arguments.device is None:
            # NOTE(review): `card = SDCard` assigns the class, not an
            # instance (missing parentheses) — `card.info()` will fail
            # with a missing-self TypeError. Likely a bug; compare the
            # sdcard branch above.
            card = SDCard
            card.info()
            Console.error("Please specify a device")
            return ""
        execute(
            "mount",
            sdcard.mount(device=arguments.device, card_os=arguments.os))
        return ""
    elif arguments.unmount:
        card = SDCard(card_os=arguments.os)
        execute(
            "unmount",
            card.unmount(device=arguments.device, card_os=arguments.os))
        return ""
    elif arguments.mac:
        hostnames = Parameter.expand(arguments.hostname)
        execute("mac", burner.mac(hostnames=hostnames))
        return ""
    elif arguments.set:
        # burn set: apply any combination of post-burn settings.
        try:
            USB.check_for_readers()
        except Exception as e:
            print()
            Console.error(e)
            print()
            return ""
        if arguments.hostname:
            execute("set hostname", burner.set_hostname(arguments.hostname))
        if arguments.ip:
            execute("set ip", burner.set_static_ip(arguments.ip))
        if arguments.key:
            execute("set key", burner.set_key(arguments.key))
        if arguments.keyboard:
            execute("set keyboard",
                    burner.keyboard(country=arguments.keyboard))
        if arguments.cmdline:
            execute("set cmdline", burner.set_cmdline(arguments.cmdline))
        return ""
    elif arguments.enable and arguments.ssh:
        try:
            USB.check_for_readers()
        except Exception as e:
            print()
            Console.error(e)
            print()
            return ""
        execute("enable ssh", burner.enable_ssh())
        return ""
    # elif arguments.versions and arguments.image:
    #    image = Image()
    elif arguments.ls and arguments['image']:
        execute("image ls", Image().ls())
        return ""
    elif arguments.delete and arguments.IMAGE:
        execute("image rm", Image().rm(arguments.IMAGE))
        return ""
    elif arguments["get"] and arguments['image'] and arguments["--url"]:
        image = Image()
        execute("image fetch", image.fetch(url=arguments.url))
        return ""
    elif arguments["get"] and arguments['image'] and arguments["TAG"]:
        tag = arguments["TAG"]
        # Refresh the version cache before resolving "latest" tags.
        if "latest" in tag and ("full" in tag or "lite" in tag):
            result = Image.create_version_cache(
                refresh=arguments["--refresh"])
        image = Image()
        execute("image fetch", image.fetch(tag=arguments["TAG"]))
        return ""
    elif arguments["get"] and arguments['image']:
        image = Image()
        execute("image fetch", image.fetch(tag="latest"))
        return ""
    elif arguments.cluster:
        # is true when
        #
        # cms burn cluster --hostname=red,red00[1-2]
        #                  --device=/dev/sdb
        #                  --ip=10.1.1.[1-3]
        #                  --ssid=myssid
        #                  --wifipassword=mypass
        #
        try:
            USB.check_for_readers()
        except Exception as e:
            print()
            Console.error(e)
            print()
            return ""
        execute("cluster", burner.cluster(arguments=arguments))
        return ""
    elif arguments.create and arguments.inventory:
        # burn create --inventory: burn all cards described by an
        # inventory file via MultiBurner.
        try:
            USB.check_for_readers()
        except Exception as e:
            print()
            Console.error(e)
            print()
            return ""
        if not os_is_pi():
            print()
            Console.error(
                "This command has only been written for a Raspberry Pis. "
                "Terminating for caution")
            print()
            if yn_choice("Continue anyways?"):
                pass
            else:
                return
        if not arguments.name:
            Console.error(
                "Missing --name parameter. See cms help burn for usage")
            return ""
        if not arguments.device:
            Console.error(
                "Missing --device parameter. See cms help burn for usage")
            return ""
        StopWatch.start("burn inventory")
        multi_burner = MultiBurner()
        # Perhaps we want to change the path at some point
        inventory = f"~/.cloudmesh/{arguments.inventory}"
        multi_burner.burn_inventory(inventory=inventory,
                                    name=arguments.name,
                                    device=arguments.device,
                                    yes=arguments.yes,
                                    passwd=arguments.passwd)
        StopWatch.stop("burn inventory")
        StopWatch.status("burn inventory", True)
        StopWatch.benchmark(sysinfo=False, csv=False)
        return ""
    elif arguments.create:
        # burn create: burn one or more cards from explicit parameters.
        try:
            USB.check_for_readers()
        except Exception as e:
            print()
            Console.error(e)
            print()
            return ""
        # Password precedence: --passwd, then $PASSWD, then generated.
        if arguments["--passwd"]:
            passwd = arguments["--passwd"]
        elif "PASSWD" in os.environ:
            passwd = os.environ["PASSWD"]
        else:
            passwd = generate_strong_pass()
        psk = None
        if arguments["--ssid"]:
            ssid = arguments["--ssid"]
            if arguments["--wifipassword"]:
                psk = arguments["--wifipassword"]
            else:
                psk = None
        else:
            if arguments["--wifipassword"]:
                print("Can't have wifi password with no ssid")
                return
            else:
                ssid = None
        # NOTE(review): `'latest' or arguments.IMAGE` always evaluates to
        # 'latest' — the --image option is silently ignored here;
        # presumably the operands are reversed. Confirm before fixing.
        image = 'latest' or arguments.IMAGE
        dev = os.environ['DEV'] if 'DEV' in os.environ else None
        devices = arguments["--device"] or dev or None
        if devices is not None:
            devices = Parameter.expand_string(devices)
        hostnames = Parameter.expand(arguments.hostname)
        # NOTE(review): `arguments.burnimg` looks like a typo for
        # `arguments.burning` (the mapped --burning option) — confirm.
        if arguments.burnimg is None:
            burning = hostnames
        else:
            burning = arguments.burning
        VERBOSE(arguments)
        ips = None if not arguments.ip else Parameter.expand(arguments.ip)
        key = arguments.sshkey
        tag = arguments['--tag']
        if os_is_pi() or os_is_linux():
            blocksize = arguments.blocksize
            StopWatch.start("total")
            multi = MultiBurner()
            multi.burn_all(
                burning=burning,
                image=image,
                device=devices,
                blocksize=blocksize,
                progress=True,
                hostnames=hostnames,
                # not difference between names and name, maybe we should align
                ips=ips,
                key=key,
                password=passwd,
                ssid=ssid,
                psk=psk,
                tag=tag,
                yes=arguments.yes)
            StopWatch.stop("total")
            StopWatch.status("total", True)
            StopWatch.benchmark(sysinfo=False, csv=False)
        else:
            Console.error(
                "This command is only supported ona Pi and Linux")
        return ""

    # No dispatch arm matched: point the user at the manual page.
    Console.error("see manual page: cms help burn")
    return ""
def do_key(self, args, arguments): """ :: Usage: key -h | --help key list --cloud=CLOUDS [--output=OUTPUT] key list --source=ssh [--dir=DIR] [--output=OUTPUT] key list --source=git [--output=OUTPUT] [--username=USERNAME] key list [--output=OUTPUT] key add NAME --filename=FILENAME [--output=OUTPUT] key add [NAME] [--source=FILENAME] key add [NAME] [--source=git] key add [NAME] [--source=ssh] key delete NAMES [--cloud=CLOUDS] [--dryrun] key upload [NAMES] [--cloud=CLOUDS] [--dryrun] key upload [NAMES] [VMS] [--dryrun] key group upload [NAMES] [--group=GROUPNAMES] [--cloud=CLOUDS] [--dryrun] key group add [--group=GROUPNAMES] [--cloud=CLOUDS] [--dryrun] key group add --file=FILENAME key group delete [--group=GROUPNAMES] [NAMES] [--dryrun] key group list [--group=GROUPNAMES] [--output=OUTPUT] key group export --group=GROUNAMES --filename=FILENAME Arguments: VMS Parameterized list of virtual machines CLOUDS The clouds NAME The name of the key. SOURCE db, ssh, all KEYNAME The name of a key. For key upload it defaults to the default key name. OUTPUT The format of the output (table, json, yaml) FILENAME The filename with full path in which the key is located Options: --dir=DIR the directory with keys [default: ~/.ssh] --output=OUTPUT the format of the output [default: table] --source=SOURCE the source for the keys --username=USERNAME the source for the keys [default: none] --name=KEYNAME The name of a key Description: Please note that some values are read from the cloudmesh.yaml file. One such value is cloudmesh.profile.user Manages public keys is an essential component of accessing virtual machine sin the cloud. There are a number of sources where you can find public keys. This includes teh ~/.ssh directory and for example github. Keys will be uploaded into cloudmesh database with the add command under the given NAME. If the name is not specified the name cloudmesh.profile.user is assumed. 
key add NAME --source=ssh adds the default key in ~/.ssh/id_rsa.pub key add NAME --source=FILENAME adds the key specified by the filename with the given name key add NAME --git --username=username adds a named github key from a user with the given github username. Once the keys are uploaded to github, they can be listed To list these keys the following list functions are provided. key list --source=git [--username=USERNAME] lists all keys in git for the specified user. If the name is not specified it is read from cloudmesh.yaml key list --source=ssh [--dir=DIR] [--output=OUTPUT] lists all keys in the directory. If the directory is not specified the default will be ~/.ssh key list NAMES lists all keys in the named virtual machines. List command can use the [--output=OUTPUT] option list the keys loaded to cloudmesh in the given format: json, yaml, table. table is default. The NAME can be specified and if omitted the name cloudmesh.profile.user is assumed. To get keys from the cloudmesh database the following commands are available: key delete NAMES deletes the Named keys. This may also have an impact on groups key rename NAME NEW renames the key from NAME to NEW in the cloudmesh database. Group management of keys is an important concept in cloudmesh, allowing multiple users to be added to virtual machines while managing the keys associated with them. The keys must be uploaded to cloudmesh database with a name so they can be used in a group. The --dryrun option executes the command without uploading the information to the clouds. If no group name is specified the group name default is assumed. If no cloudnamesh are specified, all active clouds are assumed. active clouds can be set in the cloudmesh.yaml file. key group delete [GROUPNAMES] [NAMES] [--dryrun] deletes the named keys from the named groups. key group list [GROUPNAMES] [--output=OUTPUT] list the key names and details in the group. 
key group upload [GROUPNAMES] [CLOUDS] [--dryrun] uploads the named groups to the specified clouds. In some cases you may want to store the public keys in files. For this reason we support the following commands. key group add --group=GROUPNAME --file=FILENAME the command adds the keys to the given group. The keys are written in the files in yaml format. key group export --group=GROUNAMES --filename=FILENAME the command exports the keys to the given group. The keys are written in the files in yaml format. The yaml format is as follows: cloudmesh: keys: NAMEOFKEY: name: NAMEOFKEY key: ssh-rsa AAAA..... comment group: - GROUPNAME ... If a key is included in multiple groups they will be added to the grouplist of the key """ def print_keys(keys): print( Printer.write( keys, sort_keys=["name"], order=["name", "type", "fingerprint", "comment"], header=["Name", "Type", "Fingerprint", "Comment"], output=arguments.output)) map_parameters(arguments, 'cloud', 'output', 'source', 'dir', 'output', 'source', 'dryrun') variables = Variables() if arguments.list and arguments.source == "git": config = Config() username = config["cloudmesh.profile.github"] keys = SSHkey().get_from_git(username) print_keys(keys) return "" elif arguments.list and arguments.source == "ssh": # this is much simpler sshkey = SSHkey() print_keys([sshkey]) return "" elif arguments.list and arguments.cloud: clouds = Parameter.expand(arguments.cloud) if len(clouds) == 0: variables = Variables() cloudname = variables['cloud'] clouds = [cloudname] keys = [] for cloud in clouds: print(f"cloud {cloud}") provider = Provider(name=cloud) keys = provider.keys() provider.Print(keys, output=arguments.output, kind="key") return "" elif arguments.list: cloud = "local" db = CmDatabase() keys = db.find(collection=f"{cloud}-key") print_keys(keys) return "" elif arguments.add: """ key add [NAME] [--source=FILENAME] key add [NAME] [--source=git] key add [NAME] [--source=ssh] """ key = Key() source = arguments["--source"] if 
source == "ssh": name = arguments.NAME or "ssh" key.add(name, "ssh") elif source == "git": name = arguments.NAME or "git" key.add("git", "git") elif source is not None: name = arguments.NAME or source key.add(name, "input") else: config = Config() name = config["cloudmesh.profile.github"] kind = "ssh" key.add(name, kind) elif arguments.upload: """ key upload [NAMES] [--cloud=CLOUDS] [--dryrun] key upload [NAMES] [VMS] [--dryrun] """ names = Parameter.expand(arguments.NAMES) # this may have a bug if NAMES is ommitted # # Step 0. Set keyname to variable # if names is None or len(names) == 0: config = Config() username = config["cloudmesh.profile.user"] names = [username] if len(names) == 1: name = names[0] variables = Variables() if "key" in variables: old = variables["key"] if old != name: Console.msg( f"Changing defualt key from {old} to {name}") variables["key"] = name # # Step 1. keys = find keys to upload # cloud = "local" db = CmDatabase() db_keys = db.find(collection=f"{cloud}-key") keys = [] for key in db_keys: if key["name"] in names: keys.append(key) if len(keys) == 0: Console.error( f"No keys with the names {names} found in cloudmesh. \n" " Use the command 'key add' to add the key.") # # Step 2. iterate over the clouds to upload # clouds, names = Arguments.get_cloud_and_names( "list", arguments, variables) for cloud in clouds: print(f"cloud {cloud}") provider = Provider(name=cloud) for key in db_keys: name = key['name'] if name in names: try: r = provider.key_upload(key) Console.ok(f"upload key '{name} successful'. 
") except ValueError as e: Console.error( f"key '{name} already exists in {cloud}.") return "" elif arguments.delete and arguments.cloud and arguments.NAMES: # key delete NAMES --cloud=CLOUDS [--dryrun] names = Parameter.expand(arguments.NAMES) clouds = Parameter.expand(arguments.cloud) for cloud in clouds: provider = Provider(name=cloud) for name in names: if arguments.dryrun: Console.ok(f"Dryrun: delete {name} in {cloud}") else: images = provider.key_delete(name) return "" elif arguments.delete and arguments.NAMES: # key delete NAMES [--dryrun] names = Parameter.expand(arguments.NAMES) cloud = "local" db = CmDatabase() db_keys = db.find(collection=f"{cloud}-key") error = [] for key in db_keys: name = key['name'] if name in names: if arguments.dryrun: Console.ok(f"Dryrun: delete {name}") else: db.delete(collection="local-key", name=name) Console.ok(f"delete {name}") return "" elif arguments.group: raise NotImplementedError return ""
def do_flavor(self, args, arguments): """ :: Usage: flavor list [NAMES] [--cloud=CLOUD] [--refresh] [--output=OUTPUT] Options: --output=OUTPUT the output format [default: table] --cloud=CLOUD the ycloud name --refresh refreshes the data before displaying it Description: This lists out the flavors present for a cloud Examples: cm flavor list --refresh cm flavor list cm flavor list --output=csv cm flavor list 58c9552c-8d93-42c0-9dea-5f48d90a3188 --refresh please remember that a uuid or the flavor name can be used to identify a flavor. """ map_parameters(arguments, "refresh", "cloud", "output") variables = Variables() arguments.output = Parameter.find("output", arguments, variables, "table") arguments.refresh = Parameter.find_bool("refresh", arguments, variables) if arguments.list and arguments.refresh: names = [] clouds, names = Arguments.get_cloud_and_names("list", arguments, variables) for cloud in clouds: print(f"cloud {cloud}") provider = Provider(name=cloud) flavors = provider.flavors() provider.Print(flavors, output=arguments.output, kind="flavor") return "" elif arguments.list: names = [] clouds, names = Arguments.get_cloud_and_names("list", arguments, variables) print(clouds, names) try: for cloud in clouds: print(f"List {cloud}") provider = Provider(cloud) db = CmDatabase() flavors = db.find(collection=f"{cloud}-flavor") provider.Print(flavors, output=arguments.output, kind="flavor") except Exception as e: VERBOSE(e) return ""
    def do_volume(self, args, arguments):
        """
        ::

          Usage:
            volume list [NAMES] [--vm=VM] [--region=REGION] [--cloud=CLOUD] [--refresh] [--dryrun] [--output=FORMAT]
            volume create [NAME] [--size=SIZE] [--volume_type=TYPE] [--description=DESCRIPTION] [--dryrun] [--region=REGION] [--path=PATH]
            volume attach [NAMES] [--vm=VM]
            volume detach [NAMES]
            volume delete [NAMES]
            volume add_tag [NAME] [--key=KEY] [--value=VALUE]
            volume status [NAME]
            volume migrate [NAME] [--vm=VM] [--cloud=CLOUD]
            volume sync [NAMES] [--cloud=CLOUD]
            volume purge [--cloud=CLOUD]

          This command manages volumes across different clouds

          Arguments:
              NAME   the name of the volume
              NAMES  the names of multiple volumes

          Options:
              --vm=VM                          The name of the virtual machine
              --region=REGION                  The name of the region
              --cloud=CLOUD                    The name of the cloud
              --refresh                        If refresh the info is taken from the cloud
              --volume_type=TYPE               The type of the volume
              --output=FORMAT                  Output format [default: table]
              --key=KEY                        The tag key
              --value=VALUE                    The value of tag key
              --snapshot                       The snapshot of volume
              --path=PATH                      The path of local volume

          Description:

            volume list [NAMES] [--vm=VM] [--region=REGION] [--cloud=CLOUD]
                        [--refresh] [--dryrun] [--output=FORMAT]
                List all the volumes for certain vm, region, or cloud.

            volume create [NAME] [--size=SIZE] [--volume_type=TYPE]
                          [--description=DESCRIPTION] [--dryrun]
                          [--snapshot=SNAPSHOT] [--region=REGION]
                Creates a volume

            volume status [NAMES] [--cloud=CLOUD]
                Get the status (e.g. 'available', 'READY', 'in-use') of
                a volume

            volume attach [NAMES] [--vm=VM]
                Attach volume to a vm

            volume detach [NAMES]
                Detach volume from a vm

            volume delete [NAMES]
                Delete the named volumes

            volume migrate [NAME] [--vm=VM] [--cloud=CLOUD]
                Migrate volume from one vm to another vm in the same
                provider.

            volume sync [NAMES] [--cloud=CLOUD]
                Volume sync allows for data to be shared between two
                volumes.

            volume purge [--cloud=CLOUD]
                Volume purge delete all the "deleted" volumes in MongoDB
                database
        """
        VERBOSE(arguments)
        variables = Variables()

        def get_last_volume():
            """
            Return (as a string) the most recently generated volume name
            from the counter file, without incrementing the counter.

            :return: string
            """
            config = Config()
            n = VolumeName(user=config["cloudmesh.profile.user"],
                           kind="volume",
                           path=f"{config.location}/volume.yaml",
                           schema="{user}-volume-{counter}")
            last_volume_name = n
            return str(last_volume_name)

        def create_name():
            """
            Create a new volume name (increments the counter) when no
            name was specified on the command line.

            :return: VolumeName
            """
            config = Config()
            n = VolumeName(user=config["cloudmesh.profile.user"],
                           kind="volume",
                           path=f"{config.location}/volume.yaml",
                           schema="{user}-volume-{counter}")
            n.incr()
            return n

        # expose the docopt --options as attribute-style arguments
        map_parameters(arguments,
                       "cloud",
                       "vm",
                       "region",
                       "refresh",
                       "dryrun",
                       "output",
                       "size",
                       "volume_type",
                       "description",
                       "key",
                       "value",
                       "snapshot",
                       "path")

        arguments.output = Parameter.find(
            "output",
            arguments,
            variables,
        )
        # the currently selected default cloud from the variable store
        cloud = variables['cloud']

        if arguments.list:
            if arguments.NAMES:
                names = Parameter.expand(arguments["NAMES"])
                if arguments.cloud:
                    # "cms volume list NAMES --cloud=aws1"
                    provider = Provider(name=arguments.cloud)
                    result = provider.list(**arguments)
                    print(
                        provider.Print(result,
                                       kind='volume',
                                       output=arguments.output))
                else:
                    # if "cms volume list NAMES": search every active cloud
                    # for the named volumes until all names are accounted for
                    config = Config()
                    clouds = list(config["cloudmesh.volume"].keys())
                    for cloud in clouds:
                        if len(names) != 0:
                            banner(f"listing volume info from {cloud}")
                        else:
                            # all names found on earlier clouds
                            banner("End of listing Volumes")
                            break
                        active = config[f"cloudmesh.volume.{cloud}.cm.active"]
                        if active:
                            provider = Provider(name=cloud)
                            listed = []
                            for name in names:
                                volume = provider.search(name=name)
                                if volume:
                                    arguments.NAME = name
                                    result = provider.list(**arguments)
                                    print(
                                        provider.Print(
                                            result,
                                            kind='volume',
                                            output=arguments.output))
                                    listed.append(name)
                            if len(listed) > 0:
                                # delete all listed volumes in names
                                for name in listed:
                                    names.remove(name)
            else:
                if arguments.cloud:
                    # "cms volume list --cloud=aws1"
                    provider = Provider(name=arguments.cloud)
                    result = provider.list(**arguments)
                    print(
                        provider.Print(result,
                                       kind='volume',
                                       output=arguments.output))
                else:
                    # "cms volume list" -- use the default cloud variable
                    arguments['cloud'] = cloud
                    provider = Provider(name=arguments.cloud)
                    result = provider.list(**arguments)
                    print(
                        provider.Print(result,
                                       kind='volume',
                                       output=arguments.output))
            return ""

        elif arguments.create:
            # default the cloud and generate a volume name if omitted
            if arguments.cloud is None:
                arguments['cloud'] = cloud
            if arguments.NAME is None:
                arguments.NAME = str(create_name())
            provider = Provider(name=arguments.cloud)
            result = provider.create(**arguments)
            print(
                provider.Print(result,
                               kind='volume',
                               output=arguments.output))

        elif arguments.delete:
            # delete the named volumes wherever they are found, searching
            # every active cloud
            names = arguments.NAMES or variables["volume"]
            names = Parameter.expand(names)
            if names is None:
                Console.error("No volume specified or found")
                return ""
            config = Config()
            clouds = list(config["cloudmesh.volume"].keys())
            for cloud in clouds:
                # if len(names) != 0:
                #     banner(f"Deleting volumes from {cloud}")
                # else:
                #     banner("End of Deleting Volumes")
                active = config[f"cloudmesh.volume.{cloud}.cm.active"]
                if active:
                    provider = Provider(name=cloud)
                    deleted = []
                    for name in names:
                        volume = provider.search(name=name)
                        if volume:
                            result = provider.delete(name=name)
                            deleted.append(name)
                    if len(deleted) > 0:
                        # delete all deleted volumes in names
                        for name in deleted:
                            names.remove(name)

        elif arguments.attach:
            # attach the named volumes to a vm; volume/vm fall back to the
            # variable store
            arguments.cloud = arguments.cloud or cloud
            names = arguments.NAMES or variables["volume"]
            vm = arguments.vm or variables["vm"]
            if names is None:
                Console.error("No Volume specified or found")
                return ""
            if vm is None:
                Console.error("No vm specified or found")
                return ""
            names = Parameter.expand(names)
            # banner(f"Attaching {names} to {arguments.vm}")
            provider = Provider(name=arguments.cloud)
            result = provider.attach(names, vm)
            print(
                provider.Print(result,
                               kind='volume',
                               output=arguments.output))

        elif arguments.detach:
            # detach the named volumes, searching every active cloud
            config = Config()
            clouds = list(config["cloudmesh.volume"].keys())
            volumes = arguments.NAMES or variables["volume"]
            if volumes is None:
                Console.error("No volume specified or found")
                return ""
            volumes = Parameter.expand(volumes)
            for cloud in clouds:
                # if len(volumes) != 0:
                #     banner(f"Detaching volumes from {cloud}")
                # else:
                #     banner("End of Detaching Volumes")
                #     break
                active = config[f"cloudmesh.volume.{cloud}.cm.active"]
                if active:
                    detached = []
                    provider = Provider(name=cloud)
                    for name in volumes:
                        # returns volume name if found in the cloud,
                        # None if it is not in the cloud
                        volume = provider.search(name=name)
                        if volume:
                            # banner(f"Detaching {name} from {cloud}")
                            result = provider.detach(name=name)
                            detached.append(name)
                            print(
                                provider.Print(result,
                                               kind='volume',
                                               output=arguments.output))
                    if len(detached) > 0:
                        # delete all detached volumes in volumes
                        for name in detached:
                            volumes.remove(name)

        elif arguments.add_tag:
            # tag a volume; the name falls back to the variable store and
            # then to the last generated name
            arguments.cloud = arguments.cloud or cloud
            name = arguments.NAME or variables["volume"] or get_last_volume()
            arguments.NAME = name
            provider = Provider(name=arguments.cloud)
            result = provider.add_tag(**arguments)
            print(
                provider.Print(result,
                               kind='volume',
                               output=arguments.output))

        elif arguments.status:
            arguments.cloud = arguments.cloud or cloud
            name = arguments.NAME or variables["volume"] or get_last_volume()
            arguments.NAME = name
            provider = Provider(name=arguments.cloud)
            result = provider.status(name=name)
            print(
                provider.Print(result,
                               kind='volume',
                               output=arguments.output))

        elif arguments.migrate:
            if arguments.cloud:
                # "cms volume migrate NAME --vm=VM --cloud=aws1"
                # if no given volume, get the current volume
                # or get the last volume,
                name = arguments.NAME or variables[
                    "volume"] or get_last_volume()
                arguments.NAME = name
                provider = Provider(name=arguments.cloud)
                result = provider.migrate(**arguments)
                print(
                    provider.Print(result,
                                   kind='volume',
                                   output=arguments.output))
            else:
                raise NotImplementedError

        elif arguments.sync:
            # "cms volume sync NAMES --cloud=CLOUD"
            # when len(NAMES)==2, sync volume (NAMES[0]) with volume (NAMES[1])
            # when len(NAMES)==1, sync current volume with volume(NAMES[0])
            # what it actually does is copy second volume and overwrite
            # the other (current volume or first volume in NAMES)
            #
            # NOTE(review): len([arguments.NAMES]) wraps the raw docopt
            # string in a one-element list, so it is ALWAYS 1 and the
            # elif/else branches below are unreachable. This probably
            # should be len(Parameter.expand(arguments.NAMES)) -- confirm
            # against the volume provider's sync() expectations.
            if len([arguments.NAMES]) == 1:
                volumes = [
                    variables["volume"] or get_last_volume(), arguments.NAMES
                ]
                arguments.NAMES = volumes
            elif len([arguments.NAMES]) == 2:
                volumes = arguments.NAMES
            else:
                Console.error("Two volumes should be specified")
            # if arguments.cloud:
            arguments.cloud = cloud
            provider = Provider(name=arguments.cloud)
            result = provider.sync(**arguments)
            print(
                provider.Print(result,
                               kind='volume',
                               output=arguments.output))
            # else:
            #     raise NotImplementedError

        elif arguments.purge:
            # remove all volumes marked "deleted" from the database, then
            # show the remaining volumes
            arguments.cloud = arguments.cloud or cloud
            provider = Provider(name=arguments.cloud)
            provider.purge(**arguments)
            result = provider.list()
            print(
                provider.Print(result,
                               kind='volume',
                               output=arguments.output))
    def do_storage(self, args, arguments):
        """
        ::

          Usage:
            storage [--storage=SERVICE] create dir DIRECTORY
            storage [--storage=SERVICE] get SOURCE DESTINATION [--recursive]
            storage [--storage=SERVICE] put SOURCE DESTINATION [--recursive]
            storage [--storage=SERVICE] list [SOURCE] [--recursive] [--output=OUTPUT]
            storage [--storage=SERVICE] delete SOURCE
            storage [--storage=SERVICE] search DIRECTORY FILENAME [--recursive] [--output=OUTPUT]
            storage [--storage=SERVICE] sync SOURCE DESTINATION [--name=NAME] [--async]
            storage [--storage=SERVICE] sync status [--name=NAME]
            storage config list [--output=OUTPUT]
            storage copy SOURCE DESTINATION [--recursive]

          This command does some useful things.

          Arguments:
            SOURCE        SOURCE can be a directory or file
            DESTINATION   DESTINATION can be a directory or file
            DIRECTORY     DIRECTORY refers to a folder on the cloud service

          Options:
            --storage=SERVICE  specify the cloud service name like aws or
                               azure or box or google

          Description:
                commands used to upload, download, list files on different
                cloud storage services.

                storage put [options..]
                    Uploads the file specified in the filename to specified
                    cloud from the SOURCEDIR.

                storage get [options..]
                    Downloads the file specified in the filename from the
                    specified cloud to the DESTDIR.

                storage delete [options..]
                    Deletes the file specified in the filename from the
                    specified cloud.

                storage list [options..]
                    lists all the files from the container name specified on
                    the specified cloud.

                storage create dir [options..]
                    creates a folder with the directory name specified on the
                    specified cloud.

                storage search [options..]
                    searches for the source in all the folders on the
                    specified cloud.

                sync SOURCE DESTINATION
                    puts the content of source to the destination.
                    If --recursive is specified this is done recursively from
                    the source
                    If --async is specified, this is done asynchronously
                    If a name is specified, the process can also be monitored
                    with the status command by name.
                    If the name is not specified all date is monitored.

                sync status
                    The status for the asynchronous sync can be seen with
                    this command

                config list
                    Lists the configures storage services in the yaml file

                storage copy SOURCE DESTINATION
                    Copies files from source storage to destination storage.
                    The syntax of SOURCE and DESTINATION is:
                    SOURCE - awss3:source.txt
                    DESTINATION - azure:target.txt

          Example:
            set storage=azureblob
            storage put SOURCE DESTINATION --recursive

            is the same as
            storage --storage=azureblob put SOURCE DESTINATION --recursive

            storage copy azure:source.txt oracle:target.txt
        """
        # arguments.CONTAINER = arguments["--container"]

        map_parameters(arguments, "recursive", "storage")

        VERBOSE(arguments)

        if arguments.storage is None:
            if arguments.copy is None:
                # no --storage given: fall back to the 'storage' variable
                try:
                    v = Variables()
                    arguments.storage = v['storage']
                except Exception as e:
                    # NOTE(review): the ValueError hides the original
                    # exception; consider 'raise ... from e'
                    arguments.storage = None
                    raise ValueError("Storage provider is not defined")
            else:
                # for 'copy', infer the provider from the prefixed
                # SOURCE/DESTINATION (e.g. "awss3:source.txt")
                if arguments.DESTINATION.split(":")[0] == "local":
                    arguments.storage = arguments.SOURCE.split(":")[0]
                else:
                    arguments.storage = arguments.DESTINATION.split(":")[0]

        # --storage may be a parameterized list of services
        arguments.storage = Parameter.expand(arguments.storage)

        if arguments["get"]:
            # download; only the first named storage service is used
            provider = Provider(arguments.storage[0])

            result = provider.get(arguments.SOURCE,
                                  arguments.DESTINATION,
                                  arguments.recursive)

        elif arguments.put:
            # upload; only the first named storage service is used
            provider = Provider(arguments.storage[0])

            result = provider.put(arguments.SOURCE,
                                  arguments.DESTINATION,
                                  arguments.recursive)

        elif arguments.create and arguments.dir:
            provider = Provider(arguments.storage[0])

            result = provider.create_dir(arguments.DIRECTORY)

        elif arguments.list:
            # list on every named storage service; default source is '.'
            source = arguments.SOURCE or '.'
            for storage in arguments.storage:
                provider = Provider(storage)

                result = provider.list(source, arguments.recursive)

        elif arguments.delete:
            for storage in arguments.storage:
                provider = Provider(storage)

                provider.delete(arguments.SOURCE)

        elif arguments.search:
            for storage in arguments.storage:
                provider = Provider(storage)

                provider.search(arguments.DIRECTORY,
                                arguments.FILENAME,
                                arguments.recursive)

        elif arguments.rsync:
            # NOTE(review): the usage defines 'sync', not 'rsync', so this
            # branch can never match -- 'storage sync' currently falls
            # through unhandled. Confirm and rename to arguments.sync.
            # TODO: implement
            raise NotImplementedError

        elif arguments.copy:
            VERBOSE(f"COPY: Executing Copy command from {arguments.SOURCE} to "
                    f"{arguments.DESTINATION} providers")
            print(f"DEBUG storage.py: INITIALIZE with {arguments.storage[0]} "
                  "provider.")
            provider = Provider(arguments.storage[0])

            result = provider.copy(arguments.SOURCE,
                                   arguments.DESTINATION,
                                   arguments.recursive)
def do_multipass(self, args, arguments): """ :: Usage: multipass deploy [--dryrun] multipass list [--output=OUTPUT] [--dryrun] multipass images [--output=OUTPUT] [--dryrun] multipass create NAMES [--image=IMAGE] [--size=SIZE] [--mem=MEMORY] [--cpus=CPUS] [--cloud-init=FILE] [--dryrun] multipass delete NAMES [--output=OUTPUT][--dryrun] multipass destroy NAMES [--output=OUTPUT][--dryrun] multipass shell NAMES [--dryrun] multipass run COMMAND NAMES [--output=OUTPUT] [--dryrun] multipass info NAMES [--output=OUTPUT] [--dryrun] multipass suspend NAMES [--output=OUTPUT] [--dryrun] multipass resume NAMES [--output=OUTPUT] [--dryrun] multipass start NAMES [--output=OUTPUT] [--dryrun] multipass stop NAMES [--output=OUTPUT] [--dryrun] multipass reboot NAMES [--output=OUTPUT] [--dryrun] multipass mount SOURCE DESTINATION [--dryrun] multipass umount SOURCE [--dryrun] multipass transfer SOURCE DESTINATION [--dryrun] multipass set key=VALUE [--dryrun] multipass get [key] [--dryrun] multipass deploy [--dryrun] multipass rename NAMES [--dryrun] multipass version Interface to multipass Options: --output=OUTPUT the output format [default: table]. Other values are yaml, csv and json. --image=IMAGE the image name to be used to create a VM. --cpus=CPUS Number of CPUs to allocate. Minimum: 1, default: 1. --size=SIZE Disk space to allocate. Positive integers, in bytes, or with K, M, G suffix. Minimum: 512M, default: 5G. --mem=MEMORY Amount of memory to allocate. Positive integers, in bytes, or with K, M, G suffix. Minimum: 128M, default: 1G. --cloud-init=FILE Path to a user-data cloud-init configuration Arguments: NAMES the names of the virtual machine Description: The NAMES can be a parameterized hostname such as red[0-1,5] = red0,red1,red5 Commands: First you can see the supported multipass images with cms multipass images Create and launch a new vm using cms multipass create NAMES Optionally you can provide image name, size, memory, number of cpus to create an instance. 
Start one or multiple multipass vms with cms multipass start NAMES Stop one or multiple vms with cms multipass stop NAMES Gets all multipass internal key values with cms multipass get Gets a specific internal key. cms multipass get KEY Known keys client.gui.autostart client.primary-name local.driver are there more? Reboot (stop and then start) vms with cms multipass reboot NAMES Delete one of multiple vms without purging with cms multipass delete NAMES Destory multipass vms (delete and purge) with cms multipass destroy NAMES Caution: Once destroyed everything in vm will be deleted and cannot be recovered. WHEN YOU IMPLEMENT A FUNCTION INCLUDE MINIMAL DOCUMENTATION HERE """ name = arguments.NAME map_parameters(arguments, "dryrun", "refresh", "cloud", "image", "size", "mem", "cpus", "cloud-init", "output") # so we can use arguments.cloudinit arguments["cloudinit"] = arguments["--cloud-init"] image = arguments.image variables = Variables() arguments.output = Parameter.find("output", arguments, variables, "table") names = Parameter.expand(arguments.NAMES) VERBOSE(arguments) if arguments.version: if arguments.dryrun: banner("dryrun list") else: provider = Provider() version = provider.version() del version["name"] print(Printer.attribute(version, header=["Program", "Version"])) return "" elif arguments.list: if arguments.dryrun: banner("dryrun list") else: provider = Provider() list = provider.list() print( provider.Print(list, kind='image', output=arguments.output)) return "" elif arguments.images: if arguments.dryrun: banner("dryrun images") else: provider = Provider() images = provider.images() print( provider.Print(images, kind='image', output=arguments.output)) return "" elif arguments.run: if arguments.dryrun: banner("dryrun run") for name in names: if arguments.dryrun: Console.ok(f"run {name} {arguments.COMMAND}") else: provider = Provider() provider.run(name, arguments.COMMAND) return "" elif arguments.create: result = "" if arguments.dryrun: banner("create") 
timeout = 360 group = None kwargs = { "cloud_init": arguments.cloud_init, "cpus": arguments.cpus, "memory": arguments.mem } for name in names: if arguments.dryrun: Console.ok(f"dryrun create {name} {image}") else: provider = Provider() result = provider.create(name, image, arguments.size, timeout, group, **kwargs) VERBOSE(result) return result elif arguments.start: result = "" if arguments.dryrun: banner("start") for name in names: if arguments.dryrun: Console.ok(f"dryrun start {name}") else: provider = Provider() result = provider.start(name) VERBOSE(result) return result elif arguments.stop: result = "" if arguments.dryrun: banner("stop") for name in names: if arguments.dryrun: Console.ok(f"dryrun stop {name}") else: provider = Provider(name=name) result = provider.stop(name) VERBOSE(result) return result elif arguments.delete: result = "" if arguments.dryrun: banner("delete") for name in names: if arguments.dryrun: Console.ok(f"dryrun delete {name}") else: provider = Provider() # Default purge is false. Is this ok? result = provider.delete(name) VERBOSE(result) return result elif arguments.info: result = "" if arguments.dryrun: banner(f"info {name}") for name in names: if arguments.dryrun: Console.ok(f"dryrun info {name}") else: provider = Provider() # Default purge is false. Is this ok? result = provider.info(name) VERBOSE(result) return result elif arguments.rename: result = "" if arguments.dryrun: banner(f"Current Name: {names[0]}" f"New Name: {names[1]}") if names.len() > 2: Console.error("You have entered too many names." 
"Only enter TWO names at a time.") else: old_name = names[0] new_name = names[1] provider = Provider() result = provider.rename(old_name, new_name) VERBOSE(result) return result elif arguments.suspend: result = "" if arguments.dryrun: banner("suspend") for name in names: if arguments.dryrun: Console.ok(f"dryrun suspend {name}") else: provider = Provider() result = provider.suspend(name) VERBOSE(result) return result elif arguments.resume: result = "" if arguments.dryrun: banner("resume") for name in names: if arguments.dryrun: Console.ok(f"dryrun resume {name}") else: provider = Provider() result = provider.resume(name) VERBOSE(result) return result elif arguments.destroy: result = "" if arguments.dryrun: banner("destroy") for name in names: if arguments.dryrun: Console.ok(f"dryrun destroy {name}") else: provider = Provider() result = provider.destroy(name) VERBOSE(result) return result elif arguments.reboot: result = "" if arguments.dryrun: banner("reboot") for name in names: if arguments.dryrun: Console.ok(f"dryrun reboot {name}") else: provider = Provider() result = provider.reboot(name) VERBOSE(result) return result elif arguments.shell: if len(names) > 1: Console.error("shell must only have one host") return "" name = names[0] if arguments.dryrun: banner("dryrun shell {name}") else: provider = Provider() provider.shell() return "" elif arguments.info: if arguments.dryrun: banner("dryrun info") else: provider = Provider() info = provider.info() print( provider.Print(info, kind='info', output=arguments.output)) return "" elif arguments.mount: if arguments.dryrun: banner( f"dryrun mount {arguments.SOURCE} {arguments.DESTINATION}") else: provider = Provider() provider.mount(arguments.SOURCE, arguments.DESTINATION) # list the mounts and display as table return "" elif arguments.deploy: provider = Deploy(dryrun=arguments.dryrun) provider.install() else: Console.error("Not yet implemented") return ""
def do_emr(self, args, arguments):
    """
    ::

      Usage:
        emr list clusters [--status=STATUS...] [--format=FORMAT]
        emr list instances CLUSTERID [--status=STATUS...] [--type=TYPE...] [--format=FORMAT]
        emr list steps CLUSTERID [--state=STATE...] [--format=FORMAT]
        emr describe CLUSTERID [--format=FORMAT]
        emr start NAME [--master=MASTER] [--node=NODE] [--count=COUNT]
        emr stop CLUSTERID
        emr upload FILE BUCKET BUCKETNAME
        emr copy CLUSTERID BUCKET BUCKETNAME
        emr run CLUSTERID BUCKET BUCKETNAME

      This command is used to interface with Amazon Web Services Elastic
      Map Reduce (EMR) service to run Apache Spark jobs. It can start,
      list, and stop clusters and submit jobs to them.

      Arguments:
          CLUSTERID       The AWS Cluster ID.
          NAME            The name of the cluster.
          FILE            The local file to upload.
          BUCKET          The name of the S3 bucket to use.
          BUCKETNAME      The name to file in the Bucket to use.

      Options:
          --status=STATUS    The status to search for. [default: all]
          --type=TYPE        The type of instance to search for. [default: all]
          --format=FORMAT    How to format the output. [default: table]
          --master=MASTER    The type of server to use for the master node. [default: m3.xlarge]
          --node=NODE        The type of server to use for the worker nodes. [default: m3.xlarge]
          --count=COUNT      The number of servers to use [default: 3]
          --state=STATE      The state of the job step to filter for.

      Description:

          emr list clusters [--status=STATUS] [--format=FORMAT]
              Lists all clusters viewable to the credentials with a given
              status [default: all]. Valid statuses are: start, boot, run,
              wait, terminating, shutdown, and error.

          emr list instances [--status=STATUS...] [--format=FORMAT]
              Lists all instances viewable to the credentials with a given
              status [default: all]. Valid statuses are: start, provision,
              boot, run, down. Valid types are: master, core, and task.

          emr list steps CLUSTERID [--state=STATE...]
              Lists all steps being performed by a cluster. Valid states
              are pending, canceling, running, completed cancelled,
              failed, and interrupted

          emr describe CLUSTERID
              Describes a cluster. Lists its status, region, type, etc.

          emr stop CLUSTERID
              Stops a cluster. Once a shutdown is initiated, it cannot
              be undone.

          emr start NAME [--master=MASTER] [--node=NODE] [--count=COUNT]
              Starts a cluster with a given name, number of servers, and
              server type. Bootstraps with Hadoop and Spark.

          emr copy CLUSTERID BUCKET BUCKETNAME
              Copy a file from S3 to the cluster's master node.

          emr run CLUSTERID BUCKET BUCKETNAME
              Submit a spark application stored in an S3 bucket to the
              spark cluster.
    """
    # Normalize the docopt "--xyz" keys so they are reachable as
    # arguments.xyz / arguments['xyz'].
    map_parameters(arguments, 'status', 'format', 'type', 'master',
                   'node', 'count', 'state')

    # Manager wraps the boto3 EMR client; each call returns a list whose
    # first element carries the payload under 'data'
    # (presumably the raw AWS response — TODO confirm against Manager).
    emr = Manager()

    if arguments['list'] and arguments['clusters']:
        clusters = emr.list_clusters(arguments)[0]['data']

        if len(clusters) == 0:
            print("No clusters were found.")
        else:
            # Dotted keys index into the nested AWS response dicts.
            print(Printer.flatwrite(clusters,
                                    sort_keys=["Id"],
                                    order=["Id", "Name", "Status.State",
                                           "Status.StateChangeReason.Code",
                                           "Status.StateChangeReason.Message",
                                           "NormalizedInstanceHours"],
                                    header=["ID", "Name", "State",
                                            "State Reason", "State Message",
                                            "Hours"],
                                    output=arguments['format']))
    elif arguments['list'] and arguments['instances']:
        instances = emr.list_instances(arguments)[0]['data']

        if len(instances) == 0:
            print("No instances were found.")
        else:
            print(Printer.flatwrite(instances,
                                    sort_keys=["Id"],
                                    order=["Id", "Status.State",
                                           "Status.StateChangeReason.Code",
                                           "Status.StateChangeReason.Message",
                                           "Market", "InstanceType"],
                                    header=["ID", "State", "State Reason",
                                            "State Message", "Market",
                                            "Instance Type"],
                                    output=arguments['format']))
    elif arguments['list'] and arguments['steps']:
        steps = emr.list_steps(arguments)[0]['data']

        if len(steps) == 0:
            print("No steps were found.")
        else:
            print(Printer.flatwrite(steps,
                                    sort_keys=["Id"],
                                    order=["Id", "Name", "Status.State",
                                           "Status.StateChangeReason"],
                                    header=["ID", "Name", "Status",
                                            "Status Reason"],
                                    output=arguments['format']))
    elif arguments['describe']:
        cluster = emr.describe_cluster(arguments)[0]['data']

        # Fixing formatting: flatten the Applications list of
        # {Name, Version} dicts into one comma-separated string so the
        # flat table printer can show it in a single column.
        apps = ""

        for application in cluster["Applications"]:
            apps += "{} {}, ".format(application["Name"],
                                     application["Version"])

        apps = apps[:-2]  # drop the trailing ", "
        cluster["Applications"] = apps
        # Printer.flatwrite expects a list of records, not a single dict.
        cluster = [cluster]

        print(Printer.flatwrite(cluster,
                                sort_keys=["Id"],
                                order=["Id", "Name", "Status.State",
                                       "Status.StateChangeReason.Code",
                                       "Status.StateChangeReason.Message",
                                       "Ec2InstanceAttributes.Ec2AvailabilityZone",
                                       "InstanceCollectionType",
                                       "NormalizedInstanceHours",
                                       "Applications"],
                                header=["ID", "Name", "State", "State Reason",
                                        "State Message", "Region", "Type",
                                        "Instance Hours", "Applications"],
                                output=arguments['format']))
    elif arguments['stop']:
        cluster = emr.stop_cluster(arguments)[0]['data']
        print("{}: Stopping".format(cluster['name']))
    elif arguments['start']:
        cluster = emr.start_cluster(arguments)[0]['data']
        print("{}: {} Starting".format(cluster['name'], cluster['cluster']))
    elif arguments['upload']:
        upload = emr.upload_file(arguments)[0]['data']
        print("File uploaded to: {} - {}".format(upload['bucket'],
                                                 upload['file']))
    elif arguments['copy']:
        results = emr.copy_file(arguments)[0]['data']
        # StepIds comes back from AWS add_job_flow_steps; first entry is
        # the step just submitted.
        print("Copy step is running. Step ID: {}".format(
            results['StepIds'][0]))
    elif arguments['run']:
        results = emr.run(arguments)[0]['data']
        print("Run step is running. Step ID: {}".format(
            results['StepIds'][0]))

    return ""
def do_vm(self, args, arguments):
    """
    ::

      Usage:
        vm ping [NAMES] [--cloud=CLOUDS] [--count=N] [--processors=PROCESSORS]
        vm check [NAMES] [--cloud=CLOUDS] [--processors=PROCESSORS]
        vm status [NAMES] [--cloud=CLOUDS]
        vm console [NAME] [--force]
        vm start [NAMES] [--cloud=CLOUD] [--dryrun]
        vm stop [NAMES] [--cloud=CLOUD] [--dryrun]
        vm terminate [NAMES] [--cloud=CLOUD] [--dryrun]
        vm delete [NAMES] [--cloud=CLOUD] [--dryrun]
        vm refresh [--cloud=CLOUDS]
        vm list [NAMES] [--cloud=CLOUDS] [--output=OUTPUT] [--refresh]
        vm boot [--name=VMNAMES] [--cloud=CLOUD] [--username=USERNAME] [--image=IMAGE] [--flavor=FLAVOR] [--public] [--secgroup=SECGROUPs] [--key=KEY] [--dryrun]
        vm boot [--n=COUNT] [--cloud=CLOUD] [--username=USERNAME] [--image=IMAGE] [--flavor=FLAVOR] [--public] [--secgroup=SECGROUPS] [--key=KEY] [--dryrun]
        vm run [--name=VMNAMES] [--username=USERNAME] [--dryrun] COMMAND
        vm script [--name=NAMES] [--username=USERNAME] [--dryrun] SCRIPT
        vm ip assign [NAMES] [--cloud=CLOUD]
        vm ip show [NAMES] [--group=GROUP] [--cloud=CLOUD] [--output=OUTPUT] [--refresh]
        vm ip inventory [NAMES]
        vm ssh [NAMES] [--username=USER] [--quiet] [--ip=IP] [--key=KEY] [--command=COMMAND] [--modify-knownhosts]
        vm rename [OLDNAMES] [NEWNAMES] [--force] [--dryrun]
        vm wait [--cloud=CLOUD] [--interval=SECONDS]
        vm info [--cloud=CLOUD] [--output=OUTPUT]
        vm username USERNAME [NAMES] [--cloud=CLOUD]
        vm resize [NAMES] [--size=SIZE]

      Arguments:
          OUTPUT         the output format
          COMMAND        positional arguments, the commands you want to
                         execute on the server(e.g. ls -a) separated by ';',
                         you will get a return of executing result instead of login to
                         the server, note that type in -- is suggested before
                         you input the commands
          NAME           server name. By default it is set to the name of last vm from database.
          NAMES          server name. By default it is set to the name of last vm from database.
          KEYPAIR_NAME   Name of the vm keypair to be used to create VM. Note this is
                         not a path to key.
          NEWNAMES       New names of the VM while renaming.
          OLDNAMES       Old names of the VM while renaming.

      Options:
          --output=OUTPUT   the output format [default: table]
          -H --modify-knownhosts  Do not modify ~/.ssh/known_hosts file
                                  when ssh'ing into a machine
          --username=USERNAME   the username to login into the vm. If not
                                specified it will be guessed
                                from the image name and the cloud
          --ip=IP          give the public ip of the server
          --cloud=CLOUD    give a cloud to work on, if not given, selected
                           or default cloud will be used
          --count=COUNT    give the number of servers to start
          --detail         for table, a brief version is used as default,
                           use this flag to print detailed table
          --flavor=FLAVOR  give the name or id of the flavor
          --group=GROUP          give the group name of server
          --secgroup=SECGROUP    security group name for the server
          --image=IMAGE    give the name or id of the image
          --key=KEY        specify a key to use, input a string which
                           is the full path to the private key file
          --keypair_name=KEYPAIR_NAME   Name of the vm keypair to be used to
                                        create VM. Note this is not a path to key.
          --user=USER      give the user name of the server that you want
                           to use to login
          --name=NAME      give the name of the virtual machine
          --force          rename/ delete vms without user's confirmation
          --command=COMMAND   specify the commands to be executed

      Description:
          commands used to boot, start or delete servers of a cloud

          vm default [options...]
              Displays default parameters that are set for vm boot either
              on the default cloud or the specified cloud.

          vm boot [options...]
              Boots servers on a cloud, user may specify flavor, image
              .etc, otherwise default values will be used, see how to set
              default values of a cloud: cloud help

          vm start [options...]
              Starts a suspended or stopped vm instance.

          vm stop [options...]
              Stops a vm instance .

          vm delete [options...]
              Delete servers of a cloud, user may delete a server by its
              name or id, delete servers of a group or servers of a cloud,
              give prefix and/or range to find servers by their names.
              Or user may specify more options to narrow the search

          vm floating_ip_assign [options...]
              assign a public ip to a VM of a cloud

          vm ip show [options...]
              show the ips of VMs

          vm ssh [options...]
              login to a server or execute commands on it

          vm list [options...]
              same as command "list vm", please refer to it

          vm status [options...]
              Retrieves status of last VM booted on cloud and displays it.

          vm refresh [--cloud=CLOUDS]
              this command refreshes the data for virtual machines,
              images and flavors for the specified clouds.

          vm ping [NAMES] [--cloud=CLOUDS] [--count=N] [--processors=PROCESSORS]
               pings the specified virtual machines, while using at most N pings.
               The ping is executed in parallel.
               If names are specifies the ping is restricted to the given
               names in parameter format. If clouds are specified, names
               that are not in these clouds are ignored. If the name is
               set in the variables this name is used.

      Tip:
          give the VM name, but in a hostlist style, which is very
          convenient when you need a range of VMs e.g. sample[1-3]
          => ['sample1', 'sample2', 'sample3']
          sample[1-3,18] => ['sample1', 'sample2', 'sample3', 'sample18']

      Quoting commands:
          cm vm login gvonlasz-004 --command=\"uname -a\"

      Limitations:

          Azure: rename is not supported
    """
    # Make the "--xyz" docopt keys reachable as arguments.xyz.  Note that
    # "--count" is intentionally NOT mapped here; it is read explicitly
    # via arguments["--count"] below.
    map_parameters(arguments,
                   'active',
                   'cloud',
                   'command',
                   'dryrun',
                   'flavor',
                   'force',
                   'output',
                   'group',
                   'image',
                   'interval',
                   'ip',
                   'key',
                   'modify-knownhosts',
                   'n',
                   'name',
                   'public',
                   'quiet',
                   'secgroup',
                   'size',
                   'username')

    VERBOSE(arguments)

    variables = Variables()

    if arguments.refresh:

        clouds, names = Arguments.get_cloud_and_names("refresh",
                                                      arguments,
                                                      variables)
        return ""

    elif arguments.ping:
        # TODO: IMPLEMENT (parallel ping with --processors is not done yet)

        # BUG FIX: the original read ``arguments.N``, which docopt never
        # sets (the option is ``--count=N``), so the shell command became
        # ``ping -c None <host>`` and the computed default was unused.
        count = int(arguments["--count"] or 3)

        clouds, names = Arguments.get_cloud_and_names("ping",
                                                      arguments,
                                                      variables)

        for name in names:
            ping = Shell.live(
                "ping -c {N} {name}".format(name=name, N=count))
            print(ping)

        # BUG FIX: the original ended the loop with ``else: return True``
        # (a for/else), so a successful run returned True instead of the
        # "" every other branch returns.
        return ""

    elif arguments.check:

        clouds, names = Arguments.get_cloud_and_names("check",
                                                      arguments,
                                                      variables)
        return ""

    elif arguments.status:

        clouds, names = Arguments.get_cloud_and_names("status",
                                                      arguments,
                                                      variables)
        return ""

    elif arguments.start:

        clouds, names = Arguments.get_cloud_and_names("start",
                                                      arguments,
                                                      variables)
        return ""

    elif arguments.stop:

        clouds, names = Arguments.get_cloud_and_names("stop",
                                                      arguments,
                                                      variables)
        return ""

    elif arguments.terminate:

        clouds, names = Arguments.get_cloud_and_names("terminate",
                                                      arguments,
                                                      variables)
        return ""

    elif arguments.delete:

        clouds, names = Arguments.get_cloud_and_names("delete",
                                                      arguments,
                                                      variables)
        return ""

    elif arguments.boot:

        print("boot the vm")

    elif arguments.list:
        # vm list [NAMES]
        #         [--cloud=CLOUDS]
        #         [--output=OUTPUT]
        #         [--refresh]
        #
        # If no clouds are given, derive them from the named vms; then
        # print the vms of each cloud (restricted to the given names).

        clouds, names = Arguments.get_cloud_and_names("list",
                                                      arguments,
                                                      variables)

        if arguments.NAMES is not None:
            names = Parameter.expand(arguments.NAMES)
            Console.error("NAMES, not yet implemented" + str(names))

            try:
                if arguments["--refresh"]:
                    pass
                    # find all clouds in db
                    # iterate over the clouds
                    # for each name in name queue, find it and add it to
                    # the cloud vm list
                    # for each cloud print the vms
                else:
                    pass
                    # find all clouds in db
                    # iterate over all clouds
                    # find the vm with the name
                    # add it to the cloud list
                    # for each cloud print the vms
            except Exception as e:
                VERBOSE(e)

            return ""
        else:
            try:
                if arguments["--refresh"]:
                    for cloud in clouds:
                        Console.ok("refresh " + cloud)

                        p = Provider(cloud)
                        vms = p.list()

                        order = p.p.output['vm']['order']  # not pretty
                        header = p.p.output['vm']['header']  # not pretty

                        print(Printer.flatwrite(vms,
                                                sort_keys=["cm.name"],
                                                order=order,
                                                header=header,
                                                output=arguments.output))
                else:
                    for cloud in clouds:
                        p = Provider(cloud)

                        # CLEANUP: dropped the unused ``kind`` local and
                        # the unused ``kind=`` kwarg of the format call.
                        collection = "{cloud}-node".format(cloud=cloud)

                        db = CmDatabase()
                        vms = db.find(collection=collection)

                        order = p.p.output['vm']['order']  # not pretty
                        header = p.p.output['vm']['header']  # not pretty

                        print(Printer.flatwrite(vms,
                                                sort_keys=["cm.name"],
                                                order=order,
                                                header=header,
                                                output=arguments.output))
            except Exception as e:
                VERBOSE(e)

            return ""

    elif arguments.info:
        # vm info [--cloud=CLOUD] [--output=OUTPUT]
        print("info for the vm")

        cloud, names = Arguments.get_cloud_and_names("info",
                                                     arguments,
                                                     variables)

    elif arguments.rename:
        print("rename the vm")

        v = Variables()
        cloud = v["cloud"]

        p = Provider(cloud)

        try:
            oldnames = Parameter.expand(arguments["OLDNAMES"])
            newnames = Parameter.expand(arguments["NEWNAMES"])
            force = arguments["--force"]

            if oldnames is None or newnames is None:
                Console.error("Wrong VMs specified for rename",
                              traceflag=False)
            elif len(oldnames) != len(newnames):
                Console.error("The number of VMs to be renamed is wrong",
                              traceflag=False)
            else:
                print(oldnames)
                print(newnames)
                # IDIOM: iterate the pairs directly instead of indexing
                # with range(len(...)).
                for oldname, newname in zip(oldnames, newnames):
                    if arguments["--dryrun"]:
                        Console.ok("Rename {} to {}".format(oldname,
                                                            newname))
                    else:
                        print(f"rename {oldname} -> {newname}")
                        p.rename(source=oldname, destination=newname)
                msg = "info. OK."
                Console.ok(msg)
        except Exception as e:
            Error.traceback(e)
            # TYPO FIX: was "Problem renameing instances"
            Console.error("Problem renaming instances", traceflag=True)

    elif arguments["ip"] and arguments["show"]:
        # vm ip show [NAMES]
        #            [--group=GROUP]
        #            [--cloud=CLOUD]
        #            [--output=OUTPUT]
        #            [--refresh]
        print("show the ips")

    elif arguments["ip"] and arguments["assign"]:
        # vm ip assign [NAMES] [--cloud=CLOUD]
        print("assign the public ip")

    elif arguments["ip"] and arguments["inventory"]:
        # vm ip inventory [NAMES]
        print("list ips that could be assigned")

    elif arguments.username:
        # vm username USERNAME [NAMES] [--cloud=CLOUD]
        print("sets the username for the vm")

    elif arguments.default:

        print("sets defaults for the vm")

    elif arguments.run:
        # vm run [--name=NAMES] [--username=USERNAME] [--dryrun] COMMAND
        pass

    elif arguments.script:
        # vm script [--name=NAMES] [--username=USERNAME] [--dryrun] SCRIPT
        pass

    elif arguments.resize:
        # vm resize [NAMES] [--size=SIZE]
        pass

    elif arguments.ssh:
        # vm ssh [NAMES] [--username=USER]
        #        [--quiet]
        #        [--ip=IP]
        #        [--key=KEY]
        #        [--command=COMMAND]
        #        [--modify-knownhosts]
        print("ssh the vm")

    elif arguments.console:
        # vm console [NAME] [--force]
        names = Arguments.get_names(arguments, variables)

        for name in names:
            # r = vm.console(name,force=argument.force)
            Console.msg("{label} {name}".format(label="console", name=name))
        # CONSISTENCY: return "" like the other branches (was a bare return)
        return ""

    elif arguments.wait:
        # vm wait [--cloud=CLOUD] [--interval=SECONDS]
        print("waits for the vm till its ready and one can login")
def do_sec(self, args, arguments):
    """
    ::

        Usage:
            sec rule list [--cloud=CLOUDS] [--output=OUTPUT]
            sec rule add RULE FROMPORT TOPORT PROTOCOL CIDR
            sec rule delete RULE [--cloud=CLOUD]
            sec group list [--cloud=CLOUDS] [--output=OUTPUT]
            sec group add GROUP RULES DESCRIPTION
            sec group delete GROUP [--cloud=CLOUD]
            sec group load [GROUP] [--cloud=CLOUD]
            sec list [--output=OUTPUT]
            sec load
            sec clear

        Options:
            --output=OUTPUT Specify output format, in one of the following:
                            table, csv, json, yaml, dict
                            [default: table].
            --cloud=CLOUD   Name of the IaaS cloud e.g. kilo,chameleon.
                            The clouds are defined in the yaml file.
                            If the name "all" is used for the cloud all
                            clouds will be selected.

        Arguments:
            RULE      The security group rule name
            GROUP     The label/name of the security group
            FROMPORT  Staring port of the rule, e.g. 22
            TOPORT    Ending port of the rule, e.g. 22
            PROTOCOL  Protocol applied, e.g. TCP,UDP,ICMP
            CIDR      IP address range in CIDR format, e.g., 129.79.0.0/16

        Examples:
            # sec load
            # sec group list
            # sec group add my_new_group webapp 8080 8080 tcp 0.0.0.0/0

        Bugs:
            # sec group list --cloud=chameleon
            # seg group delete my_group my_rule
            # sec group delete my_unused_group --cloud=kilo
            # sec group upload --cloud=kilo

        Description:

            Database commands:

                sec clear
                    removes all rules and groups from the database

                sec load
                    loads some default security groups and rules in the
                    database

                sec clear
                    deletes all security groups and rules in the database

                sec rule list [--output=OUTPUT]
                    lists all security groups and rules in the database

                sec rule add RULE FROMPORT TOPORT PROTOCOL CIDR
                    adds a security rule with the given group and the
                    details of the security rules

                sec group add GROUP RULES DESCRIPTION
                    adds a security group with the given group and the
                    details of the security groups

                sec rule delete RULE
                    deletes the rule form the database

                sec group delete GROUP
                    deletes the group form the database

            Cloud commands:

                sec rule list --cloud=CLOUDS [--output=OUTPUT]
                    lists all security rules in the specified cloud

                sec group list --cloud=CLOUDS [--output=OUTPUT]
                    lists all security groups in the specified cloud

                sec rule delete RULE --cloud=CLOUD
                    deletes the rule form the cloud

                sec group delete GROUP [--cloud=CLOUD]
                    deletes the group from the cloud

                sec load GROUP --cloud=CLOUD
                    uploads the group to the cloud with all its rules
    """
    map_parameters(arguments, 'cloud', 'output', 'name')

    # Database-backed collections of rules and groups.
    rules = SecgroupRule()
    groups = Secgroup()

    def Print(kind, entries):
        """Print entries with the column layout registered for kind.

        FIX: the parameter was named ``list`` (shadowing the builtin) and
        a dead ``if kind == "group": output = ""`` branch would have
        raised a TypeError on the subscript below; ``Print`` is only ever
        called with "all", "secgroup" or "secrule", so the layout always
        comes from groups.output.
        """
        output = groups.output

        print(Printer.write(entries,
                            sort_keys=output[kind]['sort_keys'],
                            order=output[kind]['order'],
                            header=output[kind]['header'],
                            output=arguments.output))

    def list_all():
        # Join every group with its rules into flat records for printing.
        data = []
        group_entries = groups.list()

        for group_entry in group_entries:
            group_name = group_entry['name']
            for rule_name in group_entry['rules']:
                try:
                    rule_entry = rules.list(name=rule_name)[0]
                    rule_entry['rule'] = rule_name
                    rule_entry['group'] = group_name
                    data.append(rule_entry)
                except Exception:
                    # best effort: skip rules that are missing from the db
                    # (was a bare except, which also swallowed SystemExit)
                    pass

        Print("all", data)

    if (arguments.load and not arguments.group) or \
        (arguments.load and arguments.group and not arguments.GROUP):

        # sec load : seed the database with the example groups/rules
        examples = SecgroupExamples()
        examples.load()
        list_all()
        return ""

    elif arguments.load and arguments.group and arguments.cloud:
        # sec group load [GROUP] [--cloud=CLOUD]
        provider = Provider(name=arguments.cloud)
        provider.upload_secgroup(name=arguments.GROUP)
        return ""

    elif arguments.list and not arguments.rule and not arguments.group:
        # sec list : print groups and rules from the database
        found = groups.list()
        for entry in found:
            group_rules = entry['rules']
            if isinstance(group_rules, list):
                entry['rules'] = ', '.join(group_rules)
        Print("secgroup", found)

        found = rules.list()
        Print("secrule", found)

    elif arguments.group and arguments.delete:
        if arguments.cloud:
            clouds = Parameter.expand(arguments.cloud)
            for cloud in clouds:
                print(f"cloud {cloud}")
                provider = Provider(name=cloud)
                r = provider.remove_secgroup(name=arguments.GROUP)
        else:
            groups.remove(arguments.GROUP)

    elif (arguments.group or arguments.rule) and arguments.list and \
        arguments.cloud:

        clouds = Parameter.expand(arguments.cloud)
        # ROBUSTNESS: ``not clouds`` also covers None, which the previous
        # ``len(clouds) == 0`` would have crashed on.
        if not clouds:
            variables = Variables()
            cloudname = variables['cloud']
            clouds = [cloudname]

        for cloud in clouds:
            print(f"cloud {cloud}")
            provider = Provider(name=cloud)
            cloud_groups = provider.list_secgroups()

            if arguments.output == 'table':
                result = []
                for group in cloud_groups:
                    if cloud == "aws":
                        # AWS nests the rules under IpPermissions;
                        # flatten them so they print like openstack rules.
                        for rule in group['IpPermissions']:
                            rule['name'] = group['GroupName']
                            rule['direction'] = "Inbound"
                            if rule['UserIdGroupPairs']:
                                rule['groupId'] = \
                                    rule['UserIdGroupPairs'][0]['GroupId']
                            if rule['IpRanges']:
                                rule['ipRange'] = rule['IpRanges'][0][
                                    'CidrIp']
                            result.append(rule)
                    else:
                        for rule in group['security_group_rules']:
                            rule['name'] = group['name']
                            result.append(rule)
                cloud_groups = result

            provider.p.Print(
                cloud_groups,
                output=arguments.output,
                kind="secrule",
            )
        return ""

    elif arguments.group and arguments.list:
        found = groups.list()
        for entry in found:
            group_rules = entry['rules']
            if isinstance(group_rules, list):
                entry['rules'] = ', '.join(group_rules)
        Print("secgroup", found)
        return ""

    elif arguments.rule and arguments.list:
        found = rules.list()
        Print("secrule", found)
        return ""

    elif arguments.rule and arguments.add:
        # sec rule add RULE FROMPORT TOPORT PROTOCOL CIDR
        rules = SecgroupRule()
        rules.add(name=arguments.RULE,
                  ports=f"{arguments.FROMPORT}:{arguments.TOPORT}",
                  protocol=arguments.PROTOCOL,
                  ip_range=arguments.CIDR)
        return ""

    elif arguments.group and arguments.add:
        # sec group add GROUP RULES DESCRIPTION
        group = Secgroup()
        group.add(name=arguments.GROUP,
                  rules=arguments.RULES,
                  description=arguments.DESCRIPTION)
        return ""

    elif arguments.list:
        found = rules.list()
        Print("secrule", found)
        return ""

    elif arguments.clear:
        groups.clear()
        rules.clear()
        return ""

    return ""
def do_host(self, args, arguments):
    """
    ::

        Usage:
            host scp NAMES SOURCE DESTINATION [--dryrun]
            host ssh NAMES COMMAND [--dryrun] [--output=FORMAT]
            host config NAMES [IPS] [--user=USER] [--key=PUBLIC]
            host check NAMES [--user=USER] [--key=PUBLIC]
            host key create NAMES [--user=USER] [--dryrun] [--output=FORMAT]
            host key list NAMES [--output=FORMAT]
            host key gather NAMES [--authorized_keys] [FILE]
            host key scatter USERNAME NAMES FILE

        This command does some useful things.

        Arguments:
            FILE   a file name

        Options:
            --dryrun   shows what would be done but does not execute
            --output=FORMAT  the format of the output

        Description:

            host scp NAMES SOURCE DESTINATION

              TBD

            host ssh NAMES COMMAND

              runs the command on all specified hosts
              Example:
                   ssh red[01-10] \"uname -a\"

            host key create NAMES
              create a ~/.ssh/id_rsa and id_rsa.pub on all hosts specified
              Example:
                  ssh key create "red[01-10]"

            host key list NAMES

              list all id_rsa.pub keys from all hosts specifed
               Example:
                   ssh key list red[01-10]

            host key gather HOSTS FILE

              gathers all keys from file FILE including the one from
              localhost.

                  ssh key gather "red[01-10]" keys.txt

            host key scatter USERNAME HOSTS FILE

              copies all keys from file FILE to authorized_keys on all hosts,
              but also makes sure that the users ~/.ssh/id_rsa.pub key is in
              the file.

              1) adds ~/.id_rsa.pub to the FILE only if its not already in it
              2) removes all duplicated keys

              Example:
                  ssh key scatter pi "red[01-10]" ~/keys.txt

            host key scp NAMES FILE

              copies all keys from file FILE to authorized_keys on all hosts
              but also makes sure that the users ~/.ssh/id_rsa.pub key is in
              the file and removes duplicates, e.g. it calls fix before
              upload

              Example:
                  ssh key list red[01-10] > pubkeys.txt
                  ssh key scp red[01-10] pubkeys.txt

            host config NAMES IPS [--user=USER] [--key=PUBLIC]

              generates an ssh config file tempalte that can be added to
              your .ssh/config file

              Example:
                  cms host config "red,red[01-03]" "198.168.1.[1-4]" --user=pi

            host check NAMES [--user=USER] [--key=PUBLIC]

              This command is used to test if you can login to the specified
              hosts. It executes the hostname command and compares it.
              It provides a table with a sucess column

              cms host check "red,red[01-03]"

                  +-------+---------+--------+
                  | host  | success | stdout |
                  +-------+---------+--------+
                  | red   | True    | red    |
                  | red01 | True    | red01  |
                  | red02 | True    | red02  |
                  | red03 | True    | red03  |
                  +-------+---------+--------+
    """

    def _print(results):
        # Print the per-host result records either as a table (default)
        # or with pprint for any other requested format.
        arguments.output = arguments.output or 'table'
        if arguments.output == 'table':
            print(Printer.write(results,
                                order=['host', 'success', 'stdout']))
        else:
            pprint(results)

    map_parameters(arguments, 'dryrun', 'output', 'user')
    dryrun = arguments.dryrun

    if dryrun:
        VERBOSE(arguments)

    if arguments.scp and not arguments.key:
        # host scp NAMES SOURCE DESTINATION
        destinations = Parameter.expand(arguments.DESTINATION)
        source = arguments.SOURCE
        # NOTE(review): results_key is never used or printed — the scp
        # outcome is silently discarded; confirm whether a _print call
        # is missing here.
        results_key = Host.scp(source, destinations, output="lines")

    elif arguments.ssh:
        # host ssh NAMES COMMAND : run COMMAND on every expanded host
        names = Parameter.expand(arguments.NAMES)
        # print (names)
        results = Host.ssh(hosts=names, command=arguments.COMMAND)
        _print(results)

    elif arguments.key and arguments.create:
        # host key create NAMES : generate an ssh keypair on each host
        Host.ssh_keygen(hosts=arguments.NAMES,
                        username=arguments.user,
                        dryrun=dryrun)
        #_print(results)

    elif arguments.key and arguments.list:
        # host key list NAMES : cat each host's public key in parallel
        names = Parameter.expand(arguments.NAMES)

        jobSet = JobSet("key_list", executor=JobSet.ssh)
        command = "cat .ssh/id_rsa.pub"

        for host in names:
            jobSet.add({"name": host, "host": host, "command": command})

        # one parallel job per host
        jobSet.run(parallel=len(names))

        #results = Host.ssh(hosts=names,
        #                   command='cat .ssh/id_rsa.pub',
        #                   username=arguments.user)
        jobSet.Print()

    elif arguments.key and arguments.gather:
        # host key gather NAMES [FILE] : collect all public keys; write
        # them to FILE if given, otherwise print to stdout.
        output = Host.gather_keys(
            username=arguments.user,
            hosts=arguments.NAMES,
            filename="~/.ssh/id_rsa.pub",
            key="~/.ssh/id_rsa",
            processors=3,
            dryrun=False)

        if arguments.FILE:
            filename = path_expand(arguments.FILE)
            directory = os.path.dirname(filename)
            # create the parent directory only when the path has one
            if directory:
                Shell.mkdir(directory)
            with open(filename, "w") as f:
                f.write(output)
        else:
            print(output)

    elif arguments.key and arguments.scatter:
        # host key scatter USERNAME NAMES FILE : push FILE to every
        # host's ~/.ssh/authorized_keys
        names = arguments.NAMES
        file = arguments.get("FILE")

        if not os.path.isfile(file):
            Console.error("The file does not exist")
            return ""

        Host.put(hosts=names,
                 source=file,
                 username=arguments.USERNAME,
                 destination=".ssh/authorized_keys")
        #_print(result)

    elif arguments.config:
        # host config NAMES [IPS] : emit an ssh config template
        key = arguments.key or "~/.ssh/id_rsa.pub"

        result = Host.config(hosts=arguments.NAMES,
                             ips=arguments.IPS,
                             username=arguments.user,
                             key=key)
        print(result)

    elif arguments.check:
        # host check NAMES : login to each host, run hostname, and mark
        # success when the reported hostname matches the expected one
        key = arguments.key or "~/.ssh/id_rsa.pub"

        result = Host.check(hosts=arguments.NAMES,
                            username=arguments.user,
                            key=key)
        for entry in result:
            entry['success'] = entry['stdout'] == entry['host']
        _print(result)

    return ""
def do_check(self, args, arguments):
    """
    ::

       Usage:
         check [KEYWORDS...] [--output=OUTPUT]

       Options:
          --output=OUTPUT   the output format [default: table]

       Description:

            checks if some programs are available to cms in the system.
            This includes

              mongodb
              ssh

            These can also be passed as optional keywords
    """
    map_parameters(arguments, "output")
    variables = Variables()
    arguments.output = Parameter.find("output",
                                      arguments,
                                      variables,
                                      "table")

    # Programs to probe; default to the mongo tool chain.
    keywords = arguments.KEYWORDS or ['mongo', "mongod", "mongoimport"]

    def check_ssh():
        # A working local ssh does not report "Connection refused";
        # -v output is captured by Shell.run together with stdout.
        cmd = "ssh " \
              "-o StrictHostKeyChecking=no " \
              "-o UserKnownHostsFile=/dev/null " \
              "-v localhost date"
        r = Shell.run(cmd)
        return "Connection refused" not in r

    def get_info(shell_command):
        """Return (path, version string) for the given program."""
        # TYPO FIX: the displayed fallback was "unkown".
        v = "unknown"
        path = Shell.which(shell_command)
        if shell_command == "ssh":
            # ssh prints its version with -V, not --version
            v = Shell.run(f"{shell_command} -V")
        elif path and len(path) > 0:
            try:
                v = Shell.run(f"{shell_command} --version")
                # strip the per-tool banner down to the bare version
                if shell_command.endswith("mongo"):
                    v = v.splitlines()[0].replace(
                        "MongoDB shell version ", "")
                elif shell_command.endswith("mongod"):
                    v = v.splitlines()[0].replace("db version ", "")
                elif shell_command.endswith("mongoimport"):
                    v = v.splitlines()[0].replace(
                        "mongoimport version: ", "")
            except Exception:
                # was a bare except; keep the best-effort fallback
                v = "unknown"
        return path, v

    config = Config()
    try:
        machine = sys.platform
        mongo_path = config[
            f"cloudmesh.data.mongo.MONGO_DOWNLOAD.{machine}.MONGO_HOME"]
    except Exception:
        # no cloudmesh-managed mongo configured for this platform
        mongo_path = None

    data = {}

    for keyword in keywords:
        #
        # probe the system-installed program
        #
        path, version_string = get_info(keyword)
        entry = {
            'system': {
                'name': keyword,
                'path': path,
                'version': version_string
            }
        }
        data[keyword] = entry

        #
        # probe the cloudmesh-managed mongo installation
        #
        # BUG FIX: the original condition was
        #   if "mongo" in ['mongo', 'mongod', 'mongoimport']:
        # which is constant-True, so the mongo-home probe ran for EVERY
        # keyword.  The intent is to restrict it to the mongo tools.
        if keyword in ('mongo', 'mongod', 'mongoimport'):
            if mongo_path:
                path = str(Path(path_expand(mongo_path)) / "bin" / keyword)
                p, v = get_info(path)
                try:
                    entry = {
                        'cloudmesh': {
                            'name': keyword,
                            'path': path,
                            'version': v
                        }
                    }
                except Exception:
                    Console.error(f"{keyword}")
                data[keyword].update(entry)

    # ssh gets an extra "enabled" flag from an actual connect attempt
    path, v = get_info('ssh')
    data['ssh'] = {
        'system': {
            'name': 'ssh',
            'path': path,
            'version': v.strip(),
            'enabled': check_ssh()
        }
    }

    #
    # probe ssh commands
    #
    for c in ["ssh-keygen", "ssh-add", "ssh-agent"]:
        data[c] = {
            'system': {
                'name': c,
                'path': Shell.which(c),
            }
        }

    if len(data) > 0:
        banner("ssh, mongo, mongod, mongoimport")
        print(json.dumps(data, indent=2))

    banner("os.environ")
    for attribute in os.environ:
        print(attribute, os.environ[attribute])

    banner("Shell.run")
    for c in ["echo $0", "echo $SHELL"]:
        try:
            r = Shell.run(c).strip()
        except Exception:
            r = 'error'
        print(f"Shell.run('{c}')", r)

    return ""
def do_flavor(self, args, arguments):
    """
    ::

        Usage:
            flavor list [NAMES] [--cloud=CLOUD] [--refresh] [--output=OUTPUT] [--query=QUERY]

        Options:
            --output=OUTPUT  the output format [default: table]
            --cloud=CLOUD    the cloud name
            --refresh        refreshes the data before displaying it

        Description:

            This lists out the flavors present for a cloud

        Examples:
            cm flavor list --refresh
            cm flavor list
            cm flavor list --output=csv
            cm flavor list 58c9552c-8d93-42c0-9dea-5f48d90a3188 --refresh

            please remember that a uuid or the flavor name can be used to
            identify a flavor.

            cms flavor list --refresh --query=\'{\"a\": \"b\"}\'

            OpenStack Query Example:
                cms flavor list --refresh --query=\'{\"minDisk\": \"80\"}\'
                cms flavor list --refresh --query=\'{\"name\": \"m1.large\"}\'

                supported query parameters for OpenStack:

                    min_disk
                    min_ram
                    name
    """
    map_parameters(arguments, "query", "refresh", "cloud", "output")

    variables = Variables()

    arguments.output = Parameter.find("output",
                                      arguments,
                                      variables,
                                      "table")
    arguments.refresh = Parameter.find_bool("refresh",
                                            arguments,
                                            variables)

    if arguments.list and arguments.refresh:
        # fetch live flavor data from each selected cloud
        clouds, names = Arguments.get_cloud_and_names("list",
                                                      arguments,
                                                      variables)

        for cloud in clouds:
            print(f"cloud {cloud} query={arguments.query}")
            provider = Provider(name=cloud)
            if arguments.query is not None:
                # SECURITY NOTE: eval() executes arbitrary Python from
                # the command line.  Tolerable only because this runs in
                # the operator's own shell; consider ast.literal_eval or
                # json.loads if this input can ever come from elsewhere.
                query = eval(arguments.query)
                flavors = provider.flavors(**query)
            else:
                flavors = provider.flavors()
            provider.Print(flavors,
                           output=arguments.output,
                           kind="flavor")

        return ""

    elif arguments.list:
        # serve the flavor list from the local database cache
        clouds, names = Arguments.get_cloud_and_names("list",
                                                      arguments,
                                                      variables)

        try:
            for cloud in clouds:
                if arguments.output in ["table"]:
                    print(f"List {cloud}")
                provider = Provider(name=cloud)
                db = CmDatabase()
                flavors = db.find(collection=f"{cloud}-flavor")
                provider.Print(flavors,
                               output=arguments.output,
                               kind="flavor")
        except Exception as e:
            VERBOSE(e)

        return ""
def do_diagram(self, args, arguments):
    """
    ::

      Usage:
          diagram set CLUSTER --hostname=NAMES
          diagram set CLUSTER NAME ATTRIBUTE VALUE
          diagram rack CLUSTER [--output=FORMAT] [-n]
          diagram net CLUSTER [--output=FORMAT] [-n]

      This command produces some default network and rack diagrams
      for a small cluster setup.

      Arguments:
          FILE   a file name
          --output=FORMAT  THe output format, one of svg, png, pdf, gif
                           The default is svg

      Options:
          -f      specify the file
          -n      no preview, just save to file

      Example:

          Installation:
              pip install cloudmesh-diagram

          Create a rack diagram:
              cms diagram set d --hostname="red[00-04]"
              cms diagram set d red01 rack.color blue
              cms diagram set d red02 net.color red
              cms diagram rack d
              cms diagram net d
              cms diagram net d --output=png -n
    """
    map_parameters(arguments, 'hostname', 'output')
    # -n suppresses the preview; view is its inverse
    arguments.view = not arguments["-n"]
    arguments.output = arguments.output or "svg"

    cluster = arguments.CLUSTER

    if arguments.set and arguments.hostname:
        # create a fresh diagram model from a hostlist expression and
        # persist it under the cluster name
        diagram = Diagram(Parameter.expand(arguments.hostname))
        diagram.save(cluster)

    elif arguments.set and arguments.NAME:
        # update a single ATTRIBUTE=VALUE on one node of a saved diagram
        diagram = Diagram()
        diagram.load(cluster)
        diagram.set(arguments.NAME,
                    **{arguments.ATTRIBUTE: arguments.VALUE})
        diagram.save(cluster)

    elif arguments.rack:
        # render the rack view and optionally open a preview
        label = f"{arguments.CLUSTER}-rack"
        diagram = Diagram()
        diagram.load(cluster)
        diagram.render(kind="rack")
        diagram.save_diagram(label)
        diagram.saveas(label, kind="rack", output=arguments.output)
        if arguments.view:
            diagram.view(label, output=arguments.output)

    elif arguments.net:
        # render the network (bridge) view and optionally open a preview
        label = f"{arguments.CLUSTER}-net"
        diagram = Diagram()
        diagram.load(cluster)
        diagram.render(kind="bridge")
        diagram.save_diagram(label)
        diagram.saveas(label, kind="net", output=arguments.output)
        if arguments.view:
            diagram.view(label, output=arguments.output)

    return ""
def do_ssh(self, args, arguments):
    """
    ::

        Usage:
            ssh config list [--output=OUTPUT]
            ssh config add NAME IP [USER] [KEY]
            ssh config delete NAME

        Arguments:
          NAME        Name or ip of the machine to log in
          list        Lists the machines that are registered and
                      the commands to login to them
          PARAMETERS  Register the resource and add the given
                      parameters to the ssh config file.  if the
                      resource exists, it will be overwritten. The
                      information will be written in ~/.ssh/config
          USER        The username for the ssh resource
          KEY         The location of the public key used for
                      authentication to the host

        Options:
           --output=OUTPUT   the format in which this list is given
                             formats includes cat, table, json, yaml,
                             dict. If cat is used, it is just printed as
                             is. [default: table]

        Description:
            ssh config list
                lists the hostnames that are present in the
                ~/.ssh/config file

            ssh config add NAME IP [USER] [KEY]
                registers a host in the ~/.ssh/config file
                Parameters are attribute=value pairs

            ssh config delete NAME
                deletes the named host from the ssh config file

        Examples:
             ssh config add blue 192.168.1.245 gregor

                 Adds the following to the ~/.ssh/config file

                 Host blue
                      HostName 192.168.1.245
                      User gregor
                      IdentityFile ~/.ssh/id_rsa.pub
    """
    map_parameters(arguments, "output")

    if arguments.config and arguments.list:
        # ssh config list [--output=OUTPUT]
        hosts = dict(ssh_config().hosts)

        # Make keywords uniform: different config files capitalize the
        # HostName / IdentityFile keys differently.
        for host in hosts:
            if "HostName" in hosts[host]:
                hosts[host]["Hostname"] = hosts[host]["HostName"]
                del hosts[host]["HostName"]
            if "Identityfile" in hosts[host]:
                hosts[host]["IdentityFile"] = hosts[host]["Identityfile"]
                del hosts[host]["Identityfile"]

        print(Printer.write(hosts,
                            order=['host',
                                   'Hostname',
                                   'User',
                                   'IdentityFile']))

    elif arguments.config and arguments.add:
        # ssh config add NAME IP [USER] [KEY]
        variables = Variables()
        user = Parameter.find("user", arguments, variables.dict())
        key = Parameter.find("key", arguments, variables.dict(),
                             {"key": "~/.ssh/id_rsa.pub"})
        # fall back to the last used vm when no NAME was given
        name = arguments.NAME or variables['vm']
        ip = arguments.IP

        hosts = ssh_config()
        if name in hosts.hosts:
            Console.error("Host already in ~/.ssh/config")
            return ""
        hosts.generate(host=name, hostname=ip, identity=key, user=user)

        # NOTE(review): a second, unreachable branch with the identical
        # condition (arguments.config and arguments.add, commented
        # "ssh host add NAME", running ssh-keyscan into known_hosts)
        # was removed here as dead code; "host add" is not in the Usage
        # grammar above.

    elif arguments.config and arguments.delete:
        # ssh config delete NAME
        name = arguments.NAME
        try:
            # BUG FIX: was os.system("ssh-keygen -R {name}") without the
            # f prefix, so the literal string "{name}" was passed to the
            # shell instead of the host name.
            os.system(f"ssh-keygen -R {name}")
        except:
            pass
        # NOTE(review): this calls delete on the ssh_config CLASS, not an
        # instance (other branches use ssh_config()); works only if
        # delete is a class/static method -- confirm against the API.
        ssh_config.delete(name)
def do_vm(self, args, arguments):
    """
    ::

        Usage:
            vm ping [NAMES] [--cloud=CLOUDS] [--count=N]
            vm check [NAMES] [--cloud=CLOUDS] [--username=USERNAME]
            vm status [NAMES] [--cloud=CLOUDS] [--output=OUTPUT]
            vm console [NAME] [--force]
            vm log [NAME] [--force]
            vm stop [NAMES] [--dryrun]
            vm start [NAMES] [--dryrun]
            vm terminate [NAMES] [--cloud=CLOUD] [--dryrun]
            vm delete [NAMES] [--cloud=CLOUD] [--dryrun]
            vm refresh [--cloud=CLOUDS]
            vm list [NAMES] [--cloud=CLOUDS] [--output=OUTPUT] [--refresh]
            vm boot [--n=COUNT]
                    [--name=VMNAMES]
                    [--cloud=CLOUD]
                    [--username=USERNAME]
                    [--image=IMAGE]
                    [--flavor=FLAVOR]
                    [--network=NETWORK]
                    [--public]
                    [--secgroup=SECGROUPs]
                    [--group=GROUPs]
                    [--key=KEY]
                    [--dryrun]
                    [-v]
            vm meta list [NAME]
            vm meta set [NAME] KEY=VALUE...
            vm meta delete [NAME] KEY...
            vm script [--name=NAMES]
                      [--username=USERNAME]
                      [--key=KEY]
                      [--dryrun]
                      [--dir=DESTINATION]
                      SCRIPT
            vm ip assign [NAMES]
                         [--cloud=CLOUD]
            vm ip show [NAMES]
                       [--group=GROUP]
                       [--cloud=CLOUD]
                       [--output=OUTPUT]
                       [--refresh]
            vm ip inventory [NAMES]
            vm ssh [NAMES] [--username=USER]
                   [--quiet]
                   [--ip=IP]
                   [--key=KEY]
                   [--command=COMMAND]
            vm put SOURCE DESTINATION [NAMES]
            vm get SOURCE DESTINATION [NAMES]
            vm rename [OLDNAMES] [NEWNAMES] [--force] [--dryrun]
            vm wait [--cloud=CLOUD] [--interval=INTERVAL] [--timeout=TIMEOUT]
            vm info [--cloud=CLOUD]
                    [--output=OUTPUT]
            vm username USERNAME [NAMES] [--cloud=CLOUD]
            vm resize [NAMES] [--size=SIZE]

        Arguments:
            OUTPUT         the output format
            COMMAND        positional arguments, the commands you want to
                           execute on the server(e.g. ls -a) separated by ';',
                           you will get a return of executing result instead of login to
                           the server, note that type in -- is suggested before
                           you input the commands
            NAME           server name. By default it is set to the name of last vm from database.
            NAMES          server name. By default it is set to the name of last vm from database.
            KEYPAIR_NAME   Name of the vm keypair to be used to create VM.
                           Note this is not a path to key.
            NEWNAMES       New names of the VM while renaming.
            OLDNAMES       Old names of the VM while renaming.

        Options:
            -v                         verbose, prints the dict at the end
            --output=OUTPUT            the output format
            -H --modify-knownhosts     Do not modify ~/.ssh/known_hosts file
                                       when ssh'ing into a machine
            --username=USERNAME        the username to login into the vm. If not
                                       specified it will be guessed
                                       from the image name and the cloud
            --ip=IP                    give the public ip of the server
            --cloud=CLOUD              give a cloud to work on, if not given, selected
                                       or default cloud will be used
            --count=COUNT              give the number of servers to start
            --detail                   for table, a brief version
                                       is used as default, use this flag to print
                                       detailed table
            --flavor=FLAVOR            give the name or id of the flavor
            --group=GROUP              give the group name of server
            --secgroup=SECGROUP        security group name for the server
            --image=IMAGE              give the name or id of the image
            --key=KEY                  specify a key to use, input a string which
                                       is the full path to the private key file
            --keypair_name=KEYPAIR_NAME Name of the vm keypair to
                                        be used to create VM.
                                        Note this is not a path to key.
            --user=USER                give the user name of the server that you want
                                       to use to login
            --name=NAME                give the name of the virtual machine
            --force                    rename/ delete vms without user's confirmation
            --command=COMMAND          specify the commands to be executed

        Description:
            commands used to boot, start or delete servers of a cloud

            vm default [options...]
                Displays default parameters that are set for vm boot either
                on the default cloud or the specified cloud.

            vm boot [options...]
                Boots servers on a cloud, user may specify flavor, image
                .etc, otherwise default values will be used, see how to set
                default values of a cloud: cloud help

            vm start [options...]
                Starts a suspended or stopped vm instance.

            vm stop [options...]
                Stops a vm instance .

            vm delete [options...]
                Delete servers of a cloud, user may delete a server by its
                name or id, delete servers of a group or servers of a cloud,
                give prefix and/or range to find servers by their names.
                Or user may specify more options to narrow the search

            vm floating_ip_assign [options...]
                assign a public ip to a VM of a cloud

            vm ip show [options...]
                show the ips of VMs

            vm ssh [options...]
                login to a server or execute commands on it

            vm list [options...]
                same as command "list vm", please refer to it

            vm status [options...]
                Retrieves status of last VM booted on cloud and displays it.

            vm refresh [--cloud=CLOUDS]
                this command refreshes the data for virtual machines,
                images and flavors for the specified clouds.

            vm ping [NAMES] [--cloud=CLOUDS] [--count=N] [--processors=PROCESSORS]
                 pings the specified virtual machines, while using at most N pings.
                 The ping is executed in parallel.
                 If names are specifies the ping is restricted to the given names in
                 parameter format. If clouds are specified, names that are not in
                 these clouds are ignored. If the name is set in the variables
                 this name is used.

            cms vm ssh --command=\"uname -a\"
                 executes the uname command on the last booted vm

            vm script [--name=NAMES]
                      [--username=USERNAME]
                      [--key=KEY]
                      [--dryrun]
                      [--dir=DESTINATION]
                      [--shell=SHELL]
                      SCRIPT

                The script command copies a shell script to the specified vms
                into the DESTINATION directory and than execute it. With
                SHELL you can set the shell for executing the command, this
                coudl even be a python interpreter. Examples for SHELL are
                /bin/sh, /usr/bin/env python

            vm put SOURCE DESTINATION [NAMES]
                puts the file defined by SOURCE into the DESINATION folder
                on the specified machines. If the file exists it is
                overwritten, so be careful.

            vm get SOURCE DESTINATION [NAMES]
                gets the file defined by SOURCE into the DESINATION folder
                on the specified machines. The SOURCE is on the remote
                machine. If one machine is specified, the SOURCE is the same
                name as on the remote machine. If multiple machines are
                specified, the name of the machine will be a prefix to the
                filename. If the filenames exists, they will be overwritten,
                so be careful.

        Tip:
            give the VM name, but in a hostlist style, which is very
            convenient when you need a range of VMs e.g. sample[1-3]
            => ['sample1', 'sample2', 'sample3']
            sample[1-3,18] => ['sample1', 'sample2', 'sample3', 'sample18']

        Quoting commands:
            cm vm login gregor-004 --command=\"uname -a\"

        Limitations:

            Azure: rename is not supported
    """
    # BUG FIX: the original list contained "'group' 'output'," -- a missing
    # comma, so implicit string concatenation produced the bogus key
    # 'groupoutput' instead of mapping 'group' and 'output' separately.
    map_parameters(arguments,
                   'active', 'cloud', 'command', 'dryrun', 'flavor', 'force',
                   'group', 'output', 'image', 'interval', 'timeout', 'ip',
                   'key', 'modify-knownhosts', 'n', 'name', 'public', 'quiet',
                   'secgroup', 'size', 'username', 'count', 'network',
                   'refresh')

    variables = Variables()
    database = CmDatabase()

    arguments.output = Parameter.find("output", arguments, variables, "table")
    arguments.refresh = Parameter.find_bool("refresh", arguments, variables)

    if arguments.meta and arguments.list:
        # vm meta list [NAME]
        name = arguments.NAME
        if arguments.NAME is None:
            name = variables['vm']
        if name is None:
            Console.error("No vm specified")

        # NOTE(review): the cloud is hard coded to chameleon here, the
        # Parameter.find lookup is commented out in the original.
        cloud = "chameleon"
        # cloud = Parameter.find(arguments, variables)
        print(f"vm metadata for {name} on {cloud}")

        provider = Provider(name=cloud)
        r = provider.get_server_metadata(name)
        print(r)

    elif arguments.meta and arguments.set:
        # vm meta set [NAME] KEY=VALUE...
        metadata = {}
        pairs = arguments['KEY=VALUE']
        for pair in pairs:
            key, value = pair.split("=", 1)
            metadata[key] = value

        name = arguments.NAME
        if arguments.NAME is None:
            name = variables['vm']
        if name is None:
            Console.error("No vm specified")

        cloud = "chameleon"
        # cloud = Parameter.find(arguments, variables)
        print(f"cloud {cloud} {name}")

        provider = Provider(name=cloud)
        provider.set_server_metadata(name, **metadata)
        r = provider.get_server_metadata(name)
        pprint(r)

    elif arguments.meta and arguments.delete:
        # vm meta delete [NAME] KEY...
        keys = arguments['KEY']

        name = arguments.NAME
        if arguments.NAME is None:
            name = variables['vm']
        if name is None:
            Console.error("No vm specified")

        cloud = "chameleon"
        # cloud = Parameter.find(arguments, variables)
        print(f"cloud {cloud} {name}")

        provider = Provider(name=cloud)
        for key in keys:
            provider.delete_server_metadata(name, key)
        r = provider.get_server_metadata(name)
        pprint(r)

    elif arguments.list and arguments.refresh:
        # vm list --refresh: query each provider live
        clouds, names = Arguments.get_cloud_and_names(
            "list", arguments, variables)

        for cloud in clouds:
            print(f"cloud {cloud}")
            provider = Provider(name=cloud)
            vms = provider.list()
            provider.Print(vms, output=arguments.output, kind="vm")

        return ""

    elif arguments.list:
        # vm list: read cached vms from the database
        clouds, names = Arguments.get_cloud_and_names(
            "list", arguments, variables)

        try:
            for cloud in clouds:
                print(f"List {cloud}")
                p = Provider(cloud)
                collection = f"{cloud}-vm"
                db = CmDatabase()
                vms = db.find(collection=collection)
                p.Print(vms, output=arguments.output, kind="vm")
        except Exception as e:
            Console.error("Error in listing ", traceflag=True)
            VERBOSE(e)

        return ""

    elif arguments.ping:
        # vm ping [NAMES] [--cloud=CLOUDS] [--count=N]
        if arguments.NAMES:
            variables['vm'] = arguments.NAMES
        if arguments['--cloud']:
            variables['cloud'] = arguments['--cloud']
        clouds, names = Arguments.get_cloud_and_names(
            "status", arguments, variables)

        if arguments.count:
            count = int(arguments.count)
        else:
            count = 1

        def get_ips():
            # collect the public ips of the named vms from the database
            ips = []
            for cloud in clouds:
                cursor = database.db[f'{cloud}-vm']
                for name in names:
                    for node in cursor.find({'name': name}):
                        ips.append(node['ip_public'])
            ips = list(set(ips))
            pprint(ips)
            return ips

        ips = get_ips()

        if len(ips) == 0:
            # nothing cached: refresh the providers and retry once
            Console.warning("no public ip found.")
            for cloud in clouds:
                print(f"refresh for cloud {cloud}")
                provider = Provider(name=cloud)
                vms = provider.list()
            ips = get_ips()

        if len(ips) == 0:
            Console.error("No vms with public IPS found.")
            Console.error(" Make sure to use cms vm list --refresh")

        for ip in ips:
            result = Shell.ping(host=ip, count=count)
            banner(ip)
            print(result)
            print()

    elif arguments.check:
        raise NotImplementedError
        """
        vm check [NAMES] [--cloud=CLOUDS] [--username=USERNAME]
        """
        """
        THIS IS ALL WRONG AS PROVIDER DEPENDENT !!!

        if arguments.NAMES:
            variables['vm'] = arguments.NAMES
        if arguments['--cloud']:
            variables['cloud'] = arguments['--cloud']
        clouds, names = Arguments.get_cloud_and_names("status", arguments,
                                                      variables)

        for cloud in clouds:
            provider = Provider(cloud)
            params = {}

            params['key'] = \\
                provider.p.spec["credentials"]['EC2_PRIVATE_KEY_FILE_PATH'] + \\
                provider.p.spec["credentials"]['EC2_PRIVATE_KEY_FILE_NAME']

            params['username'] = arguments['--username']  # or get from db

            processors = arguments['--processors']
            if processors:
                params['processors'] = int(processors[0])

            # gets public ips from database
            public_ips = []
            cursor = database.db['{cloud}-vm']
            for name in names:
                for node in cursor.find({'name': name}):
                    public_ips.append(node['public_ips'])
            public_ips = [y for x in public_ips for y in x]

            Host.check(hosts=public_ips, **params)
        """

    elif arguments.status:
        if arguments.NAMES:
            variables['vm'] = arguments.NAMES
        if arguments['--cloud']:
            variables['cloud'] = arguments['--cloud']
        clouds, names = Arguments.get_cloud_and_names(
            "status", arguments, variables)

        # gets status from database
        for cloud in clouds:
            provider = Provider(cloud)
            status = []
            cursor = database.db[f'{cloud}-vm']
            print(cloud)
            for name in names:
                for node in cursor.find({'name': name}):
                    status.append(node)
            provider.Print(status, output=arguments.output, kind="status")
        return ""

    elif arguments.start:
        # TODO: not tested
        if arguments.NAMES:
            names = variables['vm'] = arguments.NAMES
        if arguments['--cloud']:
            variables['cloud'] = arguments['--cloud']
        clouds, names = Arguments.get_cloud_and_names(
            "stop", arguments, variables)
        cloud = clouds[0]
        print(cloud)
        print(names)

        for name in names:
            provider = Provider(cloud)
            if arguments['--dryrun']:
                print(f"start node {name}")
            else:
                vms = provider.start(name=name, cloud=cloud)
                provider.Print(vms, output=arguments.output, kind="vm")
        return ""

    elif arguments.stop:
        # TODO: not tested
        if arguments.NAMES:
            variables['vm'] = arguments.NAMES
        if arguments['--cloud']:
            variables['cloud'] = arguments['--cloud']
        clouds, names = Arguments.get_cloud_and_names(
            "stop", arguments, variables)

        for cloud in clouds:
            provider = Provider(cloud)
            if arguments['--dryrun']:
                Console.ok(f"Dryrun stop: "
                           f" {cloud}\n"
                           f" {names}"
                           f" {provider}")
            else:
                for name in names:
                    vms = provider.stop(name)
                    provider.Print(vms, output=arguments.output, kind="vm")

    elif arguments.terminate:
        # TODO: not tested
        if arguments.NAMES:
            variables['vm'] = arguments.NAMES
        if arguments['--cloud']:
            variables['cloud'] = arguments['--cloud']
        clouds, names = Arguments.get_cloud_and_names(
            "stop", arguments, variables)

        for cloud in clouds:
            provider = Provider(cloud)
            if arguments['--dryrun']:
                Console.ok(f"Dryrun terminate: "
                           f" {cloud}\n"
                           f" {names}"
                           f" {provider}")
            else:
                for name in names:
                    vms = provider.destroy(name)
                    provider.Print(vms, output=arguments.output, kind="vm")

    elif arguments.delete:
        if arguments.NAMES:
            variables['vm'] = arguments.NAMES
        if arguments['--cloud']:
            variables['cloud'] = arguments['--cloud']
        clouds, names = Arguments.get_cloud_and_names(
            "stop", arguments, variables)

        if names is not None:
            pass
        elif clouds is not None:
            # no names: delete every vm on the given clouds
            for cloud in clouds:
                provider = Provider(cloud)
                vms = provider.list()
                for vm in vms:
                    r = provider.destroy(name=vm)
            return ""
        else:
            return ""

        # names given: delete only the vms whose names match
        for cloud in clouds:
            provider = Provider(cloud)
            vms = provider.list()
            for vm in vms:
                name = vm["cm"]["name"]
                if name in names:
                    r = provider.destroy(name=name)

    # TODO: username, secgroup
    elif arguments.boot:
        # not everything works
        """
        vm boot
                [--name=VMNAMES]
                [--cloud=CLOUD]
                [--username=USERNAME]
                [--image=IMAGE]
                [--flavor=FLAVOR]
                [--network=NETWORK]
                [--public]
                [--secgroup=SECGROUP]
                [--key=KEY]
                [--group=GROUP]
                [--dryrun]
        """
        # VERBOSE(arguments)
        parameters = dotdict()

        names = Parameter.expand(arguments.name)

        cloud = Parameter.find("cloud", arguments, variables.dict())
        defaults = Config()[f"cloudmesh.cloud.{cloud}.default"]
        groups = Parameter.find("group", arguments, variables.dict(),
                                {"group": "default"})

        parameters = dotdict()
        parameters.group = groups
        for attribute in [
            "image", "username", "flavor", "key", "network", "secgroup"
        ]:
            parameters[attribute] = Parameter.find(attribute, arguments,
                                                   variables.dict(), defaults)

        if arguments.username is None:
            parameters.user = Image.guess_username(parameters.image)

        provider = Provider(name=cloud)
        parameters.secgroup = arguments.secgroup or "default"

        #
        # determine names
        #
        if names and arguments.n and len(names) > 1:
            Console.error(
                f"When using --n={arguments.n}, you can only specify one name"
            )
            return ""

        # cases
        #
        # only name  --name = "a[1,2]"
        # name and count  --name="a" --n=3, names must be of length 1
        # only count  --n=2 names are read from var
        # nothing, just use one vm

        _names = []
        if not names:
            if not arguments.n:
                count = 1
            else:
                count = int(arguments.n)
            for i in range(0, count):
                if names is None:
                    # generate names from the Name counter
                    n = Name()
                    n.incr()
                    name = str(n)
                else:
                    n = names[i]
                    name = str(n)
                _names.append(name)
            names = _names
        elif len(names) == 1 and arguments.n:
            name = names[0]
            for i in range(0, int(arguments.n)):
                _names.append(f"{name}-{i}")
            names = _names

        # pprint(parameters)

        for name in names:
            parameters.name = name

            if arguments['--dryrun']:
                banner("boot")
                pprint(parameters)
                Console.ok(f"Dryrun boot {name}: \n"
                           f" cloud={cloud}\n"
                           f" names={names}\n"
                           f" provider={provider}")
                print()
                for attribute in parameters:
                    value = parameters[attribute]
                    Console.ok(f" {attribute}={value}")
            else:
                # parameters.progress = len(parameters.names) < 2
                try:
                    vms = provider.create(**parameters)
                except TimeoutError:
                    Console.error(
                        f"Timeout during vm creation. There may be a problem with the cloud {cloud}"
                    )
                except Exception as e:
                    Console.error("create problem", traceflag=True)
                    print(e)
                    return ""

                # BUG FIX: was str(n) -- n is only bound on the
                # generated-name path, so an explicit --name raised a
                # NameError here. `name` is the loop variable and always
                # holds the vm just booted.
                variables['vm'] = str(name)
                if arguments["-v"]:
                    banner("Details")
                    pprint(vms)
        # provider.Print(arguments.output, "vm", vms)

    elif arguments.info:
        """
        vm info [--cloud=CLOUD] [--output=OUTPUT]
        """
        print("info for the vm")

        cloud, names = Arguments.get_cloud_and_names(
            "info", arguments, variables)
        raise NotImplementedError

    elif arguments.rename:
        raise NotImplementedError
        # Not tested
        print("rename the vm")

        v = Variables()
        cloud = v["cloud"]

        p = Provider(cloud)

        try:
            oldnames = Parameter.expand(arguments["OLDNAMES"])
            newnames = Parameter.expand(arguments["NEWNAMES"])
            force = arguments["--force"]

            if oldnames is None or newnames is None:
                Console.error("Wrong VMs specified for rename",
                              traceflag=False)
            elif len(oldnames) != len(newnames):
                Console.error("The number of VMs to be renamed is wrong",
                              traceflag=False)
            else:
                print(oldnames)
                print(newnames)
                for i in range(0, len(oldnames)):
                    oldname = oldnames[i]
                    newname = newnames[i]
                    if arguments["--dryrun"]:
                        Console.ok("Rename {} to {}".format(
                            oldname, newname))
                    else:
                        print(f"rename {oldname} -> {newname}")
                        p.rename(source=oldname, destination=newname)
            msg = "info. OK."
            Console.ok(msg)
        except Exception as e:
            Error.traceback(e)
            Console.error("Problem renaming instances", traceflag=True)

    elif arguments["ip"] and arguments["show"]:
        raise NotImplementedError
        print("show the ips")
        """
        vm ip show [NAMES]
               [--group=GROUP]
               [--cloud=CLOUD]
               [--output=OUTPUT]
               [--refresh]
        """

    elif arguments["ip"] and arguments["assign"]:
        raise NotImplementedError
        """
        vm ip assign [NAMES] [--cloud=CLOUD]
        """
        print("assign the public ip")

    elif arguments["ip"] and arguments["inventory"]:
        raise NotImplementedError
        """
        vm ip inventory [NAMES]
        """
        print("list ips that could be assigned")

    elif arguments.default:
        raise NotImplementedError
        print("sets defaults for the vm")

    elif arguments.script:
        raise NotImplementedError
        clouds, names = Arguments.get_cloud_and_names(
            "run", arguments, variables)
        username = arguments['--username']
        script = arguments.SCRIPT

        for cloud in clouds:
            provider = Provider(cloud)

            name_ips = {}
            cursor = database.db['{}-node'.format(cloud)]
            for name in names:
                for node in cursor.find({'name': name}):
                    name_ips[name] = node['public_ips']

            if arguments['--dryrun']:
                print("run script {} on vms: {}".format(script, names))
            else:
                provider.ssh(name_ips, username=username, script=script)

    elif arguments.username:
        raise NotImplementedError
        """
        vm username USERNAME [NAMES] [--cloud=CLOUD]
        """
        print("sets the username for the vm")

    elif arguments.resize:
        raise NotImplementedError
        """
        vm resize [NAMES] [--size=SIZE]
        """
        pass

    elif arguments.ssh:
        """
        vm ssh [NAMES] [--username=USER]
               [--quiet]
               [--ip=IP]
               [--key=KEY]
               [--command=COMMAND]
        """
        # VERBOSE(arguments)
        clouds, names, command = Arguments.get_commands(
            "ssh", arguments, variables)

        if arguments.command is None and len(names) > 1:
            Console.error("Interactive shell can only be done on one vm")
            return ""
        elif arguments.command is None and len(names) == 1:
            # interactive shell on a single vm
            name = names[0]
            cloud = clouds[0]
            cm = CmDatabase()
            try:
                vm = cm.find_name(name, "vm")[0]
            except IndexError:
                Console.error(f"could not find vm {name}")
                return ""
            # VERBOSE(vm)
            cloud = vm["cm"]["cloud"]
            provider = Provider(name=cloud)
            try:
                provider.ssh(vm=vm)
            except KeyError:
                # refresh the vm list once and retry
                vms = provider.list()
                provider.Print(vms, output=arguments.output, kind="vm")
                provider.ssh(vm=vm)
            return ""
        else:
            # run the command on all named vms
            if clouds is None or names is None or command is None:
                return ""
            for cloud in clouds:
                p = Provider(cloud)
                for name in names:
                    cm = CmDatabase()
                    try:
                        vm = cm.find_name(name, "vm")[0]
                    except IndexError:
                        Console.error(f"could not find vm {name}")
                        continue
                    r = p.ssh(vm=vm, command=command)
                    print(r)
            return ""

    elif arguments.console:
        # why is this not vm
        clouds, names, command = Arguments.get_commands(
            "ssh", arguments, variables)
        print(clouds)
        print(names)
        print(command)

        for cloud in clouds:
            p = Provider(cloud)
            for name in names:
                cm = CmDatabase()
                try:
                    vm = cm.find_name(name, "vm")[0]
                except IndexError:
                    Console.error(f"could not find vm {name}")
                    continue
                r = p.console(vm=vm)
                print(r)
        return ""

    elif arguments.log:
        # why is this not vm
        clouds, names, command = Arguments.get_commands(
            "ssh", arguments, variables)
        print(clouds)
        print(names)
        print(command)

        for cloud in clouds:
            p = Provider(cloud)
            for name in names:
                cm = CmDatabase()
                try:
                    vm = cm.find_name(name, "vm")[0]
                except IndexError:
                    Console.error(f"could not find vm {name}")
                    continue
                r = p.log(vm=vm)
                print(r)
        return ""

    elif arguments.wait:
        """
        vm wait [--cloud=CLOUD] [--interval=INTERVAL] [--timeout=TIMEOUT]
        """
        # why is this not vm
        clouds, names, command = Arguments.get_commands(
            "ssh", arguments, variables)

        for cloud in clouds:
            p = Provider(cloud)
            for name in names:
                cm = CmDatabase()
                try:
                    vm = cm.find_name(name, "vm")[0]
                except IndexError:
                    Console.error(f"could not find vm {name}")
                    continue
                r = p.wait(vm=vm,
                           interval=arguments.interval,
                           timeout=arguments.timeout)
                if r:
                    Console.ok("Instance available for SSH")
                else:
                    Console.error(
                        f"Instance unavailable after timeout of {arguments.timeout}"
                    )
                # print(r)
        return ""

    elif arguments.put:
        """
        vm put SOURCE DESTINATION [NAMES]
        """
        clouds, names, command = Arguments.get_commands(
            "ssh", arguments, variables)
        key = variables['key']
        source = arguments['SOURCE']
        destination = arguments['DESTINATION']

        for cloud in clouds:
            p = Provider(name=cloud)
            cm = CmDatabase()
            for name in names:
                try:
                    vms = cm.find_name(name, "vm")
                except IndexError:
                    Console.error(f"could not find vm {name}")
                    return ""
                # VERBOSE(vm)
                for vm in vms:
                    # find a public ip: prefer the cached value, fall back
                    # to asking the provider.
                    # BUG FIX: the original emitted the error and returned
                    # even when the provider fallback SUCCEEDED, so a vm
                    # without a cached ip could never be reached.
                    try:
                        ip = vm['public_ips']
                    except:
                        try:
                            ip = p.get_public_ip(name=name)
                        except:
                            Console.error(
                                f"could not find a public ip for vm {name}",
                                traceflag=True)
                            return

                    # get the username: guess it from the image name,
                    # fall back to the stored os_profile.
                    try:
                        # username not in vm...guessing
                        imagename = list(
                            cm.collection(cloud + '-image').find(
                                {'ImageId': vm['ImageId']}))[0]['name']
                        print(imagename)
                        user = Image.guess_username(image=imagename,
                                                    cloud=cloud)
                    except:
                        try:
                            user = vm['os_profile']['admin_username']
                        except:
                            Console.error(
                                f"could not find a valid username for "
                                f"{name}, try refreshing the image list",
                                traceflag=True)
                            return

                    cmd = f'scp -i {key} {source} {user}@{ip}:{destination}'
                    print(cmd)
                    os.system(cmd)

    # NOTE(review): "vm get" is in the Usage grammar but has no
    # implementation branch here -- it falls through silently.
    return ""
def do_batch(self, args, arguments):
    """
    ::

      Usage:
        batch job create
                         --name=NAME
                         --cluster=CLUSTER
                         --script=SCRIPT
                         --executable=EXECUTABLE
                         --destination=DESTINATION
                         --source=SOURCE
                         [--companion-file=COMPANION_FILE]
                         [--outfile-name=OUTPUT_FILE_NAME]
                         [--suffix=SUFFIX]
                         [--overwrite]
        batch job run [--name=NAMES] [--output=OUTPUT]
        batch job fetch [--name=NAMES]
        batch job remove [--name=NAMES]
        batch job clean [--name=NAMES]
        batch job set [--name=NAMES] PARAMETER=VALUE
        batch job list [--name=NAMES] [--depth=DEPTH]
        batch connection_test --job=JOB
        batch cluster list [--cluster=CLUSTERS] [--depth=DEPTH]
        batch cluster remove [--cluster=CLUSTERS]
        batch cluster set [--cluster=CLUSTERS] PARAMETER=VALUE

      Arguments:
          FILE   a file name
          INPUT_TYPE  tbd

      Options:
          -f      specify the file
          --depth=DEPTH   [default: 1]
          --output=OUTPUT    [default: table]

      Description:

        This command allows to submit batch jobs to queuing systems hosted
        in an HBC center as a service directly form your commandline. We
        assume that a number of experiments are conducted with possibly
        running the script multiple times. Each experiment will save the
        batch script in its own folder.

        The output of the script can be saved in a destination folder. A
        virtual directory is used to coordinate all saved files.

        The files can be located due to the use of the virtual directory on
        multiple different data or file services

        Authentication to the Batch systems is done viw the underlaying HPC
        center authentication. We assume that the user has an account to
        submit on these systems.

        (SSH, 2 factor, XSEDE-account) TBD.

      Experiments:

        experiments are jobs that can be run multiple times and create input
        and output file sin them

        cloudmesh:
          experiment:
            job:
              name: {cloudmesh.profile.user.name}-01
              directory: ~/experiment/{experiment.job.name}
              output:  {cloudmesh.experiment.job.name}/output
              input:  ~/experiment/{experiment.job.name}/input
              script: script.sh
              source ,,,
              destination: {cloudmesh.experiment.job.directory}

        - queue associates with server (cloud)
        - job could be run on queue and is associated with one or multiple
          servers
        - experiment is same as job, but gives some facility to run it
          multiple times

        I do not know what companion file is

      Examples:

         batch job run [--name=NAMES] [--output=OUTPUT]

            runs jobs with the given names

         LOTS OF DOCUMENTATION MISSING HERE

            [--companion-file=COMPANION_FILE]
            [--outfile-name=OUTPUT_FILE_NAME]
            [--suffix=SUFFIX] [--overwrite]
    """
    #
    # create slurm manager so it can be used in all commands
    #
    # BUG FIX: this instantiation was commented out in the original, so
    # every branch that uses slurm_manager raised a NameError.
    slurm_manager = SlurmCluster()  # debug=arguments["--debug"])

    # NOTE(review): hard-coded test values left in the original; kept to
    # preserve behavior, but they look like debug leftovers -- confirm.
    arguments["--cloud"] = "test"
    arguments["NAME"] = "fix"

    map_parameters(arguments,
                   "cloud",
                   "name",
                   "cluster",
                   "script",
                   "type",
                   "destination",
                   "source",
                   "output")

    # if not arguments.create
    #    find cluster name from Variables()
    #    if no cluster is defined look it up in yaml in batch default:
    #    if not defined there fail

    # clusters = Parameter.expand(arguments.cluster)
    # name = Parameters.expand[argumnets.name)
    # this will return an array of clusters and names of jobs and all
    # cluster job or cluster commands will be executed on them
    # see the vm

    # if active: False in the yaml file for the cluster this cluster is
    # not used and skipped.

    VERBOSE(arguments)
    variables = Variables()

    # do not use print but use ,Console.msg(), Console.error(), Console.ok()

    if arguments.tester:
        # NOTE(review): "tester" is not in the Usage grammar above, so this
        # branch is never reached via docopt -- confirm intent.
        print("running ... ")
        slurm_manager.tester()

    elif arguments.run and arguments.job:
        # batch job run [--name=NAMES] [--output=OUTPUT]

        # config = Config()["cloudmesh.batch"]

        names = Parameter.expand(arguments.name)

        # clouds, names = Arguments.get_cloud_and_names("refresh", arguments,
        #                                               variables)

        data = []
        for name in names:
            entry = SlurmCluster.job_specification()
            data.append(entry)
        '''
        data = {
            "cm": {
                "cloud": "karst_debug",
                "kind": "batch-job",
                "name": "job012",
            },
            "batch": {
                "source": "~/.cloudmesh/batch/dir",
                "destination": "~/.cloudmesh/dir/",
                "status": "running"
            }
        }'''

        try:
            raise NotImplementedError
        except Exception as e:
            Console.error("Haha", traceflag=True)

        pprint(data)

        print(Printer.flatwrite(
            data,
            order=["cm.name", "cm.kind", "batch.status"],
            header=["Name", "Kind", "Status"],
            output=arguments.output)
        )

        return ""

    # handling batch job create sample command c
    # cms batch job create --name newjob1 --cluster slurm-taito
    #     --script ./1_argsin_stdout.slurm --executable
    #     ./1_argsin_stdout_script.sh --destination /home/vafanda
    #     --source ~/tmp
    elif arguments.job and \
            arguments.create and \
            arguments.name and \
            arguments.cluster and \
            arguments.script and \
            arguments['--executable'] and \
            arguments.destination and \
            arguments.source:

        job_name = arguments.name
        cluster_name = arguments.cluster

        # validate that all referenced paths exist before creating the job
        script_path = Path(arguments.script)
        if not script_path.exists():
            raise FileNotFoundError
        executable_path = Path(arguments['--executable'])
        if not executable_path.exists():
            raise FileNotFoundError
        destination = Path(arguments.destination)
        if not destination.is_absolute():
            Console.error("destination path must be absolute",
                          traceflag=True)
            raise FileNotFoundError
        source = Path(arguments.source)
        if not source.exists():
            raise FileNotFoundError

        # NOTE(review): "experiment" is not mapped above and not in the
        # Usage grammar -- this lookup likely always yields None.
        if arguments.experiment is None:
            experiment_name = 'job' + self.suffix_generator()
        else:
            experiment_name = arguments.experiment + self.suffix_generator()

        if arguments.get("--companion-file") is None:
            companion_file = Path()
        else:
            companion_file = Path(arguments.get("--companion-file"))

        slurm_manager.create(job_name,
                             cluster_name,
                             script_path,
                             executable_path,
                             destination,
                             source,
                             experiment_name,
                             companion_file)

    elif arguments.remove:
        # NOTE(review): the Usage grammar defines --cluster / --name, but
        # these branches read CLUSTER_NAME / JOB_NAME keys -- verify the
        # docopt keys actually carry values here.
        if arguments.cluster:
            slurm_manager.remove("cluster", arguments.get("CLUSTER_NAME"))
        if arguments.job:
            slurm_manager.remove("job", arguments.get("JOB_NAME"))

    elif arguments.list:
        max_depth = 1 if arguments.get("DEPTH") is None \
            else int(arguments.get("DEPTH"))
        if arguments.get("clusters"):
            slurm_manager.list("clusters", max_depth)
        elif arguments.get("jobs"):
            slurm_manager.list("jobs", max_depth)

    elif arguments.set:
        if arguments.get("cluster"):
            cluster_name = arguments.get("CLUSTER_NAME")
            parameter = arguments.get("PARAMETER")
            value = arguments.get("VALUE")
            slurm_manager.set_param("cluster", cluster_name, parameter,
                                    value)
        if arguments.job:
            config_name = arguments.get("JOB_NAME")
            parameter = arguments.get("PARAMETER")
            value = arguments.get("VALUE")
            slurm_manager.set_param("job-metadata", config_name, parameter,
                                    value)

    elif arguments.start and arguments.job:
        job_name = arguments.get("JOB_NAME")
        slurm_manager.run(job_name)

    elif arguments.get("fetch"):
        job_name = arguments.get("JOB_NAME")
        slurm_manager.fetch(job_name)

    elif arguments.connection_test:
        slurm_manager.connection_test(arguments.job)

    elif arguments.clean:
        job_name = arguments.get("JOB_NAME")
        slurm_manager.clean_remote(job_name)
def do_aws(self, args, arguments):
    """
    ::

      Usage:
            vm ping [NAMES] [--cloud=CLOUDS] [--count=N] [--processors=PROCESSORS]
            vm check [NAMES] [--cloud=CLOUDS] [--username=USERNAME] [--processors=PROCESSORS]
            vm status [NAMES] [--cloud=CLOUDS]
            vm console [NAME] [--force]
            vm start [NAMES] [--cloud=CLOUD] [--parallel] [--processors=PROCESSORS] [--dryrun]
            vm stop [NAMES] [--cloud=CLOUD] [--parallel] [--processors=PROCESSORS] [--dryrun]
            vm terminate [NAMES] [--cloud=CLOUD] [--parallel] [--processors=PROCESSORS] [--dryrun]
            vm delete [NAMES] [--cloud=CLOUD] [--parallel] [--processors=PROCESSORS] [--dryrun]
            vm refresh [--cloud=CLOUDS]
            vm list [NAMES] [--cloud=CLOUDS] [--output=OUTPUT] [--refresh]
            vm boot [--name=VMNAMES] [--cloud=CLOUD] [--username=USERNAME] [--image=IMAGE] [--flavor=FLAVOR] [--public] [--secgroup=SECGROUPs] [--key=KEY] [--dryrun]
            vm boot [--n=COUNT] [--cloud=CLOUD] [--username=USERNAME] [--image=IMAGE] [--flavor=FLAVOR] [--public] [--secgroup=SECGROUPS] [--key=KEY] [--dryrun]
            vm run [--name=VMNAMES] [--username=USERNAME] [--dryrun] COMMAND
            vm script [--name=NAMES] [--username=USERNAME] [--dryrun] SCRIPT
            vm ip assign [NAMES] [--cloud=CLOUD]
            vm ip show [NAMES] [--group=GROUP] [--cloud=CLOUD], [--output=OUTPUT] [--refresh]
            vm ip inventory [NAMES]
            vm ssh [NAMES] [--username=USER] [--quiet] [--ip=IP] [--key=KEY] [--command=COMMAND] [--modify-knownhosts]
            vm rename [OLDNAMES] [NEWNAMES] [--force] [--dryrun]
            vm wait [--cloud=CLOUD] [--interval=SECONDS]
            vm info [--cloud=CLOUD] [--output=OUTPUT]
            vm username USERNAME [NAMES] [--cloud=CLOUD]
            vm resize [NAMES] [--size=SIZE]
            vm debug [NAMES]

      Arguments:
          OUTPUT   the output format
          COMMAND  positional arguments, the commands you want to execute
                   on the server(e.g. ls -a) separated by ';', you will
                   get a return of executing result instead of login to
                   the server, note that type in -- is suggested before
                   you input the commands
          NAME     server name. By default it is set to the name of last
                   vm from database.
          NAMES    server name. By default it is set to the name of last
                   vm from database.
          KEYPAIR_NAME   Name of the vm keypair to be used to create VM.
                         Note this is not a path to key.
          NEWNAMES       New names of the VM while renaming.
          OLDNAMES       Old names of the VM while renaming.

      Options:
          --output=OUTPUT       the output format [default: table]
          -H --modify-knownhosts Do not modify ~/.ssh/known_hosts file
                                when ssh'ing into a machine
          --username=USERNAME   the username to login into the vm. If not
                                specified it will be guessed from the
                                image name and the cloud
          --ip=IP               give the public ip of the server
          --cloud=CLOUD         give a cloud to work on, if not given,
                                selected or default cloud will be used
          --count=COUNT         give the number of servers to start
          --detail              for table, a brief version is used as
                                default, use this flag to print detailed
                                table
          --flavor=FLAVOR       give the name or id of the flavor
          --group=GROUP         give the group name of server
          --secgroup=SECGROUP   security group name for the server
          --image=IMAGE         give the name or id of the image
          --key=KEY             specify a key to use, input a string which
                                is the full path to the private key file
          --keypair_name=KEYPAIR_NAME Name of the vm keypair to be used
                                to create VM. Note this is not a path to
                                key.
          --user=USER           give the user name of the server that you
                                want to use to login
          --name=NAME           give the name of the virtual machine
          --force               rename/ delete vms without user's
                                confirmation
          --command=COMMAND     specify the commands to be executed
          --parallel            execute commands in parallel

      Description:
          commands used to boot, start or delete servers of a cloud

          vm default [options...]
              Displays default parameters that are set for vm boot either
              on the default cloud or the specified cloud.

          vm boot [options...]
              Boots servers on a cloud, user may specify flavor, image
              .etc, otherwise default values will be used, see how to set
              default values of a cloud: cloud help

          vm start [options...]
              Starts a suspended or stopped vm instance.

          vm stop [options...]
              Stops a vm instance .

          vm delete [options...]
              Delete servers of a cloud, user may delete a server by its
              name or id, delete servers of a group or servers of a cloud,
              give prefix and/or range to find servers by their names.
              Or user may specify more options to narrow the search

          vm floating_ip_assign [options...]
              assign a public ip to a VM of a cloud

          vm ip show [options...]
              show the ips of VMs

          vm ssh [options...]
              login to a server or execute commands on it

          vm list [options...]
              same as command "list vm", please refer to it

          vm status [options...]
              Retrieves status of last VM booted on cloud and displays it.

          vm refresh [--cloud=CLOUDS]
              this command refreshes the data for virtual machines,
              images and flavors for the specified clouds.

          vm ping [NAMES] [--cloud=CLOUDS] [--count=N] [--processors=PROCESSORS]
              pings the specified virtual machines, while using at most N
              pings. The ping is executed in parallel. If names are
              specifies the ping is restricted to the given names in
              parameter format. If clouds are specified, names that are
              not in these clouds are ignored. If the name is set in the
              variables this name is used.

      Tip:
          give the VM name, but in a hostlist style, which is very
          convenient when you need a range of VMs e.g.
              sample[1-3] => ['sample1', 'sample2', 'sample3']
              sample[1-3,18] => ['sample1', 'sample2', 'sample3', 'sample18']

      Quoting commands:
          cm vm login gvonlasz-004 --command=\"uname -a\"

      Limitations:

    """
    # NOTE(review): the 'vm ip show' usage line above contains a stray
    # comma after [--cloud=CLOUD]; docopt treats it as a literal token, so
    # that usage pattern likely never matches — confirm and remove it.

    # Copy the docopt '--xyz' entries to plain attributes (arguments.xyz)
    # so the branches below can use dotted access.
    map_parameters(arguments,
                   'active',
                   'cloud',
                   'command',
                   'dryrun',
                   'flavor',
                   'force',
                   'output',
                   'group',
                   'image',
                   'interval',
                   'ip',
                   'key',
                   'modify-knownhosts',
                   'n',
                   'name',
                   'public',
                   'quiet',
                   'secgroup',
                   'size',
                   'username')

    # VERBOSE.print(arguments, verbose=9)

    variables = Variables()

    # pprint(arguments)
    # pprint(variables)

    # Shared collaborators for all branches: the AWS provider and the
    # local cloudmesh database (collection 'aws-node' caches VM records).
    provider = Provider()
    database = CmDatabase()

    # ok, but not tested
    if arguments.refresh:
        """vm refresh [--cloud=CLOUDS]"""
        # Re-fetch vms, flavors and images into the local database.
        provider.list()
        provider.flavors()
        provider.images()

    # ok
    elif arguments.ping:
        """vm ping [NAMES] [--cloud=CLOUDS] [--count=N] [--processors=PROCESSORS]"""
        # cms aws ping t --cloud=aws --count=3 --processors=3
        if arguments.NAMES:
            variables['vm'] = arguments.NAMES
        clouds, names = Arguments.get_cloud_and_names(
            "ping", arguments, variables)
        params = {}
        count = arguments['--count']
        if count:
            params['count'] = int(count)
        processors = arguments['--processors']
        if processors:
            # NOTE(review): int(processors[0]) converts only the FIRST
            # character, so --processors=12 becomes 1; presumably this
            # should be int(processors) — confirm before changing.
            params['processors'] = int(processors[0])

        # gets public ips from database
        public_ips = []
        cursor = database.db['aws-node']
        for name in names:
            for node in cursor.find({'name': name}):
                public_ips.append(node['public_ips'])
        # flatten: each record stores a list of ips
        public_ips = [y for x in public_ips for y in x]
        # print(public_ips)

        Shell3.pings(ips=public_ips, **params)

    # ok
    elif arguments.check:
        """vm check [NAMES] [--cloud=CLOUDS] [--username=USERNAME] [--processors=PROCESSORS]"""
        # cms aws check t --cloud=aws --username=ubuntu --processors=3
        if arguments.NAMES:
            variables['vm'] = arguments.NAMES
        clouds, names = Arguments.get_cloud_and_names(
            "ping", arguments, variables)
        params = {}
        # private key path+name come from the provider credentials spec
        params['key'] = provider.p.spec["credentials"][
            'EC2_PRIVATE_KEY_FILE_PATH'] + provider.p.spec["credentials"][
            'EC2_PRIVATE_KEY_FILE_NAME']
        params['username'] = arguments['--username']  # or get from db
        processors = arguments['--processors']
        if processors:
            # NOTE(review): same first-character truncation as in ping.
            params['processors'] = int(processors[0])

        # gets public ips from database
        public_ips = []
        cursor = database.db['aws-node']
        for name in names:
            for node in cursor.find({'name': name}):
                public_ips.append(node['public_ips'])
        public_ips = [y for x in public_ips for y in x]

        Shell3.checks(hosts=public_ips, **params)

    # ok
    elif arguments.status:
        """vm status [NAMES] [--cloud=CLOUDS]"""
        # cms aws status t --cloud=aws
        if arguments.NAMES:
            variables['vm'] = arguments.NAMES
        clouds, names = Arguments.get_cloud_and_names(
            "status", arguments, variables)

        # gets status from database (cached 'state' field, not live)
        status = {}
        cursor = database.db['aws-node']
        for name in names:
            for node in cursor.find({'name': name}):
                status[name] = node['state']
        pprint(status)

    # ok
    elif arguments.start:
        """vm start [NAMES] [--cloud=CLOUD] [--parallel] [--processors=PROCESSORS] [--dryrun]"""
        # cms aws start t --parallel --processors=3
        if arguments.NAMES:
            variables['vm'] = arguments.NAMES
        clouds, names = Arguments.get_cloud_and_names(
            "start", arguments, variables)
        params = {}
        processors = arguments['--processors']
        # 'pool' = process-pool parallel execution, 'iter' = sequential
        if arguments['--parallel']:
            params['option'] = 'pool'
            if processors:
                params['processors'] = int(processors[0])
        else:
            params['option'] = 'iter'
        if arguments['--dryrun']:
            print("start nodes {}\noption - {}\nprocessors - {}".format(
                names, params['option'], processors))
        else:
            pprint(provider.start(names, **params))

    # ok
    elif arguments.stop:
        """vm stop [NAMES] [--cloud=CLOUD] [--parallel] [--processors=PROCESSORS] [--dryrun]"""
        # cms aws stop t --parallel --processors=2
        if arguments.NAMES:
            variables['vm'] = arguments.NAMES
        clouds, names = Arguments.get_cloud_and_names(
            "stop", arguments, variables)
        params = {}
        processors = arguments['--processors']
        if arguments['--parallel']:
            params['option'] = 'pool'
            if processors:
                params['processors'] = int(processors[0])
        else:
            params['option'] = 'iter'
        if arguments['--dryrun']:
            print("stop nodes {}\noption - {}\nprocessors - {}".format(
                names, params['option'], processors))
        else:
            vms = provider.stop(names, **params)
            # render the stopped vms with the provider's table layout
            order = provider.p.output['vm']['order']
            header = provider.p.output['vm']['header']
            print(
                Printer.flatwrite(vms, order=order, header=header,
                                  output='table'))

    # ok
    elif arguments.terminate:
        """vm terminate [NAMES] [--cloud=CLOUD] [--parallel] [--processors=PROCESSORS] [--dryrun]"""
        # cms aws terminate t --parallel --processors=2
        if arguments.NAMES:
            variables['vm'] = arguments.NAMES
        clouds, names = Arguments.get_cloud_and_names(
            "terminate", arguments, variables)
        params = {}
        processors = arguments['--processors']
        if arguments['--parallel']:
            params['option'] = 'pool'
            if processors:
                params['processors'] = int(processors[0])
        else:
            params['option'] = 'iter'
        if arguments['--dryrun']:
            print(
                "terminate nodes {}\noption - {}\nprocessors - {}".format(
                    names, params['option'], processors))
        else:
            pprint(provider.destroy(names, **params))

    # ok
    elif arguments.delete:
        """vm delete [NAMES] [--cloud=CLOUD] [--parallel] [--processors=PROCESSORS] [--dryrun]"""
        # delete is implemented with the same destroy call as terminate
        if arguments.NAMES:
            variables['vm'] = arguments.NAMES
        clouds, names = Arguments.get_cloud_and_names(
            "terminate", arguments, variables)
        params = {}
        processors = arguments['--processors']
        if arguments['--parallel']:
            params['option'] = 'pool'
            if processors:
                params['processors'] = int(processors[0])
        else:
            params['option'] = 'iter'
        if arguments['--dryrun']:
            print("delete nodes {}\noption - {}\nprocessors - {}".format(
                names, params['option'], processors))
        else:
            pprint(provider.destroy(names, **params))

    # TODO: username, secgroup
    elif arguments.boot:
        """
        vm boot [--name=VMNAMES] [--cloud=CLOUD] [--username=USERNAME]
                [--image=IMAGE] [--flavor=FLAVOR] [--public]
                [--secgroup=SECGROUPs] [--key=KEY] [--dryrun]
        vm boot [--n=COUNT] [--cloud=CLOUD] [--username=USERNAME]
                [--image=IMAGE] [--flavor=FLAVOR] [--public]
                [--secgroup=SECGROUPS] [--key=KEY] [--dryrun]
        """
        if arguments['--name']:
            # cms aws boot --name=t --cloud=aws --username=root --image=ami-08692d171e3cf02d6 --flavor=t2.micro --public --secgroup=group1 --key=aws_cert
            # cms aws boot --name=t --image=ubuntu/images/hvm-ssd/ubuntu-xenial-16.04-amd64-server-20190212 --flavor=t2.micro --key=aws_cert
            names = Parameter.expand(arguments['--name'])
        elif arguments['n']:
            # cms aws boot --n=2 --cloud=aws --username=root --image=ami-08692d171e3cf02d6 --flavor=t2.micro --public --secgroup=group1 --key=aws_cert
            n = int(arguments['n'])
            names = []
            for i in range(n):  # generate random names
                m = hashlib.blake2b(digest_size=8)
                m.update(str(datetime.utcnow()).encode('utf-8'))
                names.append(m.hexdigest())
        else:
            # NOTE(review): 'names' stays unbound on this path, so the
            # provider.create call below would raise NameError — a
            # return here is probably intended.
            print("please provide name or count to boot vm")

        # username = arguments['--username']
        image = arguments['--image']
        flavor = arguments['--flavor']
        params = {}
        public = arguments['--public']
        if public:
            params['ex_assign_public_ip'] = public
        secgroup = Parameter.expand(arguments['--secgroup'])
        if secgroup:
            params['ex_security_groups'] = secgroup
        key = arguments['--key']
        if key:
            params['ex_keyname'] = key
        if arguments['--dryrun']:
            print("""create nodes {}
image - {}
flavor - {}
assign public ip - {}
security groups - {}
keypair name - {}""".format(names, image, flavor, public, secgroup, key))
        else:
            order = provider.p.output['vm']['order']
            header = provider.p.output['vm']['header']
            vm = provider.create(names=names, image=image, size=flavor,
                                 **params)
            print(
                Printer.write(vm, order=order, header=header,
                              output='table'))

    # ok
    elif arguments.list:
        """vm list [NAMES] [--cloud=CLOUDS] [--output=OUTPUT] [--refresh]"""
        if arguments.NAMES:
            variables['vm'] = arguments.NAMES
        clouds, names = Arguments.get_cloud_and_names(
            "list", arguments, variables)
        params = {}
        params['order'] = provider.p.output['vm']['order']
        params['header'] = provider.p.output['vm']['header']
        params['output'] = 'table'
        if arguments['--refresh']:
            # refresh the cache first, then read from the database
            provider.list()
        if arguments.NAMES:
            vms = []
            for name in names:
                vms += database.find(collection='aws-node', name=name)
        else:
            vms = database.find(collection='aws-node')
        print(Printer.flatwrite(vms, **params))

    # TODO
    elif arguments.info:
        """vm info [--cloud=CLOUD] [--output=OUTPUT]"""
        print("functionality not implemented")

    # TODO
    elif arguments.rename:
        """vm rename [OLDNAMES] [NEWNAMES] [--force] [--dryrun]"""
        print("functionality not implemented")

    # TODO
    elif arguments.ip and arguments.show:
        """vm ip show [NAMES] [--group=GROUP] [--cloud=CLOUD] [--output=OUTPUT] [--refresh]"""
        clouds, names = Arguments.get_cloud_and_names(
            "ip", arguments, variables)
        pprint(get_publicIPs(names))

    # TODO
    elif arguments.ip and arguments.assign:
        """vm ip assign [NAMES] [--cloud=CLOUD]"""
        clouds, names = Arguments.get_cloud_and_names(
            "ip", arguments, variables)
        pprint(provider.assign_public_ip(names))

    # TODO
    elif arguments.ip and arguments.inventory:
        """vm ip inventory [NAMES]"""
        print("list ips that could be assigned")

    # TODO
    elif arguments.default:
        """vm default [options...]"""
        print("functionality not implemented")

    # ok
    elif arguments.run:
        """vm run [--name=VMNAMES] [--username=USERNAME] [--dryrun] [COMMAND ...]"""
        # cms aws run --name=t --username=ubuntu uname
        clouds, names = Arguments.get_cloud_and_names(
            "run", arguments, variables)
        username = arguments['--username']
        command = arguments.COMMAND

        # map each vm name to its cached public ips
        name_ips = {}
        cursor = database.db['aws-node']
        for name in names:
            for node in cursor.find({'name': name}):
                name_ips[name] = node['public_ips']

        if arguments['--dryrun']:
            print("run command {} on vms: {}".format(command, names))
        else:
            provider.ssh(name_ips, username=username, command=command)

    # BUG in call command
    elif arguments.script:
        """vm script [--name=NAMES] [--username=USERNAME] [--dryrun] SCRIPT"""
        # cms aws script --name=t --username=ubuntu tests/test_aws.sh
        clouds, names = Arguments.get_cloud_and_names(
            "run", arguments, variables)
        username = arguments['--username']
        script = arguments.SCRIPT

        name_ips = {}
        cursor = database.db['aws-node']
        for name in names:
            for node in cursor.find({'name': name}):
                name_ips[name] = node['public_ips']

        if arguments['--dryrun']:
            print("run script {} on vms: {}".format(script, names))
        else:
            provider.ssh(name_ips, username=username, script=script)

    # TODO
    elif arguments.resize:
        """vm resize [NAMES] [--size=SIZE]"""
        pass

    # TODO
    # shh run command in implemented as aws run
    # not sure what to do with this command
    # since ssh into multiple vms at the same time doesn't make a lot of sense
    elif arguments.ssh:
        """vm ssh [NAMES] [--username=USER] [--quiet] [--ip=IP] [--key=KEY] [--command=COMMAND] [--modify-knownhosts]"""
        if arguments.NAMES:
            variables['vm'] = arguments.NAMES
        clouds, names = Arguments.get_cloud_and_names(
            "list", arguments, variables)

        # NOTE(review): 'ips' is populated nowhere (the loop only pprints
        # the node records) and is never passed to provider.ssh — looks
        # unfinished; confirm intended behavior.
        ips = {}
        cursor = database.db['aws-node']
        for name in names:
            for node in cursor.find({'name': name}):
                pprint(node)

        username = arguments['--username']
        ip = arguments['--ip']
        params = {}
        quiet = arguments['--quiet']
        if quiet:
            params['quiet'] = quiet
        command = arguments['--command']
        if command:
            params['command'] = command
        modify_host = arguments['--modify-knownhosts']
        if modify_host:
            params['modify_host'] = modify_host
        provider.ssh(username=username, ip=ip, **params)

    # TODO
    elif arguments.wait:
        """vm wait [--cloud=CLOUD] [--interval=SECONDS]"""
        print("waits for the vm till its ready and one can login")

    # TODO
    elif arguments.username:
        """vm username USERNAME [NAMES] [--cloud=CLOUD]"""
        print("sets the username for the vm")

    elif arguments.debug:
        # dump the underlying libcloud driver object for inspection
        pprint(provider.p.driver)
        # print(provider.p.cloudman.ex_list_floating_ips())
        # print(provider.loop(names, abs, option='iter',processors=3))

    return
def do_objstorage(self, args, arguments):
    """
    ::

      Usage:
        objstorage [--service=SERVICE] create dir DIRECTORY
        objstorage [--service=SERVICE] copy SOURCE DESTINATION [--recursive]
        objstorage [--service=SERVICE] get SOURCE DESTINATION [--recursive]
        objstorage [--service=SERVICE] put SOURCE DESTINATION [--recursive]
        objstorage [--service=SERVICE] list SOURCE [--recursive] [--output=OUTPUT]
        objstorage [--service=SERVICE] delete SOURCE
        objstorage [--service=SERVICE] search DIRECTORY FILENAME [--recursive] [--output=OUTPUT]

        This command does some useful things.

      Arguments:
        SOURCE BUCKET | OBJECT  can be a source bucket or object name or file
        DESTINATION BUCKET | OBJECT  can be a destination bucket or
                                object name or file
        DIRECTORY               DIRECTORY refers to a folder or bucket on
                                the cloud service for ex: awss3

      Options:
        -h, --help
        --service=SERVICE       specify the cloud service name like aws-s3

      Description:
            commands used to upload, download, list files on different
            cloud objstorage services.

            objstorage put [options..]
                Uploads the file specified in the filename to specified
                cloud from the SOURCEDIR.

            objstorage get [options..]
                Downloads the file specified in the filename from the
                specified cloud to the DESTDIR.

            objstorage delete [options..]
                Deletes the file specified in the filename from the
                specified cloud.

            objstorage list [options..]
                lists all the files from the container name specified on
                the specified cloud.

            objstorage create dir [options..]
                creates a folder with the directory name specified on the
                specified cloud.

            objstorage search [options..]
                searches for the source in all the folders on the
                specified cloud.

      Example:
            set objstorage=s3object
            objstorage put SOURCE DESTINATION --recursive

            is the same as
            objstorage --service=s3object put SOURCE DESTINATION --recursive

            Create a multi file directy in a bucket

            $ cms set objstorge=awss3
            $ tree a/a1.tx a/b/b1.txt
            cms objstorage create a/b/
            cms objstorage put a/b/b1.txt /a/b

    """
    # Map docopt '--' options to plain attributes.  'service' was missing
    # here before, so a --service=... flag on the command line was never
    # visible as arguments.service.
    map_parameters(arguments, "recursive", "service", "objstorage")
    VERBOSE.print(arguments, verbose=9)

    # If no --service was given, fall back to the 'objstorage' cms
    # variable; if that is not set either, this is a hard error.
    if arguments.service is None:
        try:
            v = Variables()
            arguments.service = v['objstorage']
        except Exception as e:
            arguments.service = None
            raise ValueError("objstorage provider is not defined") from e

    # A service specification may name several providers (comma separated).
    arguments.service = Parameter.expand(arguments.service)

    provider = Provider(arguments.service)

    # Single elif chain: exactly one subcommand branch runs.  (The 'get'
    # test was previously a separate 'if', so it was re-evaluated even
    # after a copy had already been dispatched.)
    if arguments.copy:
        result = provider.copy(arguments.SOURCE, arguments.DESTINATION,
                               arguments.recursive)
    elif arguments.get:
        result = provider.get(arguments.SOURCE, arguments.DESTINATION,
                              arguments.recursive)
    elif arguments.put:
        result = provider.put(arguments.SOURCE, arguments.DESTINATION,
                              arguments.recursive)
    elif arguments.create and arguments.dir:
        result = provider.createdir(arguments.DIRECTORY)
    elif arguments.list:
        # list/delete/search fan out over every expanded service name
        for objstorage in arguments.service:
            provider = Provider(objstorage)
            result = provider.list(arguments.SOURCE, arguments.recursive)
    elif arguments.delete:
        for objstorage in arguments.service:
            provider = Provider(objstorage)
            provider.delete(arguments.SOURCE)
    elif arguments.search:
        for objstorage in arguments.service:
            provider = Provider(objstorage)
            provider.search(arguments.DIRECTORY, arguments.FILENAME,
                            arguments.recursive)
    return ""
def do_transfer(self, args, arguments):
    """
    ::

      Usage:
        transfer copy --source=awss3:source_obj --target=azure:target_obj
        transfer list --target=awss3:target_obj
        transfer delete --target=awss3:target_obj

        This command is part of Cloudmesh's multi-cloud storage service.
        Command allows users to transfer files/directories from storage of
        one Cloud Service Provider (CSP) to storage of other CSP.
        Current implementation is to transfer data between Azure blob
        storage and AWS S3 bucket.
        AWS S3/ Azure Blob storage credentials and container details will
        be fetched from storage section of "~\\.cloudmesh\\cloudmesh.yaml"

      Arguments:
        awss3:source_obj    Combination of cloud name and the source
                            object name
        source_obj          Source object. Can be file or a directory.
        azure:target_obj    Combination of cloud name and the target
                            object name
        target_obj          Target object. Can be file or a directory.
        transfer_id         A unique id/name assigned by cloudmesh to each
                            transfer instance.

      Options:
        -h                          Help function.
        --source=awss3:source_obj   Specify source cloud and source object.
        --target=azure:target_obj   Specify target cloud and target object.
        -r                          Recursive transfer for folders.

      Description:
        transfer copy --source=<awss3:source_obj> .
                      --target=<azure:target_obj> [-r]
            Copy file/folder from source to target. Source/target CSPs and
            name of the source/target objects to be provided.
            Optional argument "-r" indicates recursive copy.

        transfer list --target=awss3:target_obj
            Enlists available files on target CSP at target object

        transfer delete --target=awss3:target_obj
            Deletes target object from the target CSP.

      Examples:
        transfer copy --source=awss3:sampleFileS3.txt .
                      --target=azure:sampleFileBlob.txt
    """
    print("EXECUTING: ")
    map_parameters(arguments, "source", "target")
    VERBOSE(arguments)

    def split_spec(spec):
        # A spec looks like "awss3:myfile.txt"; an absent spec yields
        # (None, None) so the branches below can still format banners.
        if not spec:
            return None, None
        cloud, obj = spec.split(':')
        return cloud, obj

    src_cloud, src_obj = split_spec(arguments.source)
    dst_cloud, dst_obj = split_spec(arguments.target)

    banner(f'''Working on:
source CSP = {src_cloud}
source object = {src_obj}
target CSP = {dst_cloud}
target object = {dst_obj}''')

    # return
    if arguments.FILE:
        print("option a")
    elif arguments.list:
        banner(f"Executing List command for {dst_cloud} provider on "
               f"{dst_obj}.")
        target_kwargs = dict(source=None, source_obj=None,
                             target=dst_cloud, target_obj=dst_obj)
        provider = Provider(**target_kwargs)
        provider.list(recursive=True, **target_kwargs)
    elif arguments.delete:
        banner(f"Executing Delete command for {dst_cloud} provider on "
               f"{dst_obj}")
        target_kwargs = dict(source=None, source_obj=None,
                             target=dst_cloud, target_obj=dst_obj)
        provider = Provider(**target_kwargs)
        provider.delete(recursive=True, **target_kwargs)
    elif arguments.copy:
        # Copy is deliberately executed by the TARGET CSP's provider.
        banner(f"Executing Copy command from {src_cloud} to {dst_cloud} "
               f"providers for {src_obj}")
        copy_kwargs = dict(source=src_cloud, source_obj=src_obj,
                           target=dst_cloud, target_obj=dst_obj)
        provider = Provider(**copy_kwargs)
        provider.copy(recursive=True, **copy_kwargs)
    else:
        Console.error("Invalid argument provided.")
    return ""
def do_provider(self, args, arguments):
    """
    ::

      Usage:
          provider list [--output=OUTPUT]
          provider info SERVICE NAME WHAT

      Arguments:
          NAME  The name of the key.

      Options:
          --output=OUTPUT  the format of the output [default: table]

      Description:

        What:
            output, sample

        Examples:

            Getting the sample and output from provides via a command

                cms provider info compute openstack sample
                cms provider info compute openstack output
                cms provider list --output=json
                cms provider list q

    """
    map_parameters(arguments, 'output')

    if arguments.info:
        # Show the 'sample' config snippet or the 'output' layout of the
        # provider that matches SERVICE and NAME.
        try:
            service = arguments.SERVICE
            name = arguments.NAME
            what = arguments.WHAT  # 'sample' or 'output'
            services = find()

            for provider in services:
                try:
                    if provider['service'] == service and \
                        provider['name'] == name:
                        if what == 'sample':
                            print(
                                textwrap.dedent(
                                    provider["provider"].sample))
                        elif what == 'output':
                            print(
                                json.dumps(provider["provider"].output,
                                           indent=4))
                            print()
                except Exception as e:
                    # a single malformed provider entry must not abort
                    # the scan over the remaining ones
                    print(e)
        # narrowed from a bare 'except:' so KeyboardInterrupt/SystemExit
        # are not swallowed
        except Exception:
            Console.error("Problem getting the Provider info")
        return ""

    elif arguments.list:
        # removed leftover debug 'print(arguments.output)' that prefixed
        # the table with the raw format name
        _paths = find()
        for entry in _paths:
            del entry["provider"]  # can not be printed
        print(
            Printer.write(_paths,
                          order=["service", "name", "active", "path"],
                          output=arguments.output))
        return ""
def do_source(self, args, arguments):
    """
    ::

      Usage:
            source list
            source install [--protocol=PROTOCOL]
            source clone [--protocol=PROTOCOL]
            source update

        This command does some useful things.

      Arguments:
          KEY  a file name

      Options:
          -f      specify the file
          --protocol=PROTOCOL  [default: ssh]
          --output=OUTPUT      [default: table]

      Description:

        you cen specify in your yaml file the location of where you
        manage your source code.

        cloudmesh:
          source:
            common: ~/Desktop/github/cloudmesh
            cmd5: ~/Desktop/github/cloudmesh
            openapi: ~/Desktop/github/cloudmesh
            sys: ~/Desktop/github/cloudmesh
            cm: ~/Desktop/github/cloudmesh-community

        Once you have this included and cms cm is installed, you can than
        for example do an update from source with

            cms source install

        This has the advantage that all cms directories pull the new code
        from git. It assumes that you have installed the source
        previously with

            pip install -e .

        in each of the directories
    """
    # Re-enabled: without this mapping arguments.protocol and
    # arguments.output read non-existent keys (docopt stores them as
    # '--protocol'/'--output'), so the [default: ssh] protocol and the
    # table output format were never applied.
    map_parameters(arguments, "protocol", "output")

    config = Config()
    m = Source(config, protocol=arguments.protocol)

    if arguments.list:
        print(Printer.attribute(config, output=arguments.output))
    elif arguments["install"]:
        m.install()
    # 'clone' and 'update' were handled below but missing from the Usage
    # spec, so docopt could never dispatch them; usage lines added above.
    elif arguments.clone:
        m.clone()
    elif arguments.update:
        m.update()
    return ""
def do_openapi(self, args, arguments): """ :: Usage: openapi generate [FUNCTION] --filename=FILENAME [--serverurl=SERVERURL] [--yamlfile=YAML] [--import_class] [--all_functions] [--enable_upload] [--verbose] [--basic_auth=CREDENTIALS] openapi server start YAML [NAME] [--directory=DIRECTORY] [--port=PORT] [--server=SERVER] [--host=HOST] [--verbose] [--debug] [--fg] [--os] openapi server stop NAME openapi server list [NAME] [--output=OUTPUT] openapi server ps [NAME] [--output=OUTPUT] openapi register add NAME ENDPOINT openapi register filename NAME openapi register delete NAME openapi register list [NAME] [--output=OUTPUT] openapi register protocol PROTOCOL openapi TODO merge [SERVICES...] [--dir=DIR] [--verbose] openapi TODO doc FILE --format=(txt|md)[--indent=INDENT] openapi TODO doc [SERVICES...] [--dir=DIR] openapi sklearn FUNCTION MODELTAG openapi sklearnreadfile FUNCTION MODELTAG openapi sklearn upload --filename=FILENAME Arguments: FUNCTION The name for the function or class MODELTAG The arbirtary name choosen by the user to store the Sklearn trained model as Pickle object FILENAME Path to python file containing the function or class SERVERURL OpenAPI server URL Default: https://localhost:8080/cloudmesh YAML Path to yaml file that will contain OpenAPI spec. 
Default: FILENAME with .py replaced by .yaml DIR The directory of the specifications FILE The specification Options: --import_class FUNCTION is a required class name instead of an optional function name --all_functions Generate OpenAPI spec for all functions in FILENAME --debug Use the server in debug mode --verbose Specifies to run in debug mode [default: False] --port=PORT The port for the server [default: 8080] --directory=DIRECTORY The directory in which the server is run --server=SERVER The server [default: flask] --output=OUTPUT The outputformat, table, csv, yaml, json [default: table] --srcdir=SRCDIR The directory of the specifications --destdir=DESTDIR The directory where the generated code is placed Description: This command does some useful things. openapi TODO doc FILE --format=(txt|md|rst) [--indent=INDENT] Sometimes it is useful to generate teh openaopi documentation in another format. We provide fucntionality to generate the documentation from the yaml file in a different formt. openapi TODO doc --format=(txt|md|rst) [SERVICES...] Creates a short documentation from services registered in the registry. openapi TODO merge [SERVICES...] [--dir=DIR] [--verbose] Merges tow service specifications into a single servoce TODO: do we have a prototype of this? openapi sklearn sklearn.linear_model.LogisticRegression Generates the .py file for the Model given for the generator openapi sklearnreadfile sklearn.linear_model.LogisticRegression Generates the .py file for the Model given for the generator which supports reading files openapi generate [FUNCTION] --filename=FILENAME [--serverurl=SERVERURL] [--yamlfile=YAML] [--import_class] [--all_functions] [--enable_upload] [--verbose] [--basic_auth=CREDENTIALS] Generates an OpenAPI specification for FUNCTION in FILENAME and writes the result to YAML. Use --import_class to import a class with its associated class methods, or use --all_functions to import all functions in FILENAME. 
These options ignore functions whose names start with '_'.

Use --enable_upload to add file upload functionality to a copy
of your python file and the resulting yaml file.

For optional basic authorization, we support (temporarily) a
single user credential. CREDENTIALS should be formatted as
follows: user:password

Example: --basic_auth=admin:secret

openapi server start YAML [NAME]
                    [--directory=DIRECTORY]
                    [--port=PORT]
                    [--server=SERVER]
                    [--host=HOST]
                    [--verbose]
                    [--debug]
                    [--fg]
                    [--os]
    starts an openapi web service using YAML as a specification

    TODO: directory is hard coded as None, and in server.py it
          defaults to the directory where the yaml file lives.
          Can we just remove this argument?

openapi server stop NAME
    stops the openapi service with the given name

    TODO: where does this command have to be started from

openapi server list [NAME] [--output=OUTPUT]
    Provides a list of all OpenAPI services in the registry

openapi server ps [NAME] [--output=OUTPUT]
    list the running openapi service

openapi register add NAME ENDPOINT
    Openapi comes with a service registry in which we can
    register openapi services.

openapi register filename NAME
    In case you have a yaml file the openapi service can also
    be registered from a yaml file

openapi register delete NAME
    Deletes the named service from the registry

openapi register list [NAME] [--output=OUTPUT]
    Provides a list of all registered OpenAPI services
"""
# print(arguments)

# Fold the docopt --flag options into the arguments dotdict so they are
# reachable as arguments.<name> below.
map_parameters(arguments,
               'fg',
               'os',
               'output',
               'verbose',
               'port',
               'directory',
               'yamlfile',
               'serverurl',
               'name',
               'import_class',
               'all_functions',
               'enable_upload',
               'host',
               'basic_auth')
# --verbose doubles as the server debug switch.
arguments.debug = arguments.verbose
# VERBOSE(arguments)

if arguments.generate:
    # --import_class and --all_functions are mutually exclusive modes.
    # NOTE(review): these errors do not return, so execution falls
    # through into the try block anyway — confirm that is intended.
    if arguments.import_class and arguments.all_functions:
        Console.error('Cannot generate openapi with both --import_class and --all_functions')
    if arguments.import_class and not arguments.FUNCTION:
        Console.error('FUNCTION parameter (class name) is required when using --import_class')
    try:
        # Parameter derives filenames/paths from the CLI arguments and
        # (per the comment further below) puts the file's dir on sys.path.
        p = Parameter(arguments)
        p.Print()
        filename = p.filename          # ./dir/myfile.py
        yamlfile = p.yamlfile          # ./dir/myfile.yaml
        directory = p.yamldirectory    # ./dir
        function = p.function          # myfunction
        serverurl = p.serverurl        # http://localhost:8080/cloudmesh/
        module_name = p.module_name    # myfile
        basic_auth = p.basic_auth      # user:password
        # If statement here for mode with basic_auth
        enable_upload = arguments.enable_upload

        # append the upload function to the end of a copy of the file
        # if not already done
        if enable_upload:
            # Source appended verbatim to the user's file; the trailing
            # marker comment lets us detect a previous append.
            uploadPython = textwrap.dedent("""
                from cloudmesh.openapi.registry.fileoperation import FileOperation

                def upload() -> str:
                    filename=FileOperation().file_upload()
                    return filename

                #### upload functionality added
                """)
            upload_added = False
            # NOTE(review): file handle from open() is never closed —
            # consider a with-block.
            for line in open(filename):
                if '#### upload functionality added' in line:
                    upload_added = True
            if not upload_added:
                # Work on a copy so the original source stays untouched.
                filename_upload = filename.replace('.py', '_upload-enabled.py')
                copyfile(filename, filename_upload)
                Console.info(f'copied (unknown) to {filename_upload}')
                filename = filename_upload
                module_name = module_name + '_upload-enabled'
                with open(filename, 'a') as f:
                    f.write('\n')
                    f.write(uploadPython)
                Console.info(f'added upload functionality to (unknown)')

        if basic_auth:
            # Temporary single-credential scheme: "user:password".
            user, password = basic_auth.split(':')
            BasicAuth.reset_users()
            BasicAuth.add_user(user, password)
            # write_basic_auth emits an auth-wrapped copy of the module.
            module_name, filename = BasicAuth.write_basic_auth(
                filename=filename, module_name=module_name)

        # Parameter() takes care of putting the filename in the path
        imported_module = import_module(module_name)

        # Collect module-level dataclasses so the generator can emit
        # schema definitions for them.
        dataclass_list = []
        for attr_name in dir(imported_module):
            attr = getattr(imported_module, attr_name)
            if is_dataclass(attr):
                dataclass_list.append(attr)

        # not currently supporting multiple functions or all functions
        # could do comma-separated function/class names
        if enable_upload:
            upload_obj = getattr(imported_module, 'upload')
            setattr(sys.modules[module_name], 'upload', upload_obj)

        if arguments.import_class:
            # FUNCTION names a class; expose each public method of the
            # class as an endpoint.
            class_obj = getattr(imported_module, function)
            # do we maybe need to do this here?
            # setattr(sys.modules[module_name], function, class_obj)
            class_description = class_obj.__doc__.strip().split("\n")[0]
            func_objects = {}
            for attr_name in dir(class_obj):
                attr = getattr(class_obj, attr_name)
                # Public bound methods only (leading '_' filtered out).
                if isinstance(attr, types.MethodType) and attr_name[0] != '_':
                    # are we sure this is right?
                    # would probably create a valid openapi yaml, but
                    # not technically accurate
                    # module.function may work but it should be
                    # module.Class.function
                    setattr(sys.modules[module_name], attr_name, attr)
                    func_objects[attr_name] = attr
                elif is_dataclass(attr):
                    dataclass_list.append(attr)
            openAPI = generator.Generator()
            Console.info('Generating openapi for class: ' + class_obj.__name__)
            openAPI.generate_openapi_class(class_name=class_obj.__name__,
                                           class_description=class_description,
                                           filename=filename,
                                           func_objects=func_objects,
                                           serverurl=serverurl,
                                           outdir=directory,
                                           yamlfile=yamlfile,
                                           dataclass_list=dataclass_list,
                                           all_function=False,
                                           enable_upload=enable_upload,
                                           basic_auth_enabled=basic_auth,
                                           write=True)
        elif arguments.all_functions:
            # Expose every public module-level function as an endpoint.
            func_objects = {}
            for attr_name in dir(imported_module):
                if type(getattr(imported_module, attr_name)).__name__ == 'function' and attr_name[0] != '_':
                    func_obj = getattr(imported_module, attr_name)
                    setattr(sys.modules[module_name], attr_name, func_obj)
                    func_objects[attr_name] = func_obj
            openAPI = generator.Generator()
            Console.info('Generating openapi for all functions in file: ' + filename)
            openAPI.generate_openapi_class(class_name=module_name,
                                           class_description="No description provided",
                                           filename=filename,
                                           func_objects=func_objects,
                                           serverurl=serverurl,
                                           outdir=directory,
                                           yamlfile=yamlfile,
                                           dataclass_list=dataclass_list,
                                           all_function=True,
                                           enable_upload=enable_upload,
                                           basic_auth_enabled=basic_auth,
                                           write=True)
        else:
            # Default mode: FUNCTION names a single function to expose.
            func_obj = getattr(imported_module, function)
            setattr(sys.modules[module_name], function, func_obj)
            openAPI = generator.Generator()
            Console.info('Generating openapi for function: ' + func_obj.__name__)
            openAPI.generate_openapi(f=func_obj,
                                     filename=filename,
                                     serverurl=serverurl,
                                     outdir=directory,
                                     yamlfile=yamlfile,
                                     dataclass_list=dataclass_list,
                                     enable_upload=enable_upload,
                                     basic_auth_enabled=basic_auth,
                                     write=True)
    except Exception as e:
        Console.error("Failed to generate openapi yaml")
        print(e)

# NOTE: this --os variant must be tested before the plain
# "server start" branch further below, otherwise it would be shadowed.
elif arguments.server and arguments.start and arguments.os:
    try:
        s = Server(
            name=arguments.NAME,
            spec=path_expand(arguments.YAML),
            directory=path_expand(
                arguments.directory) or arguments.directory,
            port=arguments.port,
            # NOTE(review): arguments.wsgi is not mapped by
            # map_parameters above — presumably set by docopt; verify.
            server=arguments.wsgi,
            debug=arguments.debug
        )
        pid = s.run_os()
        VERBOSE(arguments, label="Server parameters")
        print(f"Run PID: {pid}")
    except FileNotFoundError:
        Console.error("specification file not found")
    except Exception as e:
        print(e)

elif arguments.server and arguments.list:
    try:
        result = Server.list(name=arguments.NAME)
        # BUG: order= not yet defined
        print(Printer.list(result))
    except ConnectionError:
        Console.error("Server not running")

elif arguments.server and arguments.ps:
    try:
        print()
        Console.info("Running Cloudmesh OpenAPI Servers")
        print()
        result = Server.ps(name=arguments.NAME)
        print(Printer.list(result, order=["name", "pid", "spec"]))
        print()
    except ConnectionError:
        Console.error("Server not running")

elif arguments.register and arguments.add:
    registry = Registry()
    result = registry.add(name=arguments.NAME,
                          url=arguments.BASEURL,
                          pid=arguments.PID)
    registry.Print(data=result, output=arguments.output)

elif arguments.register and arguments.delete:
    registry = Registry()
    result = registry.delete(name=arguments.NAME)
    # registry.delete: 0 -> not found, None -> failure, else success.
    if result == 0:
        Console.error("Entry could not be found")
    elif result is not None:
        Console.ok("Ok. Entry deleted")
    else:
        Console.error("Could not delete entry")

elif arguments.register and arguments.list:
    registry = Registry()
    result = registry.list(name=arguments.NAME)
    registry.Print(data=result, output=arguments.output)

elif arguments.register and arguments.protocol:
    result = Registry.protocol(protocol=arguments.PROTOCOL)
    Console.ok(f"Using Registry Protocol: {result}")

elif arguments.register and arguments['filename']:
    # 'filename' clashes with a dotdict attribute, hence item access.
    registry = Registry()
    result = [registry.add_form_file(arguments['filename'])]
    registry.Print(data=result, output=arguments.output)

elif arguments.server and arguments.start:
    # VERBOSE(arguments)
    try:
        s = Server(
            name=arguments.NAME,
            spec=path_expand(arguments.YAML),
            # directory is deliberately None; server.py then defaults to
            # the yaml file's directory (see docstring TODO).
            directory=None,
            # directory=path_expand(
            #     arguments.directory) or arguments.directory,
            port=arguments.port,
            host=arguments.host,
            # NOTE(review): arguments.wsgi is not mapped by
            # map_parameters above — verify it is set by docopt.
            server=arguments.wsgi,
            debug=arguments.debug)
        pid = s.start(name=arguments.NAME,
                      spec=path_expand(arguments.YAML),
                      foreground=arguments.fg)
        # In foreground mode start() returns no pid.
        if pid is None:
            pass
        else:
            print(f"Run PID: {pid}")
    except FileNotFoundError:
        Console.error("specification file not found")
    except Exception as e:
        print(e)

elif arguments.server and arguments.stop:
    try:
        print()
        Console.info("Stopping Cloudmesh OpenAPI Server")
        print()
        Server.stop(name=arguments.NAME)
        print()
    except ConnectionError:
        Console.error("Server not running")

elif arguments.sklearn and not arguments.upload:
    try:
        Sklearngenerator(input_sklibrary=arguments.FUNCTION,
                         model_tag=arguments.MODELTAG)
    except Exception as e:
        print(e)

elif arguments.sklearnreadfile and not arguments.upload:
    try:
        SklearngeneratorFile(input_sklibrary=arguments.FUNCTION,
                             model_tag=arguments.MODELTAG)
    except Exception as e:
        print(e)

# TODO: implement this?
elif arguments.sklearn and arguments.upload:
    try:
        openAPI = generator.Generator()
        openAPI.fileput()
    except Exception as e:
        print(e)

# NOTE(review): the ''' below opens a commented-out region that
# continues beyond this chunk — confirm against the full file.
'''