class Provider(VolumeABC):
    """Cloudmesh volume provider for Oracle Cloud Infrastructure (OCI).

    Wraps ``oci.core.BlockstorageClient`` and ``oci.core.ComputeClient``
    to create, list, attach, detach, delete and tag block volumes.
    Results are normalized into cloudmesh dicts via :meth:`update_dict`
    so they can be stored in the cloudmesh database and printed.
    """

    kind = "oracle"

    # Sample cloudmesh.yaml configuration section for this provider.
    sample = """
    cloudmesh:
      volume:
        {name}:
          cm:
            active: true
            heading: {name}
            host: TBD
            label: {name}
            kind: oracle
            version: TBD
            service: volume
          credentials:
            version: TBD
            user: TBD
            fingerprint: TBD
            key_file: oci_api_key.pem
            pass_phrase: TBD
            tenancy: TBD
            compartment_id: TBD
            region: TBD
            availability_domain: TBD
          default:
    """

    # Column layout used by Provider.Print for "volume" tables.
    output = {
        "volume": {
            "sort_keys": ["cm.name"],
            "order": ["cm.name",
                      "cm.cloud",
                      "cm.kind",
                      "availability_domain",
                      "time_created",
                      "size_in_gbs",
                      "lifecycle_state",
                      "id"],
            "header": ["Name",
                       "Cloud",
                       "Kind",
                       "Availability Zone",
                       "Created At",
                       "Size(Gb)",
                       "Status",
                       "Id"],
        }
    }

    def update_dict(self, results):
        """
        This function adds a cloudmesh cm dict to each dict in the list
        elements. The oci SDK returns model objects; each object is reduced
        to a plain dict and a ``cm`` sub-dict (cloud, kind, name) is
        attached. Typically this method is used internally.

        :param results: list of oci volume model objects (or None)
        :return: list of cloudmesh dicts, or None if results is None
        """
        if results is None:
            return None
        d = []
        for volume in results:
            # getattr() is the idiomatic spelling of __getattribute__().
            # The dict literal is evaluated while `volume` still refers to
            # the oci model object.
            entry = {
                "availability_domain": getattr(volume, "availability_domain"),
                "time_created": getattr(volume, "time_created"),
                "size_in_gbs": getattr(volume, "size_in_gbs"),
                "id": getattr(volume, "id"),
                "lifecycle_state": getattr(volume, "lifecycle_state"),
                # freshly built dict, so "cm" is always absent here
                "cm": {
                    "cloud": self.cloud,
                    "kind": "volume",
                    "name": getattr(volume, "display_name"),
                }
            }
            d.append(entry)
        return d

    def __init__(self, name):
        """
        Initialize provider. The default parameters are read from the
        configuration file that is defined in yaml format.

        :param name: name of cloud
        """
        self.cloud = name
        self.config = Config()["cloudmesh.volume.oracle.credentials"]
        self.defaults = Config()["cloudmesh.volume.oracle.default"]
        self.cm = CmDatabase()

    def get_volume_id_from_name(self, block_storage, name):
        """
        This function gets the volume OCID from the volume display name.

        :param block_storage: Block storage client object
        :param name: volume name
        :return: volume id, or None if no volume matches
        """
        v = block_storage.list_volumes(self.config['compartment_id'])
        for entry in v.data:
            if getattr(entry, "display_name") == name:
                return getattr(entry, "id")
        return None

    def get_attachment_id_from_name(self, block_storage, name):
        """
        This function gets the attachment OCID from the volume display name.
        The attachment id is stored as a freeform tag on the volume by
        :meth:`attach` and is required by :meth:`detach`.

        :param block_storage: Block storage client object
        :param name: Name of the volume
        :return: Volume attachment id, or None if no volume matches
        """
        v = block_storage.list_volumes(self.config['compartment_id'])
        for entry in v.data:
            if getattr(entry, "display_name") == name:
                tags = getattr(entry, "freeform_tags")
                return tags['attachment_id']
        return None

    def status(self, name):
        """
        This function gets the volume status, such as "in-use", "available".

        :param name: Volume name
        :return: list with the matching volume dict (empty if not found)
        """
        try:
            block_storage = oci.core.BlockstorageClient(self.config)
            v = block_storage.list_volumes(self.config['compartment_id'])
            result = []
            for entry in v.data:
                # bug fix: only collect the matching entry; the original
                # appended the last iterated entry even when nothing matched
                if getattr(entry, "display_name") == name:
                    result.append(entry)
                    break
            result = self.update_dict(result)
        except Exception as e:
            Console.error("Problem finding status", traceflag=True)
            print(e)
            raise RuntimeError from e
        return result

    def list(self, **kwargs):
        """
        This function lists volumes as follows:
        If NAME (volume_name) is specified, it will print out info of NAME.
        If NAMES is specified, it will print out info of those volumes.
        Otherwise it prints info of all volumes. With ``refresh=False``
        the cloudmesh database is consulted instead of the cloud.

        :param kwargs: may contain NAME, NAMES and refresh
        :return: Dictionary of volumes
        """
        try:
            result = []
            # bug fix: use .get() so a missing 'refresh'/'NAME' key no
            # longer raises KeyError; absent refresh falls through to a
            # live listing
            if kwargs.get('refresh') is False:
                result = self.cm.find(cloud=self.cloud, kind='volume')
                if kwargs.get('NAME'):
                    result = self.cm.find_name(name=kwargs['NAME'])
                elif kwargs.get('NAMES'):
                    result = self.cm.find_names(names=kwargs['NAMES'])
            else:
                block_storage = oci.core.BlockstorageClient(self.config)
                v = block_storage.list_volumes(self.config['compartment_id'])
                results = v.data
                if kwargs.get('NAME'):
                    # bug fix: return only matching volumes; the original
                    # returned the last volume in the listing when the
                    # requested name did not exist
                    results = [entry for entry in results
                               if getattr(entry, "display_name")
                               == kwargs['NAME']]
                result = self.update_dict(results)
        except Exception as e:
            Console.error("Problem listing volume", traceflag=True)
            print(e)
            raise RuntimeError from e
        return result

    def create(self, **kwargs):
        """
        This function creates a new volume with a default size of 50gb.
        Default parameters are read from self.config. Blocks until the
        new volume reaches the AVAILABLE lifecycle state.

        :param kwargs: Contains Volume name (NAME)
        :return: list of volume dicts (current volume listing)
        """
        try:
            arguments = dotdict(kwargs)
            block_storage = oci.core.BlockstorageClient(self.config)
            result = block_storage.create_volume(
                oci.core.models.CreateVolumeDetails(
                    compartment_id=self.config['compartment_id'],
                    availability_domain=self.config['availability_domain'],
                    display_name=arguments.NAME
                ))
            # wait for availability of volume
            oci.wait_until(
                block_storage,
                block_storage.get_volume(result.data.id),
                'lifecycle_state',
                'AVAILABLE'
            )
            v = block_storage.list_volumes(self.config['compartment_id'])
            result = self.update_dict(v.data)
        except Exception as e:
            Console.error("Problem creating volume", traceflag=True)
            print(e)
            raise RuntimeError from e
        return result

    def attach(self, names=None, vm=None):
        """
        This function attaches a given volume to a given instance.
        The attachment id is recorded as a freeform tag on the volume so
        that :meth:`detach` can find it later.

        :param names: Names of Volumes (only names[0] is attached)
        :param vm: Instance name
        :return: Dictionary of volumes
        """
        try:
            compute_client = oci.core.ComputeClient(self.config)
            # get instance id from VM name
            instances = compute_client.list_instances(
                self.config['compartment_id']).data
            instance_id = None
            for entry in instances:
                if getattr(entry, "display_name") == vm:
                    instance_id = getattr(entry, "id")
                    break
            # get volumeId from Volume name
            block_storage = oci.core.BlockstorageClient(self.config)
            volume_id = self.get_volume_id_from_name(block_storage, names[0])
            # attach volume to vm
            a = compute_client.attach_volume(
                oci.core.models.AttachIScsiVolumeDetails(
                    display_name='IscsiVolAttachment',
                    instance_id=instance_id,
                    volume_id=volume_id
                )
            )
            # tag volume with attachment id. This is needed during detach.
            block_storage.update_volume(
                volume_id,
                oci.core.models.UpdateVolumeDetails(
                    freeform_tags={'attachment_id': a.data.id},
                ))
            # wait until attached
            oci.wait_until(
                compute_client,
                compute_client.get_volume_attachment(a.data.id),
                'lifecycle_state',
                'ATTACHED'
            )
            # return result after attach
            v = block_storage.list_volumes(self.config['compartment_id'])
            results = self.update_dict(v.data)
        except Exception as e:
            Console.error("Problem attaching volume", traceflag=True)
            print(e)
            raise RuntimeError from e
        return results

    def detach(self, name=None):
        """
        This function detaches a given volume from an instance.

        :param name: Volume name
        :return: dict of the first volume in the refreshed listing
        """
        try:
            compute_client = oci.core.ComputeClient(self.config)
            block_storage = oci.core.BlockstorageClient(self.config)
            attachment_id = self.get_attachment_id_from_name(block_storage,
                                                             name)
            compute_client.detach_volume(attachment_id)
            # wait for detachment
            oci.wait_until(
                compute_client,
                compute_client.get_volume_attachment(attachment_id),
                'lifecycle_state',
                'DETACHED'
            )
            # return result after detach
            v = block_storage.list_volumes(self.config['compartment_id'])
            results = self.update_dict(v.data)
        except Exception as e:
            Console.error("Problem detaching volume", traceflag=True)
            print(e)
            raise RuntimeError from e
        return results[0]

    def delete(self, name=None):
        """
        This function deletes one volume and waits for its termination.

        :param name: Volume name
        :return: list of volume dicts (current volume listing)
        """
        try:
            block_storage = oci.core.BlockstorageClient(self.config)
            volume_id = self.get_volume_id_from_name(block_storage, name)
            if volume_id is not None:
                block_storage.delete_volume(volume_id=volume_id)
                # wait for termination
                oci.wait_until(
                    block_storage,
                    block_storage.get_volume(volume_id),
                    'lifecycle_state',
                    'TERMINATED'
                )
            v = block_storage.list_volumes(self.config['compartment_id'])
            result = self.update_dict(v.data)
        except Exception as e:
            Console.error("Problem deleting volume", traceflag=True)
            print(e)
            raise RuntimeError from e
        return result

    def add_tag(self, **kwargs):
        """
        This function adds a freeform tag to a volume.

        :param kwargs:
                    NAME: name of volume
                    key: name of tag
                    value: value of tag
        :return: Dictionary of volume
        """
        try:
            name = kwargs['NAME']
            key = kwargs['key']
            value = kwargs['value']
            block_storage = oci.core.BlockstorageClient(self.config)
            volume_id = self.get_volume_id_from_name(block_storage, name)
            block_storage.update_volume(
                volume_id,
                oci.core.models.UpdateVolumeDetails(
                    freeform_tags={key: value},
                )
            )
            result = self.list(NAME=name, refresh=True)[0]
        except Exception as e:
            Console.error("Problem adding tag", traceflag=True)
            print(e)
            raise RuntimeError from e
        return result

    def migrate(self,
                name=None,
                fvm=None,
                tvm=None,
                fregion=None,
                tregion=None,
                fservice=None,
                tservice=None,
                fcloud=None,
                tcloud=None,
                cloud=None,
                region=None,
                service=None):
        """
        Migrate volume from one vm to another vm.

        :param name: name of volume
        :param fvm: name of vm where volume will be moved from
        :param tvm: name of vm where volume will be moved to
        :param fregion: the region where the volume will be moved from
        :param tregion: region where the volume will be moved to
        :param fservice: the service where the volume will be moved from
        :param tservice: the service where the volume will be moved to
        :param fcloud: the provider where the volume will be moved from
        :param tcloud: the provider where the volume will be moved to
        :param cloud: the provider where the volume will be moved within
        :param region: the region where the volume will be moved within
        :param service: the service where the volume will be moved within
        :return: dict
        """
        raise NotImplementedError

    def sync(self, volume_id=None, zone=None, cloud=None):
        """
        Sync contents of one volume to another volume.

        :param volume_id: id of volume A
        :param zone: zone where new volume will be created
        :param cloud: the provider where volumes will be hosted
        :return: str
        """
        raise NotImplementedError
def __init__(self):
    """Initialize the command class and its database collection.

    NOTE(review): the enclosing class header is outside this view;
    this appears to be the constructor of a local virtual-directory
    ("vdir") command class — confirm against the class definition.
    """
    # announce the concrete class being initialized (helps when subclassed)
    print("init {name}".format(name=self.__class__.__name__))
    self.cm = CmDatabase()
    # MongoDB collection holding the virtual-directory entries
    self.col = self.cm.db['local-vdir']
    # default directory kind used by this command
    self.directory = 'vdir'
def do_vm(self, args, arguments):
    """
    ::

      Usage:
        vm ping [NAMES] [--cloud=CLOUDS] [--count=N]
        vm check [NAMES] [--cloud=CLOUDS] [--username=USERNAME]
        vm status [NAMES] [--cloud=CLOUDS] [--output=OUTPUT]
        vm console [NAME] [--force]
        vm log [NAME] [--force]
        vm stop [NAMES] [--dryrun]
        vm start [NAMES] [--dryrun]
        vm terminate [NAMES] [--cloud=CLOUD] [--dryrun]
        vm delete [NAMES] [--cloud=CLOUD] [--dryrun]
        vm list [NAMES] [--cloud=CLOUDS] [--output=OUTPUT] [--refresh] [--short]
        vm boot [--n=COUNT] [--name=NAMES] [--label=LABEL] [--cloud=CLOUD]
                [--username=USERNAME] [--image=IMAGE] [--flavor=FLAVOR]
                [--network=NETWORK] [--public] [--secgroup=SECGROUP]
                [--group=GROUP] [--key=KEY] [--dryrun] [-v]
        vm meta list [NAME]
        vm meta set [NAME] KEY=VALUE...
        vm meta delete [NAME] KEY...
        vm script [NAMES] [--username=USERNAME] [--key=KEY] [--dryrun]
                  [--dir=DESTINATION] SCRIPT
        vm ip assign [NAMES] [--cloud=CLOUD]
        vm ip show [NAMES] [--group=GROUP] [--cloud=CLOUD] [--output=OUTPUT]
                   [--refresh]
        vm ip inventory [NAMES]
        vm ssh [NAMES] [--username=USER] [--quiet] [--ip=IP] [--key=KEY]
               [--command=COMMAND]
        vm put SOURCE DESTINATION [NAMES]
        vm get SOURCE DESTINATION [NAMES]
        vm rename [OLDNAMES] [NEWNAMES] [--force] [--dryrun]
        vm wait [--cloud=CLOUD] [--interval=INTERVAL] [--timeout=TIMEOUT]
        vm info [NAMES] [--cloud=CLOUD] [--output=OUTPUT] [--dryrun]
        vm username USERNAME [NAMES] [--cloud=CLOUD]
        vm resize [NAMES] [--size=SIZE]

      Arguments:
          OUTPUT         the output format
          COMMAND        positional arguments, the commands you want to
                         execute on the server(e.g. ls -a) separated by ';',
                         you will get a return of executing result instead
                         of login to the server, note that type in -- is
                         suggested before you input the commands
          NAME           server name. By default it is set to the name of
                         last vm from database.
          NAMES          server name. By default it is set to the name of
                         last vm from database.
          KEYPAIR_NAME   Name of the vm keypair to be used to create VM.
                         Note this is not a path to key.
          NEWNAMES       New names of the VM while renaming.
          OLDNAMES       Old names of the VM while renaming.

      Options:
          -v                  verbose, prints the dict at the end
          --output=OUTPUT     the output format
          -H --modify-knownhosts   Do not modify ~/.ssh/known_hosts file
                              when ssh'ing into a machine
          --username=USERNAME the username to login into the vm. If not
                              specified it will be guessed from the image
                              name and the cloud
          --ip=IP             give the public ip of the server
          --cloud=CLOUD       give a cloud to work on, if not given, selected
                              or default cloud will be used
          --count=COUNT       give the number of servers to start
          --detail            for table, a brief version is used as default,
                              use this flag to print detailed table
          --flavor=FLAVOR     give the name or id of the flavor
          --group=GROUP       give the group name of server
          --secgroup=SECGROUP security group name for the server
          --image=IMAGE       give the name or id of the image
          --key=KEY           specify a key to use, input a string which is
                              the full path to the private key file
          --keypair_name=KEYPAIR_NAME   Name of the vm keypair to be used to
                              create VM. Note this is not a path to key.
          --user=USER         give the user name of the server that you want
                              to use to login
          --name=NAME         give the name of the virtual machine
          --force             rename/ delete vms without user's confirmation
          --command=COMMAND   specify the commands to be executed

      Description:
          commands used to boot, start or delete servers of a cloud

          vm default [options...]
              Displays default parameters that are set for vm boot either
              on the default cloud or the specified cloud.

          vm boot [options...]
              Boots servers on a cloud, user may specify flavor, image
              .etc, otherwise default values will be used, see how to set
              default values of a cloud: cloud help

          vm start [options...]
              Starts a suspended or stopped vm instance.

          vm stop [options...]
              Stops a vm instance .

          vm delete [options...]
              Delete servers of a cloud, user may delete a server by its
              name or id, delete servers of a group or servers of a cloud,
              give prefix and/or range to find servers by their names.
              Or user may specify more options to narrow the search

          vm floating_ip_assign [options...]
              assign a public ip to a VM of a cloud

          vm ip show [options...]
              show the ips of VMs

          vm ssh [options...]
              login to a server or execute commands on it

          vm list [options...]
              same as command "list vm", please refer to it

          vm status [options...]
              Retrieves status of last VM booted on cloud and displays it.

          vm refresh [--cloud=CLOUDS]
              this command refreshes the data for virtual machines,
              images and flavors for the specified clouds.

          vm ping [NAMES] [--cloud=CLOUDS] [--count=N] [--processors=PROCESSORS]
              pings the specified virtual machines, while using at most N
              pings. The ping is executed in parallel. If names are
              specifies the ping is restricted to the given names in
              parameter format. If clouds are specified, names that are
              not in these clouds are ignored. If the name is set in the
              variables this name is used.

          cms vm ssh --command=\"uname -a\"
              executes the uname command on the last booted vm

          vm script [--name=NAMES] [--username=USERNAME] [--key=KEY]
                    [--dryrun] [--dir=DESTINATION] [--shell=SHELL] SCRIPT
              The script command copies a shell script to the specified vms
              into the DESTINATION directory and than execute it. With
              SHELL you can set the shell for executing the command, this
              coudl even be a python interpreter. Examples for SHELL are
              /bin/sh, /usr/bin/env python

          vm put SOURCE DESTINATION [NAMES]
              puts the file defined by SOURCE into the DESINATION folder
              on the specified machines. If the file exists it is
              overwritten, so be careful.

          vm get SOURCE DESTINATION [NAMES]
              gets the file defined by SOURCE into the DESINATION folder
              on the specified machines. The SOURCE is on the remote
              machine. If one machine is specified, the SOURCE is the same
              name as on the remote machine. If multiple machines are
              specified, the name of the machine will be a prefix to the
              filename. If the filenames exists, they will be overwritten,
              so be careful.

      Tip:
          give the VM name, but in a hostlist style, which is very
          convenient when you need a range of VMs e.g. sample[1-3]
          => ['sample1', 'sample2', 'sample3']
          sample[1-3,18] => ['sample1', 'sample2', 'sample3', 'sample18']

      Quoting commands:
          cm vm login gregor-004 --command=\"uname -a\"

      Limitations:
          Azure: rename is not supported
    """
    # NOTE(review): 'group' 'output' below is adjacent-string
    # concatenation and maps the single name 'groupoutput' — almost
    # certainly a missing comma; 'group' and 'output' are also listed
    # separately, so the intent was likely just a duplicate.
    map_parameters(arguments,
                   'active',
                   'cloud',
                   'label',
                   'command',
                   'dryrun',
                   'flavor',
                   'force',
                   'group' 'output',
                   'group',
                   'image',
                   'interval',
                   'timeout',
                   'key',
                   'modify-knownhosts',
                   'n',
                   'name',
                   'public',
                   'quiet',
                   'secgroup',
                   'size',
                   'output',
                   'count',
                   'network',
                   'refresh')
    variables = Variables()
    database = CmDatabase()
    # output/refresh fall back to cms variables, then defaults
    arguments.output = Parameter.find("output",
                                      arguments,
                                      variables,
                                      "table")
    arguments.refresh = Parameter.find_bool("refresh",
                                            arguments,
                                            variables)

    if arguments.meta:
        # vm meta list/set/delete — operate on server metadata
        name = arguments.NAME
        if arguments.NAME is None:
            name = variables['vm']
            if name is None:
                Console.error("No vm specified")
        clouds = Arguments.get_clouds(arguments, variables)
        cloud = clouds[0] or "chameleon"
        provider = Provider(name=cloud)
        if arguments.list:
            print(f"List metadata for {name} on {cloud}")
            r = provider.get_server_metadata(name)
        elif arguments.set:
            print(f"Set metadata for {name} on {cloud}")
            metadata = {}
            pairs = arguments['KEY=VALUE']
            for pair in pairs:
                key, value = pair.split("=", 1)
                # cm cannot be updated using vm meta set
                if key == 'cm':
                    Console.warning(
                        "Update of cm metadata is not allowed.")
                else:
                    metadata[key] = value
            if len(metadata) < 1:
                Console.info("No metadata to update.")
                return
            provider = Provider(name=cloud)
            provider.set_server_metadata(name, **metadata)
            r = provider.get_server_metadata(name)
        elif arguments.delete:
            print(f"Delete metadata for {name} on {cloud}")
            metadata = {}
            keys = arguments['KEY']
            for key in keys:
                # cm cannot be delete using vm meta set
                if key == 'cm':
                    Console.warning(
                        "Deleting of cm metadata is not allowed.")
                else:
                    provider.delete_server_metadata(name, key)
            r = provider.get_server_metadata(name)
        # NOTE(review): r is unbound here if none of list/set/delete
        # matched — would raise NameError; verify docopt guarantees one
        # of the three subcommands.
        if r:
            provider.Print(r,
                           output=arguments.output,
                           kind="metadata")

    elif arguments.list and arguments.refresh:
        # live listing: query each cloud and print
        names = []
        clouds, names = Arguments.get_cloud_and_names(
            "list", arguments, variables)
        for cloud in clouds:
            print(f"cloud {cloud}")
            provider = Provider(name=cloud)
            vms = provider.list()
            provider.Print(vms, output=arguments.output, kind="vm")
        return ""

    elif arguments.list:
        # cached listing: read vms from the database collections
        names = []
        clouds, names = Arguments.get_cloud_and_names(
            "list", arguments, variables)
        try:
            if clouds is None:
                Console.error("No cloud specified")
                return ""
            for cloud in clouds:
                print(f"List {cloud}")
                p = Provider(cloud)
                kind = p.kind
                collection = "{cloud}-vm".format(cloud=cloud, kind=p.kind)
                db = CmDatabase()
                vms = db.find(collection=collection)
                p.Print(vms, output=arguments.output, kind="vm")
        except Exception as e:
            Console.error("Error in listing ", traceflag=True)
            VERBOSE(e)
        return ""

    elif arguments.ping:
        """
        vm ping [NAMES] [--cloud=CLOUDS] [--count=N]
        """
        if arguments.NAMES:
            variables['vm'] = arguments.NAMES
        if arguments['--cloud']:
            variables['cloud'] = arguments['--cloud']
        clouds, names = Arguments.get_cloud_and_names(
            "status", arguments, variables)
        count = arguments.count
        if arguments.count:
            count = int(count)
        else:
            count = 1

        def get_ips():
            # collect public ips of the named vms from the database
            ips = []
            for cloud in clouds:
                params = {}
                # gets public ips from database
                cursor = database.db[f'{cloud}-vm']
                for name in names:
                    for node in cursor.find({'name': name}):
                        ips.append(node['ip_public'])
            ips = list(set(ips))
            pprint(ips)
            return ips

        ips = get_ips()
        if len(ips) == 0:
            # no cached ips: refresh the vm lists once and retry
            Console.warning("no public ip found.")
            for cloud in clouds:
                print(f"refresh for cloud {cloud}")
                provider = Provider(name=cloud)
                vms = provider.list()
            ips = get_ips()
            if len(ips) == 0:
                Console.error("No vms with public IPS found.")
                Console.error(" Make sure to use cms vm list --refresh")
        for ip in ips:
            result = Shell.ping(host=ip, count=count)
            banner(ip)
            print(result)
            print()

    elif arguments.check:
        raise NotImplementedError
        # NOTE(review): everything below the raise is unreachable dead
        # code kept from an earlier provider-dependent implementation.
        """
        vm check [NAMES] [--cloud=CLOUDS] [--username=USERNAME]
        """
        """
        THIS IS ALL WRONG AS PROVIDER DEPENDENT !!!

        if arguments.NAMES:
            variables['vm'] = arguments.NAMES
        if arguments['--cloud']:
            variables['cloud'] = arguments['--cloud']
        clouds, names = Arguments.get_cloud_and_names("status",
                                                      arguments,
                                                      variables)

        for cloud in clouds:
            provider = Provider(cloud)
            params = {}
            params['key'] = \
                provider.p.spec["credentials"]['EC2_PRIVATE_KEY_FILE_PATH'] + \
                provider.p.spec["credentials"]['EC2_PRIVATE_KEY_FILE_NAME']
            params['username'] = arguments['--username']  # or get from db

            processors = arguments['--processors']
            if processors:
                params['processors'] = int(processors[0])

            # gets public ips from database
            public_ips = []
            cursor = database.db['{cloud}-vm']
            for name in names:
                for node in cursor.find({'name': name}):
                    public_ips.append(node['public_ips'])
            public_ips = [y for x in public_ips for y in x]

            Host.check(hosts=public_ips, **params)
        """

    elif arguments.status:
        if arguments.NAMES:
            variables['vm'] = arguments.NAMES
        if arguments['--cloud']:
            variables['cloud'] = arguments['--cloud']
        clouds, names = Arguments.get_cloud_and_names(
            "status", arguments, variables)
        # gets status from database
        for cloud in clouds:
            provider = Provider(cloud)
            status = []
            cursor = database.db[f'{cloud}-vm']
            print(cloud)
            for name in names:
                for node in cursor.find({'name': name}):
                    status.append(node)
            provider.Print(status, output=arguments.output, kind="status")
        return ""

    elif arguments.start:
        # TODO: not tested
        if arguments.NAMES:
            names = variables['vm'] = arguments.NAMES
        if arguments['--cloud']:
            variables['cloud'] = arguments['--cloud']
        clouds, names = Arguments.get_cloud_and_names(
            "stop", arguments, variables)
        cloud = clouds[0]
        print(cloud)
        print(names)
        for name in names:
            provider = Provider(cloud)
            if arguments['--dryrun']:
                print(f"start node {name}")
            else:
                vms = provider.start(name=name, cloud=cloud)
                provider.Print(vms, output=arguments.output, kind="vm")
        return ""

    elif arguments.stop:
        # TODO: not tested
        if arguments.NAMES:
            variables['vm'] = arguments.NAMES
        if arguments['--cloud']:
            variables['cloud'] = arguments['--cloud']
        clouds, names = Arguments.get_cloud_and_names(
            "stop", arguments, variables)
        for cloud in clouds:
            params = {}
            provider = Provider(cloud)
            if arguments['--dryrun']:
                Console.ok(f"Dryrun stop: "
                           f"    {cloud}\n"
                           f"    {names}"
                           f"    {provider}")
            else:
                for name in names:
                    vms = provider.stop(name)
                provider.Print(vms, output=arguments.output, kind="vm")

    elif arguments.terminate:
        # TODO: not tested
        if arguments.NAMES:
            variables['vm'] = arguments.NAMES
        if arguments['--cloud']:
            variables['cloud'] = arguments['--cloud']
        clouds, names = Arguments.get_cloud_and_names(
            "stop", arguments, variables)
        for cloud in clouds:
            params = {}
            provider = Provider(cloud)
            if arguments['--dryrun']:
                Console.ok(f"Dryrun terminate: "
                           f"    {cloud}\n"
                           f"    {names}"
                           f"    {provider}")
            else:
                for name in names:
                    vms = provider.destroy(name)
                provider.Print(vms, output=arguments.output, kind="vm")

    elif arguments.delete:
        if arguments.NAMES:
            variables['vm'] = arguments.NAMES
        if arguments['--cloud']:
            variables['cloud'] = arguments['--cloud']
        clouds, names = Arguments.get_cloud_and_names(
            "stop", arguments, variables)
        if names is not None:
            pass
        elif clouds is not None:
            # no names given: delete every vm on the given clouds
            for cloud in clouds:
                provider = Provider(cloud)
                vms = provider.list()
                for vm in vms:
                    r = provider.destroy(name=vm)
            return ""
        else:
            return ""
        # names given: delete only the vms whose names match
        for cloud in clouds:
            provider = Provider(cloud)
            vms = provider.list()
            for vm in vms:
                name = vm["cm"]["name"]
                if name in names:
                    r = provider.destroy(name=name)

    # TODO: username, secgroup
    elif arguments.boot:
        # not everything works
        """
        vm boot [--n=COUNT]
                [--name=NAMES]
                [--label=LABEL]
                [--cloud=CLOUD]
                [--username=USERNAME]
                [--image=IMAGE]
                [--flavor=FLAVOR]
                [--network=NETWORK]
                [--public]
                [--secgroup=SECGROUP]
                [--group=GROUP]
                [--key=KEY]
                [--dryrun]
                [-v]
        """
        # for name in names:
        #    node = p.create(name=name, size=flavor, image=image)

        # VERBOSE(arguments)
        parameters = dotdict()
        names = Parameter.expand(arguments.name)
        cloud = Parameter.find("cloud",
                               arguments,
                               variables.dict())
        defaults = Config()[f"cloudmesh.cloud.{cloud}.default"]
        groups = Parameter.find("group",
                                arguments,
                                variables.dict(),
                                {"group": "default"})
        parameters = dotdict()
        # parameters.names = arguments.name
        parameters.group = groups
        # NOTE(review): `attribute is not None` is always true for these
        # string literals — the guard is a no-op.
        for attribute in [
            "image",
            "username",
            "flavor",
            "key",
            "network",
            "secgroup"
        ]:
            if attribute is not None:
                parameters[attribute] = Parameter.find(
                    attribute, arguments, variables.dict(), defaults)
        if arguments["--username"] is None:
            parameters.user = Image.guess_username(parameters.image)
        provider = Provider(name=cloud)
        parameters.secgroup = arguments.secgroup or "default"

        #
        # determine names
        #
        if names and arguments.n and len(names) > 1:
            Console.error(
                f"When using --n={arguments.n}, you can only specify "
                f"one name"
            )
            return ""

        # cases
        #
        # only name         --name = "a[1,2]"
        # name and count
        #                   --name="a" --n=3, names must be of length 1
        # only count        --n=2 names are read form var
        # nothing, just use one vm

        # determin names
        _names = []
        if not names:
            if not arguments.n:
                count = 1
            else:
                count = int(arguments.n)
            for i in range(0, count):
                if names is None:
                    n = Name()
                    n.incr()
                    name = str(n)
                else:
                    n = names[i]
                    name = str(n)
                _names.append(name)
            names = _names
        elif len(names) == 1 and arguments.n:
            name = names[0]
            for i in range(0, int(arguments.n)):
                _names.append(f"{name}-{i}")
            names = _names

        # pprint(parameters)
        for name in names:
            parameters.name = name
            label = arguments.get("label") or arguments.name
            parameters["label"] = label
            if arguments['--dryrun']:
                banner("boot")
                pprint(parameters)
                Console.ok(f"Dryrun boot {name}: \n"
                           f"        label={label}\n"
                           f"        cloud={cloud}\n"
                           f"        names={names}\n"
                           f"        provider={provider}")
                print()
                for attribute in parameters:
                    value = parameters[attribute]
                    Console.ok(f"        {attribute}={value}")
            else:
                # parameters.progress = len(parameters.names) < 2
                try:
                    vms = provider.create(**parameters)
                except TimeoutError:
                    Console.error(
                        f"Timeout during vm creation. There may be a "
                        f"problem with the cloud {cloud}"
                    )
                except Exception as e:
                    Console.error("create problem", traceflag=True)
                    print(e)
                    return ""
                variables['vm'] = str(name)
                if arguments["-v"]:
                    banner("Details")
                    pprint(vms)
        # provider.Print(arguments.output, "vm", vms)

    elif arguments.info:
        """
        vm info [NAMES] [--cloud=CLOUD] [--output=OUTPUT] [--dryrun]
        """
        cloud, names = Arguments.get_cloud_and_names(
            "info", arguments, variables)
        cloud_kind = cloud[0]
        for name in names:
            # Get Cloud Provider.
            provider = Provider(cloud_kind)
            if arguments['--dryrun']:
                print(f"info node {name}")
            else:
                vms = provider.info(name=name)
                provider.Print(vms, output=arguments.output, kind="vm")
        return ""

    elif arguments.rename:
        raise NotImplementedError
        # NOTE(review): unreachable dead code below the raise.
        # Not tested
        print("rename the vm")
        v = Variables()
        cloud = v["cloud"]
        p = Provider(cloud)
        try:
            oldnames = Parameter.expand(arguments["OLDNAMES"])
            newnames = Parameter.expand(arguments["NEWNAMES"])
            force = arguments["--force"]
            if oldnames is None or newnames is None:
                Console.error("Wrong VMs specified for rename",
                              traceflag=False)
            elif len(oldnames) != len(newnames):
                Console.error("The number of VMs to be renamed is wrong",
                              traceflag=False)
            else:
                print(oldnames)
                print(newnames)
                for i in range(0, len(oldnames)):
                    oldname = oldnames[i]
                    newname = newnames[i]
                    if arguments["--dryrun"]:
                        Console.ok("Rename {} to {}".format(
                            oldname, newname))
                    else:
                        print(f"rename {oldname} -> {newname}")
                        p.rename(source=oldname, destination=newname)
                msg = "info. OK."
                Console.ok(msg)
        except Exception as e:
            Error.traceback(e)
            Console.error("Problem renaming instances", traceflag=True)

    elif arguments.ip and arguments["show"]:
        raise NotImplementedError
        print("show the ips")
        """
        vm ip show [NAMES]
                   [--group=GROUP]
                   [--cloud=CLOUD]
                   [--output=OUTPUT]
                   [--refresh]
        """

    elif arguments.ip and arguments["assign"]:
        raise NotImplementedError
        """
        vm ip assign [NAMES] [--cloud=CLOUD]
        """
        print("assign the public ip")

    elif arguments.ip and arguments["inventory"]:
        raise NotImplementedError
        """
        vm ip inventory [NAMES]
        """
        print("list ips that could be assigned")

    elif arguments.default:
        raise NotImplementedError
        print("sets defaults for the vm")

    elif arguments.script:
        raise NotImplementedError
        # NOTE(review): unreachable dead code below the raise.
        clouds, names = Arguments.get_cloud_and_names(
            "run", arguments, variables)
        username = arguments['--username']
        script = arguments.SCRIPT
        for cloud in clouds:
            provider = Provider(cloud)
            name_ips = {}
            cursor = database.db['{}-node'.format(cloud)]
            for name in names:
                for node in cursor.find({'name': name}):
                    name_ips[name] = node['public_ips']
            if arguments['--dryrun']:
                print("run script {} on vms: {}".format(script, names))
            else:
                provider.ssh(name_ips, username=username, script=script)

    elif arguments.username:
        raise NotImplementedError
        """
        vm username USERNAME [NAMES] [--cloud=CLOUD]
        """
        print("sets the username for the vm")

    elif arguments.resize:
        raise NotImplementedError
        """
        vm resize [NAMES] [--size=SIZE]
        """
        pass

    elif arguments.ssh:
        """
        vm ssh [NAMES] [--username=USER]
               [--quiet]
               [--ip=IP]
               [--key=KEY]
               [--command=COMMAND]
        """
        # VERBOSE(arguments)
        clouds, names, command = Arguments.get_commands(
            "ssh", arguments, variables)
        # print (clouds)
        # print(names)
        # print (command)
        if arguments.command is None and len(names) > 1:
            # interactive shell needs exactly one target
            Console.error("Interactive shell can only be done on one vm")
            return ""
        elif arguments.command is None and len(names) == 1:
            name = names[0]
            cloud = clouds[0]
            cm = CmDatabase()
            try:
                vm = cm.find_name(name, "vm")[0]
            except IndexError:
                Console.error(f"could not find vm {name}")
                return ""
            # VERBOSE(vm)
            cloud = vm["cm"]["cloud"]
            provider = Provider(name=cloud)
            try:
                provider.ssh(vm=vm)
            except KeyError:
                # refresh the listing once and retry
                vms = provider.list()
                provider.Print(vms, output=arguments.output, kind="vm")
                provider.ssh(vm=vm)
            return ""
        else:
            # command on all vms
            if clouds is None or names is None or command is None:
                return ""
            else:
                for cloud in clouds:
                    p = Provider(cloud)
                    for name in names:
                        cm = CmDatabase()
                        try:
                            vm = cm.find_name(name, "vm")[0]
                        except IndexError:
                            Console.error(f"could not find vm {name}")
                            continue
                        r = p.ssh(vm=vm, command=command)
                        print(r)
                return ""

    elif arguments.console:
        # why is this not vm
        clouds, names, command = Arguments.get_commands(
            "ssh", arguments, variables)
        print(clouds)
        print(names)
        print(command)
        for cloud in clouds:
            p = Provider(cloud)
            for name in names:
                cm = CmDatabase()
                try:
                    vm = cm.find_name(name, "vm")[0]
                except IndexError:
                    Console.error(f"could not find vm {name}")
                    continue
                r = p.console(vm=vm)
                print(r)
        return ""

    elif arguments.log:
        # why is this not vm
        clouds, names, command = Arguments.get_commands(
            "ssh", arguments, variables)
        print(clouds)
        print(names)
        print(command)
        for cloud in clouds:
            p = Provider(cloud)
            for name in names:
                cm = CmDatabase()
                try:
                    vm = cm.find_name(name, "vm")[0]
                except IndexError:
                    Console.error(f"could not find vm {name}")
                    continue
                r = p.log(vm=vm)
                print(r)
        return ""

    elif arguments.wait:
        """
        vm wait [--cloud=CLOUD] [--interval=INTERVAL] [--timeout=TIMEOUT]
        """
        # why is this not vm
        clouds, names, command = Arguments.get_commands(
            "ssh", arguments, variables)
        # print (clouds)
        # print (names)
        # print (command)
        for cloud in clouds:
            p = Provider(cloud)
            for name in names:
                cm = CmDatabase()
                try:
                    vm = cm.find_name(name, "vm")[0]
                except IndexError:
                    Console.error(f"could not find vm {name}")
                    continue
                r = p.wait(vm=vm,
                           interval=arguments.interval,
                           timeout=arguments.timeout)
                if r:
                    Console.ok("Instance available for SSH")
                else:
                    Console.error(
                        f"Instance unavailable after timeout of "
                        f"{arguments.timeout}"
                    )
                # print(r)
        return ""

    elif arguments.put:
        """
        vm put SOURCE DESTINATION
        """
        clouds, names, command = Arguments.get_commands(
            "ssh", arguments, variables)
        key = variables['key']
        source = arguments['SOURCE']
        destination = arguments['DESTINATION']
        for cloud in clouds:
            p = Provider(name=cloud)
            cm = CmDatabase()
            for name in names:
                try:
                    vms = cm.find_name(name, "vm")
                except IndexError:
                    Console.error(f"could not find vm {name}")
                    return ""
                # VERBOSE(vm)
                for vm in vms:
                    try:
                        ip = vm['public_ips']
                    except:
                        try:
                            ip = p.get_public_ip(name=name)
                        except:
                            Console.error(
                                f"could not find a public ip for vm {name}",
                                traceflag=True)
                            return
                        # NOTE(review): the error+return below runs even
                        # when get_public_ip succeeded — it looks like
                        # duplicated error handling that aborts valid
                        # fallbacks; confirm intent before relying on it.
                        Console.error(
                            f"could not find a public ip for vm {name}",
                            traceflag=True)
                        return
                    # get the username
                    try:
                        # username not in vm...guessing
                        imagename = list(
                            cm.collection(cloud + '-image').find(
                                {'ImageId': vm['ImageId']}))[0]['name']
                        print(imagename)
                        user = Image.guess_username(image=imagename,
                                                    cloud=cloud)
                    except:
                        try:
                            user = vm['os_profile']['admin_username']
                        except:
                            Console.error(
                                f"could not find a valid username for "
                                f"{name}, try refreshing the image list",
                                traceflag=True)
                            return
                        # NOTE(review): same duplicated error+return
                        # pattern as above — runs even after the
                        # os_profile fallback succeeded.
                        Console.error(
                            f"could not find a valid username for {name}, "
                            f"try refreshing the image list"
                        )
                        return
                    cmd = f'scp -i {key} {source} {user}@{ip}:{destination}'
                    print(cmd)
                    os.system(cmd)
        return ""
def do_image(self, args, arguments):
    """
    ::

      Usage:
            image list [NAMES] [--cloud=CLOUD] [--refresh] [--output=OUTPUT] [--query=QUERY]

      Options:
          --output=OUTPUT  the output format [default: table]
          --cloud=CLOUD    the cloud name
          --refresh        live data taken from the cloud

      Description:
            image list
            image list --cloud=aws --refresh
            image list --output=csv
            image list 58c9552c-8d93-42c0-9dea-5f48d90a3188 --refresh
    """
    # NOTE: the docstring above is the docopt usage specification for this
    # command; do not edit it without also adjusting the argument handling.

    # normalize --query/--refresh/--cloud/--output into attribute access
    map_parameters(arguments, "query", "refresh", "cloud", "output")
    variables = Variables()
    arguments.output = Parameter.find("output", arguments, variables, "table")
    arguments.refresh = Parameter.find_bool("refresh", arguments, variables)

    # Case 1: an explicit --query was given -> query the cloud live
    if arguments.list and arguments["--query"]:
        names = []
        clouds, names = Arguments.get_cloud_and_names(
            "list", arguments, variables)
        for cloud in clouds:
            print(f"cloud {cloud} query={arguments.query}")
            provider = Provider(name=cloud)
            if arguments.query is not None:
                # SECURITY NOTE(review): eval on a user-supplied command-line
                # string executes arbitrary Python; consider ast.literal_eval.
                query = eval(arguments.query)
                images = provider.images(**query)
            else:
                images = provider.images()
            provider.Print(images, output=arguments.output, kind="image")
        return ""

    # Case 2: --refresh -> fetch the image list live from each cloud
    if arguments.list and arguments.refresh:
        names = []
        clouds, names = Arguments.get_cloud_and_names(
            "list", arguments, variables)
        for cloud in clouds:
            print(f"cloud {cloud}")
            provider = Provider(name=cloud)
            images = provider.images()
            provider.Print(images, output=arguments.output, kind="image")
        return ""

    # Case 3: plain list -> read cached image records from the database
    elif arguments.list:
        clouds, names = Arguments.get_cloud_and_names(
            "list", arguments, variables)
        print(clouds)
        print("find images")
        try:
            for cloud in clouds:
                print(f"List {cloud} images")
                provider = Provider(name=cloud)
                db = CmDatabase()
                images = db.find(collection=f"{cloud}-image")
                provider.Print(images, output=arguments.output, kind="image")
        except Exception as e:
            VERBOSE(e)
        return ""
class Provider(VolumeABC):
    """Volume provider for multipass.

    A "volume" here is a host directory (under ``default.path``) that is
    mounted into multipass vms with ``multipass mount``.  Volume metadata
    (state, tags, attachments) is tracked in the cloudmesh database.
    """

    kind = "volume"

    # yaml template inserted into ~/.cloudmesh/cloudmesh.yaml for this provider
    sample = """
    cloudmesh:
      volume:
        {name}:
          cm:
            active: '1'
            heading: multipass
            host: TBD
            kind: multipass
            version: TBD
            service: volume
          default:
            path: /Volumes/multipass
    """

    # column layout used by Provider.Print for "volume" records
    output = {
        "volume": {
            "sort_keys": ["cm.name"],
            "order": ["cm.name",
                      "cm.cloud",
                      "cm.kind",
                      "State",
                      "path",
                      'machine_path',
                      "AttachedToVm",
                      "tags",
                      "time"],
            "header": ["Name",
                       "Cloud",
                       "Kind",
                       "State",
                       "Path",
                       'Machine Path',
                       "AttachedToVm",
                       "Tags",
                       "Update Time"]
        }
    }

    def generate_volume_info(self, NAME, path):
        """
        generate volume info dict. info['AttachedToVm'] is a list of vm names
        where the volume is attached to. (volume can attach to multiple vm
        and vm can have multiple attachments)
        info['machine_path'] is the volume path in vm
        info['time"] is the created time, will be updated as updated time

        :param NAME: volume name
        :param path: volume path
        :return: dict
        """
        info = {
            'tags': [],
            'name': NAME,
            'path': path,
            'AttachedToVm': [],
            'State': 'available',
            'machine_path': None,
            'time': datetime.datetime.now()
        }
        return info

    def update_volume_after_attached_to_vm(self, info, vms):
        """
        Update volume info after attached to a vm.
        info['AttachedToVm'] is a list of vm names where the volume is
        attached to.
        info['machine_path'] is the volume path in vm
        info['time"] is the updated as updated time

        :param info: volume info got from MongoDB database
        :param vms: attached to vms
        :return: list of one dict
        """
        path = info[0]['path']
        path_list = path.split(sep='/')
        # rebuild the host path as seen inside the vm: multipass mounts
        # host dirs under ~/Home/...; drops the first 3 path components
        # (assumes a path like /Users/<user>/... -- TODO confirm)
        machine_path_list = ["~", "Home"]
        machine_path_list.extend(path_list[3:])
        info[0]['machine_path'] = "/".join(machine_path_list)
        info[0]['AttachedToVm'] = vms
        info[0]['State'] = 'in-use'
        info[0]['time'] = datetime.datetime.now()
        return info

    def update_volume_after_detach(self, info, vms):
        """
        update volume info after detaching from a vm
        info['AttachedToVm'] is a list of vm names where the volume is
        attached to.
        info['time"] is the updated time

        :param info: volume info
        :param vms: attached to vms
        :return: list of one dict
        """
        info[0]['AttachedToVm'] = vms
        # only when no vm holds the volume anymore does it become available
        if len(vms) == 0:
            info[0]['machine_path'] = None
            info[0]['State'] = 'available'
        info[0]['time'] = datetime.datetime.now()
        return info

    def update_volume_tag(self, info, key, value):
        """
        Update volume tag. Tags is a key-value pair, with key as tag name and
        value as tag value, tag = {key: value}. A volume can have multipale
        tags. If given duplicated tag name, update the value to the current
        tag value.

        :param value: value
        :param key: key
        :param info: volume info
        :return: list of one dict
        """
        keys = []
        for tag in info[0]['tags']:
            if key == list(tag.keys())[0]:
                if len(value) == 0:
                    # empty value means: remove the tag entirely
                    # NOTE(review): removing from the list while iterating it
                    # can skip the following element -- confirm intended
                    info[0]['tags'].remove(tag)
                    keys.append(list(tag.keys())[0])
                else:
                    tag.update({key: value})
                    keys.append(list(tag.keys())[0])
        if key not in keys:
            tag = {key: value}
            info[0]['tags'].append(tag)
        info[0]['time'] = datetime.datetime.now()
        return info

    def __init__(self, name):
        """
        Initialize provider. set cloudtype to "multipass", get the default
        dict, create a cloudmesh database object.

        :param name: name of cloud
        """
        self.cloud = name
        self.cloudtype = "multipass"
        config = Config()
        self.default = config[f"cloudmesh.volume.{self.cloud}.default"]
        self.cm = CmDatabase()

    def update_dict(self, elements, kind=None):
        """
        converts the dict into a list.

        :param elements: the list of original dicts. If elements is a single
                         dict a list with a single element is returned.
        :param kind: "multipass"
        :return: The list with the modified dicts
        """
        if elements is None:
            return None
        d = []
        for element in elements:
            # attach the cloudmesh "cm" bookkeeping sub-dict to each record
            if "cm" not in element.keys():
                element['cm'] = {}
            element["cm"].update({
                "kind": "volume",
                "cloud": self.cloud,
                "name": element['name'],
            })
            d.append(element)
        return d

    def create(self, **kwargs):
        """
        This function create a new volume. Default parameters from
        self.default, such as: path="/Users/username/multipass".
        Note: Windows users should also use "/" in file path.

        :param NAME (string): the name of volume
        :param path (string): path of volume
        :return: dict
        """
        # fill in missing or None kwargs from the provider defaults
        for key in self.default.keys():
            if key not in kwargs.keys():
                kwargs[key] = self.default[key]
            elif kwargs[key] is None:
                kwargs[key] = self.default[key]
        name = kwargs['NAME']
        path = Path(kwargs['path'])
        new_path = Path(f'{path}/{name}')
        # a volume is simply a directory on the host
        result = os.system(f"mkdir {new_path}")
        if result == 0:
            result = self.generate_volume_info(NAME=name, path=kwargs['path'])
        # NOTE(review): if mkdir fails, the nonzero exit code (an int) is
        # passed to update_dict and will raise -- confirm intended
        result = self.update_dict([result])
        return result

    def delete(self, name):
        """
        Delete volumes.
        If name is not given, delete the most recent volume.

        :param name: volume name
        :return:
        """
        result = self.cm.find_name(name)
        path = result[0]['path']
        delete_path = Path(f'{path}/{name}')
        try:
            # NOTE(review): os.system does not raise on a failing rmdir, it
            # returns a nonzero exit code, so this except is unlikely to fire
            os.system(f"rmdir {delete_path}")
            result[0]['State'] = 'deleted'
            result = self.update_dict(result)
        except:
            Console.error("volume is either not empty or not exist")
        return result

    def list(self, **kwargs):
        """
        This function list all volumes as following:
        If NAME (volume name) is specified, it will print out info of NAME.
        If NAME (volume name) is not specified, it will print out info of all
          volumes under current cloud.
        If vm is specified, it will print out all the volumes attached to vm.
        If region(path) is specified, it will print out all the volumes in
          that region. i.e. /Users/username/multipass

        :param NAME: name of volume
        :param vm: name of vm
        :param region: for multipass, it is the same with "path"
        :return: dict
        """
        if kwargs:
            result = self.cm.find(cloud='multipass', kind='volume')
            # NOTE(review): each matching key overwrites the previous result,
            # so when several filters are given only the last one examined
            # wins (dict iteration order) -- confirm intended
            for key in kwargs:
                if key == 'NAME' and kwargs['NAME']:
                    result = self.cm.find_name(name=kwargs['NAME'])
                elif key == 'NAMES' and kwargs['NAMES']:
                    result = self.cm.find_names(names=kwargs['NAMES'])
                elif key == 'vm' and kwargs['vm']:
                    result = self.cm.find(collection=f"{self.cloud}-volume",
                                          query={'AttachedToVm': kwargs['vm']})
                elif key == 'region' and kwargs['region']:
                    result = self.cm.find(collection=f"{self.cloud}-volume",
                                          query={'path': kwargs['region']})
        else:
            result = self.cm.find(cloud='multipass', kind='volume')
        return result

    def _get_vm_status(self, name=None) -> dict:
        """
        Get vm status.

        :param name (string): vm name
        :return: dict
        """
        dict_result = {}
        result = Shell.run(f"multipass info {name} --format=json")
        if f'instance "{name}" does not exist' in result:
            dict_result = {
                'name': name,
                'status': "instance does not exist"
            }
        else:
            result = json.loads(result)
            dict_result = {
                'name': name,
                'status': result["info"][name]['State']
            }
        return dict_result

    def attach(self, names, vm):
        """
        This function attach one or more volumes to vm. It returns info of
        updated volume. The updated dict with "AttachedToVm" showing
        the name of vm where the volume attached to.

        :param names (string): names of volumes
        :param vm (string): name of vm
        :return: dict
        """
        results = []
        for name in names:
            volume_info = self.cm.find_name(name)
            if volume_info and volume_info[0]['State'] != "deleted":
                vms = volume_info[0]['AttachedToVm']
                path = volume_info[0]['path']
                if vm in vms:
                    Console.error(f"{name} already attached to {vm}")
                else:
                    result = self.mount(path=f"{path}/{name}", vm=vm)
                    mounts = result['mounts']
                    # mount succeeded iff the path shows up in the vm mounts
                    if f"{path}/{name}" in mounts.keys():
                        vms.append(vm)
                        result = self.update_volume_after_attached_to_vm(
                            info=volume_info, vms=vms)
                        results.append(result)
            else:
                Console.error(
                    "volume is not existed or volume had been deleted")
        # NOTE(review): only the first volume's info is returned even when
        # several names were attached -- confirm intended
        return results[0]

    def mount(self, path=None, vm=None):
        """
        mount volume to vm

        :param path (string): path of volume
        :param vm (string): name of vm
        :return: dict
        """
        os.system(f"multipass mount {path} {vm}")
        dict_result = self._get_mount_status(vm=vm)
        return dict_result

    def _get_mount_status(self, vm=None):
        """
        Get mount status of vm

        :param vm (string): name of vm
        :return:
        """
        result = Shell.run(f"multipass info {vm} --format=json")
        if f'instance "{vm}" does not exist' in result:
            dict_result = {
                'name': vm,
                'status': "instance does not exist"
            }
        else:
            result = json.loads(result)
            # NOTE(review): uses 'state' here but _get_vm_status uses 'State';
            # one of the two key spellings is likely wrong -- verify against
            # the multipass json output
            dict_result = {
                'name': vm,
                'status': result["info"][vm]['state'],
                'mounts': result["info"][vm]['mounts']
            }
        return dict_result

    def unmount(self, path=None, vm=None):
        """
        Unmount volume from vm

        :param path (string): path of volume
        :param vm (string): name of vm
        :return:
        """
        os.system(f"multipass unmount {vm}:{path}")
        dict_result = self._get_mount_status(vm=vm)
        return dict_result

    def detach(self, name):
        """
        This function detach a volume from vm. It returns the info of
        the updated volume. The vm under "AttachedToVm" will be removed if
        volume is successfully detached. Will detach volume from all vms.

        :param name: name of volume to be detached
        :return: dict
        """
        volume_info = self.cm.find_name(name)
        if volume_info and volume_info[0]['State'] != "deleted":
            vms = volume_info[0]['AttachedToVm']
            path = volume_info[0]['path']
            if len(vms) == 0:
                Console.error(f"{name} is not attached to any vm")
            else:
                removed = []
                for vm in vms:
                    result = self.unmount(path=f"{path}/{name}", vm=vm)
                    mounts = result['mounts']
                    if f"{path}/{name}" not in mounts.keys():
                        removed.append(vm)
                # remove in a second pass to avoid mutating vms while
                # iterating it
                for vm in removed:
                    vms.remove(vm)
                result = self.update_volume_after_detach(volume_info, vms)
                return result[0]
        else:
            Console.error("volume does not exist or volume had been deleted")

    def add_tag(self, **kwargs):
        """
        This function add tag to a volume.
        If volume name is not specified, then tag will be added to the last
        volume.

        :param NAME: name of volume
        :param key: name of tag
        :param value: value of tag
        :return: dict
        """
        key = kwargs['key']
        value = kwargs['value']
        volume_info = self.cm.find_name(name=kwargs['NAME'])
        volume_info = self.update_volume_tag(info=volume_info,
                                             key=key,
                                             value=value)
        return volume_info[0]

    def status(self, name=None):
        """
        This function get volume status, such as "in-use", "available",
        "deleted"

        :param name: volume name
        :return: dict
        """
        volume_info = self.cm.find_name(name)
        if volume_info:
            # NOTE(review): 'status' is assigned but never used; the full
            # record is returned instead
            status = volume_info[0]['State']
        else:
            Console.error("volume is not existed")
        return volume_info

    def migrate(self, **kwargs):
        """
        Migrate volume from one vm to another vm. "region" is volume path.
        If vm and volume are in the same region (path), migrate within the
        same region (path). If vm and volume are in different regions,
        migrate between two regions (path)

        :param NAME (string): the volume name
        :param vm (string): the vm name
        :return: dict
        """
        volume_name = kwargs['NAME']
        vm = kwargs['vm']
        volume_info = self.cm.find_name(name=volume_name)
        volume_attached_vm = volume_info[0]['AttachedToVm']
        vm_info = Shell.run(f"multipass info {vm} --format=json")
        vm_info = json.loads(vm_info)
        vm_status = vm_info["info"][vm]['state']
        if vm_status == 'running':
            param = {'NAME': volume_name}
            # NOTE(review): detach/attach take positional 'name'/'names'
            # parameters, not 'NAME'; these keyword calls look like they
            # would raise TypeError -- confirm against the ABC signatures
            self.detach(**param)
            self.attach(**param, vm=vm)
        try:
            # NOTE(review): volume_attached_vm aliases the very list being
            # mutated here, so elements are skipped while iterating
            for old_vm in volume_attached_vm:
                volume_info[0]['AttachedToVm'].remove(old_vm)
        except:
            pass
        volume_info[0]['AttachedToVm'].append(vm)
        return volume_info

    def sync(self, **kwargs):
        """
        sync contents of one volume to another volume

        :param names (list): list of volume names
        :return: list of dict
        """
        volume_1 = kwargs['NAMES'][0]
        volume_2 = kwargs['NAMES'][1]
        path1 = f"{self.cm.find_name(name=volume_1)[0]['path']}/{volume_1}/"
        path2 = f"{self.cm.find_name(name=volume_2)[0]['path']}/{volume_2}/"
        # copy contents of volume_2 into volume_1 (rsync source is path2)
        os.system(f"rsync -avzh {path2} {path1}")
        # remember the sync relation as a tag on volume_1
        kwargs1 = {'NAME': volume_1, 'key': "sync_with", 'value': volume_2}
        volume_info1 = self.add_tag(**kwargs1)
        result = [volume_info1]
        return result
def __init__(self, cloud="local", kind="keygroup"):
    """
    Create a database-backed accessor for a given cloud and record kind.

    The records are stored in the collection named "<cloud>-<kind>".

    :param cloud: name of the cloud, defaults to "local"
    :param kind: kind of the stored records, defaults to "keygroup"
    """
    self.cloud = cloud
    self.kind = kind
    self.cm = CmDatabase()
    self.collection = f"{cloud}-{kind}"
def ssh(self, vm=None, command=None):
    """Open an interactive ssh session to, or run a command on, an AWS vm.

    Looks up the private key for the vm in the cloudmesh database (asking
    the user to pick one interactively when the vm record has no usable
    KeyName), builds an ssh command line, and either hands the terminal
    over to ssh (no command) or captures the command's stdout.

    :param vm: vm record dict; must contain 'public_ips' and ideally 'KeyName'
    :param command: shell command to run remotely; None for interactive login
    :return: the remote command's stdout as str, or None
    """
    def key_selector(keys):
        '''
        This is a helper method for ssh key selection
        THIS IS JUST A SAFETY MEASURE, PLEASE DON'T MIND IT
        :param keys:
        :return:
        '''
        tmp_keys = keys[:]
        # indices = range(1,len(tmp_keys)+1)
        # number the keys 1..n for display and selection
        for key_idx, key in enumerate(keys):
            key['idx'] = key_idx + 1
        print(Printer.flatwrite(tmp_keys,
                                sort_keys=["idx"],
                                order=['idx', 'KeyName', 'KeyFingerprint'],
                                header=['Index', 'Key Name',
                                        "Key Fingerprint"],
                                output="table",
                                humanize=None))
        # Console.msg("Please select one of the AWS key indices from the table above: ")
        picked = 0
        # loop until the user enters a valid 1-based index
        while picked < 1 or picked > len(keys):
            try:
                picked = int(input(
                    "Please select one of the AWS key indices from the table above: "))
            except ValueError:
                pass
        return keys[picked - 1]

    cm = CmDatabase()
    ip = vm['public_ips']
    try:
        key_name = vm['KeyName']
        keys = cm.find_all_by_name(name=key_name, kind="key")
        # pick the first stored key that carries a private-key location
        for k in keys:
            if 'location' in k.keys():
                if 'private' in k['location'].keys():
                    key = k['location']['private']
                    break
    except (KeyError, IndexError):
        # vm record has no KeyName: fall back to an interactive selection
        # among all AWS keys in the database
        aws_keys = cm.find(kind='key', cloud='aws')
        if len(aws_keys) == 0:
            Console.error(
                f"Could not find a key for the AWS instance '{vm['name']}'")
            Console.error(
                f"Use `cms help key` to learn how to add and upload a key for AWS")
            return
        aws_key = key_selector(aws_keys)
        for sshkey in cm.find_all_by_name(name=aws_key['KeyName'],
                                          kind="key"):
            if "location" in sshkey.keys():
                key = sshkey['location']['private']
                break
    # NOTE(review): 'key' is unbound if no stored key has a private location;
    # the f-string below would then raise NameError -- confirm intended
    user = "******"  # needs to be set on creation.
    if command is None:
        command = ""
    # NOTE(review): user is never None here (placeholder string above), so
    # the location is always "user@ip"
    if user is None:
        location = ip
    else:
        location = user + '@' + ip
    cmd = "ssh " \
          "-o StrictHostKeyChecking=no " \
          "-o UserKnownHostsFile=/dev/null " \
          f"-i {key} {location} {command}"
    cmd = cmd.strip()
    print(cmd)
    # VERBOSE(cmd)
    if command == "":
        # interactive login: hand the terminal to ssh via os.system
        if platform.lower() == 'win32':
            # on 32-bit Python under 64-bit Windows the system32 dir is
            # redirected; temporarily disable that so ssh.exe is found
            class disable_file_system_redirection:
                _disable = ctypes.windll.kernel32.Wow64DisableWow64FsRedirection
                _revert = ctypes.windll.kernel32.Wow64RevertWow64FsRedirection

                def __enter__(self):
                    self.old_value = ctypes.c_long()
                    self.success = self._disable(
                        ctypes.byref(self.old_value))

                def __exit__(self, type, value, traceback):
                    if self.success:
                        self._revert(self.old_value)

            with disable_file_system_redirection():
                os.system(cmd)
        else:
            os.system(cmd)
    else:
        # run the remote command and capture its output
        if platform.lower() == 'win32':
            class disable_file_system_redirection:
                _disable = ctypes.windll.kernel32.Wow64DisableWow64FsRedirection
                _revert = ctypes.windll.kernel32.Wow64RevertWow64FsRedirection

                def __enter__(self):
                    self.old_value = ctypes.c_long()
                    self.success = self._disable(
                        ctypes.byref(self.old_value))

                def __exit__(self, type, value, traceback):
                    if self.success:
                        self._revert(self.old_value)

            with disable_file_system_redirection():
                ssh = subprocess.Popen(cmd,
                                       shell=True,
                                       stdout=subprocess.PIPE,
                                       stderr=subprocess.PIPE)
        else:
            ssh = subprocess.Popen(cmd,
                                   shell=True,
                                   stdout=subprocess.PIPE,
                                   stderr=subprocess.PIPE)
        result = ssh.stdout.read().decode("utf-8")
        # empty stdout is treated as failure: report stderr instead
        if not result:
            error = ssh.stderr.readlines()
            print("ERROR: %s" % error)
        else:
            return result
def test_remove_collection(self):
    """Clear the debug-file collection while timing the operation."""
    HEADING()
    database = CmDatabase()
    Benchmark.Start()
    cleared = database.clear(collection="debug-file")  # noqa: F841
    Benchmark.Stop()
def __init__(self, collection="cloudmesh", replace=False):
    """
    Set up the database helper.

    :param collection: name of the target collection, defaults to "cloudmesh"
    :param replace: whether existing entries should be replaced
    """
    self.collection = collection
    self.replace = replace
    self.name = Name()
    self.database = CmDatabase()
def _create(self, **arguments):
    """Boot a single vm and track its record in the cloudmesh database.

    Writes a 'booting' record first, calls the provider to create the vm
    (or short-circuits on dryrun), then updates the record with provider
    data, the measured creation time, and metadata pushed to the server.

    :param arguments: name, group, image, size, and optionally
                      label, dryrun, metadata
    :return: the updated database record of the vm
    """
    arguments = dotdict(arguments)
    r = []  # NOTE(review): unused local
    StopWatch.start(f"create vm {arguments.name}")
    label = arguments.get("label") or arguments.name
    # cloudmesh bookkeeping sub-dict stored with the vm record
    cm = {
        'kind': "vm",
        'name': arguments.name,
        'label': label,
        'group': arguments.group,
        'cloud': self.cloudname(),
        'status': 'booting'
    }
    entry = {}
    entry.update(cm=cm, name=arguments.name)
    # register the vm as 'booting' before the (slow) provider call
    result = CmDatabase.UPDATE(entry, progress=False)[0]
    data = {}
    dryrun = False  # NOTE(review): assigned but not used after this block
    if "dryrun" in arguments:
        dryrun = arguments.dryrun
        data = {"dryrun": True}
    else:
        arguments.timeout = 360
        data = self.p.create(**arguments)
    # print('entry')
    # pprint(entry)
    # print('data')
    # pprint(data)
    entry.update(data)
    StopWatch.stop(f"create vm {arguments.name}")
    # record the elapsed creation time (seconds, 2 decimals) in cm
    t = format(StopWatch.get(f"create vm {arguments.name}"), '.2f')
    cm['creation'] = t
    entry.update({'cm': cm})
    if arguments.metadata:
        entry.update({"metadata": arguments.metadata})
    else:
        entry.update({"metadata": str({
            "cm": cm,
            "image": arguments.image,
            "size": arguments.size
        })})
    cm['status'] = 'available'
    try:
        #
        # due to metadata limitation in openstack do not add the creation time
        #
        if 'created' in cm:
            del cm['created']
        self.p.set_server_metadata(arguments.name, cm)
        #self.set_server_metadata(arguments.name, cm)
    except Exception as e:
        # metadata push failure is reported but does not abort the create
        Console.error("Openstack reported the following error")
        Console.error(79 * "-")
        print(e)
        Console.error(79 * "-")
    # persist the final record and return it
    result = CmDatabase.UPDATE(entry, progress=False)[0]
    return result
class SlurmCluster(object):
    """Create, submit, monitor, and fetch results of batch jobs on remote
    Slurm clusters via ssh/scp.

    NOTE(review): several methods (clean_remote, connection_test, remove,
    fetch, run, set_param) read ``self.batch_config``, but its assignment in
    ``__init__`` is commented out -- those methods would currently raise
    AttributeError; confirm whether batch_config is set elsewhere.
    """

    def __init__(self):
        """
        Initializes the SlurmCluster class
        """
        # current_path = os.path.dirname(os.path.realpath(__file__))
        # self.workspace = os.path.join(current_path, "batch_workspace/slurm_batch.yaml")
        # if not os.path.exists(os.path.dirname(self.workspace)):
        #     os.makedirs(os.path.dirname(self.workspace))
        self.cm_config = Config()
        # self.batch_config = GenericConfig(self.workspace)
        self.all_jobIDs = []
        self.slurm_cluster = {}
        # template of a job record; fully populated by create()
        self.job = {
            'job_name': None,
            'cluster_name': None,
            'script_path': None,
            'executable_path': None,
            'destination': None,
            'source': None,
            'experiment_name': None,
            'companion_file': None,
        }
        self.database = CmDatabase()

    @staticmethod
    def job_specification():
        """Return a hard-coded sample job record (debug/fixture data)."""
        # self.job_validator()
        data = {
            "cm": {
                "cloud": "karst_debug",
                "kind": "batch-job",
                "name": "job012",
            },
            "batch": {
                "source": "~/.cloudmesh/batch/dir",
                "destination": "~/.cloudmesh/dir/",
                "status": "running"
            }
        }
        return data

    # @DatabaseUpdate
    # def status(self,job_name):
    #     return {
    #         "cloud": self.job.cluster_name,
    #
    #     }

    # noinspection PyDictCreation
    @DatabaseUpdate()
    def create(self,
               job_name,
               cluster_name,
               script_path,
               executable_path,
               destination,
               source,
               experiment_name,
               companion_file):
        """
        This method is used to create a job for running on remote slurm
        cluster

        :param job_name: name of the job to create
        :param cluster_name: slurm cluster on which the job is gonna run
        :param script_path: path of the slurm script
        :param executable_path: path of the executable that is going to be
                                run on the cluster via slurm script
        :param destination: path in the remotes on which the scripts is
                            gonna be copied to and ran from
        :param source: local path to which the results are gonna be copied
        :param experiment_name: experiment name and suffix of the filenames
                                in the job
        :param companion_file: path of the file that has to be passed to the
                               file as an argument if any
        :param overwrite: if the job already exists, this flag overwrites
                          the previous job with the same name
        :return:
        """
        # if self.batch_config.get('job-metadata') is not None and job_name in \
        #         list(self.batch_config.get('job-metadata').keys()) and overwrite is False:
        #     raise RuntimeError("The job {} exists in the configuration file, if you want to overwrite the job, \
        #         use --overwrite argument.".format(job_name))
        # tmp_cluster = {cluster_name: dict(slurm_cluster)}
        # slurm_cluster = self.cm_config.get('cloudmesh').get('cluster')[cluster_name]
        # self.batch_config.deep_set(['slurm_cluster'], tmp_cluster)

        # derive a unique id from job name + experiment name
        name = Name(order=["name", "experiment_name"],
                    name=job_name,
                    experiment_name=experiment_name)
        uid = name.id(name=job_name, experiment_name=experiment_name)
        print(uid)
        # return
        # TODO: remove cloud and kind after fixing CmDatabased update
        self.job = {
            'uid': uid,
            "cloud": cluster_name,
            "kind": "batch-job",
            "name": job_name,
            "cm": {
                "cloud": cluster_name,
                "kind": "batch-job",
                "name": job_name,
                "cluster": self.cm_config.get('cloudmesh').get('cluster')[cluster_name]
            },
            "batch": {
                "status": "pending",
                'script_path': script_path.as_posix(),
                'executable_path': executable_path.as_posix(),
                'destination': destination.as_posix(),
                'source': source.as_posix(),
                'experiment_name': experiment_name,
                'companion_file': companion_file.as_posix()
            }
        }
        # self.job = {
        #     "cloud": cluster_name,
        #     "kind": "batch-job",
        #     "name": job_name,
        #     "cluster": self.cm_config.get('cloudmesh').get('cluster')[
        #         cluster_name],
        #     "status": "pending",
        #     'script_path': script_path.as_posix(),
        #     'executable_path': executable_path.as_posix(),
        #     'destination': destination.as_posix(),
        #     'source': source.as_posix(),
        #     'experiment_name': experiment_name,
        #     'companion_file': companion_file.as_posix()
        # }
        # job['destination'] = os.path.join(job['remote_path'], job['script_name'])
        # job['remote_slurm_script_path'] = os.path.join(job['remote_path'], job['slurm_script_name'])
        # job_metadata = {job_name: job}
        # self.batch_config.deep_set(['job-metadata'], job_metadata)
        # data = self.job_specification()

        # refuse to create a duplicate job record
        if self.database.exists(self.job)[0]:
            Console.error("Job already exists")
            return
        # returned list is persisted by the @DatabaseUpdate decorator
        return [self.job]

    @staticmethod
    def _execute_in_parallel(func_args):
        """
        This is a method used for running methods in parallel

        :param func_args: (instance, method-name, *args) tuple
        :return: result of the called method
        """
        target_class = func_args[0]
        method_to_call = getattr(target_class, func_args[1])
        args = list(func_args[2:])
        return method_to_call(*args)

    def _fetch_results_in_parallel(self, job_metadata, job_id, all_job_ids):
        """
        This method is used to fetch the results from remote nodes.

        :param job_metadata: the dictionary containing the information about
                             the previously submitted job
        :param job_id: the tuple containing destination node, destination
                       pid and destination node index when the job was
                       submitted
        :param all_job_ids: shared list of still-pending job ids
        :return:
        """
        dest_node_info = self.slurm_cluster
        path = path_expand(dest_node_info['credentials']['sshconfigpath'])
        dest_job_id = job_id
        ssh_caller = lambda *x: self._ssh(dest_node_info['name'], path, *x)
        scp_caller = lambda *x: self._scp(dest_node_info['name'], path, *x)
        #
        # use the qstat from cloudmesh, we have a whole library for that
        #
        ps_output = ssh_caller("qstat -u $USER | grep %s" % job_id)
        # job finished when it no longer shows in qstat or is in 'C' state
        if len(ps_output) == 0 or ' c ' in ps_output.lower():
            if not os.path.exists(job_metadata['local_path']):
                os.makedirs(job_metadata['local_path'])
            # TODO: REPLACE WITH .format
            # copy the whole remote job directory back locally
            scp_caller('-r',
                       '%s:%s' % (dest_node_info['name'],
                                  job_metadata['remote_path']),
                       os.path.join(job_metadata['local_path'], ''))
            # remove the scripts that were shipped with the job from the
            # fetched results
            os.remove(os.path.join(
                job_metadata['local_path'],
                os.path.basename(
                    os.path.normpath(job_metadata['remote_path'])),
                job_metadata['script_name']))
            os.remove(os.path.join(
                job_metadata['local_path'],
                os.path.basename(
                    os.path.normpath(job_metadata['remote_path'])),
                job_metadata['slurm_script_name']))
            if job_metadata['input_type'] == 'params+file':
                os.remove(os.path.join(
                    job_metadata['local_path'],
                    os.path.basename(
                        os.path.normpath(job_metadata['remote_path'])),
                    job_metadata['argfile_name']))
            all_job_ids.remove(dest_job_id)
            # TODO: REPLACE WITH .format
            print("Results collected from %s for jobID %s" %
                  (dest_node_info['name'], dest_job_id))

    @staticmethod
    def _ssh(hostname, sshconfigpath, *args):
        """
        This method is used to create remove ssh connections

        :param hostname: hostname
        :param sshconfigpath: path to sshconfig for connecting to remote node
        :param args: the argument to be submitted via ssh
        :return:
        """
        hide_errors_flag = False
        # a trailing boolean argument means: swallow remote errors
        if type(args[-1]) == bool:
            hide_errors_flag = True
            args = args[:-1]
        #
        # should we use cloudmesh.common.Shell
        # shoudl we have a better version of that
        #
        # (stdout, stderr) = SimpleShell(...)
        #
        ssh = subprocess.Popen(["ssh", hostname, '-F', sshconfigpath, *args],
                               stdout=subprocess.PIPE,
                               stderr=subprocess.PIPE)
        # NOTE(review): only the first line of stdout is read
        result = ssh.stdout.readline()
        if not result:
            error = ssh.stderr.readlines()
            if len(error) > 0 and hide_errors_flag == False:
                # TODO: REPLACE WITH .format
                print("ERROR in host %s: %s" % (hostname, error))
            return []
        else:
            try:
                return ''.join([chr(x) for x in result])
            except AttributeError:
                return [result.decode('utf-8').strip('\n')]

    @staticmethod
    def _scp(hostname, sshconfigpath, *args):
        """
        This method is used for scp from and to remote

        :param hostname: hostname
        :param sshconfigpath: ssh config file
        :param args:arguments for using while copying
        :return:
        """
        ssh = subprocess.Popen(["scp", '-F', sshconfigpath, *args],
                               stdout=subprocess.PIPE,
                               stderr=subprocess.PIPE)
        middle_result = ssh.stdout.readlines()
        if not middle_result:
            error = ssh.stderr.readlines()
            if len(error) > 0:
                print("ERROR in host %s: %s" % (hostname, error))

    @staticmethod
    def add_suffix_to_path(path, suffix):
        """
        This method is used to add suffix to a path

        :param path: path
        :param suffix: suffix
        :return: path with the suffix inserted before the file extension
        """
        dir_path = os.path.dirname(path)
        full_filename = os.path.basename(path)
        filename, fileextention = os.path.splitext(full_filename)
        full_filename_new = filename + suffix + fileextention
        new_path = os.path.join(dir_path, full_filename_new)
        return new_path

    def clean_remote(self, job_name):
        """
        This method is used to spawn processes for cleaning the remote nodes

        :param job_name: name of previously submitted job for which the
                         nodes are going to be cleaned
        :return:
        """
        job_metadata = self.batch_config.get('job-metadata')[job_name]
        target_cluster_info = self.batch_config.get('slurm_cluster')[job_metadata['slurm_cluster_name']]
        remote_path = job_metadata['remote_path']
        # trailing True -> hide remote errors (see _ssh)
        ssh_caller = lambda *x: self._ssh(target_cluster_info['name'],
                                          os.path.expanduser(
                                              target_cluster_info['credentials'] \
                                                  ['sshconfigpath']), *x, True)
        ssh_caller('rm -rf {}'.format(remote_path))
        # verify the directory is gone by listing it
        if len(ssh_caller('ls {}'.format(remote_path))) == 0:
            print("Job {} cleaned successfully.".format(job_name))
        else:
            print("Error: Job {} could not be cleaned.".format(job_name))

    def connection_test(self, slurm_cluster_name):
        """
        This method is used for testing the connection to the slurm cluster
        connection node

        :param slurm_cluster_name: name of the slurm cluster which is going
                                   to be tested
        :return:
        """
        # NOTE(review): debug short-circuit -- the code below the 'return'
        # is unreachable
        r = self.database.find_name("job_20190327_22265228")
        print(r)
        return
        target_node_info = self.batch_config.get('slurm_cluster')[slurm_cluster_name]
        ssh_caller = lambda *x: self._ssh(target_node_info['name'],
                                          os.path.expanduser(
                                              target_node_info['credentials'] \
                                                  ['sshconfigpath']), *x)
        if len(ssh_caller('uname -a')) > 0:
            print("Slurm Cluster {} is accessible.".format(
                target_node_info['name']))
        else:
            print("Error: Slurm Cluster {} cannot be accessed.".format(
                target_node_info['name']))

    def remove(self, target, key):
        """
        Used to remove virtual clusters and runtime configs

        :param target: type of entity to be removed
        :param key: keyname of the entity to be removed
        :return:
        """
        if target == 'slurm-cluster':
            self.batch_config.remove(['slurm_cluster'], key)
            print("Slurm-cluster {} removed successfully.".format(key))
        elif target == 'job':
            self.batch_config.remove(['job-metadata'], key)
            print("Job {} removed successfully.".format(key))
        else:
            raise ValueError("Target to remove not found.")

    def fetch(self, job_name):
        """
        This method is used to fetch results from remote nodes

        :param job_name: the previously submitted job name
        :return:
        """
        job_metadata = self.batch_config.get('job-metadata')[job_name]
        self.slurm_cluster = self.batch_config.get('slurm_cluster')[job_metadata['slurm_cluster_name']]
        loaded_all_job_ids = [x for x in job_metadata['jobIDs']]
        # shared list so parallel workers can mark ids as collected
        all_job_ids = Manager().list()
        all_job_ids.extend(loaded_all_job_ids)
        pool = Pool(processes=1)
        print("collecting results")
        # poll until every job id has been fetched and removed from the list
        while len(all_job_ids) > 0:
            time.sleep(1)
            all_running_jobs = [(self, '_fetch_results_in_parallel',
                                 job_metadata, jobID, all_job_ids) for \
                                jobID in loaded_all_job_ids if
                                jobID in all_job_ids]
            pool.map(self._execute_in_parallel, all_running_jobs)
            print("waiting for other results if any...")
        print("All of the remote results collected.")

    '''
    @DatabaseUpdate
    def list(self, target, max_depth, current_depth=1, input_dict=None):
        """
        listing the target slurm clusters or job-metadata

        :param target: name of the virtual cluster to be listed
        :param max_depth: depth of information to be shown
        :param current_depth: current depth of printing information
        :param input_dict: used for recursion for depth of higher than 1
        :return:
        """
        if target == 'slurm-clusters' and input_dict is None:
            input_dict = self.batch_config.get('slurm_cluster')
        if target == 'jobs' and input_dict is None:
            input_dict = self.batch_config.get('job-metadata')
        elif input_dict is None:
            raise ValueError("Target of listing not found.")

        if max_depth >= current_depth:
            if type(input_dict) == dict:
                for key in input_dict:
                    key_to_print = key + ':' if max_depth >= current_depth else key
                    indent = current_depth if current_depth > 1 else current_depth - 1
                    print('\t' * indent, key_to_print)
                    if type(input_dict.get(key)) != dict:
                        print('\t' * (indent + 1), input_dict.get(key))
                    else:
                        for value in input_dict.get(key):
                            value_to_print = value + ':' if max_depth > current_depth else value
                            print('\t' * (indent + 1), value_to_print)
                            self.list(target, max_depth,
                                      input_dict=input_dict[key][value],
                                      current_depth=current_depth + 1)
            else:
                indent = current_depth if current_depth > 1 else current_depth - 1
                print('\t' * indent, input_dict)

        data = [{}, {}]
        return data
    '''

    def run(self, job_name):
        """
        This method is used to create a job, validate it and run it on
        remote nodes

        :param job_name: name of the job to create
        :return:
        """
        job_metadata = self.batch_config.get('job-metadata')[job_name]
        all_job_ids = Manager().list()
        cluster_name = job_metadata['slurm_cluster_name']
        slurm_cluster = self.batch_config.get('slurm_cluster').get(cluster_name)
        path = path_expand(slurm_cluster['credentials']['sshconfigpath'])
        ssh_caller = lambda *x: self._ssh(slurm_cluster['name'], path, *x)
        scp_caller = lambda *x: self._scp(slurm_cluster['name'], path, *x)
        # TODO replace with .format
        # create the remote job directory (errors hidden via trailing True)
        ssh_caller('cd %s && mkdir job%s' % (job_metadata['raw_remote_path'],
                                             job_metadata['suffix']), True)
        # ship the slurm script and the job script to the remote node
        scp_caller(job_metadata['slurm_script_path'],
                   '%s:%s' % (slurm_cluster['name'],
                              job_metadata['remote_slurm_script_path']))
        scp_caller(job_metadata['job_script_path'],
                   '%s:%s' % (slurm_cluster['name'],
                              job_metadata['remote_script_path']))
        ssh_caller('chmod +x', job_metadata['remote_script_path'])
        if job_metadata['input_type'].lower() == 'params+file':
            scp_caller(job_metadata['argfile_path'],
                       '%s:%s' % (slurm_cluster['name'],
                                  job_metadata['remote_path']))
        # submit and read back the queue id of the submitted job
        remote_job_id = ssh_caller(
            "cd %s && qsub %s && qstat -u $USER | tail -n 1 | awk '{print $1}'" %
            (job_metadata['remote_path'],
             job_metadata['remote_slurm_script_path']))
        remote_job_id = remote_job_id.strip('\n')
        all_job_ids.append(remote_job_id)
        print('Remote job ID: %s' % remote_job_id)
        self.batch_config.deep_set(['job-metadata', job_name, 'jobIDs'],
                                   [pid for pid in all_job_ids])

    def set_param(self, target, name, parameter, value):
        """
        Used to set a specific parameter in the configuration

        :param target: the entity type on which the parameter is going to be
                       set, e.g. runtime-config
        :param name: the entity name on which the parameter is going to be
                     set, e.g. test-config32
        :param parameter: name of the parameter to be set
        :param value: value of that parameter to be set
        :return:
        """
        # TODO: .format see if .format(**local) works
        if target == 'slurm-cluster':
            self.batch_config.deep_set(['slurm_cluster', name, parameter],
                                       value)
            print("slurm-cluster parameter {} set to {} successfully.".format(
                parameter, value))
        elif target == 'job-metadata':
            self.batch_config.deep_set(['job-metadata', name, parameter],
                                       value)
            print("Job-metadata parameter {} set to {} successfully.".format(
                parameter, value))
        else:
            raise ValueError("Target of variable set not found.")
def create(self, **kwargs): arguments = dotdict(kwargs) name = arguments.name cloud = arguments.cloud if name is None: name_generator = Name() vms = [str(name_generator)] else: vms = self.expand(name) # # Step 0, find the cloud # variables = Variables() if cloud is None: arguments.cloud = cloud = variables['cloud'] # Step 1. iterate through the names to see if they already exist in # the DB and fail if one of them already exists database = CmDatabase() defaults = Config()[f"cloudmesh.cloud.{cloud}.default"] duplicates = [] for vm in vms: query = {"name": vm} duplicates += database.find(collection=f'{cloud}-node', query=query) database.close_client() if len(duplicates) > 0: print( Printer.flatwrite(duplicates, order=['cm.name', 'cm.cloud'], header=['Name', 'Cloud'], output='table')) raise Exception("these vms already exists") # Step 2. identify the image and flavor from kwargs and if they do # not exist read them for that cloud from the yaml file if arguments.image is None: arguments.image = self.find_attribute('image', [variables, defaults]) if arguments.image is None: raise ValueError("image not specified") if arguments.group is None: arguments.group = self.find_attribute('group', [variables, defaults]) if arguments.group is None: arguments.group = "default" if arguments.size is None: arguments.size = self.find_attribute('size', [variables, defaults]) if arguments.size is None: raise ValueError("size not specified") # Step 3: use the create command to create the vms # created = self.loop(vms, self.p.create, **arguments) arguments['name'] = vms created = self.loop(self._create, **arguments) VERBOSE(created) self.list() return created
def benchmark(self): #get current cloud and create provider var_list = Variables(filename="~/.cloudmesh/var-data") cloud = var_list['cloud'] name = var_list['vm'] newProvider = Provider(name=cloud) #get vm cm = CmDatabase() try: vm = cm.find_name(name, "vm")[0] except IndexError: Console.error(f"could not find vm {name}") # get file path of the benchmark filepath = path.dirname(path.dirname( path.abspath(__file__))) + '/api/benchmark.py' filepath = filepath.replace('\\', '/') # prepare command to run the file vmcom = VmCommand() try: Console.msg('waiting for vm to be reachable...') Console.msg('wait') newProvider.wait(vm=vm) except: Console.msg('could not reach vm for benchmark') return try: Console.msg(f'moving benchmark file to vm...') Console.msg(f'put ' + filepath + ' /home/ubuntu') vmcom.do_vm('put ' + filepath + ' /home/ubuntu') except: Console.msg( f'could not ssh into vm, make sure one is running and reachable' ) return try: Console.msg(f'executing the benchmark...') Console.msg( 'ssh --command=\"chmod +x benchmark.py;./benchmark.py;rm benchmark.py;exit\"' ) benchtime = newProvider.ssh( vm=vm, command= "chmod +x benchmark.py;./benchmark.py;rm benchmark.py;exit") except: Console.msg( f'could not ssh into vm, make sure one is running and reachable' ) return print("successfully benchmarked") benchtime = float(benchtime.strip()) #add the benchmark, cloud, vm, and time to db benchdict = {} benchdict['cloud'] = cloud benchdict['name'] = name benchdict['ImageId'] = vm['ImageId'] benchdict['flavor'] = vm['InstanceType'] benchdict['region'] = vm['Placement']['AvailabilityZone'] benchdict['BenchmarkTime'] = benchtime benchdict['updated'] = str(datetime.utcnow()) benchdict["cm"] = { "kind": 'frugal-benchmark', "driver": cloud, "cloud": cloud, "name": name, "updated": str(datetime.utcnow()), } cm.update(benchdict, progress=True) return ""
def list(self, order='price', resultssize=25, refresh=False, printit=True, benchmark=False, cloud=None): clouds = ['aws', 'azure', 'gcp'] if cloud in clouds: clouds = [cloud] if benchmark: # get benchmarks cm = CmDatabase() benchmarks = [] for cloud in clouds: print("searching " + cloud) benchmarktemp = list( cm.collection(cloud + '-frugal-benchmark').find()) benchmarks = benchmarks + benchmarktemp print( Printer.write(benchmarks, order=[ 'cloud', 'name', 'region', 'ImageId', 'flavor', 'updated', 'BenchmarkTime' ])) return else: #check to make sure that order is either price, cores, or memory if order not in ['price', 'cores', 'memory']: Console.error( f'order argument must be price, cores, or memory') return printlist = [] if 'aws' in clouds: # get aws pricing info printlist = printlist + list( aws_frugal.get_aws_pricing(refresh=refresh).find()) if 'gcp' in clouds: # get gcp pricing info printlist = printlist + list( gcp_frugal.get_google_pricing(refresh=refresh).find()) if 'azure' in clouds: # get azure pricing info printlist = printlist + list( azure_frugal.get_azure_pricing(refresh=refresh).find()) # turn numpy array into a pandas dataframe, assign column names, and remove na values flavor_frame = pd.DataFrame(printlist)[[ 'provider', 'machine-name', 'location', 'cores', 'core/price', 'memory', 'memory/price', 'price' ]] flavor_frame = flavor_frame.replace([np.inf, -np.inf], np.nan).dropna() # sort the dataframe by order if order == 'cores': flavor_frame = flavor_frame.sort_values(by=['core/price'], ascending=False) elif order == 'memory': flavor_frame = flavor_frame.sort_values(by=['memory/price'], ascending=False) else: flavor_frame = flavor_frame.sort_values(by=['price'], ascending=True) # print out the dataframe if printit, print results limited by resultssize if printit: print( Printer.write( flavor_frame.head(resultssize).to_dict('records'), order=[ 'provider', 'machine-name', 'location', 'cores', 'core/price', 'memory', 'memory/price', 'price' ])) # 
return the final sorted data frame return flavor_frame
def ssh(self, vm=None, command=None): # # TODO: fix user name issue, should be stored in db # # VERBOSE(vm) ip = vm['ip_public'] key_name = vm['key_name'] image = vm['metadata']['image'] user = Image.guess_username(image) cm = CmDatabase() keys = cm.find_all_by_name(name=key_name, kind="key") for k in keys: if 'location' in k.keys(): if 'private' in k['location'].keys(): key = k['location']['private'] break cm.close_client() if command is None: command = "" if user is None: location = ip else: location = user + '@' + ip cmd = "ssh " \ "-o StrictHostKeyChecking=no " \ "-o UserKnownHostsFile=/dev/null " \ f"-i {key} {location} {command}" cmd = cmd.strip() # VERBOSE(cmd) if command == "": if platform.lower() == 'win32': class disable_file_system_redirection: _disable = ctypes.windll.kernel32.Wow64DisableWow64FsRedirection _revert = ctypes.windll.kernel32.Wow64RevertWow64FsRedirection def __enter__(self): self.old_value = ctypes.c_long() self.success = self._disable( ctypes.byref(self.old_value)) def __exit__(self, type, value, traceback): if self.success: self._revert(self.old_value) with disable_file_system_redirection(): os.system(cmd) else: os.system(cmd) else: if platform.lower() == 'win32': class disable_file_system_redirection: _disable = ctypes.windll.kernel32.Wow64DisableWow64FsRedirection _revert = ctypes.windll.kernel32.Wow64RevertWow64FsRedirection def __enter__(self): self.old_value = ctypes.c_long() self.success = self._disable( ctypes.byref(self.old_value)) def __exit__(self, type, value, traceback): if self.success: self._revert(self.old_value) with disable_file_system_redirection(): ssh = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) else: ssh = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) result = ssh.stdout.read().decode("utf-8") if not result: error = ssh.stderr.readlines() print("ERROR: %s" % error) else: return result
    def do_key(self, args, arguments):
        """
        ::

           Usage:
             key -h | --help
             key list --cloud=CLOUDS [--output=OUTPUT]
             key list --source=ssh [--dir=DIR] [--output=OUTPUT]
             key list --source=git [--output=OUTPUT] [--username=USERNAME]
             key list [--output=OUTPUT]
             key init
             key add NAME --filename=FILENAME [--output=OUTPUT]
             key add [NAME] [--source=FILENAME]
             key add [NAME] [--source=git]
             key add [NAME] [--source=ssh]
             key delete NAMES [--cloud=CLOUDS] [--dryrun]
             key upload [NAMES] [--cloud=CLOUDS] [--dryrun]
             key upload [NAMES] [VMS] [--dryrun]
             key group upload [NAMES] [--group=GROUPNAMES] [--cloud=CLOUDS] [--dryrun]
             key group add [NAMES] [--group=GROUPNAMES] [--cloud=CLOUDS] [--dryrun]
             key group delete [--group=GROUPNAMES] [NAMES] [--dryrun]
             key group list [--group=GROUPNAMES] [--output=OUTPUT]
             key group export --group=GROUNAMES --filename=FILENAME
             key gen (ssh | pem) [--filename=FILENAME] [--nopass] [--set_path] [--force]
             key reformat (ssh | pem) [--filename=FILENAME] [--format=FORMAT] [--nopass] [--pub]
             key verify (ssh | pem) [--filename=FILENAME] [--pub] [--check_pass]

           Arguments:
             VMS        Parameterized list of virtual machines
             CLOUDS     The clouds
             NAME       The name of the key.
             SOURCE     db, ssh, all
             OUTPUT     The format of the output (table, json, yaml)
             FILENAME   The filename with full path in which the key
                        is located
             FORMAT     Desired key format (SubjectInfo, SSH, OpenSSL, PKCS8)

           Options:
              --dir=DIR                     the directory with keys [default: ~/.ssh]
              --check_pass                  Flag where program query user for password
              --filename=FILENAME           the name and full path to the file
              --nopass                      Flag indicating if the key has no password
              --output=OUTPUT               the format of the output [default: table]
              --pub                         Indicates that the public key is passed in
              --set_path                    Sets the cloudmesh encryption key path to
                                            the full path of the generated keys
              --source=SOURCE               the source for the keys
              --username=USERNAME           the source for the keys [default: none]

           Description:

               Please note that some values are read from the cloudmesh.yaml
               file. One such value is cloudmesh.profile.user

               Management of public keys is an essential component of accessing
               virtual machines in the cloud. There are a number of sources
               where you can find public keys. This includes the ~/.ssh
               directory and for example github.

               If you do not already have a public-private key pair they
               can be generated using cloudmesh

                 key gen ssh
                     This will create the public-private keypair of
                     ~/.ssh/id_rsa and ~/.ssh/id_rsa.pub in OpenSSH format

                 key gen pem
                     This will create the public-private keypair of
                     ~/.ssh/id_rsa and ~/.ssh/id_rsa.pub in PEM format

                 key gen (ssh | pem) --filename=~/.cloudmesh/foobar
                     This will generate the public-private key pair of
                     ~/.cloudmesh/foobar and ~/.cloudmesh/foobar.pub

                 key gen (ssh | pem) --filename=~/.cloudmesh/foobar --set_path
                     This will generate the keys as stated above, but it
                     will also set cloudmesh to use these keys for encryption.

               Keys can also be verified for their formatting and passwords.
               By default cloudmesh checks ~/.ssh/id_rsa and ~/.ssh/id_rsa.pub
               If the key is password protected the formatting can only be
               verified if the password is provided (--check_pass argument)

                 key verify pem
                     Verifies that ~/.ssh/id_rsa has PEM format

                 key verify ssh --pub
                     Verifies that ~/.ssh/id_rsa.pub has OpenSSH format

                 key verify pem --filename=~/.cloudmesh/foobar
                     Verifies if the private key located at
                     ~/.cloudmesh/foobar is password protected

                 key verify pem --filenam=~/.cloudmesh/foobar --check_pass
                     Request the password to the file, then checks if the
                     key is in proper PEM format

               You may find the need to keep the values of your keys but
               different encodings or formats. These aspects of your key
               can also be changed using cloudmesh.

                 key reformat pem
                     Will reformat the ~/.id_rsa.pub key from PEM to OpenSSH

                 key reformat ssh
                     Will reformat the ~/.id_rsa.pub key from OpenSSH to PEM

                 key reformat --filename=~/.id_rsa --format=PKCS8
                     Will reformat the private key to PKCS8 format

               Keys will be uploaded into cloudmesh database with the add
               command under the given NAME. If the name is not specified
               the name cloudmesh.profile.user is assumed.

                 key add NAME  --source=ssh
                     adds the default key in ~/.ssh/id_rsa.pub
                 key add NAME  --source=FILENAME
                     adds the key specified by the filename with the given
                     name
                 key add NAME --git --username=username
                     adds a named github key from a user with the given
                     github username.

                 key set
                     adds the ~/.ssh/id_rsa.pub key with the name specified
                     in cloudmesh.profile.user. It also sets the variable key
                     to that user.

               Once the keys are uploaded to github, they can be listed
               To list these keys the following list functions are provided.

                 key list --source=git  [--username=USERNAME]
                     lists all keys in git for the specified user. If the
                     name is not specified it is read from cloudmesh.yaml
                 key list --source=ssh  [--dir=DIR] [--output=OUTPUT]
                     lists all keys in the directory. If the directory is
                     not specified the default will be ~/.ssh

                 key list NAMES
                     lists all keys in the named virtual machines.

               List command can use the [--output=OUTPUT] option

                     list the keys loaded to cloudmesh in the given format:
                     json, yaml, table. table is default. The NAME can be
                     specified and if omitted the name cloudmesh.profile.user
                     is assumed.

               To get keys from the cloudmesh database the following commands
               are available:

                 key delete NAMES
                     deletes the Named keys. This may also have an impact on
                     groups

                 key rename NAME NEW
                     renames the key from NAME to NEW in the cloudmesh
                     database.

               Group management of keys is an important concept in cloudmesh,
               allowing multiple users to be added to virtual machines while
               managing the keys associated with them. The keys must be
               uploaded to cloudmesh database with a name so they can be used
               in a group. The --dryrun option executes the command without
               uploading the information to the clouds. If no group name is
               specified the group name default is assumed. If no cloudnames
               are specified, all active clouds are assumed. active clouds
               can be set in the cloudmesh.yaml file.

                 key group delete [GROUPNAMES] [NAMES] [--dryrun]
                     deletes the named keys from the named groups.

                 key group list [GROUPNAMES] [--output=OUTPUT]
                     list the key names and details in the group.

                 key group upload [GROUPNAMES] [CLOUDS] [--dryrun]
                     uploads the named groups to the specified clouds.

               In some cases you may want to store the public keys in files.
               For this reason we support the following commands.

                 key group add --group=GROUPNAME --file=FILENAME
                     the command adds the keys to the given group. The keys
                     are written in the files in yaml format.

                 key group export --group=GROUNAMES --filename=FILENAME
                     the command exports the keys to the given group. The
                     keys are written in the files in yaml format.

               The yaml format is as follows:

               cloudmesh:
                 keys:
                   NAMEOFKEY:
                     name: NAMEOFKEY
                     key: ssh-rsa AAAA..... comment
                     group:
                     - GROUPNAME
                   ...

               If a key is included in multiple groups they will be added
               to the grouplist of the key
        """

        def print_keys(keys):
            # shared table printer for key records, honoring --output
            print(
                Printer.write(keys,
                              sort_keys=["name"],
                              order=["name", "type", "fingerprint", "comment"],
                              header=["Name", "Type", "Fingerprint", "Comment"],
                              output=arguments.output))

        # normalize docopt --flags into attribute-style arguments
        map_parameters(arguments, 'check_pass', 'cloud', 'dir', 'dryrun',
                       'filename', 'force', 'format', 'name', 'nopass',
                       'output', 'pub', 'pwd', 'set_path', 'source')

        variables = Variables()

        if arguments.list and arguments.source == "git":
            # key list --source=git: fetch keys from the user's github account
            config = Config()
            username = config["cloudmesh.profile.github"]
            keys = SSHkey().get_from_git(username)
            print_keys(keys)
            return ""

        elif arguments.list and arguments.source == "ssh":
            # this is much simpler
            sshkey = SSHkey()
            print_keys([sshkey])
            return ""

        elif arguments.list and arguments.cloud:
            # key list --cloud=CLOUDS: list the keys registered on each cloud
            clouds = Parameter.expand(arguments.cloud)
            if len(clouds) == 0:
                # no clouds given: fall back to the current 'cloud' variable
                variables = Variables()
                cloudname = variables['cloud']
                clouds = [cloudname]
            keys = []
            for cloud in clouds:
                print(f"cloud {cloud}")
                provider = Provider(name=cloud)
                keys = provider.keys()
                provider.Print(keys, output=arguments.output, kind="key")
            return ""

        elif arguments.list:
            # plain 'key list': show keys stored in the local database
            cloud = "local"
            db = CmDatabase()
            keys = db.find(collection=f"{cloud}-key")
            print_keys(keys)
            return ""

        elif arguments.add:
            """
            key add [NAME] [--source=FILENAME]  # NOT IMPLEMENTED YET
            key add [NAME] [--source=git]
            key add [NAME] [--source=ssh]
            """
            key = Key()

            if arguments["--source"] == "ssh":
                name = arguments.NAME or "ssh"
                key.add(name, "ssh")
            elif arguments["--source"] == "git":
                name = arguments.NAME or "git"
                key.add("git", "git")
            else:
                # no source: add the user's default ssh key under the profile
                # user name
                config = Config()
                name = config["cloudmesh.profile.user"]
                kind = "ssh"
                key.add(name, kind)

        elif arguments.init:
            """
            key init
            """
            config = Config()
            username = config["cloudmesh.profile.user"]
            if username == "TBD":
                # profile user not configured yet: tell the user how to set
                # it and bail out
                Console.error(
                    "Please set cloudmesh.profile.user in ~/.cloudmesh.yaml")
                u = os.environ["USER"].lower().replace(" ", "")
                Console.msg(
                    f"To change it you can use the command. Define a NAME such as '{u}' e.g.")
                Console.msg("")
                Console.msg(f"    cms config set cloudmesh.profile.user={u}")
                Console.msg("")
                return ""
            key = Key()
            key.add(username, "ssh")
            variables['key'] = username

        elif arguments.upload:
            """
            key upload [NAMES] [--cloud=CLOUDS] [--dryrun]
            key upload [NAMES] [VMS] [--dryrun]
            """

            names = Parameter.expand(arguments.NAMES)
            # this may have a bug if NAMES is ommitted

            #
            # Step 0. Set keyname to variable
            #
            if names is None or len(names) == 0:
                # no names given: default to the profile user's key
                config = Config()
                username = config["cloudmesh.profile.user"]
                names = [username]

            if len(names) == 1:
                # a single key becomes the new default 'key' variable
                name = names[0]
                variables = Variables()
                if "key" in variables:
                    old = variables["key"]
                    if old != name:
                        Console.msg(
                            f"Changing default key from {old} to {name}")
                        variables["key"] = name

            #
            # Step 1. keys = find keys to upload
            #
            cloud = "local"
            db = CmDatabase()
            db_keys = db.find(collection=f"{cloud}-key")

            keys = []
            for key in db_keys:
                if key["name"] in names:
                    keys.append(key)

            if len(keys) == 0:
                Console.error(
                    f"No keys with the names {names} found in cloudmesh. \n"
                    " Use the command 'key add' to add the key.")

            #
            # Step 2. iterate over the clouds to upload
            #
            clouds, vmnames = Arguments.get_cloud_and_names(
                "list", arguments, variables)
            for cloud in clouds:
                print(f"cloud {cloud}")
                provider = Provider(name=cloud)
                for key in db_keys:
                    name = key['name']
                    if name in names:
                        try:
                            r = provider.key_upload(key)
                            Console.ok(f"upload key '{name} successful'. ")
                        except ValueError as e:
                            # provider raises ValueError for duplicate keys
                            Console.error(
                                f"key '{name} already exists in {cloud}.")
            return ""

        elif arguments.delete and arguments.cloud and arguments.NAMES:
            # key delete NAMES --cloud=CLOUDS [--dryrun]
            names = Parameter.expand(arguments.NAMES)
            clouds = Parameter.expand(arguments.cloud)
            for cloud in clouds:
                provider = Provider(name=cloud)
                for name in names:
                    if arguments.dryrun:
                        Console.ok(f"Dryrun: delete {name} in {cloud}")
                    else:
                        images = provider.key_delete(name)
            return ""

        elif arguments.gen:
            """
            key gen (ssh | pem) [--filename=FILENAME] [--nopass]
                    [--set_path] [--force]
            Generate an RSA key pair with pem or ssh encoding for the
            public key. The private key is always encoded as a PEM file.
            """
            config = Config()

            # Check if password will be requested
            ap = not arguments.nopass
            if not ap:
                Console.warning("Private key will NOT have a password")
                cnt = yn_choice(message="Continue, despite risk?",
                                default="N")
                if not cnt:
                    sys.exit()

            # Discern the name of the public and private keys
            rk_path = None
            uk_path = None
            if arguments.filename:
                # derive the companion file name from the given one,
                # whichever half of the pair was passed in
                fp = path_expand(arguments.filename)
                fname, fext = os.path.splitext(fp)
                if fext == ".pub" or fext == ".ssh":
                    rk_path = fname
                    uk_path = fp
                elif fext == ".priv" or fext == ".pem":
                    rk_path = fp
                    uk_path = fname + ".pub"
                else:
                    rk_path = fp
                    uk_path = rk_path + ".pub"
            else:
                rk_path = path_expand("~/.ssh/id_rsa")
                uk_path = rk_path + ".pub"

            # Check if the file exist, if so confirm overwrite
            def check_exists(path):
                if os.path.exists(path):
                    Console.info(f"{path} already exists")
                    ovwr_r = yn_choice(message=f"overwrite {path}?",
                                       default="N")
                    if not ovwr_r:
                        Console.info(f"Not overwriting {path}. Quitting")
                        sys.exit()

            if not arguments.force:
                check_exists(rk_path)
                check_exists(uk_path)

            # Set the path if requested
            if arguments.set_path:
                config['cloudmesh.security.privatekey'] = rk_path
                config['cloudmesh.security.publickey'] = uk_path
                config.save()

            Console.msg(f"\nPrivate key: {rk_path}")
            Console.msg(f"Public  key: {uk_path}\n")

            # Generate the Private and Public keys
            kh = KeyHandler()
            r = kh.new_rsa_key()
            u = kh.get_pub_key(priv=r)

            # Serialize and write the private key to the path
            sr = kh.serialize_key(key=r,
                                  key_type="PRIV",
                                  encoding="PEM",
                                  format="PKCS8",
                                  ask_pass=ap)
            # Force write the key (since we check file existence above)
            kh.write_key(key=sr, path=rk_path, force=True)

            # Determine the public key format and encoding
            enc = None
            forma = None
            if arguments.ssh:
                enc = "SSH"
                forma = "SSH"
            elif arguments.pem:
                enc = "PEM"
                forma = "SubjectInfo"

            # Serialize and write the public key to the path
            su = kh.serialize_key(key=u,
                                  key_type="PUB",
                                  encoding=enc,
                                  format=forma,
                                  ask_pass=False)
            # Force write the key (since we check file existence above)
            kh.write_key(key=su, path=uk_path, force=True)
            Console.ok("Success")

        elif arguments.verify:
            """
            key verify (ssh | pem) [--filename=FILENAME] [--pub]
                       [--check_pass]
            Verifies the encoding (pem or ssh) of the key (private or public)
            """
            # Initialize variables
            kh = KeyHandler()

            # Determine filepath
            fp = None
            if arguments.filename is None:
                config = Config()
                kp = path_expand("~/.ssh/id_rsa")
                if arguments.pub:
                    fp = kp + ".pub"
                else:
                    fp = kp
            else:
                fp = arguments.filename

            # Discern key type
            kt = enc = None
            ap = True
            if arguments.pub:
                # Load the public key, if no error occurs formatting is
                # correct
                kt, kta, ap = "public", "PUB", False
                # Discern public key encoding
                if arguments.ssh:
                    enc, e = "OpenSSH", "SSH"
                elif arguments.pem:  # PEM encoding
                    enc = e = "PEM"
            else:
                # Load the private key to verify the format and password of
                # the key file. If no error occurs the format and pwd are
                # correct
                kt, kta = "private", "PRIV"
                enc = e = "PEM"
                ap = False
                if arguments.check_pass:
                    ap = True

            try:
                k = kh.load_key(path=fp, key_type=kta, encoding=e,
                                ask_pass=ap)
                m = f"Success the {kt} key {fp} has proper {enc} format"
                Console.ok(m)
            except ValueError as e:
                # The formatting was incorrect
                m = f"Failure, {kt} key {fp} does not have proper {enc} format"
                Console.error(m)
                raise e
            except TypeError as e:
                # Success, we didn't ask the user for the key password and
                # we received an error for not entering the password, thus
                # the key is password protectd
                if not arguments.check_pass:
                    Console.ok("The key is password protected")
                else:
                    # Error Message handled in kh.load_key()
                    raise e

        elif arguments.reformat:
            """
            key reformat (ssh | pem) [--filename=FILENAME] [--format=FORMAT]
                         [--nopass] [--pub]
            Restructures a key's format, encoding, and password
            """
            # Initialize variables
            kh = KeyHandler()

            # Determine key type
            fname, fext = os.path.splitext(arguments.filename)
            kt = "PRIV"
            if arguments.pub or fext == ".pub":
                kt = "PUB"

            # Determine new encoding
            use_pem = True
            if arguments.ssh:
                use_pem = False

            kh.reformat_key(path=arguments.filename,
                            key_type=kt,
                            use_pem=use_pem,
                            new_format=arguments.format,
                            ask_pass=not arguments.nopass)

        elif arguments.delete and arguments.NAMES:
            # key delete NAMES [--dryrun]
            names = Parameter.expand(arguments.NAMES)

            cloud = "local"
            db = CmDatabase()
            db_keys = db.find(collection=f"{cloud}-key")
            error = []
            for key in db_keys:
                name = key['name']
                if name in names:
                    if arguments.dryrun:
                        Console.ok(f"Dryrun: delete {name}")
                    else:
                        db.delete(collection="local-key", name=name)
                        Console.ok(f"delete {name}")
            return ""

        elif arguments.group:
            # key group ...: not implemented yet
            raise NotImplementedError

        return ""
class Queue(object):
    """
    Represents a batch queue whose state (policy, status, jobs, ...) is
    mirrored into the cloudmesh database via the DatabaseUpdate decorator.
    """

    def __init__(self):
        """
        Initializes the Queue class
        """
        self.cm_config = Config()
        # in-memory record of the queue; mirrored into the database
        self.info = munch.munchify({
            'uid': None,
            "cloud": None,
            "kind": "batch-queue",
            "name": None,
            "cm": {},
            "queue": {
                'policy': None,
                'status': None,
                'active': False,
                'charge': None,
                'unit': None,
                "numJobs": 0,
                "numRunningJobs": 0,
                'joblist': []
            }
        })
        # list of parameters that can be set
        self.settable_params = ['policy', 'charge', 'unit']
        # map each queue policy to its pop implementation; defined here
        # (instead of only inside create()) so pop() also works on queues
        # loaded via findQueue()
        self.policyFunctionMap = munch.munchify({
            'FIFO': self.popFIFO,
            'FILO': self.popFILO
        })
        self.database = CmDatabase()

    @DatabaseUpdate()
    def create(self, queue_name, cloud_name, policy, charge=None, unit=None):
        """
        This method is used to create a queue

        :param queue_name: name of the queue to create
        :param cloud_name: slurm cluster on which the job is gonna run
        :param policy: policy of the queue
        :param charge: charge of the queue
        :param unit: unit of the charge for the queue
        :return: the created record as a single-element list, or None if the
            queue already exists
        """
        name = Name(order=["cloud", "name"],
                    cloud=cloud_name,
                    name=queue_name)
        uid = name.id(cloud=cloud_name, name=queue_name)
        self.info = munch.munchify({
            'uid': uid,
            "cloud": cloud_name,
            "kind": "batch-queue",
            "name": queue_name,
            "cm": {
                "cloud": cloud_name,
                "kind": "batch-queue",
                "name": queue_name,
                "cluster":
                    self.cm_config.get('cloudmesh').get('cluster')[cloud_name]
            },
            "queue": {
                'policy': policy,
                'status': 'EMPTY',
                'active': False,
                'charge': charge,
                'unit': unit,
                "numJobs": 0,
                "numRunningJobs": 0,
                # bug fix: 'joblist' was missing here, so push() on a
                # freshly created queue failed
                'joblist': []
            }
        })
        if self.database.exists(self.info)[0]:
            Console.error("Queue already exists")
            return
        return [self.info]

    def findQueue(self, cloud_name, queue_name):
        """
        Finds a queue in the database based on cloud and queue name.

        :param cloud_name: name of the cloud/cluster the queue belongs to
        :param queue_name: name of the queue
        :return: True if found (also loads the record into self.info),
            False otherwise
        """
        name = Name(order=["cloud", "name"],
                    cloud=cloud_name,
                    name=queue_name)
        uid = name.id(cloud=cloud_name, name=queue_name)
        queue = self.database.find_by_KeyValue(
            collection_name="{cloud}-{kind}".format(cloud=cloud_name,
                                                    kind='batch-queue'),
            KeyValue={'uid': uid})
        if type(queue) is cursor.Cursor:
            # queue found
            self.info = munch.munchify(queue[0])
            return True
        elif type(queue) is list and len(queue) == 0:
            # queue not found
            return False

    def findClouds(self):
        """
        Prints the names of all batch-queue collections in the database.

        :return: None
        """
        for collection in self.database.collections():
            if 'batch-queue' in collection:
                print(collection)

    def findQueues(self, cloud_name):
        """
        Prints the uid of every queue stored for the given cloud.

        :param cloud_name: the collection (cloud) to search
        :return: None
        """
        # TODO: find all queues info from the DB based on the ['cm']
        all_queues = self.database.find_by_KeyValue(
            collection_name=cloud_name)
        all_queues = [munch.munchify(queue) for queue in all_queues]
        for queue in all_queues:
            print(queue.uid)

    def listJobs(self):
        """
        list the jobs in the current queue

        :return: None
        """
        return

    def removeQueue(self):
        """
        remove the queue from the database

        :return: None
        """
        # TODO: remove the queues info from the DB based on the ['cm']
        return

    @DatabaseUpdate()  # this should update the record not create a new one
    def push(self, job):
        """
        Push a job onto the queue.

        :param job: the job record to enqueue
        :return: the updated queue record
        """
        self.info.queue.joblist.append(job)
        self.info.queue.numJobs += 1
        self.updateStatus()
        return self.info

    @DatabaseUpdate()  # this should update the record not create a new one
    def pop(self):
        """
        Pop a job from the queue according to the queue's policy.

        :return: the popped job
        """
        self.info.queue.numJobs -= 1
        self.updateStatus()
        policy = self.info.queue.policy
        return self.policyFunctionMap[policy]()

    def popFIFO(self):
        """
        Pop the oldest job (first-in, first-out).

        :return: the popped job
        """
        return self.info['queue']['joblist'].pop(0)

    def popFILO(self):
        """
        Pop the newest job (first-in, last-out).

        :return: the popped job
        """
        return self.info['queue']['joblist'].pop()

    def isEmpty(self):
        """
        checks if the queue is empty

        :return: True if the queue holds no jobs
        """
        if self.info.queue.numJobs > 0:
            return False
        return True

    @DatabaseUpdate()  # this should update the record not create a new one
    def activate(self):
        """
        activates the queue

        :return: the updated queue record
        """
        # TODO: activating a queue should start submitting jobs
        self.info.queue.active = True
        return self.info

    @DatabaseUpdate()  # this should update the record not create a new one
    def deactivate(self):
        """
        deactivates the queue

        :return: the updated queue record
        """
        # TODO: stop all jobs
        self.info.queue.active = False
        return self.info

    @DatabaseUpdate()  # this should update the record not create a new one
    def updateStatus(self):
        """
        checks number of jobs and updates queue status

        :return: the updated queue record
        """
        if self.info.queue.numJobs > 0:
            self.info.queue.status = 'FULL'
        else:
            # bug fix: previously the status was never reset, so a drained
            # queue kept reporting FULL
            self.info.queue.status = 'EMPTY'
        return self.info

    @DatabaseUpdate()
    def setParam(self, param, val):
        """
        set a particular parameter in the queue

        :param param: the parameter (one of self.settable_params)
        :param val: value of the parameter
        :return: the updated queue record
        """
        if param in self.settable_params:
            self.info.queue[param] = val
        else:
            Console.error("Only the following parameters could be set in a "
                          "queue: \n" + ', '.join(self.settable_params))
        return self.info
class DatabaseUpdate:
    """
    Decorator that stores the dictionary (or list of dictionaries) returned
    by the wrapped function in the MongoDB database.

    The target collection is derived from the entry's "cm" values as
    cloud-kind, and each entry's cm.name must be unique within that
    collection. It is usually best to keep the function that produces the
    dict separate from the upload, so several providers can share one
    database-facing class:

        class Provider(object):

            @DatabaseUpdate()
            def entries(self):
                return {
                    "cm": {
                        "cloud": "foo",
                        "kind": "entries",
                        "name": "test01",
                    },
                    "test": "hello",
                }
    """

    # noinspection PyUnusedLocal
    def __init__(self, **kwargs):
        # extra kwargs are accepted (and ignored) for decorator flexibility
        self.database = CmDatabase()

    def __call__(self, f):
        # local import keeps this block self-contained; functools is only
        # needed to preserve the wrapped function's metadata
        import functools

        @functools.wraps(f)
        def wrapper(*args, **kwargs):
            current = f(*args, **kwargs)
            # nothing to store: normalize to an empty result list
            if current is None:
                return []
            # a single dict is stored the same way as a one-element list
            if isinstance(current, dict):
                current = [current]
            result = self.database.update(current)
            self.database.close_client()
            return result

        return wrapper
def do_vm(self, args, arguments):
    """
    ::

        Usage:
            vm ping [NAMES] [--cloud=CLOUDS] [--count=N] [--processors=PROCESSORS]
            vm check [NAMES] [--cloud=CLOUDS] [--processors=PROCESSORS]
            vm status [NAMES] [--cloud=CLOUDS]
            vm console [NAME] [--force]
            vm start [NAMES] [--cloud=CLOUD] [--dryrun]
            vm stop [NAMES] [--cloud=CLOUD] [--dryrun]
            vm terminate [NAMES] [--cloud=CLOUD] [--dryrun]
            vm delete [NAMES] [--cloud=CLOUD] [--dryrun]
            vm refresh [--cloud=CLOUDS]
            vm list [NAMES] [--cloud=CLOUDS] [--output=OUTPUT] [--refresh]
            vm boot [--name=NAME]
                    [--cloud=CLOUD]
                    [--username=USERNAME]
                    [--image=IMAGE]
                    [--flavor=FLAVOR]
                    [--public]
                    [--secgroup=SECGROUPs]
                    [--key=KEY]
                    [--dryrun]
            vm boot [--n=COUNT]
                    [--cloud=CLOUD]
                    [--username=USERNAME]
                    [--image=IMAGE]
                    [--flavor=FLAVOR]
                    [--public]
                    [--secgroup=SECGROUPS]
                    [--key=KEY]
                    [--dryrun]
            vm run [--name=NAMES] [--username=USERNAME] [--dryrun] COMMAND
            vm script [--name=NAMES] [--username=USERNAME] [--dryrun] SCRIPT
            vm ip assign [NAMES] [--cloud=CLOUD]
            vm ip show [NAMES]
                       [--group=GROUP]
                       [--cloud=CLOUD]
                       [--output=OUTPUT]
                       [--refresh]
            vm ip inventory [NAMES]
            vm ssh [NAMES] [--username=USER]
                   [--quiet]
                   [--ip=IP]
                   [--key=KEY]
                   [--command=COMMAND]
                   [--modify-knownhosts]
            vm rename [OLDNAMES] [NEWNAMES] [--force] [--dryrun]
            vm wait [--cloud=CLOUD] [--interval=SECONDS]
            vm info [--cloud=CLOUD]
                    [--output=OUTPUT]
            vm username USERNAME [NAMES] [--cloud=CLOUD]
            vm resize [NAMES] [--size=SIZE]

        Arguments:
            OUTPUT         the output format
            COMMAND        positional arguments, the commands you want to
                           execute on the server(e.g. ls -a) separated by ';',
                           you will get a return of executing result instead of login to
                           the server, note that type in -- is suggested before
                           you input the commands
            NAME           server name. By default it is set to the name of last vm from database.
            NAMES          server name. By default it is set to the name of last vm from database.
            KEYPAIR_NAME   Name of the vm keypair to be used to create VM.
                           Note this is not a path to key.
            NEWNAMES       New names of the VM while renaming.
            OLDNAMES       Old names of the VM while renaming.

        Options:
            --output=OUTPUT       the output format [default: table]
            -H --modify-knownhosts  Do not modify ~/.ssh/known_hosts file
                                  when ssh'ing into a machine
            --username=USERNAME   the username to login into the vm. If not
                                  specified it will be guessed
                                  from the image name and the cloud
            --ip=IP               give the public ip of the server
            --cloud=CLOUD         give a cloud to work on, if not given, selected
                                  or default cloud will be used
            --count=COUNT         give the number of servers to start
            --detail              for table, a brief version is used as default,
                                  use this flag to print detailed table
            --flavor=FLAVOR       give the name or id of the flavor
            --group=GROUP         give the group name of server
            --secgroup=SECGROUP   security group name for the server
            --image=IMAGE         give the name or id of the image
            --key=KEY             specify a key to use, input a string which
                                  is the full path to the private key file
            --keypair_name=KEYPAIR_NAME   Name of the vm keypair to be used to
                                  create VM. Note this is not a path to key.
            --user=USER           give the user name of the server that you want
                                  to use to login
            --name=NAME           give the name of the virtual machine
            --force               rename/ delete vms without user's confirmation
            --command=COMMAND     specify the commands to be executed

        Description:
            commands used to boot, start or delete servers of a cloud

            vm default [options...]
                Displays default parameters that are set for vm boot either
                on the default cloud or the specified cloud.

            vm boot [options...]
                Boots servers on a cloud, user may specify flavor, image
                .etc, otherwise default values will be used, see how to set
                default values of a cloud: cloud help

            vm start [options...]
                Starts a suspended or stopped vm instance.

            vm stop [options...]
                Stops a vm instance .

            vm delete [options...]
                Delete servers of a cloud, user may delete a server by its
                name or id, delete servers of a group or servers of a cloud,
                give prefix and/or range to find servers by their names.
                Or user may specify more options to narrow the search

            vm floating_ip_assign [options...]
                assign a public ip to a VM of a cloud

            vm ip show [options...]
                show the ips of VMs

            vm ssh [options...]
                login to a server or execute commands on it

            vm list [options...]
                same as command "list vm", please refer to it

            vm status [options...]
                Retrieves status of last VM booted on cloud and displays it.

            vm refresh [--cloud=CLOUDS]
                this command refreshes the data for virtual machines,
                images and flavors for the specified clouds.

            vm ping [NAMES] [--cloud=CLOUDS] [--count=N] [--processors=PROCESSORS]
                pings the specified virtual machines, while using at most N
                pings. The ping is executed in parallel. If names are
                specifies the ping is restricted to the given names in
                parameter format. If clouds are specified, names that are
                not in these clouds are ignored. If the name is set in the
                variables this name is used.

        Tip:
            give the VM name, but in a hostlist style, which is very
            convenient when you need a range of VMs e.g. sample[1-3]
            => ['sample1', 'sample2', 'sample3']
            sample[1-3,18] => ['sample1', 'sample2', 'sample3', 'sample18']

        Quoting commands:
            cm vm login gvonlasz-004 --command=\"uname -a\"

        Limitations:
            Azure: rename is not supported
    """
    # Normalize docopt's "--option" keys into attribute-style access
    # (e.g. arguments.cloud) for the option names listed here.
    map_parameters(arguments,
                   'active',
                   'cloud',
                   'command',
                   'dryrun',
                   'flavor',
                   'force',
                   'output',
                   'group',
                   'image',
                   'interval',
                   'ip',
                   'key',
                   'modify-knownhosts',
                   'n',
                   'name',
                   'public',
                   'quiet',
                   'secgroup',
                   'size',
                   'username')

    VERBOSE.print(arguments, verbose=9)

    variables = Variables()

    # Dispatch on the subcommand flags docopt parsed from the Usage section.
    # Most branches are placeholders that only resolve clouds/names so far.
    if arguments.refresh:

        names = []

        clouds, names = Arguments.get_cloud_and_names("refresh", arguments,
                                                      variables)

        return ""

    elif arguments.ping:

        # TODO: IMPLEMENT
        names = []
        pings = int(arguments.N or 3)

        names = []

        clouds, names = Arguments.get_cloud_and_names("ping", arguments,
                                                      variables)

        for name in names:
            ping = Shell.live("ping -c {N} {name}".format(name=name,
                                                          N=arguments.N))
            print(ping)
        else:
            # NOTE(review): this is a for/else — since the loop has no
            # 'break', 'return True' always runs after it, which makes the
            # 'return ""' below unreachable; confirm the intended result.
            return True

        return ""

    elif arguments.check:

        names = []

        clouds, names = Arguments.get_cloud_and_names("check", arguments,
                                                      variables)

        return ""

    elif arguments.status:

        names = []

        clouds, names = Arguments.get_cloud_and_names("status", arguments,
                                                      variables)

        return ""

    elif arguments.start:

        names = []

        clouds, names = Arguments.get_cloud_and_names("start", arguments,
                                                      variables)

        return ""

    elif arguments.stop:

        names = []

        clouds, names = Arguments.get_cloud_and_names("stop", arguments,
                                                      variables)

        return ""

    elif arguments.terminate:

        names = []

        clouds, names = Arguments.get_cloud_and_names("terminate", arguments,
                                                      variables)

        return ""

    elif arguments.delete:

        clouds, names = Arguments.get_cloud_and_names("delete", arguments,
                                                      variables)

        return ""

    elif arguments.boot:

        print("boot the vm")

    elif arguments.list:
        # vm list [NAMES]
        #         [--cloud=CLOUDS]
        #         [--output=OUTPUT]
        #         [--refresh]

        # if no clouds find the clouds of all specified vms by name
        # find all vms of the clouds,
        # print only those vms specified by name, if no name is given print
        # all for the cloud
        # print("list the vms")

        clouds, names = Arguments.get_cloud_and_names("list", arguments,
                                                      variables)

        # print("Clouds:", clouds)

        if arguments.NAMES is not None:
            # Listing specific VMs by name is not implemented yet; the
            # commented pseudo code below sketches the intended algorithm.
            names = Parameter.expand(arguments.NAMES)
            Console.error("NAMES, not yet implemented" + str(names))

            try:
                if arguments["--refresh"]:
                    pass
                    # find all clouds in db
                    # iterate over the clouds
                    # for each name in name queue, find it and add it to the
                    # cloud vm list
                    # for each cloud print the vms
                else:
                    pass
                    # find all clouds in db
                    # iterate over all clouds
                    # find the vm with the name
                    # add it to the cloud list
                    # for each cloud print the vms
            except Exception as e:
                VERBOSE.print(e, verbose=9)

            return ""
        else:
            try:
                if arguments["--refresh"]:
                    # Live data: query each cloud provider directly.
                    for cloud in clouds:
                        Console.ok("refresh " + cloud)
                        p = Provider(cloud)
                        vms = p.list()

                        order = p.p.output['vm']['order']  # not pretty
                        header = p.p.output['vm']['header']  # not pretty

                        print(Printer.flatwrite(vms,
                                                sort_keys=["cm.name"],
                                                order=order,
                                                header=header,
                                                output=arguments.output))
                else:
                    # Cached data: read the "<cloud>-node" collection from
                    # the cloudmesh database instead of hitting the cloud.
                    for cloud in clouds:
                        p = Provider(cloud)
                        kind = p.kind

                        # pprint(p.__dict__)
                        # pprint(p.p.__dict__)  # not pretty

                        collection = "{cloud}-node".format(cloud=cloud,
                                                           kind=p.kind)
                        db = CmDatabase()
                        vms = db.find(collection=collection)

                        # pprint(vms)
                        # print(arguments.output)
                        # print(p.p.output['vm'])
                        order = p.p.output['vm']['order']  # not pretty
                        header = p.p.output['vm']['header']  # not pretty

                        print(Printer.flatwrite(vms,
                                                sort_keys=["cm.name"],
                                                order=order,
                                                header=header,
                                                output=arguments.output))
            except Exception as e:
                VERBOSE.print(e, verbose=9)

            return ""

    elif arguments.info:
        """
        vm info [--cloud=CLOUD] [--output=OUTPUT]
        """
        print("info for the vm")

        cloud, names = Arguments.get_cloud_and_names("info", arguments,
                                                     variables)

    elif arguments.rename:
        # Renames one or more VMs; OLDNAMES and NEWNAMES are parameter
        # expressions that must expand to lists of equal length.
        print("rename the vm")

        v = Variables()
        cloud = v["cloud"]

        p = Provider(cloud)

        try:
            oldnames = Parameter.expand(arguments["OLDNAMES"])
            newnames = Parameter.expand(arguments["NEWNAMES"])
            force = arguments["--force"]

            if oldnames is None or newnames is None:
                Console.error("Wrong VMs specified for rename",
                              traceflag=False)
            elif len(oldnames) != len(newnames):
                Console.error("The number of VMs to be renamed is wrong",
                              traceflag=False)
            else:
                print(oldnames)
                print(newnames)
                for i in range(0, len(oldnames)):
                    oldname = oldnames[i]
                    newname = newnames[i]
                    if arguments["--dryrun"]:
                        Console.ok("Rename {} to {}".format(oldname, newname))
                    else:
                        print(f"rename {oldname} -> {newname}")
                        p.rename(source=oldname, destination=newname)
                msg = "info. OK."
                Console.ok(msg)
        except Exception as e:
            Error.traceback(e)
            Console.error("Problem renameing instances", traceflag=True)

    elif arguments["ip"] and arguments["show"]:
        print("show the ips")
        """
        vm ip show [NAMES]
                   [--group=GROUP]
                   [--cloud=CLOUD]
                   [--output=OUTPUT]
                   [--refresh]
        """

    elif arguments["ip"] and arguments["assign"]:
        """
        vm ip assign [NAMES] [--cloud=CLOUD]
        """
        print("assign the public ip")

    elif arguments["ip"] and arguments["inventory"]:
        """
        vm ip inventory [NAMES]
        """
        print("list ips that could be assigned")

    elif arguments.username:
        """
        vm username USERNAME [NAMES] [--cloud=CLOUD]
        """
        print("sets the username for the vm")

    elif arguments.default:

        print("sets defaults for the vm")

    elif arguments.run:
        """
        vm run [--name=NAMES] [--username=USERNAME] [--dryrun] COMMAND
        """
        pass

    elif arguments.script:
        """
        vm script [--name=NAMES] [--username=USERNAME] [--dryrun] SCRIPT
        """
        pass

    elif arguments.resize:
        """
        vm resize [NAMES] [--size=SIZE]
        """
        pass

    elif arguments.ssh:
        """
        vm ssh [NAMES] [--username=USER]
               [--quiet]
               [--ip=IP]
               [--key=KEY]
               [--command=COMMAND]
               [--modify-knownhosts]
        """
        print("ssh the vm")

    elif arguments.console:
        # vm console [NAME] [--force]
        names = Arguments.get_names(arguments, variables)
        for name in names:
            # r = vm.console(name,force=argument.force)
            Console.msg("{label} {name}".format(label="console", name=name))
        return

    elif arguments.wait:
        """
        vm wait [--cloud=CLOUD] [--interval=SECONDS]
        """
        print("waits for the vm till its ready and one can login")
def __init__(self, **kwargs):
    """Create the instance and open a cloudmesh database handle.

    :param kwargs: unused; accepted for interface compatibility
    """
    self.database = CmDatabase()
def do_image(self, args, arguments):
    """
    ::

        Usage:
            image list [NAMES] [--cloud=CLOUD] [--refresh] [--output=OUTPUT]

        Options:
            --output=OUTPUT  the output format [default: table]
            --cloud=CLOUD    the cloud name
            --refresh        live data taken from the cloud

        Description:
            cm image list
            cm image list --output=csv
            cm image list 58c9552c-8d93-42c0-9dea-5f48d90a3188 --refresh
    """
    # Normalize docopt "--option" keys into attribute-style access.
    map_parameters(arguments,
                   "refresh",
                   "cloud",
                   "output")

    VERBOSE.print(arguments, verbose=9)

    variables = Variables()

    if arguments.list and arguments.refresh:
        # Live listing: query each cloud provider for its images.
        names = []

        clouds, names = Arguments.get_cloud_and_names("list", arguments,
                                                      variables)
        # NOTE(review): leftover debug print — consider removing.
        print("AAA", clouds, names)
        for cloud in clouds:
            print(f"cloud {cloud}")
            provider = Provider(name=cloud)
            images = provider.images()

            # NOTE(review): the 'vm' output spec is reused for images —
            # confirm an 'image' spec is not intended here.
            order = provider.p.output['vm']['order']  # not pretty
            header = provider.p.output['vm']['header']  # not pretty

            print(Printer.flatwrite(images,
                                    sort_keys=["name"],
                                    order=order,
                                    header=header,
                                    output=arguments.output))
        return ""

    elif arguments.list:
        # Cached listing: read "<cloud>-image" collections from the database.
        names = []

        clouds, names = Arguments.get_cloud_and_names("list", arguments,
                                                      variables)
        print(clouds, names)

        try:

            for cloud in clouds:
                print(f"List {cloud}")
                p = Provider(name=cloud)
                kind = p.kind

                collection = "{cloud}-image".format(cloud=cloud,
                                                    kind=p.kind)
                db = CmDatabase()
                vms = db.find(collection=collection)

                order = p.p.output['vm']['order']  # not pretty
                header = p.p.output['vm']['header']  # not pretty

                print(Printer.flatwrite(vms,
                                        sort_keys=["name"],
                                        order=order,
                                        header=header,
                                        output=arguments.output))

        except Exception as e:

            VERBOSE.print(e, verbose=9)

        return ""
def do_key(self, args, arguments):
    """
    ::

       Usage:
         key  -h | --help
         key list --cloud=CLOUDS [--output=OUTPUT]
         key list --source=ssh [--dir=DIR] [--output=OUTPUT]
         key list --source=git [--output=OUTPUT] [--username=USERNAME]
         key list [--output=OUTPUT]
         key init
         key add NAME --filename=FILENAME [--output=OUTPUT]
         key add [NAME] [--source=FILENAME]
         key add [NAME] [--source=git]
         key add [NAME] [--source=ssh]
         key delete NAMES [--cloud=CLOUDS] [--dryrun]
         key upload [NAMES] [--cloud=CLOUDS] [--dryrun]
         key upload [NAMES] [VMS] [--dryrun]
         key group upload [NAMES] [--group=GROUPNAMES] [--cloud=CLOUDS] [--dryrun]
         key group add [--group=GROUPNAMES] [--cloud=CLOUDS] [--dryrun]
         key group add --file=FILENAME
         key group delete [--group=GROUPNAMES] [NAMES] [--dryrun]
         key group list [--group=GROUPNAMES] [--output=OUTPUT]
         key group export --group=GROUNAMES --filename=FILENAME
         key gen (rsa | ssh) [--filename=FILENAME] [--nopass] [--set_path]
         key verify (ssh | pem) --filename=FILENAME [--pub]

       Arguments:
         VMS            Parameterized list of virtual machines
         CLOUDS         The clouds
         NAME           The name of the key.
         SOURCE         db, ssh, all
         KEYNAME        The desired full path name to the key file
         OUTPUT         The format of the output (table, json, yaml)
         FILENAME       The filename with full path in which the key
                        is located

       Options:
          --dir=DIR                     the directory with keys [default: ~/.ssh]
          --filename=FILENAME           the name and full path to the file
          --nopass                      Flag indicating if the key has no password
          --output=OUTPUT               the format of the output [default: table]
          --pub                         Indicates that the public key is passed in
          --set_path                    Sets the security key paths to KEYNAME
          --source=SOURCE               the source for the keys
          --username=USERNAME           the source for the keys [default: none]

       Description:

           Please note that some values are read from the cloudmesh.yaml
           file. One such value is cloudmesh.profile.user

           Manages public keys is an essential component of accessing
           virtual machine sin the cloud. There are a number of sources
           where you can find public keys. This includes teh ~/.ssh
           directory and for example github.

           Keys will be uploaded into cloudmesh database with the add
           command under the given NAME. If the name is not specified the name
           cloudmesh.profile.user is assumed.

           key add NAME  --source=ssh
               adds the default key in ~/.ssh/id_rsa.pub
           key add NAME --source=FILENAME
               adds the key specified by the filename with the given name
           key add NAME --git --username=username
               adds a named github key from a user with the given github
               username.

           key set
              adds the ~/.ssh/id_rsa.pub key with the name specified in
              cloudmesh.profile.user. It also sets the variable key to
              that user.

           Once the keys are uploaded to github, they can be listed
           To list these keys the following list functions are provided.

           key list --source=git  [--username=USERNAME]
              lists all keys in git for the specified user. If the
              name is not specified it is read from cloudmesh.yaml
           key list --source=ssh  [--dir=DIR] [--output=OUTPUT]
              lists all keys in the directory. If the directory is not
              specified the default will be ~/.ssh

           key list NAMES
              lists all keys in the named virtual machines.

           List command can use the [--output=OUTPUT] option

              list the keys loaded to cloudmesh in the given format:
              json, yaml, table. table is default. The NAME can be
              specified and if omitted the name cloudmesh.profile.user
              is assumed.

           To get keys from the cloudmesh database the following commands
           are available:

           key delete NAMES
               deletes the Named keys. This may also have an impact on groups

           key rename NAME NEW
               renames the key from NAME to NEW in the cloudmesh database.

           Group management of keys is an important concept in cloudmesh,
           allowing multiple users to be added to virtual machines
           while managing the keys associated with them. The keys must
           be uploaded to cloudmesh database with a name so they can
           be used in a group.

           The --dryrun option executes the command without uploading
           the information to the clouds.

           If no group name is specified the group name default is
           assumed. If no cloudnamesh are specified, all active clouds
           are assumed. active clouds can be set in the
           cloudmesh.yaml file.

           key group delete [GROUPNAMES] [NAMES] [--dryrun]
               deletes the named keys from the named groups.

           key group list [GROUPNAMES] [--output=OUTPUT]
               list the key names and details in the group.

           key group upload [GROUPNAMES] [CLOUDS] [--dryrun]
               uploads the named groups to the specified clouds.

           In some cases you may want to store the public keys in files.
           For this reason we support the following commands.

           key group add --group=GROUPNAME --file=FILENAME
               the command adds the keys to the given group. The keys are
               written in the files in yaml format.

           key group export --group=GROUNAMES --filename=FILENAME
               the command exports the keys to the given group. The keys
               are written in the files in yaml format.

           The yaml format is as follows:

           cloudmesh:
             keys:
               NAMEOFKEY:
                 name: NAMEOFKEY
                 key: ssh-rsa AAAA..... comment
                 group:
                 - GROUPNAME
                 ...

           If a key is included in multiple groups they will be added
           to the grouplist of the key
    """

    def print_keys(keys):
        # Render a list of key dicts in the requested output format.
        print(Printer.write(
            keys,
            sort_keys=["name"],
            order=["name", "type", "fingerprint", "comment"],
            header=["Name", "Type", "Fingerprint", "Comment"],
            output=arguments.output))

    # Normalize docopt "--option" keys into attribute-style access.
    map_parameters(arguments,
                   'cloud',
                   'dir',
                   'dryrun',
                   'filename',
                   'name',
                   'nopass',
                   'output',
                   'pub',
                   'pwd',
                   'set_path',
                   'source')

    variables = Variables()

    if arguments.list and arguments.source == "git":
        # List the public keys a github user published.
        config = Config()
        username = config["cloudmesh.profile.github"]
        keys = SSHkey().get_from_git(username)
        print_keys(keys)
        return ""

    elif arguments.list and arguments.source == "ssh":
        # this is much simpler
        sshkey = SSHkey()
        print_keys([sshkey])
        return ""

    elif arguments.list and arguments.cloud:
        # List keys registered on the named clouds (or the default cloud).
        clouds = Parameter.expand(arguments.cloud)
        if len(clouds) == 0:
            variables = Variables()
            cloudname = variables['cloud']
            clouds = [cloudname]
        keys = []
        for cloud in clouds:
            print(f"cloud {cloud}")
            provider = Provider(name=cloud)
            keys = provider.keys()
            provider.Print(keys, output=arguments.output, kind="key")
        return ""

    elif arguments.list:
        # Default: list keys stored in the local cloudmesh database.
        cloud = "local"
        db = CmDatabase()
        keys = db.find(collection=f"{cloud}-key")
        print_keys(keys)
        return ""

    elif arguments.add:
        """
        key add [NAME] [--source=FILENAME]  # NOT IMPLEMENTED YET
        key add [NAME] [--source=git]
        key add [NAME] [--source=ssh]
        """
        key = Key()

        if arguments["--source"] == "ssh":
            name = arguments.NAME or "ssh"
            key.add(name, "ssh")
        elif arguments["--source"] == "git":
            name = arguments.NAME or "git"
            key.add("git", "git")
        else:
            # No source given: register the user's default ssh key under
            # the configured cloudmesh user name.
            config = Config()
            name = config["cloudmesh.profile.user"]
            kind = "ssh"
            key.add(name, kind)

    elif arguments.init:
        """
        key init
        """
        config = Config()
        username = config["cloudmesh.profile.user"]
        if username == "TBD":
            Console.error(
                "Please set cloudmesh.profile.user in ~/.cloudmesh.yaml")
            u = os.environ["USER"].lower().replace(" ", "")
            Console.msg(
                f"To change it you can use the command. "
                f"Define a NAME such as '{u}' e.g.")
            Console.msg("")
            Console.msg(f"    cms config set cloudmesh.profile.user={u}")
            Console.msg("")
            return ""
        key = Key()
        key.add(username, "ssh")
        variables['key'] = username

    elif arguments.upload:
        """
        key upload [NAMES] [--cloud=CLOUDS] [--dryrun]
        key upload [NAMES] [VMS] [--dryrun]
        """
        names = Parameter.expand(arguments.NAMES)
        # this may have a bug if NAMES is ommitted

        #
        # Step 0. Set keyname to variable
        #
        if names is None or len(names) == 0:
            config = Config()
            username = config["cloudmesh.profile.user"]
            names = [username]

        if len(names) == 1:
            name = names[0]
            variables = Variables()
            if "key" in variables:
                old = variables["key"]
                if old != name:
                    # BUGFIX: message typo "defualt" corrected; matches the
                    # wording used by the sibling implementation of do_key.
                    Console.msg(
                        f"Changing default key from {old} to {name}")
                    variables["key"] = name

        #
        # Step 1. keys = find keys to upload
        #
        cloud = "local"
        db = CmDatabase()
        db_keys = db.find(collection=f"{cloud}-key")

        keys = []
        for key in db_keys:
            if key["name"] in names:
                keys.append(key)

        if len(keys) == 0:
            Console.error(
                f"No keys with the names {names} found in cloudmesh. \n"
                "       Use the command 'key add' to add the key.")

        #
        # Step 2. iterate over the clouds to upload
        #
        # BUGFIX: the cloud/vm-name tuple previously overwrote `names`
        # (the key names), so the `name in names` check below matched
        # against the wrong list. Bind the vm names separately, as the
        # sibling implementation of do_key does.
        clouds, vmnames = Arguments.get_cloud_and_names("list",
                                                        arguments,
                                                        variables)
        for cloud in clouds:
            print(f"cloud {cloud}")
            provider = Provider(name=cloud)
            for key in db_keys:
                name = key['name']
                if name in names:
                    try:
                        r = provider.key_upload(key)
                        Console.ok(f"upload key '{name} successful'. ")
                    except ValueError as e:
                        Console.error(
                            f"key '{name} already exists in {cloud}.")
        return ""

    elif arguments.delete and arguments.cloud and arguments.NAMES:
        # key delete NAMES --cloud=CLOUDS [--dryrun]
        names = Parameter.expand(arguments.NAMES)
        clouds = Parameter.expand(arguments.cloud)
        for cloud in clouds:
            provider = Provider(name=cloud)
            for name in names:
                if arguments.dryrun:
                    Console.ok(f"Dryrun: delete {name} in {cloud}")
                else:
                    images = provider.key_delete(name)
        return ""

    elif arguments.gen:
        """
        key gen (rsa | ssh) [--filename=FILENAME] [--nopass] [--set_path]
        Generate an RSA key pair with pem or ssh encoding for the public
        key. The private key is always encoded as a PEM file.
        """
        config = Config()

        # Check if password will be requested
        ap = not arguments.nopass
        if not ap:
            Console.warning("Private key will NOT have a password")
            cnt = yn_choice(message="Continue, despite risk?", default="N")
            if not cnt:
                sys.exit()

        # Discern the name of the public and private keys
        rk_path = None
        uk_path = None
        if arguments.filename:
            # BUGFIX: this previously read `arguments.name` and sliced with
            # [-4:]/[-5:], which kept only the suffix instead of stripping
            # it. Derive both paths from the given filename, removing the
            # ".pub"/".priv" suffix to obtain the companion path.
            if arguments.filename.endswith(".pub"):
                rk_path = path_expand(arguments.filename[:-4])
                uk_path = path_expand(arguments.filename)
            elif arguments.filename.endswith(".priv"):
                rk_path = path_expand(arguments.filename)
                uk_path = path_expand(arguments.filename[:-5] + ".pub")
            else:
                rk_path = path_expand(arguments.filename)
                uk_path = rk_path + ".pub"
        else:
            # No filename given: fall back to the configured key paths.
            rk_path = path_expand(config['cloudmesh.security.privatekey'])
            uk_path = path_expand(config['cloudmesh.security.publickey'])

        # Set the path if requested
        if arguments.set_path and arguments.filename:
            config['cloudmesh.security.privatekey'] = rk_path
            config['cloudmesh.security.publickey'] = uk_path
            config.save()

        Console.msg(f"\nPrivate key: {rk_path}")
        Console.msg(f"Public  key: {uk_path}\n")

        # Generate the Private and Public keys
        kh = KeyHandler()
        r = kh.new_rsa_key()
        u = kh.get_pub_key(priv=r)

        # Serialize and write the private key to the path
        sr = kh.serialize_key(key=r,
                              key_type="PRIV",
                              encoding="PEM",
                              format="PKCS8",
                              ask_pass=ap)
        kh.write_key(key=sr, path=rk_path)

        # Determine the public key format and encoding
        enc = None
        forma = None
        if arguments.ssh:
            enc = "SSH"
            forma = "SSH"
        elif arguments.rsa:
            enc = "PEM"
            forma = "SubjectInfo"

        # Serialize and write the public key to the path
        su = kh.serialize_key(key=u,
                              key_type="PUB",
                              encoding=enc,
                              format=forma,
                              ask_pass=False)
        kh.write_key(key=su, path=uk_path)
        Console.ok("Success")

    elif arguments.verify:
        """
        key verify (ssh | pem) --filename=FILENAME --pub
        Verifies the encoding (pem or ssh) of the key (private or public)
        """
        kh = KeyHandler()
        fp = arguments.filename
        kt = None
        enc = None
        # Discern key type
        if arguments.pub:
            kt = "public"
            # Discern public key encoding
            if arguments.ssh:
                enc, e = "OpenSSH", "SSH"
            elif arguments.pem:  # PEM encoding
                enc = e = "PEM"
            # Load the public key, if no error occurs formatting is correct
            u = kh.load_key(path=fp, key_type="PUB",
                            encoding=e, ask_pass=False)
        else:
            kt, enc = "private", "PEM"
            # Load the private key to verify the formatting and password of
            # the key file. If no error occurs the format and pwd are correct
            r = kh.load_key(path=fp, key_type="PRIV",
                            encoding=enc, ask_pass=True)
        m = f"Success the {kt} key {fp} has proper {enc} format"
        Console.ok(m)

    elif arguments.delete and arguments.NAMES:
        # key delete NAMES [--dryrun]
        names = Parameter.expand(arguments.NAMES)

        cloud = "local"
        db = CmDatabase()
        db_keys = db.find(collection=f"{cloud}-key")
        error = []
        for key in db_keys:
            name = key['name']
            if name in names:
                if arguments.dryrun:
                    Console.ok(f"Dryrun: delete {name}")
                else:
                    db.delete(collection="local-key", name=name)
                    Console.ok(f"delete {name}")
        return ""

    elif arguments.group:
        raise NotImplementedError
    return ""
def do_key(self, args, arguments):
    """
    ::

       Usage:
         key  -h | --help
         key list --cloud=CLOUDS [--output=OUTPUT]
         key list --source=ssh [--dir=DIR] [--output=OUTPUT]
         key list --source=git [--output=OUTPUT] [--username=USERNAME]
         key list [--output=OUTPUT]
         key init
         key add NAME --filename=FILENAME [--output=OUTPUT]
         key add [NAME] [--source=FILENAME]
         key add [NAME] [--source=git]
         key add [NAME] [--source=ssh]
         key delete NAMES [--cloud=CLOUDS] [--dryrun]
         key upload [NAMES] [--cloud=CLOUDS] [--dryrun]
         key upload [NAMES] [VMS] [--dryrun]
         key group upload [NAMES] [--group=GROUPNAMES] [--cloud=CLOUDS] [--dryrun]
         key group add [NAMES] [--group=GROUPNAMES] [--cloud=CLOUDS] [--dryrun]
         key group delete [--group=GROUPNAMES] [NAMES] [--dryrun]
         key group list [--group=GROUPNAMES] [--output=OUTPUT]
         key group export --group=GROUNAMES --filename=FILENAME
         key gen (ssh | pem) [--filename=FILENAME] [--nopass] [--set_path] [--force]
         key reformat (ssh | pem) [--filename=FILENAME] [--format=FORMAT] [--nopass] [--pub]
         key verify (ssh | pem) [--filename=FILENAME] [--pub] [--check_pass]

       Arguments:
         VMS            Parameterized list of virtual machines
         CLOUDS         The clouds
         NAME           The name of the key.
         SOURCE         db, ssh, all
         OUTPUT         The format of the output (table, json, yaml)
         FILENAME       The filename with full path in which the key
                        is located
         FORMAT         Desired key format (SubjectInfo, SSH, OpenSSL, PKCS8)

       Options:
          --dir=DIR                     the directory with keys [default: ~/.ssh]
          --check_pass                  Flag where program query user for password
          --filename=FILENAME           the name and full path to the file
          --nopass                      Flag indicating if the key has no password
          --output=OUTPUT               the format of the output [default: table]
          --pub                         Indicates that the public key is passed in
          --set_path                    Sets the cloudmesh encryption key path to
                                        the full path of the generated keys
          --source=SOURCE               the source for the keys
          --username=USERNAME           the source for the keys [default: none]

       Description:

           Please note that some values are read from the cloudmesh.yaml
           file. One such value is cloudmesh.profile.user

           Management of public keys is an essential component of accessing
           virtual machines in the cloud. There are a number of sources
           where you can find public keys. This includes the ~/.ssh
           directory and for example github.

           If you do not already have a public-private key pair they can
           be generated using cloudmesh

           key gen ssh
                This will create the public-private keypair of
                ~/.ssh/id_rsa and ~/.ssh/id_rsa.pub in OpenSSH format

           key gen pem
                This will create the public-private keypair of
                ~/.ssh/id_rsa and ~/.ssh/id_rsa.pub in PEM format

           key gen (ssh | pem) --filename=~/.cloudmesh/foobar
                This will generate the public-private key pair of
                ~/.cloudmesh/foobar and ~/.cloudmesh/foobar.pub

           key gen (ssh | pem) --filename=~/.cloudmesh/foobar --set_path
                This will generate the keys as stated above, but it will
                also set cloudmesh to use these keys for encryption.

           Keys can also be verified for their formatting and passwords.
           By default cloudmesh checks ~/.ssh/id_rsa and ~/.ssh/id_rsa.pub
           If the key is password protected the formatting can only be
           verified if the password is provided (--check_pass argument)

           key verify pem
                Verifies that ~/.ssh/id_rsa has PEM format

           key verify ssh --pub
                Verifies that ~/.ssh/id_rsa.pub has OpenSSH format

           key verify pem --filename=~/.cloudmesh/foobar
                Verifies if the private key located at ~/.cloudmesh/foobar
                is password protected

           key verify pem --filenam=~/.cloudmesh/foobar --check_pass
                Request the password to the file, then checks if the
                key is in proper PEM format

           You may find the need to keep the values of your keys but
           different encodings or formats. These aspects of your key can
           also be changed using cloudmesh.

           key reformat pem
                Will reformat the ~/.id_rsa.pub key from PEM to OpenSSH

           key reformat ssh
                Will reformat the ~/.id_rsa.pub key from OpenSSH to PEM

           key reformat --filename=~/.id_rsa --format=PKCS8
                Will reformat the private key to PKCS8 format

           Keys will be uploaded into cloudmesh database with the add
           command under the given NAME. If the name is not specified the
           name cloudmesh.profile.user is assumed.

           key add NAME  --source=ssh
               adds the default key in ~/.ssh/id_rsa.pub
           key add NAME --source=FILENAME
               adds the key specified by the filename with the given name
           key add NAME --git --username=username
               adds a named github key from a user with the given github
               username.

           key set
              adds the ~/.ssh/id_rsa.pub key with the name specified in
              cloudmesh.profile.user. It also sets the variable key to
              that user.

           Once the keys are uploaded to github, they can be listed
           To list these keys the following list functions are provided.

           key list --source=git  [--username=USERNAME]
              lists all keys in git for the specified user. If the
              name is not specified it is read from cloudmesh.yaml
           key list --source=ssh  [--dir=DIR] [--output=OUTPUT]
              lists all keys in the directory. If the directory is not
              specified the default will be ~/.ssh

           key list NAMES
              lists all keys in the named virtual machines.

           List command can use the [--output=OUTPUT] option

              list the keys loaded to cloudmesh in the given format:
              json, yaml, table. table is default. The NAME can be
              specified and if omitted the name cloudmesh.profile.user
              is assumed.

           To get keys from the cloudmesh database the following commands
           are available:

           key delete NAMES
               deletes the Named keys. This may also have an impact on groups

           key rename NAME NEW
               renames the key from NAME to NEW in the cloudmesh database.

           Group management of keys is an important concept in cloudmesh,
           allowing multiple users to be added to virtual machines
           while managing the keys associated with them. The keys must
           be uploaded to cloudmesh database with a name so they can
           be used in a group.

           The --dryrun option executes the command without uploading
           the information to the clouds.

           If no group name is specified the group name default is
           assumed. If no cloudnamesh are specified, all active clouds
           are assumed. active clouds can be set in the
           cloudmesh.yaml file.

           key group delete [GROUPNAMES] [NAMES] [--dryrun]
               deletes the named keys from the named groups.

           key group list [GROUPNAMES] [--output=OUTPUT]
               list the key names and details in the group.

           key group upload [GROUPNAMES] [CLOUDS] [--dryrun]
               uploads the named groups to the specified clouds.

           In some cases you may want to store the public keys in files.
           For this reason we support the following commands.

           key group add --group=GROUPNAME --file=FILENAME
               the command adds the keys to the given group. The keys are
               written in the files in yaml format.

           key group export --group=GROUNAMES --filename=FILENAME
               the command exports the keys to the given group. The keys
               are written in the files in yaml format.

           The yaml format is as follows:

           cloudmesh:
             keys:
               NAMEOFKEY:
                 name: NAMEOFKEY
                 key: ssh-rsa AAAA..... comment
                 group:
                 - GROUPNAME
                 ...

           If a key is included in multiple groups they will be added
           to the grouplist of the key
    """

    # NOTE(review): the Usage section declares 'key gen', 'key reformat'
    # and 'key verify', but no corresponding code branch exists below —
    # those commands fall through every elif and silently return "".

    def print_keys(keys):
        # Render a list of key dicts in the requested output format.
        print(Printer.write(
            keys,
            sort_keys=["name"],
            order=["name", "type", "fingerprint", "comment"],
            header=["Name", "Type", "Fingerprint", "Comment"],
            output=arguments.output)
        )

    # Normalize docopt "--option" keys into attribute-style access.
    map_parameters(arguments,
                   'check_pass',
                   'cloud',
                   'dir',
                   'dryrun',
                   'filename',
                   'force',
                   'format',
                   'name',
                   'nopass',
                   'output',
                   'pub',
                   'pwd',
                   'set_path',
                   'source')

    variables = Variables()

    if arguments.list and arguments.source == "git":
        # List the public keys a github user published.
        config = Config()
        username = config["cloudmesh.profile.github"]
        keys = SSHkey().get_from_git(username)
        print_keys(keys)
        return ""

    elif arguments.list and arguments.source == "ssh":
        # this is much simpler
        sshkey = SSHkey()
        print_keys([sshkey])
        return ""

    elif arguments.list and arguments.cloud:
        # List keys registered on the named clouds (or the default cloud).
        clouds = Parameter.expand(arguments.cloud)
        if len(clouds) == 0:
            variables = Variables()
            cloudname = variables['cloud']
            clouds = [cloudname]
        keys = []
        for cloud in clouds:
            print(f"cloud {cloud}")
            provider = Provider(name=cloud)
            keys = provider.keys()
            provider.Print(keys, output=arguments.output, kind="key")
        return ""

    elif arguments.list:
        # Default: list keys stored in the local cloudmesh database.
        cloud = "local"
        db = CmDatabase()
        keys = db.find(collection=f"{cloud}-key")
        print_keys(keys)
        return ""

    elif arguments.add:
        """
        key add [NAME] [--source=FILENAME]  # NOT IMPLEMENTED YET
        key add [NAME] [--source=git]
        key add [NAME] [--source=ssh]
        """
        key = Key()

        if arguments["--source"] == "ssh":
            name = arguments.NAME or "ssh"
            key.add(name, "ssh")
        elif arguments["--source"] == "git":
            name = arguments.NAME or "git"
            key.add("git", "git")
        else:
            # No source given: register the user's default ssh key under
            # the configured cloudmesh user name.
            config = Config()
            name = config["cloudmesh.profile.user"]
            kind = "ssh"
            key.add(name, kind)

    elif arguments.init:
        """
        key init
        """
        config = Config()
        username = config["cloudmesh.profile.user"]
        if username == "TBD":
            Console.error(
                "Please set cloudmesh.profile.user in ~/.cloudmesh.yaml")
            u = os.environ["USER"].lower().replace(" ", "")
            Console.msg(
                f"To change it you can use the command. "
                f"Define a NAME such as '{u}' e.g.")
            Console.msg("")
            Console.msg(f"    cms config set cloudmesh.profile.user={u}")
            Console.msg("")
            return ""
        key = Key()
        key.add(username, "ssh")
        variables['key'] = username

    elif arguments.upload:
        """
        key upload [NAMES] [--cloud=CLOUDS] [--dryrun]
        key upload [NAMES] [VMS] [--dryrun]
        """
        names = Parameter.expand(arguments.NAMES)
        # this may have a bug if NAMES is ommitted

        #
        # Step 0. Set keyname to variable
        #
        if names is None or len(names) == 0:
            config = Config()
            username = config["cloudmesh.profile.user"]
            names = [username]

        if len(names) == 1:
            name = names[0]
            variables = Variables()
            if "key" in variables:
                old = variables["key"]
                if old != name:
                    Console.msg(
                        f"Changing default key from {old} to {name}")
                    variables["key"] = name

        #
        # Step 1. keys = find keys to upload
        #
        cloud = "local"
        db = CmDatabase()
        db_keys = db.find(collection=f"{cloud}-key")

        keys = []
        for key in db_keys:
            if key["name"] in names:
                keys.append(key)

        if len(keys) == 0:
            Console.error(
                f"No keys with the names {names} found in cloudmesh. \n"
                "       Use the command 'key add' to add the key.")

        #
        # Step 2. iterate over the clouds to upload
        #
        # vmnames is bound separately so the key-name list `names` is not
        # clobbered before the membership test below.
        clouds, vmnames = Arguments.get_cloud_and_names("list",
                                                        arguments,
                                                        variables)
        for cloud in clouds:
            print(f"cloud {cloud}")
            provider = Provider(name=cloud)
            for key in db_keys:
                name = key['name']
                if name in names:
                    try:
                        r = provider.key_upload(key)
                        Console.ok(f"upload key '{name} successful'. ")
                    except ValueError as e:
                        Console.error(
                            f"key '{name} already exists in {cloud}.")
        return ""

    elif arguments.delete and arguments.cloud and arguments.NAMES:
        # key delete NAMES --cloud=CLOUDS [--dryrun]
        names = Parameter.expand(arguments.NAMES)
        clouds = Parameter.expand(arguments.cloud)
        for cloud in clouds:
            provider = Provider(name=cloud)
            for name in names:
                if arguments.dryrun:
                    Console.ok(f"Dryrun: delete {name} in {cloud}")
                else:
                    images = provider.key_delete(name)
        return ""

    elif arguments.group:
        raise NotImplementedError
    return ""
from flask import jsonify
import connexion
import gridfs

from cloudmesh.mongo.CmDatabase import CmDatabase

# Swagger UI for this service: http://0.0.0.0:8080/cloudmesh_ai/ui

# Database handle shared by the service.
cmdb = CmDatabase()

# Seed code kept from earlier development (disabled):
# db = cmdb.client["ai_services"]
# data = db["files"]
# data.insert_one({"AI Services": "Logistic regression and Image Classification"})

# Create the connexion application instance and register the OpenAPI spec.
app = connexion.App(__name__, specification_dir="/")
app.add_api("api.yaml")


@app.route("/")
def home():
    """Health-check endpoint for the service root ("/")."""
    return jsonify({"msg": "It's working!"})


if __name__ == "__main__":
    app.run(host="127.0.0.1", port=8080, debug=True)
############################################################### # from cloudmesh.mongo import DatabaseUpdate from pprint import pprint import pytest from cloudmesh.common.Printer import Printer from cloudmesh.common.util import HEADING from cloudmesh.configuration.Config import Config from cloudmesh.management.configuration.name import Name from cloudmesh.mongo.CmDatabase import CmDatabase from cloudmesh.common3.Benchmark import Benchmark from cloudmesh.mongo.DataBaseDecorator import DatabaseUpdate Benchmark.debug() database = CmDatabase() user = Config()["cloudmesh.profile.user"] name_generator = Name(schema=f"{user}-vm", counter=1) # # we need to set a cm = { kind, cloud, name } to use teh new DatabaseUpdate() # @pytest.mark.incremental class TestMongo: def test_find_in_collection(self): HEADING() r = database.find_name("CC-CentOS7")
def clean(self):
    """Remove this instance's storage entries from the cloudmesh database.

    Drops the ``<name>-storage`` collection associated with this object.
    """
    database = CmDatabase()
    database.delete(collection=f"{self.name}-storage")
def do_flavor(self, args, arguments):
    """
    ::

        Usage:
            flavor list [NAMES] [--cloud=CLOUD] [--refresh] [--output=OUTPUT] [--query=QUERY]

        Options:
            --output=OUTPUT  the output format [default: table]
            --cloud=CLOUD    the cloud name
            --refresh        refreshes the data before displaying it

        Description:
            This lists out the flavors present for a cloud

        Examples:
            cm flavor list --refresh
            cm flavor list
            cm flavor list --output=csv
            cm flavor list 58c9552c-8d93-42c0-9dea-5f48d90a3188 --refresh

            please remember that a uuid or the flavor name can be used to
            identify a flavor.

            cms flavor list --refresh --query=\'{\"a\": \"b\"}\'

            OpenStack Query Example:
                cms flavor list --refresh --query=\'{\"minDisk\": \"80\"}\'
                cms flavor list --refresh --query=\'{\"name\": \"m1.large\"}\'

                supported query parameters for OpenStack:
                    min_disk
                    min_ram
                    name
    """
    # function-scope import so the module-level import block is untouched
    import ast

    # normalize docopt keys (--output -> arguments.output, ...)
    map_parameters(arguments,
                   "query",
                   "refresh",
                   "cloud",
                   "output")

    variables = Variables()

    # command-line values win; fall back to cms variables, then defaults
    arguments.output = Parameter.find("output",
                                      arguments,
                                      variables,
                                      "table")
    arguments.refresh = Parameter.find_bool("refresh",
                                            arguments,
                                            variables)

    if arguments.list and arguments.refresh:
        # refresh: query the provider(s) live instead of the local database
        clouds, names = Arguments.get_cloud_and_names(
            "list", arguments, variables)

        for cloud in clouds:
            print(f"cloud {cloud} query={arguments.query}")
            provider = Provider(name=cloud)
            if arguments.query is not None:
                # SECURITY: --query comes straight from the command line.
                # ast.literal_eval only accepts Python literals (dicts,
                # strings, numbers, ...); the previous eval() would have
                # executed arbitrary code embedded in the query string.
                query = ast.literal_eval(arguments.query)
                flavors = provider.flavors(**query)
            else:
                flavors = provider.flavors()
            provider.Print(flavors, output=arguments.output, kind="flavor")

        return ""

    elif arguments.list:
        # no refresh: serve the cached flavors from the cloudmesh database
        clouds, names = Arguments.get_cloud_and_names(
            "list", arguments, variables)
        try:
            for cloud in clouds:
                if arguments.output in ["table"]:
                    print(f"List {cloud}")
                provider = Provider(name=cloud)
                db = CmDatabase()
                flavors = db.find(collection=f"{cloud}-flavor")
                provider.Print(flavors,
                               output=arguments.output,
                               kind="flavor")
        except Exception as e:
            # best effort: report the problem but do not crash the shell
            VERBOSE(e)

        return ""
def do_keys(self, args, arguments):
    """
    ::

       Usage:
             keys -h | --help
             keys list --cloud=CLOUDS [--format=FORMAT]
             keys list --source=ssh [--dir=DIR] [--format=FORMAT]
             keys list --source=git [--format=FORMAT] [--username=USERNAME]
             keys list [NAMES] [--format=FORMAT] [--source =db]
             keys load --filename=FILENAME [--format=FORMAT]
             keys add [NAME] [--source=FILENAME]
             keys add [NAME] [--source=git]
             keys add [NAME] [--source=ssh]
             keys get NAME [--format=FORMAT]
             keys default --select
             keys delete (NAMES | --select | --all) [--dryrun]
             keys delete NAMES --cloud=CLOUDS [--dryrun]
             keys upload [NAMES] [--cloud=CLOUDS] [--dryrun]
             keys upload [NAMES] [VMS] [--dryrun]
             keys group upload [--group=GROUPNAMES] [--cloud=CLOUDS] [--dryrun]
             keys group add [--group=GROUPNAMES] [--cloud=CLOUDS] [--dryrun]
             keys group add --file=FILENAME
             keys group delete [--group=GROUPNAMES] [NAMES] [--dryrun]
             keys group list [--group=GROUPNAMES] [--format=FORMAT]
             keys group export --group=GROUNAMES --filename=FILENAME

       Arguments:
             VMS        Parameterized list of virtual machines
             CLOUDS     The clouds
             NAME       The name of the key.
             SOURCE     db, ssh, all
             KEYNAME    The name of a key. For key upload it defaults
                        to the default key name.
             FORMAT     The format of the output (table, json, yaml)
             FILENAME   The filename with full path in which the key
                        is located

       Options:
             --dir=DIR             the directory with keys [default: ~/.ssh]
             --format=FORMAT       the format of the output [default: table]
             --source=SOURCE       the source for the keys [default: cm]
             --username=USERNAME   the source for the keys [default: none]
             --name=KEYNAME        The name of a key

       Description:
             Please note that some values are read from the cloudmesh.yaml
             file. One such value is cloudmesh.profile.user

             Managing public keys is an essential component of accessing
             virtual machines in the cloud. There are a number of sources
             where you can find public keys. This includes the ~/.ssh
             directory and for example github.

             To list these keys the following list functions are provided.

             keys list --source=git [--username=USERNAME]
                lists all keys in git for the specified user. If the
                name is not specified it is read from cloudmesh.yaml

             keys list --source=ssh [--dir=DIR] [--format=FORMAT]
                lists all keys in the directory. If the directory is not
                specified the default will be ~/.ssh

             keys list NAMES
                lists all keys in the named virtual machines.

             The keys will be uploaded into cloudmesh with the add command
             under the given name. If the name is not specified the name
             cloudmesh.profile.user is assumed.

             keys add --ssh
                adds the default key in ~/.ssh/id_rsa.pub

             keys add NAME --source=FILENAME
                adds the key specified by the filename with the given name

             keys add NAME --git --username=username
                adds a named github key from a user with the given github
                username.

             Once the keys are uploaded to github, they can be listed

             keys list [NAME] [--format=FORMAT]
                list the keys loaded to cloudmesh in the given format:
                json, yaml, table. table is default. The NAME can be
                specified and if omitted the name cloudmesh.profile.user
                is assumed.

             keys get NAME
                Retrieves the key indicated by the NAME parameter from
                cloudmesh and prints its details.

             keys default --select
                Select the default key interactively

             keys delete NAMES
                deletes the keys. This may also have an impact on groups

             keys rename NAME NEW
                renames the key from NAME to NEW.

             Group management of keys is an important concept in cloudmesh,
             allowing multiple users to be added to virtual machines.
             The keys must be uploaded to cloudmesh with a name so they
             can be used in a group.

             The --dryrun option executes the command without uploading
             the information to the clouds.

             If no groupname is specified the groupname default is
             assumed. If no cloudnames are specified, all active clouds
             are assumed. active clouds can be set in the cloudmesh.yaml
             file.

             keys group delete [GROUPNAMES] [NAMES] [--dryrun]
                deletes the named keys from the named groups.

             keys group list [GROUPNAMES] [--format=FORMAT]
                list the key names and details in the group.

             keys group upload [GROUPNAMES] [CLOUDS] [--dryrun]
                uploads the named groups to the specified clouds.

             In some cases you may want to store the public keys in files.
             For this reason we support the following commands.

             keys group add --group=GROUPNAME --file=FILENAME
                the command adds the keys to the given group. The keys are
                written in the files in yaml format.

             keys group export --group=GROUNAMES --filename=FILENAME
                the command exports the keys to the given group. The keys
                are written in the files in yaml format.

             The yaml format is as follows:

             cloudmesh:
               keys:
                 NAMEOFKEY:
                   name: NAMEOFKEY
                   key: ssh-rsa AAAA..... comment
                   group:
                   - GROUPNAME
                   ...

             If a key is included in multiple groups they will be added
             to the grouplist of the key
    """
    # copy docopt's raw option values onto dotted attributes for
    # convenient access below
    arguments.cloud = arguments['--cloud']
    arguments.format = arguments['--format']
    arguments.source = arguments['--source']
    arguments.dir = arguments['--dir']
    arguments.NAMES = arguments['NAMES']

    pprint(arguments)

    # names that must not be used as key names
    invalid_names = ['tbd', 'none', "", 'id_rsa']

    m = Manager()

    if arguments.list and arguments.source == "git":
        # list the public keys of a github user
        config = Config()
        username = config["cloudmesh.profile.github"]
        # NOTE(review): the following statement is corrupted/redacted in
        # this copy of the source ("******").  From the surviving tail it
        # appears to print the username and then a Printer.flatwrite table
        # of the keys fetched from git — reconstruct from version control
        # before relying on this branch.
        print("Username:"******"name"],
            order=["id", "name", "fingerprint", "source"],
            header=["Id", "Name", "Fingerprint", "Source"]))
        return ""

    elif arguments.list and arguments.source == "ssh":
        # this is much simpler: print the local ~/.ssh key
        sshkey = SSHkey()
        print(
            Printer.flatwrite(
                [sshkey],
                sort_keys=["name"],
                order=["name", "type", "fingerprint", "comment"],
                header=["Name", "Type", "Fingerprint", "Comment"]))
        return ""

    elif arguments.list and arguments.cloud:
        # list the keys registered on one or more clouds
        clouds = Parameter.expand(arguments.cloud)
        print(clouds)
        if len(clouds) == 0:
            # no cloud given: fall back to the default cloud variable
            variables = Variables()
            cloudname = variables['cloud']
            clouds = [cloudname]
        cloudkey = []
        for cloud in clouds:
            print(cloud)
            # NOTE(review): this passes the whole `clouds` list rather
            # than the current `cloud` — likely should be
            # Provider(name=cloud); confirm against the Provider API.
            provider = Provider(clouds)
            cloudkey.append(provider.keys())
        # NOTE(review): `cloudkey` is already a list; wrapping it in
        # another list ([cloudkey]) looks unintended — verify the output.
        print(
            Printer.flatwrite(
                [cloudkey],
                sort_keys=["name"],
                order=["name", "type", "fingerprint", "comment"],
                header=["Name", "Type", "Fingerprint",
                        "Comment"]))
        return ""

    elif arguments.list and arguments.source == "db":
        # list keys stored in the cloudmesh mongo database
        if arguments.NAMES:
            names = Parameter.expand(arguments.NAMES)
            print(names)
            records = []
            for name in names:
                # NOTE(review): "kwrags" is a typo for "kwargs" (works,
                # but rename when this block is next touched)
                kwrags = {"name": name}
                database = CmDatabase()
                col = database.db["cloudmesh"]
                # exclude mongo's _id field from the results
                entries = col.find(kwrags, {"_id": 0})
                for entry in entries:
                    records.append(entry)
            print(records)
            print(
                Printer.flatwrite(
                    records,
                    sort_keys=["name"],
                    order=["name", "type", "fingerprint", "comment"],
                    header=["Name", "Type", "Fingerprint", "Comment"]))
            print("find the keys of the following vms", names)
            print("the keys will be read from mongo")
            return ""
        return ""
############################################################### # pytest -v --capture=no tests/test_cm_names_find.py # pytest -v tests/test_cm_names_find.py ############################################################### from pprint import pprint import pytest from cloudmesh.common.util import HEADING from cloudmesh.common.variables import Variables from cloudmesh.mongo.CmDatabase import CmDatabase from cloudmesh.common3.Benchmark import Benchmark Benchmark.debug() cm = CmDatabase() variables = Variables() assert variables['cloud'] is not None cloud = variables['cloud'] if 'benchmark_print' in variables: benchmark_print = variables['benchmark_print'] else: benchmark_print = False benchmark_print = False @pytest.mark.incremental class Test_cm_find:
def do_ip(self, args, arguments):
    """
    ::

        Usage:
            ip list [--cloud=CLOUD] [--output=OUTPUT]
            ip create [N] [--cloud=CLOUD]
            ip delete [IP] [--cloud=CLOUD]
            ip attach [NAME] [IP]
            ip detach [NAME] [IP]

        Options:
            -h               help message
            --cloud=CLOUD    Name of the cloud
            --output=OUTPUT  The output format [default: table]

        Arguments:
            N     Number of IPS to create
            IP    IP Address
            NAME  Name of the service

        Description:
            ip list floating [--cloud=CLOUD] [--output=OUTPUT]
                returns a list of all the floating IPS in the cloud

            ip add floating [--cloud=CLOUD]
                adds a floating ip to the pool of available floating ips

            ip delete floating [IP] [--cloud=CLOUD]
                deletes a floating ip to the pool of available floating ips

            ip add NAME [IP]
                add the ip to the named vm

            ip delete NAME [IP]
                deletes the ip from the vm
    """

    def get_ip(ip):
        """Return the given ip, or find a free public ip when ip is None.

        Uses the enclosing scope's `provider`.  On failure to find a free
        ip an error is printed and "" is returned (the caller continues
        with the empty string).
        """
        if ip is None:
            # no explicit address given: pick a free floating ip
            try:
                return provider.find_available_public_ip()
            except Exception:
                Console.error("No free floating ip found")
                return ""
        # BUG FIX: previously this function fell off the end and returned
        # None whenever an explicit ip was supplied; now the given ip is
        # passed through unchanged.
        return ip

    # normalize docopt keys (--cloud -> arguments.cloud, ...)
    map_parameters(arguments, "cloud", "output")
    arguments.vm = arguments.NAME
    variables = Variables()

    if arguments.list:
        # list all public (floating) ips of the cloud
        cloud = Parameter.find("cloud", arguments, variables)
        print(f"cloud {cloud}")
        provider = Provider(name=cloud)
        ips = provider.list_public_ips()
        provider.Print(ips, output=arguments.output, kind="ip")

    elif arguments.create:
        # allocate N new public ips (default 1), then show the full list
        cloud = Parameter.find("cloud", arguments, variables)
        n = arguments.N or 1
        print(f"cloud {cloud}")
        provider = Provider(name=cloud)
        for i in range(0, int(n)):
            ips = provider.create_public_ip()
        ips = provider.list_public_ips()
        provider.Print(ips, output=arguments.output, kind="ip")

    elif arguments.delete:
        cloud = Parameter.find("cloud", arguments, variables)
        print(f"cloud {cloud}")
        provider = Provider(name=cloud)
        # (removed a dead `ip = arguments.IP` that was immediately
        # overwritten) get_ip() falls back to a free ip when IP is omitted
        ip = get_ip(arguments.IP)
        ips = provider.delete_public_ip(ip)
        ips = provider.list_public_ips()
        provider.Print(ips, output=arguments.output, kind="ip")

    elif arguments.attach:
        # look the vm up in the database to learn which cloud it lives on
        name = Parameter.find("vm", arguments, variables)
        cm = CmDatabase()
        vm = cm.find_name(name, kind="vm")[0]
        cloud = vm["cm"]["cloud"]
        print(f"cloud {cloud}")
        provider = Provider(name=cloud)
        ip = get_ip(arguments.IP)
        try:
            ips = provider.attach_public_ip(name=name, ip=ip)
        except Exception as e:
            print(e)
            Console.error("Could not assign public ip.")

    elif arguments.detach:
        # look the vm up in the database to learn which cloud it lives on
        name = Parameter.find("vm", arguments, variables)
        cm = CmDatabase()
        vm = cm.find_name(name, kind="vm")[0]
        cloud = vm["cm"]["cloud"]
        print(f"cloud {cloud}")
        provider = Provider(name=cloud)
        ip = provider.get_public_ip(name=name)
        print(name, ip)
        try:
            ips = provider.detach_public_ip(name=name, ip=ip)
        except Exception as e:
            print(e)
            Console.error("can not detach ip")