def test_001(self):
    HEADING("assign key=value")
    v = Variables()
    n = len(v)
    v["gregor"] = "gregor"
    assert len(v) == n + 1
    assert "gregor" in v
    v.close()
def test_004(self):
    HEADING("dictionary and key subtract")
    d = {"a": "1", "b": "2"}
    v = Variables()
    v + d
    print(v)
    assert "a" in v and "b" in v
    v - d.keys()
    assert "a" not in v and "b" not in v
    print(v)
    v.close()
def postcmd(self, stop, line):
    StopWatch.stop("command")
    try:
        variable = Variables()
        if "timer" not in variable:
            variable["timer"] = "off"
        if variable["timer"].lower() in ['on', 'true']:
            print("Timer: {:.4f}s ({})".format(StopWatch.get("command"),
                                               line.strip()))
        variable.close()
    except Exception as e:
        Error.traceback(error=e)
    return stop
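# Hedged usage sketch (not part of the source above): the timer that postcmd()
# checks is just a persisted entry in the Variables store, so switching it on
# once makes every subsequent command report its elapsed time. The import path
# is an assumption.
from cloudmesh.common.variables import Variables

v = Variables()
v["timer"] = "on"   # postcmd() accepts 'on' or 'true', case-insensitively
v.close()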
def setup(self):
    print()
    self.user = Config()["cloudmesh"]["profile"]["user"]
    self.clouduser = '******'
    self.name_generator = Name(experiment="exp",
                               group="grp",
                               user=self.user,
                               kind="vm",
                               counter=1)
    self.name = str(self.name_generator)
    self.name_generator.incr()
    self.new_name = str(self.name_generator)

    variables = Variables()
    clouds = Parameter.expand(variables['cloud'])
    cloud = clouds[0]

    self.p = Provider(name=cloud)

    self.secgroupname = "CM4TestSecGroup"
    self.secgrouprule = {"ip_protocol": "tcp",
                         "from_port": 8080,
                         "to_port": 8088,
                         "ip_range": "129.79.0.0/16"}

    self.testnode = None
    print("\n")
def setup(self):
    variables = Variables()
    self.service = Parameter.expand(variables['storage'])[0]
    self.p = Provider(service=self.service)
    self.sourcedir = os.path.expanduser("~/Documents/cloudmesh/storage/test")
    print()
def get(self, name=None):
    connection = None
    if name is None:
        # fall back to the current default cloud from the variable store
        variables = Variables()
        name = variables['cloud']
    kind = self.config.get(
        "cloudmesh.cloud.{name}.cm.kind".format(name=name))
    credentials = self.config.get(
        "cloudmesh.cloud.{name}.credentials".format(name=name))
    # BUG FROM HERE ON WRONG
    if kind == 'azure':
        AZDriver = get_driver(Provider.AZURE)
        connection = AZDriver(
            subscription_id=credentials['AZURE_SUBSCRIPTION_ID'],
            key_file=credentials['AZURE_MANAGEMENT_CERT_PATH'])
    elif kind == 'aws':
        EC2Driver = get_driver(Provider.EC2)
        connection = EC2Driver(credentials['EC2_ACCESS_ID'],
                               credentials['EC2_SECRET_KEY'])
    return connection
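# Hypothetical follow-up (an assumption, not part of the source): the object
# returned by get() is an Apache Libcloud NodeDriver, so a caller could list
# the account's nodes with the standard driver API. The cloud name "aws" is
# only an example entry from cloudmesh4.yaml.
conn = self.get(name="aws")
if conn is not None:
    for node in conn.list_nodes():    # standard libcloud driver call
        print(node.name, node.state)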
def setup(self):
    banner("setup", c="-")
    self.user = Config()["cloudmesh"]["profile"]["user"]
    self.clouduser = '******'
    self.name_generator = Name(experiment="exp",
                               group="grp",
                               user=self.user,
                               kind="vm",
                               counter=1)
    self.name = str(self.name_generator)
    self.name_generator.incr()
    self.new_name = str(self.name_generator)

    variables = Variables()
    # this gives the current default cloud
    cloud = variables['cloud']

    # specify the cloud name to make sure this test
    # is done for the openstack cloud
    # self.p = Provider(name="chameleon")

    self.secgroupname = "CM4TestSecGroup"
    self.secgrouprule = {"ip_protocol": "tcp",
                         "from_port": 8080,
                         "to_port": 8088,
                         "ip_range": "129.79.0.0/16"}

    self.testnode = None
def test_003(self):
    HEADING("dictionary add")
    d = {"a": "1", "b": "2"}
    v = Variables()
    v + d
    print(v)
    assert "a" in v and "b" in v
    del v["a"]
    del v["b"]
    v + d
    assert "a" in v and "b" in v
    v - d
    assert "a" not in v and "b" not in v
    print(v)
    v.close()
def replace_vars(self, line):

    # self.update_time()

    variable = Variables()
    newline = line

    if len(variable) > 0:
        for name in variable.data:
            value = str(variable[name])
            newline = newline.replace("$" + name, value)
            newline = newline.replace("var." + name, value)

    for v in os.environ:
        name = v.replace('os.', '')
        if name in newline:
            value = os.environ[name]
            newline = newline.replace("os." + v, value)

    default = Default()
    if default is not None:
        for v in default.data:
            name = "default." + v.replace(",", ".")
            value = default.data[v]
            if name in newline:
                newline = newline.replace(name, value)

        # replace if global is missing
        global_default = default["global"]
        if global_default is not None:
            for v in global_default:
                name = "default." + v
                value = global_default[v]
                if name in newline:
                    newline = newline.replace(name, value)

    default.close()
    variable.close()

    newline = path_expand(newline)
    return line, newline
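# Illustrative example (assumed values): with {"cloud": "aws"} in the
# Variables store, replace_vars() rewrites both substitution spellings and
# returns the original line alongside the expanded one. CMShell stands for
# the console class that defines replace_vars().
shell = CMShell()
old, new = shell.replace_vars("vm list --cloud=$cloud")
# new == "vm list --cloud=aws"; "var.cloud" in the line expands the same way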
def __init__(self, config_path='~/.cloudmesh/cloudmesh4.yaml',
             encrypted=False):
    """
    Initialize the Config class.

    :param config_path: A local file path to cloudmesh yaml config
        with a root element `cloudmesh`.
        Default: `~/.cloudmesh/cloudmesh4.yaml`
    """
    self.__dict__ = self.__shared_state
    if "data" not in self.__dict__:
        # VERBOSE("Load config")

        self.config_path = Path(path_expand(config_path)).resolve()
        self.config_folder = dirname(self.config_path)

        self.create(config_path=config_path)

        with open(self.config_path, "r") as stream:
            content = stream.read()
            content = path_expand(content)
            content = self.spec_replace(content)
            self.data = yaml.load(content, Loader=yaml.SafeLoader)

        # self.data is loaded as nested OrderedDict, can not use set or get
        # methods directly

        if self.data is None:
            raise EnvironmentError(
                "Failed to load configuration file cloudmesh4.yaml, "
                "please check the path and file locally")

        #
        # populate default variables
        #
        self.variable_database = Variables(filename="~/.cloudmesh/var-data")
        self.set_debug_defaults()

        default = self.default()
        for name in self.default():
            if name not in self.variable_database:
                self.variable_database[name] = default[name]

        if "cloud" in default:
            self.cloud = default["cloud"]
        else:
            self.cloud = None
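# Hedged usage sketch: Config behaves like a nested dictionary backed by
# ~/.cloudmesh/cloudmesh4.yaml, and get() accepts the dotted paths seen in
# the driver lookup earlier in this section. The "aws" entry name is only an
# example.
config = Config()
user = config["cloudmesh"]["profile"]["user"]       # nested access, as in the setup() methods
kind = config.get("cloudmesh.cloud.aws.cm.kind")    # dotted access, as in get()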
def new(instance, args):
    # instance.new.__doc__ = doc
    # noinspection PyUnusedLocal
    try:
        argv = shlex.split(args)
        # pprint(argv)
        arguments = dotdict(docopt(doc, help=True, argv=argv))
        # pprint(arguments)
        verbose = int(Variables()["verbose"] or 0)
        if verbose > 9:
            s = pformat(arguments)
            banner(s, label="Arguments", color="BLUE")
        func(instance, args, arguments)
    except SystemExit as e:
        if args not in ('-h', '--help'):
            if "::" in doc:
                usage = textwrap.dedent(doc.split("::")[1])
            else:
                usage = textwrap.dedent(doc.split("Usage:")[1])
            print()
            print("Usage:")
            for line in usage.split("\n"):
                if ":" in line:
                    kind = line.split(":")[0]
                    if kind in ["Arguments", "Options", "Example",
                                "Description"]:
                        break
                print(line)
            Console.error(
                "Could not execute the command. Please check usage with")
            print()
            Console.msg("    cms help", name.replace("do_", ""))
            print()
def test_002(self):
    HEADING("delete")
    v = Variables()
    del v["gregor"]
    assert "gregor" not in v
    v.close()
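# Summary sketch of the dict-like Variables interface that test_001 through
# test_004 exercise; the semantics are taken from the assertions above and
# the import path is an assumption.
from cloudmesh.common.variables import Variables

v = Variables()
v["gregor"] = "gregor"        # assignment persists a key/value pair (test_001)
assert "gregor" in v          # membership test (test_001, test_002)
del v["gregor"]               # deletion removes the key (test_002)
v + {"a": "1", "b": "2"}      # adding a dict merges its items (test_003, test_004)
v - {"a": "1", "b": "2"}      # subtracting a dict (or its .keys()) removes them
print(len(v))                 # number of stored variables (test_001)
v.close()                     # release the underlying store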
def do_aws(self, args, arguments): """ :: Usage: vm ping [NAMES] [--cloud=CLOUDS] [--count=N] [--processors=PROCESSORS] vm check [NAMES] [--cloud=CLOUDS] [--username=USERNAME] [--processors=PROCESSORS] vm status [NAMES] [--cloud=CLOUDS] vm console [NAME] [--force] vm start [NAMES] [--cloud=CLOUD] [--parallel] [--processors=PROCESSORS] [--dryrun] vm stop [NAMES] [--cloud=CLOUD] [--parallel] [--processors=PROCESSORS] [--dryrun] vm terminate [NAMES] [--cloud=CLOUD] [--parallel] [--processors=PROCESSORS] [--dryrun] vm delete [NAMES] [--cloud=CLOUD] [--parallel] [--processors=PROCESSORS] [--dryrun] vm refresh [--cloud=CLOUDS] vm list [NAMES] [--cloud=CLOUDS] [--output=OUTPUT] [--refresh] vm boot [--name=VMNAMES] [--cloud=CLOUD] [--username=USERNAME] [--image=IMAGE] [--flavor=FLAVOR] [--public] [--secgroup=SECGROUPs] [--key=KEY] [--dryrun] vm boot [--n=COUNT] [--cloud=CLOUD] [--username=USERNAME] [--image=IMAGE] [--flavor=FLAVOR] [--public] [--secgroup=SECGROUPS] [--key=KEY] [--dryrun] vm run [--name=VMNAMES] [--username=USERNAME] [--dryrun] COMMAND vm script [--name=NAMES] [--username=USERNAME] [--dryrun] SCRIPT vm ip assign [NAMES] [--cloud=CLOUD] vm ip show [NAMES] [--group=GROUP] [--cloud=CLOUD], [--output=OUTPUT] [--refresh] vm ip inventory [NAMES] vm ssh [NAMES] [--username=USER] [--quiet] [--ip=IP] [--key=KEY] [--command=COMMAND] [--modify-knownhosts] vm rename [OLDNAMES] [NEWNAMES] [--force] [--dryrun] vm wait [--cloud=CLOUD] [--interval=SECONDS] vm info [--cloud=CLOUD] [--output=OUTPUT] vm username USERNAME [NAMES] [--cloud=CLOUD] vm resize [NAMES] [--size=SIZE] vm debug [NAMES] Arguments: OUTPUT the output format COMMAND positional arguments, the commands you want to execute on the server(e.g. ls -a) separated by ';', you will get a return of executing result instead of login to the server, note that type in -- is suggested before you input the commands NAME server name. By default it is set to the name of last vm from database. NAMES server name. By default it is set to the name of last vm from database. KEYPAIR_NAME Name of the vm keypair to be used to create VM. Note this is not a path to key. NEWNAMES New names of the VM while renaming. OLDNAMES Old names of the VM while renaming. Options: --output=OUTPUT the output format [default: table] -H --modify-knownhosts Do not modify ~/.ssh/known_hosts file when ssh'ing into a machine --username=USERNAME the username to login into the vm. If not specified it will be guessed from the image name and the cloud --ip=IP give the public ip of the server --cloud=CLOUD give a cloud to work on, if not given, selected or default cloud will be used --count=COUNT give the number of servers to start --detail for table, a brief version is used as default, use this flag to print detailed table --flavor=FLAVOR give the name or id of the flavor --group=GROUP give the group name of server --secgroup=SECGROUP security group name for the server --image=IMAGE give the name or id of the image --key=KEY specify a key to use, input a string which is the full path to the private key file --keypair_name=KEYPAIR_NAME Name of the vm keypair to be used to create VM. Note this is not a path to key. --user=USER give the user name of the server that you want to use to login --name=NAME give the name of the virtual machine --force rename/ delete vms without user's confirmation --command=COMMAND specify the commands to be executed --parallel execute commands in parallel Description: commands used to boot, start or delete servers of a cloud vm default [options...] 
Displays default parameters that are set for vm boot either on the default cloud or the specified cloud. vm boot [options...] Boots servers on a cloud, user may specify flavor, image .etc, otherwise default values will be used, see how to set default values of a cloud: cloud help vm start [options...] Starts a suspended or stopped vm instance. vm stop [options...] Stops a vm instance . vm delete [options...] Delete servers of a cloud, user may delete a server by its name or id, delete servers of a group or servers of a cloud, give prefix and/or range to find servers by their names. Or user may specify more options to narrow the search vm floating_ip_assign [options...] assign a public ip to a VM of a cloud vm ip show [options...] show the ips of VMs vm ssh [options...] login to a server or execute commands on it vm list [options...] same as command "list vm", please refer to it vm status [options...] Retrieves status of last VM booted on cloud and displays it. vm refresh [--cloud=CLOUDS] this command refreshes the data for virtual machines, images and flavors for the specified clouds. vm ping [NAMES] [--cloud=CLOUDS] [--count=N] [--processors=PROCESSORS] pings the specified virtual machines, while using at most N pings. The ping is executed in parallel. If names are specifies the ping is restricted to the given names in parameter format. If clouds are specified, names that are not in these clouds are ignored. If the name is set in the variables this name is used. Tip: give the VM name, but in a hostlist style, which is very convenient when you need a range of VMs e.g. sample[1-3] => ['sample1', 'sample2', 'sample3'] sample[1-3,18] => ['sample1', 'sample2', 'sample3', 'sample18'] Quoting commands: cm vm login gvonlasz-004 --command=\"uname -a\" Limitations: """ map_parameters(arguments, 'active', 'cloud', 'command', 'dryrun', 'flavor', 'force', 'output', 'group', 'image', 'interval', 'ip', 'key', 'modify-knownhosts', 'n', 'name', 'public', 'quiet', 'secgroup', 'size', 'username') # VERBOSE.print(arguments, verbose=9) variables = Variables() # pprint(arguments) # pprint(variables) provider = Provider() database = CmDatabase() # ok, but not tested if arguments.refresh: """vm refresh [--cloud=CLOUDS]""" provider.list() provider.flavors() provider.images() # ok elif arguments.ping: """vm ping [NAMES] [--cloud=CLOUDS] [--count=N] [--processors=PROCESSORS]""" # cms aws ping t --cloud=aws --count=3 --processors=3 if arguments.NAMES: variables['vm'] = arguments.NAMES clouds, names = Arguments.get_cloud_and_names( "ping", arguments, variables) params = {} count = arguments['--count'] if count: params['count'] = int(count) processors = arguments['--processors'] if processors: params['processors'] = int(processors[0]) # gets public ips from database public_ips = [] cursor = database.db['aws-node'] for name in names: for node in cursor.find({'name': name}): public_ips.append(node['public_ips']) public_ips = [y for x in public_ips for y in x] # print(public_ips) Shell3.pings(ips=public_ips, **params) # ok elif arguments.check: """vm check [NAMES] [--cloud=CLOUDS] [--username=USERNAME] [--processors=PROCESSORS]""" # cms aws check t --cloud=aws --username=ubuntu --processors=3 if arguments.NAMES: variables['vm'] = arguments.NAMES clouds, names = Arguments.get_cloud_and_names( "ping", arguments, variables) params = {} params['key'] = provider.p.spec["credentials"][ 'EC2_PRIVATE_KEY_FILE_PATH'] + provider.p.spec["credentials"][ 'EC2_PRIVATE_KEY_FILE_NAME'] params['username'] = arguments['--username'] # or 
get from db processors = arguments['--processors'] if processors: params['processors'] = int(processors[0]) # gets public ips from database public_ips = [] cursor = database.db['aws-node'] for name in names: for node in cursor.find({'name': name}): public_ips.append(node['public_ips']) public_ips = [y for x in public_ips for y in x] Shell3.checks(hosts=public_ips, **params) # ok elif arguments.status: """vm status [NAMES] [--cloud=CLOUDS]""" # cms aws status t --cloud=aws if arguments.NAMES: variables['vm'] = arguments.NAMES clouds, names = Arguments.get_cloud_and_names( "status", arguments, variables) # gets status from database status = {} cursor = database.db['aws-node'] for name in names: for node in cursor.find({'name': name}): status[name] = node['state'] pprint(status) #ok elif arguments.start: """vm start [NAMES] [--cloud=CLOUD] [--parallel] [--processors=PROCESSORS] [--dryrun]""" # cms aws start t --parallel --processors=3 if arguments.NAMES: variables['vm'] = arguments.NAMES clouds, names = Arguments.get_cloud_and_names( "start", arguments, variables) params = {} processors = arguments['--processors'] if arguments['--parallel']: params['option'] = 'pool' if processors: params['processors'] = int(processors[0]) else: params['option'] = 'iter' if arguments['--dryrun']: print("start nodes {}\noption - {}\nprocessors - {}".format( names, params['option'], processors)) else: pprint(provider.start(names, **params)) #ok elif arguments.stop: """vm stop [NAMES] [--cloud=CLOUD] [--parallel] [--processors=PROCESSORS] [--dryrun]""" # cms aws stop t --parallel --processors=2 if arguments.NAMES: variables['vm'] = arguments.NAMES clouds, names = Arguments.get_cloud_and_names( "stop", arguments, variables) params = {} processors = arguments['--processors'] if arguments['--parallel']: params['option'] = 'pool' if processors: params['processors'] = int(processors[0]) else: params['option'] = 'iter' if arguments['--dryrun']: print("stop nodes {}\noption - {}\nprocessors - {}".format( names, params['option'], processors)) else: vms = provider.stop(names, **params) order = provider.p.output['vm']['order'] header = provider.p.output['vm']['header'] print( Printer.flatwrite(vms, order=order, header=header, output='table')) #ok elif arguments.terminate: """vm terminate [NAMES] [--cloud=CLOUD] [--parallel] [--processors=PROCESSORS] [--dryrun]""" # cms aws terminate t --parallel --processors=2 if arguments.NAMES: variables['vm'] = arguments.NAMES clouds, names = Arguments.get_cloud_and_names( "terminate", arguments, variables) params = {} processors = arguments['--processors'] if arguments['--parallel']: params['option'] = 'pool' if processors: params['processors'] = int(processors[0]) else: params['option'] = 'iter' if arguments['--dryrun']: print( "terminate nodes {}\noption - {}\nprocessors - {}".format( names, params['option'], processors)) else: pprint(provider.destroy(names, **params)) #ok elif arguments.delete: """vm delete [NAMES] [--cloud=CLOUD] [--parallel] [--processors=PROCESSORS] [--dryrun]""" if arguments.NAMES: variables['vm'] = arguments.NAMES clouds, names = Arguments.get_cloud_and_names( "terminate", arguments, variables) params = {} processors = arguments['--processors'] if arguments['--parallel']: params['option'] = 'pool' if processors: params['processors'] = int(processors[0]) else: params['option'] = 'iter' if arguments['--dryrun']: print("delete nodes {}\noption - {}\nprocessors - {}".format( names, params['option'], processors)) else: pprint(provider.destroy(names, **params)) # 
TODO: username, secgroup elif arguments.boot: """ vm boot [--name=VMNAMES] [--cloud=CLOUD] [--username=USERNAME] [--image=IMAGE] [--flavor=FLAVOR] [--public] [--secgroup=SECGROUPs] [--key=KEY] [--dryrun] vm boot [--n=COUNT] [--cloud=CLOUD] [--username=USERNAME] [--image=IMAGE] [--flavor=FLAVOR] [--public] [--secgroup=SECGROUPS] [--key=KEY] [--dryrun] """ if arguments['--name']: # cms aws boot --name=t --cloud=aws --username=root --image=ami-08692d171e3cf02d6 --flavor=t2.micro --public --secgroup=group1 --key=aws_cert # cms aws boot --name=t --image=ubuntu/images/hvm-ssd/ubuntu-xenial-16.04-amd64-server-20190212 --flavor=t2.micro --key=aws_cert names = Parameter.expand(arguments['--name']) elif arguments['n']: # cms aws boot --n=2 --cloud=aws --username=root --image=ami-08692d171e3cf02d6 --flavor=t2.micro --public --secgroup=group1 --key=aws_cert n = int(arguments['n']) names = [] for i in range(n): # generate random names m = hashlib.blake2b(digest_size=8) m.update(str(datetime.utcnow()).encode('utf-8')) names.append(m.hexdigest()) else: print("please provide name or count to boot vm") # username = arguments['--username'] image = arguments['--image'] flavor = arguments['--flavor'] params = {} public = arguments['--public'] if public: params['ex_assign_public_ip'] = public secgroup = Parameter.expand(arguments['--secgroup']) if secgroup: params['ex_security_groups'] = secgroup key = arguments['--key'] if key: params['ex_keyname'] = key if arguments['--dryrun']: print("""create nodes {} image - {} flavor - {} assign public ip - {} security groups - {} keypair name - {}""".format(names, image, flavor, public, secgroup, key)) else: order = provider.p.output['vm']['order'] header = provider.p.output['vm']['header'] vm = provider.create(names=names, image=image, size=flavor, **params) print( Printer.write(vm, order=order, header=header, output='table')) #ok elif arguments.list: """ vm list [NAMES] [--cloud=CLOUDS] [--output=OUTPUT] [--refresh] """ if arguments.NAMES: variables['vm'] = arguments.NAMES clouds, names = Arguments.get_cloud_and_names( "list", arguments, variables) params = {} params['order'] = provider.p.output['vm']['order'] params['header'] = provider.p.output['vm']['header'] params['output'] = 'table' if arguments['--refresh']: provider.list() if arguments.NAMES: vms = [] for name in names: vms += database.find(collection='aws-node', name=name) else: vms = database.find(collection='aws-node') print(Printer.flatwrite(vms, **params)) # TODO elif arguments.info: """ vm info [--cloud=CLOUD] [--output=OUTPUT] """ print("functionality not implemented") # TODO elif arguments.rename: """vm rename [OLDNAMES] [NEWNAMES] [--force] [--dryrun]""" print("functionality not implemented") # TODO elif arguments.ip and arguments.show: """vm ip show [NAMES] [--group=GROUP] [--cloud=CLOUD] [--output=OUTPUT] [--refresh] """ clouds, names = Arguments.get_cloud_and_names( "ip", arguments, variables) pprint(get_publicIPs(names)) # TODO elif arguments.ip and arguments.assign: """ vm ip assign [NAMES] [--cloud=CLOUD] """ clouds, names = Arguments.get_cloud_and_names( "ip", arguments, variables) pprint(provider.assign_public_ip(names)) # TODO elif arguments.ip and arguments.inventory: """vm ip inventory [NAMES]""" print("list ips that could be assigned") # TODO elif arguments.default: """vm default [options...]""" print("functionality not implemented") # ok elif arguments.run: """vm run [--name=VMNAMES] [--username=USERNAME] [--dryrun] [COMMAND ...]""" # cms aws run --name=t --username=ubuntu uname clouds, 
names = Arguments.get_cloud_and_names( "run", arguments, variables) username = arguments['--username'] command = arguments.COMMAND name_ips = {} cursor = database.db['aws-node'] for name in names: for node in cursor.find({'name': name}): name_ips[name] = node['public_ips'] if arguments['--dryrun']: print("run command {} on vms: {}".format(command, names)) else: provider.ssh(name_ips, username=username, command=command) # BUG in call command elif arguments.script: """vm script [--name=NAMES] [--username=USERNAME] [--dryrun] SCRIPT""" # cms aws script --name=t --username=ubuntu tests/test_aws.sh clouds, names = Arguments.get_cloud_and_names( "run", arguments, variables) username = arguments['--username'] script = arguments.SCRIPT name_ips = {} cursor = database.db['aws-node'] for name in names: for node in cursor.find({'name': name}): name_ips[name] = node['public_ips'] if arguments['--dryrun']: print("run script {} on vms: {}".format(script, names)) else: provider.ssh(name_ips, username=username, script=script) # TODO elif arguments.resize: """vm resize [NAMES] [--size=SIZE]""" pass # TODO # shh run command in implemented as aws run # not sure what to do with this command # since ssh into multiple vms at the same time doesn't make a lot of sense elif arguments.ssh: """ vm ssh [NAMES] [--username=USER] [--quiet] [--ip=IP] [--key=KEY] [--command=COMMAND] [--modify-knownhosts] """ if arguments.NAMES: variables['vm'] = arguments.NAMES clouds, names = Arguments.get_cloud_and_names( "list", arguments, variables) ips = {} cursor = database.db['aws-node'] for name in names: for node in cursor.find({'name': name}): pprint(node) username = arguments['--username'] ip = arguments['--ip'] params = {} quiet = arguments['--quiet'] if quiet: params['quiet'] = quiet command = arguments['--command'] if command: params['command'] = command modify_host = arguments['--modify-knownhosts'] if modify_host: params['modify_host'] = modify_host provider.ssh(username=username, ip=ip, **params) # TODO elif arguments.wait: """vm wait [--cloud=CLOUD] [--interval=SECONDS]""" print("waits for the vm till its ready and one can login") # TODO elif arguments.username: """vm username USERNAME [NAMES] [--cloud=CLOUD]""" print("sets the username for the vm") elif arguments.debug: pprint(provider.p.driver) # print(provider.p.cloudman.ex_list_floating_ips()) # print(provider.loop(names, abs, option='iter',processors=3)) return
def do_objstorage(self, args, arguments):
    """
    ::

      Usage:
        objstorage [--service=SERVICE] create dir DIRECTORY
        objstorage [--service=SERVICE] copy SOURCE DESTINATION [--recursive]
        objstorage [--service=SERVICE] get SOURCE DESTINATION [--recursive]
        objstorage [--service=SERVICE] put SOURCE DESTINATION [--recursive]
        objstorage [--service=SERVICE] list SOURCE [--recursive] [--output=OUTPUT]
        objstorage [--service=SERVICE] delete SOURCE
        objstorage [--service=SERVICE] search DIRECTORY FILENAME [--recursive] [--output=OUTPUT]

      This command does some useful things.

      Arguments:
        SOURCE       BUCKET | OBJECT can be a source bucket or object name or file
        DESTINATION  BUCKET | OBJECT can be a destination bucket or object name or file
        DIRECTORY    DIRECTORY refers to a folder or bucket on the cloud
                     service, for example: awss3

      Options:
        -h, --help
        --service=SERVICE  specify the cloud service name like aws-s3

      Description:
        commands used to upload, download, and list files on different
        cloud objstorage services.

        objstorage put [options..]
          Uploads the file specified in the filename to the specified
          cloud from the SOURCEDIR.

        objstorage get [options..]
          Downloads the file specified in the filename from the
          specified cloud to the DESTDIR.

        objstorage delete [options..]
          Deletes the file specified in the filename from the
          specified cloud.

        objstorage list [options..]
          lists all the files from the container name specified on
          the specified cloud.

        objstorage create dir [options..]
          creates a folder with the directory name specified on the
          specified cloud.

        objstorage search [options..]
          searches for the source in all the folders on the
          specified cloud.

      Example:
        set objstorage=s3object
        objstorage put SOURCE DESTINATION --recursive

        is the same as

        objstorage --service=s3object put SOURCE DESTINATION --recursive

        Create a multi-file directory in a bucket

            $ cms set objstorage=awss3
            $ tree
              a/a1.tx
              a/b/b1.txt

            cms objstorage create a/b/
            cms objstorage put a/b/b1.txt /a/b
    """
    # arguments.CONTAINER = arguments["--container"]

    map_parameters(arguments, "recursive", "objstorage")
    VERBOSE.print(arguments, verbose=9)

    if arguments.service is None:
        try:
            v = Variables()
            arguments.service = v['objstorage']
        except Exception as e:
            arguments.service = None
            raise ValueError("objstorage provider is not defined")

    arguments.service = Parameter.expand(arguments.service)
    print(arguments)

    provider = Provider(arguments.service)

    if arguments.copy:
        result = provider.copy(arguments.SOURCE,
                               arguments.DESTINATION,
                               arguments.recursive)
    elif arguments.get:
        result = provider.get(arguments.SOURCE,
                              arguments.DESTINATION,
                              arguments.recursive)
    elif arguments.put:
        result = provider.put(arguments.SOURCE,
                              arguments.DESTINATION,
                              arguments.recursive)
    elif arguments.create and arguments.dir:
        result = provider.createdir(arguments.DIRECTORY)
    elif arguments.list:
        for objstorage in arguments.service:
            provider = Provider(objstorage)
            result = provider.list(arguments.SOURCE, arguments.recursive)
    elif arguments.delete:
        for objstorage in arguments.service:
            provider = Provider(objstorage)
            provider.delete(arguments.SOURCE)
    elif arguments.search:
        for objstorage in arguments.service:
            provider = Provider(objstorage)
            provider.search(arguments.DIRECTORY,
                            arguments.FILENAME,
                            arguments.recursive)

    return ""
def do_vm(self, args, arguments): """ :: Usage: vm ping [NAMES] [--cloud=CLOUDS] [--count=N] [--processors=PROCESSORS] vm check [NAMES] [--cloud=CLOUDS] [--processors=PROCESSORS] vm status [NAMES] [--cloud=CLOUDS] vm console [NAME] [--force] vm start [NAMES] [--cloud=CLOUD] [--dryrun] vm stop [NAMES] [--cloud=CLOUD] [--dryrun] vm terminate [NAMES] [--cloud=CLOUD] [--dryrun] vm delete [NAMES] [--cloud=CLOUD] [--dryrun] vm refresh [--cloud=CLOUDS] vm list [NAMES] [--cloud=CLOUDS] [--output=OUTPUT] [--refresh] vm boot [--name=VMNAMES] [--cloud=CLOUD] [--username=USERNAME] [--image=IMAGE] [--flavor=FLAVOR] [--public] [--secgroup=SECGROUPs] [--key=KEY] [--dryrun] vm boot [--n=COUNT] [--cloud=CLOUD] [--username=USERNAME] [--image=IMAGE] [--flavor=FLAVOR] [--public] [--secgroup=SECGROUPS] [--key=KEY] [--dryrun] vm run [--name=VMNAMES] [--username=USERNAME] [--dryrun] COMMAND vm script [--name=NAMES] [--username=USERNAME] [--dryrun] SCRIPT vm ip assign [NAMES] [--cloud=CLOUD] vm ip show [NAMES] [--group=GROUP] [--cloud=CLOUD] [--output=OUTPUT] [--refresh] vm ip inventory [NAMES] vm ssh [NAMES] [--username=USER] [--quiet] [--ip=IP] [--key=KEY] [--command=COMMAND] [--modify-knownhosts] vm rename [OLDNAMES] [NEWNAMES] [--force] [--dryrun] vm wait [--cloud=CLOUD] [--interval=SECONDS] vm info [--cloud=CLOUD] [--output=OUTPUT] vm username USERNAME [NAMES] [--cloud=CLOUD] vm resize [NAMES] [--size=SIZE] Arguments: OUTPUT the output format COMMAND positional arguments, the commands you want to execute on the server(e.g. ls -a) separated by ';', you will get a return of executing result instead of login to the server, note that type in -- is suggested before you input the commands NAME server name. By default it is set to the name of last vm from database. NAMES server name. By default it is set to the name of last vm from database. KEYPAIR_NAME Name of the vm keypair to be used to create VM. Note this is not a path to key. NEWNAMES New names of the VM while renaming. OLDNAMES Old names of the VM while renaming. Options: --output=OUTPUT the output format [default: table] -H --modify-knownhosts Do not modify ~/.ssh/known_hosts file when ssh'ing into a machine --username=USERNAME the username to login into the vm. If not specified it will be guessed from the image name and the cloud --ip=IP give the public ip of the server --cloud=CLOUD give a cloud to work on, if not given, selected or default cloud will be used --count=COUNT give the number of servers to start --detail for table, a brief version is used as default, use this flag to print detailed table --flavor=FLAVOR give the name or id of the flavor --group=GROUP give the group name of server --secgroup=SECGROUP security group name for the server --image=IMAGE give the name or id of the image --key=KEY specify a key to use, input a string which is the full path to the private key file --keypair_name=KEYPAIR_NAME Name of the vm keypair to be used to create VM. Note this is not a path to key. --user=USER give the user name of the server that you want to use to login --name=NAME give the name of the virtual machine --force rename/ delete vms without user's confirmation --command=COMMAND specify the commands to be executed Description: commands used to boot, start or delete servers of a cloud vm default [options...] Displays default parameters that are set for vm boot either on the default cloud or the specified cloud. vm boot [options...] 
Boots servers on a cloud, user may specify flavor, image .etc, otherwise default values will be used, see how to set default values of a cloud: cloud help vm start [options...] Starts a suspended or stopped vm instance. vm stop [options...] Stops a vm instance . vm delete [options...] Delete servers of a cloud, user may delete a server by its name or id, delete servers of a group or servers of a cloud, give prefix and/or range to find servers by their names. Or user may specify more options to narrow the search vm floating_ip_assign [options...] assign a public ip to a VM of a cloud vm ip show [options...] show the ips of VMs vm ssh [options...] login to a server or execute commands on it vm list [options...] same as command "list vm", please refer to it vm status [options...] Retrieves status of last VM booted on cloud and displays it. vm refresh [--cloud=CLOUDS] this command refreshes the data for virtual machines, images and flavors for the specified clouds. vm ping [NAMES] [--cloud=CLOUDS] [--count=N] [--processors=PROCESSORS] pings the specified virtual machines, while using at most N pings. The ping is executed in parallel. If names are specifies the ping is restricted to the given names in parameter format. If clouds are specified, names that are not in these clouds are ignored. If the name is set in the variables this name is used. Tip: give the VM name, but in a hostlist style, which is very convenient when you need a range of VMs e.g. sample[1-3] => ['sample1', 'sample2', 'sample3'] sample[1-3,18] => ['sample1', 'sample2', 'sample3', 'sample18'] Quoting commands: cm vm login gvonlasz-004 --command=\"uname -a\" Limitations: Azure: rename is not supported """ map_parameters(arguments, 'active', 'cloud', 'command', 'dryrun', 'flavor', 'force', 'output', 'group', 'image', 'interval', 'ip', 'key', 'modify-knownhosts', 'n', 'name', 'public', 'quiet', 'secgroup', 'size', 'username') VERBOSE(arguments) variables = Variables() if arguments.refresh: names = [] clouds, names = Arguments.get_cloud_and_names( "refresh", arguments, variables) return "" elif arguments.ping: # TODO: IMPLEMENT names = [] pings = int(arguments.N or 3) names = [] clouds, names = Arguments.get_cloud_and_names( "ping", arguments, variables) for name in names: ping = Shell.live("ping -c {N} {name}".format(name=name, N=arguments.N)) print(ping) else: return True return "" elif arguments.check: names = [] clouds, names = Arguments.get_cloud_and_names( "check", arguments, variables) return "" elif arguments.status: names = [] clouds, names = Arguments.get_cloud_and_names( "status", arguments, variables) return "" elif arguments.start: names = [] clouds, names = Arguments.get_cloud_and_names( "start", arguments, variables) return "" elif arguments.stop: names = [] clouds, names = Arguments.get_cloud_and_names( "stop", arguments, variables) return "" elif arguments.terminate: names = [] clouds, names = Arguments.get_cloud_and_names( "terminate", arguments, variables) return "" elif arguments.delete: clouds, names = Arguments.get_cloud_and_names( "delete", arguments, variables) return "" elif arguments.boot: print("boot the vm") elif arguments.list: # vm list [NAMES] # [--cloud=CLOUDS] # [--output=OUPTUT] # [--refresh] # if no clouds find the clouds of all specified vms by name # find all vms of the clouds, # print only those vms specified by name, if no name is given print all for the cloud # print("list the vms") clouds, names = Arguments.get_cloud_and_names( "list", arguments, variables) # print("Clouds:", clouds) if 
arguments.NAMES is not None: names = Parameter.expand(arguments.NAMES) Console.error("NAMES, not yet implemented" + str(names)) try: if arguments["--refresh"]: pass # find all clouds in db # iterate over the clouds # for each name in name queue, find it and add it to the cloud vm list # for each cloud print the vms else: pass # find all clouds in db # iterate over all clouds # find the vm with the name # add it to the cloud list # for each cloud print the vms except Exception as e: VERBOSE(e) return "" else: try: if arguments["--refresh"]: for cloud in clouds: Console.ok("refresh " + cloud) p = Provider(cloud) vms = p.list() order = p.p.output['vm']['order'] # not pretty header = p.p.output['vm']['header'] # not pretty print( Printer.flatwrite(vms, sort_keys=["cm.name"], order=order, header=header, output=arguments.output)) else: for cloud in clouds: p = Provider(cloud) kind = p.kind # pprint(p.__dict__) # pprint(p.p.__dict__) # not pretty collection = "{cloud}-node".format(cloud=cloud, kind=p.kind) db = CmDatabase() vms = db.find(collection=collection) # pprint(vms) # print(arguments.output) # print(p.p.output['vm']) order = p.p.output['vm']['order'] # not pretty header = p.p.output['vm']['header'] # not pretty print( Printer.flatwrite(vms, sort_keys=["cm.name"], order=order, header=header, output=arguments.output)) except Exception as e: VERBOSE(e) return "" elif arguments.info: """ vm info [--cloud=CLOUD] [--output=OUTPUT] """ print("info for the vm") cloud, names = Arguments.get_cloud_and_names( "info", arguments, variables) elif arguments.rename: print("rename the vm") v = Variables() cloud = v["cloud"] p = Provider(cloud) try: oldnames = Parameter.expand(arguments["OLDNAMES"]) newnames = Parameter.expand(arguments["NEWNAMES"]) force = arguments["--force"] if oldnames is None or newnames is None: Console.error("Wrong VMs specified for rename", traceflag=False) elif len(oldnames) != len(newnames): Console.error("The number of VMs to be renamed is wrong", traceflag=False) else: print(oldnames) print(newnames) for i in range(0, len(oldnames)): oldname = oldnames[i] newname = newnames[i] if arguments["--dryrun"]: Console.ok("Rename {} to {}".format( oldname, newname)) else: print(f"rename {oldname} -> {newname}") p.rename(source=oldname, destination=newname) msg = "info. OK." 
Console.ok(msg) except Exception as e: Error.traceback(e) Console.error("Problem renameing instances", traceflag=True) elif arguments["ip"] and arguments["show"]: print("show the ips") """ vm ip show [NAMES] [--group=GROUP] [--cloud=CLOUD] [--output=OUTPUT] [--refresh] """ elif arguments["ip"] and arguments["assign"]: """ vm ip assign [NAMES] [--cloud=CLOUD] """ print("assign the public ip") elif arguments["ip"] and arguments["inventory"]: """ vm ip inventory [NAMES] """ print("list ips that could be assigned") elif arguments.username: """ vm username USERNAME [NAMES] [--cloud=CLOUD] """ print("sets the username for the vm") elif arguments.default: print("sets defaults for the vm") elif arguments.run: """ vm run [--name=NAMES] [--username=USERNAME] [--dryrun] COMMAND """ pass elif arguments.script: """ vm script [--name=NAMES] [--username=USERNAME] [--dryrun] SCRIPT """ pass elif arguments.resize: """ vm resize [NAMES] [--size=SIZE] """ pass elif arguments.ssh: """ vm ssh [NAMES] [--username=USER] [--quiet] [--ip=IP] [--key=KEY] [--command=COMMAND] [--modify-knownhosts] """ print("ssh the vm") elif arguments.console: # vm console [NAME] [--force] names = Arguments.get_names(arguments, variables) for name in names: # r = vm.console(name,force=argument.force) Console.msg("{label} {name}".format(label="console", name=name)) return elif arguments.wait: """ vm wait [--cloud=CLOUD] [--interval=SECONDS] """ print("waits for the vm till its ready and one can login")
def do_queue(self, args, arguments):
    """
    ::

      Usage:
        queue create --name=NAME --policy=POLICY --cloud=CLOUD [--charge=CHARGE] [--unit=UNIT]
        queue activate --cloud=CLOUD --queue=NAME
        queue deactivate --cloud=CLOUD --queue=NAME
        queue set --cloud=CLOUD --queue=QUEUE --param=PARAM --val=VALUE
        queue list clouds
        queue list queues --cloud=CLOUD
        queue list jobs --queue=QUEUE
        queue remove --name=NAME

      Arguments:
          FILE        a file name
          INPUT_TYPE  tbd

      Options:
          --depth=DEPTH    [default: 1]
          --format=FORMAT  [default: table]

      Description:
          This command creates a queue that is associated with a cloud.
          Each queue is associated with a cluster and can have several
          jobs in it.
          It is possible to get the list of the jobs in a queue either
          based on the queue name or based on the cluster name with
          which the queue is interacting.

      Examples:
    """

    queue = Queue()  # debug=arguments["--debug"])

    VERBOSE(arguments)

    implemented_policies = ['FIFO', 'FILO']

    variables = Variables()

    # docopt for some reason does not show all of the arguments in dot
    # format, that's the reason I used the -- format.

    if arguments.create and \
            arguments['--name'] and \
            arguments['--cloud'] and \
            arguments['--policy']:

        queue_name = arguments['--name']
        cloud_name = arguments['--cloud']
        policy = arguments['--policy']
        if policy.upper() not in ['FIFO', 'FILO']:
            Console.error("Policy {policy} not defined, currently "
                          "implemented policies are {policies} ".format(
                              policy=policy.upper(),
                              policies=implemented_policies))
            return
        charge = arguments['--charge']
        unit = arguments['--unit']
        queue.create(queue_name, cloud_name, policy, charge, unit)

    elif arguments.activate and \
            arguments['--cloud'] and \
            arguments['--queue']:

        queue_found = queue.findQueue(arguments['--cloud'],
                                      arguments['--queue'])
        if queue_found:
            queue.activate()

    elif arguments.deactivate and \
            arguments['--cloud'] and \
            arguments['--queue']:

        queue_found = queue.findQueue(arguments['--cloud'],
                                      arguments['--queue'])
        if queue_found:
            queue.deactivate()

    elif arguments.list and arguments.clouds:
        queue.findClouds()

    elif arguments.list and arguments.queues:
        cloud = arguments['--cloud']
        queue.findQueues(cloud)

    elif arguments.list and arguments.jobs:
        queue.findQueues()

    elif arguments.set and arguments['--cloud'] and arguments['--queue'] \
            and arguments['--param'] and arguments['--val']:

        param = arguments['--param']
        val = arguments['--val']
        queue_found = queue.findQueue(arguments['--cloud'],
                                      arguments['--queue'])
        if queue_found:
            queue.setParam(param, val)

    elif arguments.remove and arguments['--name']:
        name = arguments['--name']
        queue_found = queue.findQueue(name)
        if queue_found:
            queue.removeQueue()
def do_image(self, args, arguments):
    """
    ::

      Usage:
          image list [NAMES] [--cloud=CLOUD] [--refresh] [--output=OUTPUT]

      Options:
          --output=OUTPUT  the output format [default: table]
          --cloud=CLOUD    the cloud name
          --refresh        live data taken from the cloud

      Description:
          cm image list
          cm image list --output=csv
          cm image list 58c9552c-8d93-42c0-9dea-5f48d90a3188 --refresh
    """

    map_parameters(arguments, "refresh", "cloud", "output")

    VERBOSE(arguments)

    variables = Variables()

    if arguments.list and arguments.refresh:

        names = []

        clouds, names = Arguments.get_cloud_and_names("list",
                                                      arguments,
                                                      variables)
        print("AAA", clouds, names)
        for cloud in clouds:
            print(f"cloud {cloud}")
            provider = Provider(name=cloud)
            images = provider.images()

            order = provider.p.output['vm']['order']  # not pretty
            header = provider.p.output['vm']['header']  # not pretty

            print(Printer.flatwrite(images,
                                    sort_keys=["name"],
                                    order=order,
                                    header=header,
                                    output=arguments.output))
        return ""

    elif arguments.list:

        names = []

        clouds, names = Arguments.get_cloud_and_names("list",
                                                      arguments,
                                                      variables)
        print(clouds, names)

        try:
            for cloud in clouds:
                print(f"List {cloud}")
                p = Provider(cloud)
                kind = p.kind

                collection = "{cloud}-image".format(cloud=cloud,
                                                    kind=p.kind)
                db = CmDatabase()
                vms = db.find(collection=collection)

                order = p.p.output['vm']['order']  # not pretty
                header = p.p.output['vm']['header']  # not pretty

                print(Printer.flatwrite(vms,
                                        sort_keys=["name"],
                                        order=order,
                                        header=header,
                                        output=arguments.output))
        except Exception as e:
            VERBOSE(e)

        return ""
def onecmd(self, line):
    """Interpret the argument as though it had been typed in response
    to the prompt.

    This may be overridden, but should not normally need to be;
    see the precmd() and postcmd() methods for useful execution hooks.
    The return value is a flag indicating whether interpretation of
    commands by the interpreter should stop.
    """
    oldline, line = self.replace_vars(line)

    # -----------------------------
    # print comment lines, but do not execute them
    # -----------------------------
    if line.startswith('#') \
            or line.startswith('//') \
            or line.startswith('/*'):
        print(line)
        return ""

    if line.startswith('!'):
        os.system(line[1:])
        return ""

    # if line is None:
    #     return ""

    # if line.startswith("!"):
    #     line.replace("!", "! ")
    # line = self.var_replacer(line)
    # if line != "hist" and line:
    #     self._hist += [line.strip()]
    # if line.startswith("!") or line.startswith("shell"):
    #     self.do_shell_exec(line[1:])
    #     return ""

    cmd, arg, line = self.parseline(line)

    if line.startswith("$") or line.startswith('var.'):
        line = line.replace("$", "", 1)
        line = line.replace("var.", "", 1)
        print("FIND>", line, "<", sep='')
        variable = Variables()
        print(variable[line])
        variable.close()
        return ""

    # -----------------------------
    # handle empty line
    # -----------------------------
    if not line:
        return self.emptyline()

    # -----------------------------
    # handle file execution
    # -----------------------------
    #
    # this does not yet work
    #
    # if os.path.isfile(line):
    #     print("... execute", line)
    #     self.do_exec(line)
    #     return ""

    if cmd != '':
        try:
            func = getattr(self, 'do_' + cmd)
            return func(arg)
        except AttributeError as e:
            variables = Variables()
            trace = "T" in variables['trace']
            debug = "T" in variables['debug']
            command_missing = \
                "'CMShell' object has no attribute 'do_{cmd}'".format(cmd=cmd)
            if e.args[0] == command_missing:
                Console.error(
                    "this command does not exist: '{cmd}'".format(cmd=cmd),
                    traceflag=False)
            else:
                Error.traceback(error=e, debug=debug, trace=trace)
            # noinspection PyUnusedLocal
            cmd = None
    line = oldline
    return ""
def main():
    arguments = dotdict(docopt(__doc__))
    verbose = arguments["-v"]

    v = Variables()
    if verbose:
        v["verbose"] = "10"
    else:
        v["verbose"] = "0"

    # arguments["FORMAT"] = arguments["--format"]

    location = path_expand(arguments.DIRECTORY)

    VERBOSE(arguments)

    if arguments.CLASS in ["5", "516", "e516"]:
        repos = repos_516
    elif arguments.CLASS in ["2", "222", "e222"]:
        repos = repos_222
    else:
        print("your class is not yet supported")
        sys.exit(10)

    if arguments.contrib:

        owners = class_list(repos, location)
        create_contributors(owners, location)

    elif arguments.sections:

        print("# Section List")
        print()

        artifacts = artifact_list(repos, "section", location)

        t = []
        for entry in artifacts:
            if "url" in entry:
                entry["link"] = entry["url"]
                if ".md" not in entry["url"]:
                    entry["link"] = entry["url"] = None
            else:
                entry["link"] = entry["url"] = None
            if "title" not in entry:
                entry["title"] = "TBD"

            title = entry["title"]
            url = entry["url"]

            if url is not None:
                try:
                    r = requests.get(url, allow_redirects=True)
                    if r.status_code == 200:
                        url = f"[url]({url})"
                    else:
                        url = ":o: invalid "
                except:
                    url = ":o: invalid "
            else:
                url = ":o: ERROR: not an md file"

            if "TBD" == title:
                title = ":o: ERROR: no title specified"

            link = entry["link"]

            if entry["lastname"] != "TBD":
                t.append([
                    "[{hid}](https://github.com/cloudmesh-community/{hid})".format(**entry),
                    entry["lastname"],
                    entry["firstname"],
                    url,
                    title
                ])

        print(tabulate(
            t,
            headers=["Hid", "Lastname", "Firstname", "Url to md", "Title"],
            tablefmt=arguments["--format"]))

    elif arguments.chapters:

        print("# Chapter List")
        print()

        artifacts = artifact_list(repos, "chapter", location)

        t = []
        for entry in artifacts:
            if "url" in entry:
                entry["link"] = entry["url"]
                if ".md" not in entry["url"]:
                    entry["link"] = entry["url"] = None
            else:
                entry["link"] = entry["url"] = None
            if "title" not in entry:
                entry["title"] = "TBD"

            title = entry["title"]
            url = entry["url"]

            if url is not None:
                try:
                    r = requests.get(url, allow_redirects=True)
                    if r.status_code == 200:
                        url = f"[url]({url})"
                    else:
                        url = ":o: invalid "
                except:
                    url = ":o: invalid "
            else:
                url = ":o: ERROR: not an md file"

            if "TBD" == title:
                title = ":o: ERROR: no title specified"

            link = entry["link"]

            if entry["lastname"] != "TBD":
                t.append([
                    "[{hid}](https://github.com/cloudmesh-community/{hid})".format(**entry),
                    entry["lastname"],
                    entry["firstname"],
                    url,
                    title
                ])

        print(tabulate(
            t,
            headers=["Hid", "Lastname", "Firstname", "Url to md", "Title"],
            tablefmt=arguments["--format"]))

    elif arguments.projects:

        print("# Project List")
        print()

        artifacts = artifact_list(repos, "project", location)

        t = []
        for entry in artifacts:
            if "url" in entry:
                entry["link"] = entry["url"]
                if ".md" not in entry["url"]:
                    entry["link"] = entry["url"] = None
            else:
                entry["link"] = entry["url"] = None
            if "title" not in entry:
                entry["title"] = "TBD"

            title = entry["title"]
            url = entry["url"]

            if url is not None:
                try:
                    r = requests.get(url, allow_redirects=True)
                    if r.status_code == 200:
                        url = f"[url]({url})"
                    else:
                        url = ":o: invalid "
                except:
                    url = ":o: invalid "
            else:
                url = ":o: ERROR: not an md file"

            if "TBD" == title:
                title = ":o: ERROR: no title specified"

            link = entry["link"]

            if entry["lastname"] != "TBD":
                t.append([
                    "[{hid}](https://github.com/cloudmesh-community/{hid})".format(**entry),
                    entry["lastname"],
                    entry["firstname"],
                    url,
                    "",
                    title
                ])

        print(tabulate(
            t,
            headers=["Hid", "Lastname", "Firstname", "Url to md",
                     "Pytests", "Title"],
            tablefmt=arguments["--format"]))
def do_key(self, args, arguments): """ :: Usage: key -h | --help key list --cloud=CLOUDS [--output=OUTPUT] key list --source=ssh [--dir=DIR] [--output=OUTPUT] key list --source=git [--output=OUTPUT] [--username=USERNAME] key list [NAMES] [--output=OUTPUT] key load --filename=FILENAME [--output=OUTPUT] key add [NAME] [--source=FILENAME] key add [NAME] [--source=git] key add [NAME] [--source=ssh] key get NAME [--output=OUTPUT] key default --select key delete (NAMES | --select | --all) [--dryrun] key delete NAMES --cloud=CLOUDS [--dryrun] key upload [NAMES] [--cloud=CLOUDS] [--dryrun] key upload [NAMES] [VMS] [--dryrun] key group upload [--group=GROUPNAMES] [--cloud=CLOUDS] [--dryrun] key group add [--group=GROUPNAMES] [--cloud=CLOUDS] [--dryrun] key group add --file=FILENAME key group delete [--group=GROUPNAMES] [NAMES] [--dryrun] key group list [--group=GROUPNAMES] [--output=OUTPUT] key group export --group=GROUNAMES --filename=FILENAME Arguments: VMS Parameterized list of virtual machines CLOUDS The clouds NAME The name of the key. SOURCE db, ssh, all KEYNAME The name of a key. For key upload it defaults to the default key name. OUTPUT The format of the output (table, json, yaml) FILENAME The filename with full path in which the key is located Options: --dir=DIR the directory with keys [default: ~/.ssh] --output=OUTPUT the format of the output [default: table] --source=SOURCE the source for the keys [default: cm] --username=USERNAME the source for the keys [default: none] --name=KEYNAME The name of a key Description: Please note that some values are read from the cloudmesh4.yaml file. One such value is cloudmesh.profile.user Manages public keys is an essential component of accessing virtual machine sin the cloud. There are a number of sources where you can find public keys. This includes teh ~/.ssh directory and for example github. To list these keys the following list functions are provided. key list --source=git [--username=USERNAME] lists all keys in git for the specified user. If the name is not specified it is read from cloudmesh4.yaml key list --source=ssh [--dir=DIR] [--output=OUTPUT] lists all keys in the directory. If the directory is not specified the default will be ~/.ssh key list NAMES lists all keys in the named virtual machines. The keys will be uploaded into cloudmesh with the add command under the given name. If the name is not specified the name cloudmesh.profile.user is assumed. key add --ssh adds the default key in ~/.ssh/id_rsa.pub key add NAME --source=FILENAME adds the key specified by the filename with the given name key add NAME --git --username=username adds a named github key from a user with the given github username. Once the keys are uploaded to github, they can be listed key list [NAME] [--output=OUTPUT] list the keys loaded to cloudmesh in the given format: json, yaml, table. table is default. The NAME can be specified and if omitted the name cloudmesh.profile.user is assumed. key get NAME Retrieves the key indicated by the NAME parameter from cloudmesh and prints its details. key default --select Select the default key interactively key delete NAMES deletes the keys. This may also have an impact on groups key rename NAME NEW renames the key from NAME to NEW. Group management of keys is an important concept in cloudmesh, allowing multiple users to be added to virtual machines. The keys must be uploaded to cloudmesh with a name so they can be used in a group. The --dryrun option executes the command without uploading the information to the clouds. 
If no groupname is specified the groupname default is assumed. If no cloudnames are specified, all active clouds are assumed. active clouds can be set in the cloudmesh4.yaml file. key group delete [GROUPNAMES] [NAMES] [--dryrun] deletes the named keys from the named groups. key group list [GROUPNAMES] [--output=OUTPUT] list the key names and details in the group. key group upload [GROUPNAMES] [CLOUDS] [--dryrun] uploads the named groups to the specified clouds. In some cases you may want to store the public keys in files. For this reason we support the following commands. key group add --group=GROUPNAME --file=FILENAME the command adds the keys to the given group. The keys are written in the files in yaml format. key group export --group=GROUNAMES --filename=FILENAME the command exports the keys to the given group. The keys are written in the files in yaml format. The yaml format is as follows: cloudmesh: keys: NAMEOFKEY: name: NAMEOFKEY key: ssh-rsa AAAA..... comment group: - GROUPNAME ... If a key is included in multiple groups they will be added to the grouplist of the key """ map_parameters(arguments, 'cloud', 'output', 'source', 'dir') pprint(arguments) invalid_names = ['tbd', 'none', "", 'id_rsa'] m = Manager() if arguments.list and arguments.source == "git": # this is much simpler config = Config() username = config["cloudmesh.profile.github"] print("Username:"******"name"], order=["id", "name", "fingerprint", "source"], header=["Id", "Name", "Fingerprint", "Source"])) return "" elif arguments.list and arguments.source == "ssh": # this is much simpler sshkey = SSHkey() print( Printer.flatwrite( [sshkey], sort_keys=["name"], order=["name", "type", "fingerprint", "comment"], header=["Name", "Type", "Fingerprint", "Comment"])) return "" elif arguments.list and arguments.cloud: clouds = Parameter.expand(arguments.cloud) print(clouds) if len(clouds) == 0: variables = Variables() cloudname = variables['cloud'] clouds = [cloudname] cloudkey = [] for cloud in clouds: print(cloud) provider = Provider(clouds) cloudkey.append(provider.keys()) print( Printer.flatwrite( [cloudkey], sort_keys=["name"], order=["name", "type", "fingerprint", "comment"], header=["Name", "Type", "Fingerprint", "Comment"])) return "" elif arguments.list and arguments.source == "db": if arguments.NAMES: names = Parameter.expand(arguments.NAMES) print("find the keys of the following vms", names) print("the keys will be read from mongo") return "" return ""
def setup(self):
    variables = Variables()
    service = Parameter.expand(variables['storage'])[0]
    self.p = Provider(service=service)
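# Illustrative sketch of Parameter.expand() as used in these setup() methods:
# a comma- or range-style parameter string becomes a list, and element [0]
# selects the first service or cloud. The import path is an assumption; the
# hostlist behaviour matches the sample[1-3] examples in the vm docstrings.
from cloudmesh.common.parameter import Parameter

Parameter.expand("awss3")          # -> ["awss3"]
Parameter.expand("azure,aws")      # -> ["azure", "aws"]
Parameter.expand("sample[1-3]")    # -> ["sample1", "sample2", "sample3"]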
def do_batch(self, args, arguments): """ :: Usage: batch job create --name=NAME --cluster=CLUSTER --script=SCRIPT --executable=EXECUTABLE --destination=DESTINATION --source=SOURCE [--companion-file=COMPANION_FILE] [--outfile-name=OUTPUT_FILE_NAME] [--suffix=SUFFIX] [--overwrite] batch job run [--name=NAMES] [--output=OUTPUT] batch job fetch [--name=NAMES] batch job remove [--name=NAMES] batch job clean [--name=NAMES] batch job set [--name=NAMES] PARAMETER=VALUE batch job list [--name=NAMES] [--depth=DEPTH] batch connection_test --job=JOB batch cluster list [--cluster=CLUSTERS] [--depth=DEPTH] batch cluster remove [--cluster=CLUSTERS] batch cluster set [--cluster=CLUSTERS] PARAMETER=VALUE Arguments: FILE a file name INPUT_TYPE tbd Options: -f specify the file --depth=DEPTH [default: 1] --output=OUTPUT [default: table] Description: This command allows to submit batch jobs to queuing systems hosted in an HBC center as a service directly form your commandline. We assume that a number of experiments are conducted with possibly running the script multiple times. Each experiment will save the batch script in its own folder. The output of the script can be saved in a destination folder. A virtual directory is used to coordinate all saved files. The files can be located due to the use of the virtual directory on multiple different data or file services Authentication to the Batch systems is done viw the underlaying HPC center authentication. We assume that the user has an account to submit on these systems. (SSH, 2 factor, XSEDE-account) TBD. Experiments: experiments are jobs that can be run multiple times and create input and output file sin them cloudmesh: experiment: job: name: {cloudmesh.profile.user.name}-01 directory: ~/experiment/{experiment.job.name} output: {cloudmesh.experiment.job.name}/output input: ~/experiment/{experiment.job.name}/input script: script.sh source ,,, destination: {cloudmesh.experiment.job.directory} - queue associates with server (cloud) - job could be run on queue and is associated with one or multiple servers - experiment is same as job, but gives some facility to run it multiple times I do not know what companion file is Examples: batch job run [--name=NAMES] [--output=OUTPUT] runs jobs with the given names LOTS OF DOCUMENTATION MISSING HERE [--companion-file=COMPANION_FILE] [--outfile-name=OUTPUT_FILE_NAME] [--suffix=SUFFIX] [--overwrite] """ # # create slurm manager so it can be used in all commands # slurm_manager = SlurmCluster() # debug=arguments["--debug"]) arguments["--cloud"] = "test" arguments["NAME"] = "fix" map_parameters(arguments, "cloud", "name", "cluster", "script", "type", "destination", "source", "output") # if not arguments.create # find cluster name from Variables() # if no cluster is defined look it up in yaml in batch default: # if not defined there fail # clusters = Parameter.expand(arguments.cluster) # name = Parameters.expand[argumnets.name) # this will return an array of clusters and names of jobs and all cluster # job or clusterc commands will be executed on them # see the vm # # if active: False in the yaml file for the cluster this cluster is not used and scipped. VERBOSE(arguments) variables = Variables() # do not use print but use ,Console.msg(), Console.error(), Console.ok() if arguments.tester: print("running ... 
") slurm_manager.tester() elif arguments.run and arguments.job: # config = Config()["cloudmesh.batch"] names = Parameter.expand(arguments.name) # clouds, names = Arguments.get_cloud_and_names("refresh", arguments, # variables) data = [] for name in names: entry = SlurmCluster.job_specification() data.append(entry) ''' data = { "cm": { "cloud": "karst_debug", "kind": "batch-job", "name": "job012", }, "batch": { "source": "~/.cloudmesh/batch/dir", "destination": "~/.cloudmesh/dir/", "status": "running" } }''' try: raise NotImplementedError except Exception as e: Console.error("Haha", traceflag=True) pprint(data) print( Printer.flatwrite(data, order=["cm.name", "cm.kind", "batch.status"], header=["Name", "Kind", "Status"], output=arguments.output)) return "" # handling batch job create sample command c # cms batch job create --name newjob1 --cluster slurm-taito # --script ./1_argsin_stdout.slurm --executable # ./1_argsin_stdout_script.sh --destination /home/vafanda --source ~/tmp elif arguments.job and \ arguments.create and \ arguments.name and \ arguments.cluster and \ arguments.script and \ arguments['--executable'] and \ arguments.destination and \ arguments.source: job_name = arguments.name cluster_name = arguments.cluster script_path = Path(arguments.script) if not script_path.exists(): raise FileNotFoundError executable_path = Path(arguments['--executable']) if not executable_path.exists(): raise FileNotFoundError destination = Path(arguments.destination) if not destination.is_absolute(): Console.error("destination path must be absolute", traceflag=True) raise FileNotFoundError source = Path(arguments.source) if not source.exists(): raise FileNotFoundError if arguments.experiment is None: experiment_name = 'job' + self.suffix_generator() else: experiment_name = arguments.experiment + self.suffix_generator( ) if arguments.get("--companion-file") is None: companion_file = Path() else: companion_file = Path(arguments.get("--companion-file")) slurm_manager.create(job_name, cluster_name, script_path, executable_path, destination, source, experiment_name, companion_file) elif arguments.remove: if arguments.cluster: slurm_manager.remove("cluster", arguments.get("CLUSTER_NAME")) if arguments.job: slurm_manager.remove("job", arguments.get("JOB_NAME")) elif arguments.list: max_depth = 1 if arguments.get("DEPTH") is None else int( arguments.get("DEPTH")) if arguments.get("clusters"): slurm_manager.list("clusters", max_depth) elif arguments.get("jobs"): slurm_manager.list("jobs", max_depth) elif arguments.set: if arguments.get("cluster"): cluster_name = arguments.get("CLUSTER_NAME") parameter = arguments.get("PARAMETER") value = arguments.get("VALUE") slurm_manager.set_param("cluster", cluster_name, parameter, value) if arguments.job: config_name = arguments.get("JOB_NAME") parameter = arguments.get("PARAMETER") value = arguments.get("VALUE") slurm_manager.set_param("job-metadata", config_name, parameter, value) elif arguments.start and arguments.job: job_name = arguments.get("JOB_NAME") slurm_manager.run(job_name) elif arguments.get("fetch"): job_name = arguments.get("JOB_NAME") slurm_manager.fetch(job_name) elif arguments.connection_test: slurm_manager.connection_test(arguments.job) elif arguments.clean: job_name = arguments.get("JOB_NAME") slurm_manager.clean_remote(job_name)