def do_akey(self, args, arguments):
    """
    ::

        Usage:
            akey
            akey list
            akey add --name=key-name --pub=pub-key-path --cert=certificate-file-path --pfx=pfx-file-path

    Handle the ``akey`` command: list Azure keys, or register a new key
    (public key, PEM certificate and PFX file) in the database.

    :param args: raw argument string (unused)
    :param arguments: docopt-parsed argument dictionary
    :return: empty string (command handlers return a printable result)
    """
    if arguments['list']:
        print("Key list time")
    elif arguments['add']:
        Console.info("Azure key addition invoked")
        # BUG FIX: the original only assigned these names inside
        # `if arguments['--x']:` guards, so any missing option caused an
        # UnboundLocalError at the add_azure_key_to_db() call below.
        key_name = arguments['--name']
        certificate_path = arguments['--cert']
        key_path = arguments['--pub']
        pfx_path = arguments['--pfx']
        if None in (key_name, key_path, certificate_path, pfx_path):
            Console.error("akey add requires --name, --pub, --cert and --pfx")
            return ""
        Key.add_azure_key_to_db(key_name, key_path,
                                certificate_path, pfx_path)
    return ""
def list_flavor(self, cloudname, **kwargs):
    """Return the flavors (sizes) offered by the libcloud provider.

    :param cloudname: cloud name (unused; kept for interface parity)
    :param kwargs: extra options (unused)
    :return: dict representation of the provider's NodeSize objects
    """
    Console.info("In list_flavor of libcloud")
    available_sizes = self.provider.list_sizes()
    self._print(available_sizes)
    return self._to_dict(available_sizes)
def list_image(self, cloudname, **kwargs):
    """Fetch the images available on the libcloud provider.

    :param cloudname: cloud name (unused; kept for interface parity)
    :param kwargs: extra options (unused)
    :return: dict keyed by index with one entry per NodeImage
    """
    Console.info("In list_images of libcloud")
    found_images = self.provider.list_images()
    self._print(found_images)
    return self._to_dict(found_images)
def init(self, stackname='bds', activate=True, name=None, username=None,
         branch=None, overrides=None, playbooks=None, ips=None,
         force=False, update=False):
    """Create (and optionally activate) a new project for the given stack.

    :param stackname: stack type; only 'bds' is currently supported
    :param activate: whether to activate the project after creation
    :param name: project name
    :param username: user name; the literal string '$USER' expands to the
        USER environment variable
    :param branch: branch to check out
    :param overrides: stack overrides
    :param playbooks: playbooks to run
    :param ips: IP addresses of the nodes
    :param force: force the operation
    :param update: update an existing project
    :raises NotImplementedError: for unsupported stack names
    """
    factory = ProjectFactory()
    if stackname == 'bds':
        factory.use_bds()
    else:
        raise NotImplementedError(stackname)
    # BUG FIX: the original used `username is '$USER'`, which tests object
    # identity against a string literal (implementation-dependent, and
    # normally False); equality (`==`) is the intended check.
    factory \
        .set_project_name(name) \
        .set_user_name(os.getenv('USER') if username == '$USER' else username) \
        .set_branch(branch) \
        .set_ips(ips) \
        .set_overrides(overrides) \
        .set_playbooks(playbooks) \
        .activate(activate) \
        .set_force(force=force) \
        .set_update(update)
    project = factory()
    Console.info('Created project {}'.format(project.name))
def add_azure_key_to_db(cls, key_name, key_path, certificate_path, pfx_path):
    """
    Adds the public key to the existing database model and adds the
    certificate, key and fingerprint into the azure key database model.

    :param key_name: Key name to be added
    :param key_path: Public key path
    :param certificate_path: Certificate file path(PEM file)
    :param pfx_path: PKCS encoded certificate path
    :return:
    """
    pprint("add_azure_key_to_db")
    # Add to the current DB
    cls.add_from_path(key_path,
                      key_name,
                      source="ssh",
                      uri="file://" + key_path)
    # Compute the certificate's SHA1 fingerprint by shelling out to
    # openssl; the trailing sed strips the ':' separators from the hex.
    # NOTE(review): the command is built by plain string concatenation,
    # so a certificate_path containing shell metacharacters would be
    # executed by the shell — consider shlex.quote() here.
    fingerprint_cmd = "openssl x509 -in " + certificate_path + " -sha1 -noout -fingerprint | sed s/://g"
    fingerprint = cls.run_command(fingerprint_cmd)
    # openssl emits '<label>=<hex>' — keep only the hex digest part.
    fingerprint = fingerprint.split('=')[1]
    fingerprint = fingerprint.rstrip('\n')
    # Persist the certificate/key metadata in the azure key model.
    key_azure_obj = {
        "kind": "key_azure",
        "name": key_name,
        "fingerprint": fingerprint,
        "certificate": certificate_path,
        "key_path": key_path,
        "pfx_path": pfx_path
    }
    cls.cm.add(key_azure_obj)
    Console.info("Azure key added.ok.")
def list_vm(self, cloudname, **kwargs):
    """List the provider's nodes (VMs) and return them as a dict.

    :param cloudname: cloud name (unused; kept for interface parity)
    :param kwargs: extra options (unused)
    :return: dict representation of the provider's Node objects
    """
    Console.info("In list_vm")
    current_nodes = self.provider.list_nodes()
    self._print(current_nodes)
    return self._to_dict(current_nodes)
def do_akey(self, args, arguments):
    """
    ::

        Usage:
            akey
            akey list
            akey add --name=key-name --pub=pub-key-path --cert=certificate-file-path --pfx=pfx-file-path

    Handle the ``akey`` command: list Azure keys, or add a new key
    (public key, PEM certificate, PFX file) to the database.

    :param args: raw argument string (unused)
    :param arguments: docopt-parsed argument dictionary
    :return: empty string (command handlers return a printable result)
    """
    if arguments['list']:
        print("Key list time")
    elif arguments['add']:
        Console.info("Azure key addition invoked")
        # BUG FIX: assigning these names only when the option was present
        # made the add_azure_key_to_db() call below raise
        # UnboundLocalError whenever any option was omitted.
        key_name = arguments['--name']
        certificate_path = arguments['--cert']
        key_path = arguments['--pub']
        pfx_path = arguments['--pfx']
        if None in (key_name, key_path, certificate_path, pfx_path):
            Console.error("akey add requires --name, --pub, --cert and --pfx")
            return ""
        Key.add_azure_key_to_db(key_name, key_path,
                                certificate_path, pfx_path)
    return ""
def boot_vm(self, name, image=None, flavor=None, cloud=None, key=None,
            secgroup=None, meta=None, nics=None, **kwargs):
    """
    Spawns a VM instance on the cloud.

    :param name: Name of the instance to be started
    :param image: Image id to be used for the instance
    :param flavor: Flavor to be used for the instance
    :param cloud: Cloud on which to spawn the machine
    :param key: Key (keypair name) to be used for the instance
    :param secgroup: Security group for the instance (currently unused)
    :param meta: A dict of arbitrary key/value metadata (currently unused)
    :param nics: TODO: fixme
    """
    if cloud is None:
        Console.error("Cloud is not specified")
        return
    # BUG FIX: the original logged the error but then continued, passing
    # image=None / flavor=None to create_node; abort early instead.
    if image is None:
        Console.error("Image Id not found")
        return
    if flavor is None:
        Console.error("valid Flavor Id not found")
        return
    image = self.get_image_by_id(image)
    flavor = self.get_size_by_id(flavor)
    # (Removed an unused NodeAuthPassword('mysecretpassword') object: a
    # hard-coded credential that was never passed to create_node.)
    self.provider.create_node(name=name, image=image, size=flavor,
                              ex_keyname=key)
    Console.info("VM boot up success.ok.")
def deploy(self, force=False):
    """Deploy the stack unless it is already deployed.

    :param force: re-deploy even when already deployed
    """
    needs_deploy = force or not self.is_deployed
    if needs_deploy:
        self.stack.deploy(**self.deployparams)
    else:
        Console.info('Already deployed')
    # Mark deployed regardless of which branch ran.
    self.is_deployed = True
def list_key(self, cloudname, **kwargs):
    """Return the provider's registered key pairs as a dict.

    :param cloudname: cloud name (unused; kept for interface parity)
    :param kwargs: extra options (unused)
    :return: dict representation of the provider's key pairs
    """
    Console.info("In list_key")
    pairs = self.provider.list_key_pairs()
    return self._to_dict(pairs)
def add_key_to_cloud(self, name, public_key):
    """Register an SSH keypair with a libcloud cloud (e.g. AWS EC2).

    :param name: Name of the keypair.
    :param public_key: public key string.
    :return: the imported keypair object
    """
    imported = self.provider.import_key_pair_from_string(
        name,
        key_material=public_key)
    Console.info("Uploading the key to libcloud. ok.")
    return imported
def _get_storage_name(self):
    """Return the first Azure storage-account service name, or None.

    :return: service name of the first storage account, or None when the
        subscription has no storage accounts
    """
    accounts = self.provider.list_storage_accounts()
    if len(accounts) == 0:
        Console.warning("No Storage Accounts found")
        return None
    first_name = accounts[0].service_name
    Console.info("{0} storage found ".format(first_name))
    return first_name
def delete_vm(self, name, group=None, force=None):
    """
    Delete a VM instance whose hosted service name and instance name is
    given by name.

    :param name: hosted service / deployment name
    :param group: unused
    :param force: unused
    :return:
    """
    service = name
    # Remove the deployment first, then the hosted service that owns it.
    self.provider.delete_deployment(service_name=service,
                                    deployment_name=service)
    self.provider.delete_hosted_service(service_name=service)
    Console.info("Delete Success.ok.")
def add_key_to_cloud(self, name, public_key):
    """Import a public key into a libcloud cloud, typically an EC2 keypair.

    :param name: Name of the keypair.
    :param public_key: public key string.
    :return: the keypair object created by the provider
    """
    result = self.provider.import_key_pair_from_string(name,
                                                       key_material=public_key)
    Console.info("Uploading the key to libcloud. ok.")
    return result
def copy(commands):
    """scp the given remote path from the VM (reads ``vm`` and ``arg``
    from the enclosing scope)."""
    if vm.ip is None:
        Console.error("No Public IPs found for the instance", traceflag=False)
        return
    if arg.verbose:
        Console.info("Connecting to Instance at IP:" + format(vm.ip))
    pieces = [
        "scp",
        " -o StrictHostKeyChecking=no",
        " {username}@{ip}".format(**vm),
        ":{:}".format(commands),
    ]
    sshcommand = "".join(pieces)
    print(sshcommand)
    os.system(sshcommand)
def push(from_path, vm):
    """scp *from_path* to the VM's ~/.ssh/authorized_keys (reads ``arg``
    from the enclosing scope)."""
    vm.ip = vm.floating_ip
    if vm.ip is None:
        Console.error("No Public IPs found for the instance", traceflag=False)
        return
    if arg.verbose:
        Console.info("Connecting to Instance at IP:" + format(vm.ip))
    sshcommand = "".join([
        "scp",
        " -o StrictHostKeyChecking=no",
        " {:}".format(from_path),
        " {username}@{ip}:.ssh/authorized_keys".format(**vm),
    ])
    print(sshcommand)
    os.system(sshcommand)
def delete_vm(self, name, group=None, force=None):
    """
    Delete a VM instance whose hosted service name and instance name is
    given by name.

    :param name: hosted service / deployment name
    :param group: unused
    :param force: unused
    :return:
    """
    ident = dict(service_name=name)
    self.provider.delete_deployment(deployment_name=name, **ident)
    # Give Azure a moment to finish tearing down the deployment before
    # removing the hosted service that owns it.
    import time
    time.sleep(10)
    self.provider.delete_hosted_service(**ident)
    Console.info("{0} deleted".format(name))
def add(self):
    """Boots a new instance and adds it to this cluster"""
    cloud_provider = Provider.from_cloud(self.cloud)
    Console.info('Booting VM for cluster {}'.format(self.name))
    boot_args = dict(key=self.key,
                     image=self.image,
                     flavor=self.flavor,
                     secgroup=self.secgroup,
                     cluster=self.name,
                     username=self.username)
    node = cloud_provider.boot(**boot_args)
    # Optionally attach a floating IP to the freshly booted node.
    if self.assignFloatingIP:
        node.create_ip()
def enable_ssh(self, cloud, secgroup_name='default'):
    """Authorize inbound SSH (tcp/22 from 0.0.0.0/0) in an AWS security
    group.

    :param cloud: cloud name; only "aws" is implemented
    :param secgroup_name: security group to modify
    """
    if cloud == "aws":
        params = {'Action': 'AuthorizeSecurityGroupIngress',
                  'GroupName': secgroup_name,
                  'IpProtocol': 'tcp',
                  'FromPort': '22',
                  'ToPort': '22',
                  'CidrIp': '0.0.0.0/0'}
        try:
            self.provider.connection.request(self.provider.path,
                                             params=params).object
            Console.info("Permission added.ok")
        except Exception as e:
            # BUG FIX: the original tested `== -1` (substring ABSENT), so
            # it reported "already exists" for every unrelated failure and
            # stayed silent on genuine duplicates. Report duplicates as
            # benign; surface anything else.
            if e.args and "InvalidPermission.Duplicate" in str(e.args[0]):
                Console.info("Permission already exists.ok")
            else:
                Console.error("Failed to add permission: {}".format(e))
    else:
        Console.error("Enable SSH not implemented for others")
def copy(commands):
    """scp the given remote path from the VM (reads ``vm`` and ``arg``
    from the enclosing scope)."""
    if vm.ip is None:
        Console.error(
            "No Public IPs found for the instance", traceflag=False)
        return
    if arg.verbose:
        Console.info("Connecting to Instance at IP:" + format(vm.ip))
    cmd = "scp" + " -o StrictHostKeyChecking=no"
    cmd += " {username}@{ip}".format(**vm)
    cmd += ":{:}".format(commands)
    print(cmd)
    os.system(cmd)
def push(from_path, vm):
    """scp *from_path* onto the VM as ~/.ssh/authorized_keys (reads
    ``arg`` from the enclosing scope)."""
    vm.ip = vm.floating_ip
    if vm.ip is not None:
        if arg.verbose:
            Console.info("Connecting to Instance at IP:" + format(vm.ip))
        fragments = ["scp",
                     " -o StrictHostKeyChecking=no",
                     " {:}".format(from_path),
                     " {username}@{ip}:.ssh/authorized_keys".format(**vm)]
        command = "".join(fragments)
        print(command)
        os.system(command)
    else:
        Console.error("No Public IPs found for the instance",
                      traceflag=False)
def enable_ssh(self, cloud, secgroup_name='default'):
    """Authorize inbound SSH (tcp/22 from 0.0.0.0/0) in an AWS security
    group.

    :param cloud: cloud name; only "aws" is implemented
    :param secgroup_name: security group to modify
    """
    if cloud == "aws":
        params = {
            'Action': 'AuthorizeSecurityGroupIngress',
            'GroupName': secgroup_name,
            'IpProtocol': 'tcp',
            'FromPort': '22',
            'ToPort': '22',
            'CidrIp': '0.0.0.0/0'
        }
        try:
            self.provider.connection.request(self.provider.path,
                                             params=params).object
            Console.info("Permission added.ok")
        except Exception as e:
            # BUG FIX: `find(...) == -1` inverted the check — every error
            # OTHER than a duplicate was reported as "already exists",
            # and real duplicates were swallowed silently.
            if e.args and "InvalidPermission.Duplicate" in str(e.args[0]):
                Console.info("Permission already exists.ok")
            else:
                Console.error("Failed to add permission: {}".format(e))
    else:
        Console.error("Enable SSH not implemented for others")
def delete_vm(self, name, group=None, force=None):
    """
    Delete a VM instance whose instance name is given by name.

    :param name: name of the node to destroy
    :param group: unused
    :param force: unused
    :return:
    """
    Console.info("Delete VM for " + name)
    # Locate the first node whose name matches.
    matching = next(
        (node for node in self.provider.list_nodes() if node.name == name),
        None)
    if matching is None:
        Console.error("No valid node found with the name " + name)
    else:
        self.provider.destroy_node(matching)
        Console.info("VM delete success.ok.")
def delete_vm(self, name, group=None, force=None):
    """
    Delete a VM instance whose instance name is given by name.

    :param name: name of the node to destroy
    :param group: unused
    :param force: unused
    :return:
    """
    Console.info("Delete VM for " + name)
    target = None
    for candidate in self.provider.list_nodes():
        if candidate.name == name:
            target = candidate
            break
    if target is not None:
        self.provider.destroy_node(target)
        Console.info("VM delete success.ok.")
    else:
        Console.error("No valid node found with the name " + name)
def upload(cls, cloud=None, group=None):
    """Push locally stored security groups and their rules to the cloud(s).

    :param cloud: cloud name; None or 'all' targets every active cloud
    :param group: group name; None targets every group found in the
        local rules database
    """
    # Resolve target clouds: all active clouds, or just the given one.
    if cloud is None or cloud == 'all':
        clouds = ConfigDict("cloudmesh.yaml")["cloudmesh"]["active"]
    else:
        clouds = [cloud]
    # Resolve target groups: every group referenced by a local rule,
    # or just the given one.
    if group is None:
        rules = cls.list(output='dict')
        groups = set()
        for g in rules:
            r = rules[g]
            groups.add(r["group"])
        groups = list(groups)
    else:
        groups = [group]
    for cloud in clouds:
        Console.msg(
            "Uploading the groups/rules to cloud - {}...".format(cloud))
        for g in groups:
            # Start from a clean slate on the cloud side, then recreate
            # the group (if missing) and each of its rules.
            cls.delete_all_rules_cloud(cloud, g)
            group = cls.get(name=g, cloud=cloud)
            group_cloud = cls.get_group_cloud(cloud, g)
            if not group_cloud:
                cls.add_group_cloud(cloud, g)
            rules = cls.list_rules(group=g, output="dict")
            if rules:
                for ruleid in rules:
                    rule = rules[ruleid]
                    rulename = rule["name"]
                    cls.add_rule_cloud(cloud, g, rulename)
            # Old implementation kept for reference:
            '''
            SecGroup.delete(category=c, group=g)
            uuid = SecGroup.create(category=c, group=g)
            for key in rules:
                r = rules[key]
                if r["group"] == g:
                    SecGroup.add_rule(c,uuid,r["fromPort"],r["toPort"]
                                      , r['protocol'],r['cidr'])
            # create group
            '''
        Console.msg("...done")
    Console.info("All completed")
def _to_dict(self, libcloud_result): d = {} result_type = "" if len(libcloud_result) > 0: name = libcloud_result[0].__class__.__name__ print("RRRR", name) if name in ["Node", "NodeImage", "NodeSize"]: result_type = name Console.info("{} type object received".format(name)) # pprint(libcloud_result[0]) for index, obj in enumerate(libcloud_result): if result_type == "Node": d[index] = dict(LibcloudDict.convert_libcloud_vm_to_dict(obj)) elif result_type == "NodeImage": d[index] = dict(LibcloudDict.handle_vm_image_details(obj)) elif result_type == "NodeSize": d[index] = dict(LibcloudDict.handle_vm_size_details(obj)) # pprint("Index:"+str(index)) return d
def _create_storage_account(self):
    """Create the storage account needed to back a VM's disk space.

    Reference: SimpleAzure

    NOTE: storage account names may not contain dashes or spaces, hence
    the replace(); the name is currently a fixed constant.
    TODO: derive a unique, readable name (e.g. include the VM name).
    """
    base = "cloudmesh"
    account_name = base[:24].replace("-", "")
    response = self.provider.create_storage_account(
        service_name=account_name,
        description=account_name + "description",
        label=account_name + "label",
        location=self.get_location())
    status = self.provider.get_operation_status(response.request_id)
    Console.info("Storage Account creation: " + status.status)
def enable_ssh(cls, secgroup_name='default', cloud="general"):
    """Ensure an SSH rule (tcp/22 from 0.0.0.0/0) exists in the given
    security group on the given cloud.

    :param secgroup_name: name of the security group to update
    :param cloud: cloud name; libcloud-backed clouds are delegated to the
        provider, everything else is handled via the security_groups API
    :return: True when the non-libcloud group was found and handled;
        False otherwise
    """
    ret = False
    if cloud in LibcloudDict.Libcloud_category_list:
        # Libcloud clouds: let the provider create the group and rule.
        Console.info("Creating and adding security group for libcloud")
        cloud_provider = CloudProvider(cloud).provider
        cloud_provider.create_sec_group(cloud, secgroup_name)
        cloud_provider.enable_ssh(cloud, secgroup_name)
        # NOTE(review): ret stays False on this path even on success —
        # confirm whether any caller relies on the return value here.
    else:
        cloud_provider = CloudProvider(cloud).provider.provider
        secgroups = cloud_provider.security_groups.list()
        for asecgroup in secgroups:
            if asecgroup.name == secgroup_name:
                rules = asecgroup.rules
                rule_exists = False
                # structure of a secgroup rule:
                # {u'from_port': 22, u'group': {}, u'ip_protocol': u'tcp',
                #  u'to_port': 22, u'parent_group_id': u'UUIDHERE',
                #  u'ip_range': {u'cidr': u'0.0.0.0/0'}, u'id': u'UUIDHERE'}
                for arule in rules:
                    if arule["from_port"] == 22 and \
                            arule["to_port"] == 22 and \
                            arule["ip_protocol"] == 'tcp' and \
                            arule["ip_range"] == {'cidr': '0.0.0.0/0'}:
                        # print (arule["id"])
                        rule_exists = True
                        break
                if not rule_exists:
                    # Add the missing ssh rule to the matched group.
                    cloud_provider.security_group_rules.create(
                        asecgroup.id,
                        ip_protocol='tcp',
                        from_port=22,
                        to_port=22,
                        cidr='0.0.0.0/0')
                # else:
                #     print ("The rule allowing ssh login did exist!")
                ret = True
                break
    # print ("*" * 80)
    # d = SecGroup.convert_list_to_dict(secgroups)
    # print (d)
    return ret
def upload(cls, cloud=None, group=None):
    """Upload locally stored security groups and rules to the cloud(s).

    :param cloud: cloud name; None or 'all' means every active cloud
    :param group: group name; None means every group present in the
        local rules database
    """
    # Which clouds to target.
    if cloud is None or cloud=='all':
        clouds = ConfigDict("cloudmesh.yaml")["cloudmesh"]["active"]
    else:
        clouds = [cloud]
    # Which groups to target: collect the distinct group names from the
    # local rule records unless an explicit group was given.
    if group is None:
        rules = cls.list(output='dict')
        groups = set()
        for g in rules:
            r = rules[g]
            groups.add(r["group"])
        groups = list(groups)
    else:
        groups = [group]
    for cloud in clouds:
        Console.msg("Uploading the groups/rules to cloud - {}...".format(cloud))
        for g in groups:
            # Wipe the cloud-side rules, ensure the group exists, then
            # re-add every locally known rule.
            cls.delete_all_rules_cloud(cloud, g)
            group = cls.get(name=g, cloud=cloud)
            group_cloud = cls.get_group_cloud(cloud, g)
            if not group_cloud:
                cls.add_group_cloud(cloud, g)
            rules = cls.list_rules(group=g, output="dict")
            if rules:
                for ruleid in rules:
                    rule = rules[ruleid]
                    rulename = rule["name"]
                    cls.add_rule_cloud(cloud, g, rulename)
            # Previous implementation, retained for reference:
            '''
            SecGroup.delete(category=c, group=g)
            uuid = SecGroup.create(category=c, group=g)
            for key in rules:
                r = rules[key]
                if r["group"] == g:
                    SecGroup.add_rule(c,uuid,r["fromPort"],r["toPort"]
                                      , r['protocol'],r['cidr'])
            # create group
            '''
        Console.msg("...done")
    Console.info("All completed")
def project(self, list_projects=False, name=None):
    """Activate a project by name and/or list all known projects.

    :param list_projects: when True, print a one-line summary per project
    :param name: when given, look up and activate this project
    """
    db = ProjectDB()
    # activate if a name is given
    if name:
        project = db.lookup(name)
        db.activate(project)
    # list if asked to do so
    if list_projects:
        for project in db:
            isactive = '>' if db.isactive(project) else ''
            ctime = time.strftime('%Y-%m-%d %H:%M:%S', project.ctime)
            # BUG FIX: the original concatenated the fragments with no
            # separators, so 'created:', 'stack:' and 'deployed:' ran
            # together in the printed line.
            msg = ' '.join([
                '{isactive:3s}',
                '{project.name:10s}',
                'created: {ctime}',
                'stack: {project.stack.__class__.__name__:16s}',
                'deployed: {project.is_deployed}',
            ]).format(isactive=isactive, project=project, ctime=ctime)
            Console.info(msg)
def _create_storage_account(self): """ Storage service is required to create a disk space for a VM Reference: SimpleAzure """ # Note that it is better to create a storage account with a VM name included. # And have a unique name to avoid conflict # TODO: Get a unique readable storage account name # Tips: no dash(-) or space is allowed in naming temp_name = "cloudmesh" name = temp_name[:24].replace("-", "") description = name + "description" label = name + "label" result = self.provider.create_storage_account( service_name=name, description=description, label=label, location=self.get_location()) operation_result = self.provider.get_operation_status( result.request_id) Console.info("Storage Account creation: " + operation_result.status)
def boot_vm(self, name, group=None, image=None, flavor=None, cloud=None,
            cert_thumbprint=None, pub_key_path=None, cert_path=None,
            pfx_path=None, secgroup=None, meta=None, nics=None, **kwargs):
    """
    Boots up a new VM Instance.

    Steps involved: creating a hosted(Cloud) Service, adding the PFX
    certificate file, get default storage name, creating a configuration
    set, adding an endpoint(SSH by default), and finally creating a VM
    deployment

    :param name: Hosted Service Name and VM instance name
    :param group:
    :param image:
    :param flavor:
    :param cloud:
    :param cert_thumbprint:
    :param pub_key_path:
    :param cert_path:
    :param pfx_path:
    :param secgroup:
    :param meta:
    :param nics:
    :param kwargs:
    :return:
    """
    # Location from config, with a hard-coded fallback.
    location = ConfigDict(filename="cloudmesh.yaml")["cloudmesh"][
        "clouds"]["azure"]["default"]["location"] or 'Central US'
    try:
        self.provider.create_hosted_service(service_name=name,
                                            label=name,
                                            location=location)
    except:
        # Best-effort diagnostics; note a failure here does NOT abort the
        # rest of the boot sequence below.
        traceback.print_exc()
        pprint("Error creating hosted service")
        pprint("service name" + name)
        pprint("location name" + location)
        pprint("cert_thumbprint" + cert_thumbprint)
        pprint("pub_key_path" + pub_key_path)
        pprint("cert_path" + cert_path)
        pprint("pfx_path" + pfx_path)
        pprint("Image" + image)
        pprint("Flavor" + flavor)
    pprint("Certificate adding")
    self.add_certificate(name, pfx_path)
    pprint("Certificate added")
    # OS disk goes into the default storage account's vhds container.
    storage_name = self._get_storage_name()
    media_link = 'https://{0}.blob.core.windows.net/vhds/{1}.vhd'.format(
        storage_name, name)
    os_hd = OSVirtualHardDisk(image, media_link)
    username = ConfigDict(
        filename="cloudmesh.yaml"
    )["cloudmesh"]["clouds"]["azure"]["default"]["username"]
    password = ConfigDict(
        filename="cloudmesh.yaml"
    )["cloudmesh"]["clouds"]["azure"]["default"]["password"]
    # NOTE(review): the next line is corrupted/redacted (credentials were
    # scrubbed to '******'); it is not valid Python, and the construction
    # of `linux_config` and `network` used below appears to have been
    # lost along with it. Restore this section from version control.
    pprint("Username:"******"password:"******"Starting the VM on ", media_link)
    try:
        vm_create_result = self.provider.create_virtual_machine_deployment(
            service_name=name, deployment_name=name,
            deployment_slot='production', label=name, role_name=name,
            system_config=linux_config,
            os_virtual_hard_disk=os_hd,
            network_config=network,
            role_size=flavor)
        # pprint(vm_create_result)
        self.provider.wait_for_operation_status(
            vm_create_result.request_id, timeout=30)
        Console.info("VM boot up successful.ok.")
    except:
        traceback.print_exc()
        pprint("Exception in starting the VM")
    return name
def create_sec_group(self, cloud, secgroup_name='default'):
    """Create a security group on the libcloud provider, logging any
    failure instead of raising.

    :param cloud: cloud name (unused here; kept for interface parity)
    :param secgroup_name: name of the security group to create
    """
    description = "Default Security Group"
    try:
        self.provider.ex_create_security_group(secgroup_name, description)
    except Exception as err:
        Console.info("create_sec_group exception." + err.args[0])
def do_comet(self, args, arguments): """ :: Usage: comet init comet active [ENDPOINT] comet ll [CLUSTERID] [--format=FORMAT] [--endpoint=ENDPOINT] comet cluster [--concise|--status] [CLUSTERID] [--format=FORMAT] [--sort=SORTKEY] [--endpoint=ENDPOINT] comet computeset [COMPUTESETID] [--allocation=ALLOCATION] [--cluster=CLUSTERID] [--state=COMPUTESESTATE] [--endpoint=ENDPOINT] comet start CLUSTERID [--count=NUMNODES] [COMPUTENODEIDS] [--allocation=ALLOCATION] [--reservation=RESERVATION] [--walltime=WALLTIME] [--endpoint=ENDPOINT] comet terminate COMPUTESETID [--endpoint=ENDPOINT] comet power (on|off|reboot|reset|shutdown) CLUSTERID [NODESPARAM] [--endpoint=ENDPOINT] comet console [--link] CLUSTERID [COMPUTENODEID] [--endpoint=ENDPOINT] comet node info CLUSTERID [COMPUTENODEID] [--format=FORMAT] [--endpoint=ENDPOINT] comet node rename CLUSTERID OLDNAMES NEWNAMES [--endpoint=ENDPOINT] comet iso list [--endpoint=ENDPOINT] comet iso upload [--isoname=ISONAME] PATHISOFILE [--endpoint=ENDPOINT] comet iso attach ISOIDNAME CLUSTERID [COMPUTENODEIDS] [--endpoint=ENDPOINT] comet iso detach CLUSTERID [COMPUTENODEIDS] [--endpoint=ENDPOINT] comet reservation (list|create|update|delete) Options: --endpoint=ENDPOINT Specify the comet nucleus service endpoint to work with, e.g., dev or production --format=FORMAT Format is either table, json, yaml, csv, rest [default: table] --sort=SORTKEY Sorting key for the table view --count=NUMNODES Number of nodes to be powered on. When this option is used, the comet system will find a NUMNODES number of arbitrary nodes that are available to boot as a computeset --allocation=ALLOCATION Allocation to charge when power on node(s) --reservation=RESERVATION Submit the request to an existing reservation --walltime=WALLTIME Walltime requested for the node(s). Walltime could be an integer value followed by a unit (m, h, d, w, for minute, hour, day, and week, respectively). 
E.g., 3h, 2d --isoname=ISONAME Name of the iso image after being stored remotely. If not specified, use the original filename --state=COMPUTESESTATE List only computeset with the specified state. The state could be submitted, running, completed --link Whether to open the console url or just show the link --concise Concise table view for cluster info --status Cluster table view displays only those columns showing state of nodes Arguments: ENDPOINT Service endpoint based on the yaml config file. By default it's either dev or production. CLUSTERID The assigned name of a cluster, e.g. vc1 COMPUTESETID An integer identifier assigned to a computeset COMPUTENODEID A compute node name, e.g., vm-vc1-0 If not provided, the requested action will be taken on the frontend node of the specified cluster COMPUTENODEIDS A set of compute node names in hostlist format, e.g., vm-vc1-[0-3] One single node is also acceptable: vm-vc1-0 If not provided, the requested action will be taken on the frontend node of the specified cluster NODESPARAM Specifying the node/nodes/computeset to act on. In case of integer, will be intepreted as a computesetid; in case of a hostlist format, e.g., vm-vc1-[0-3], a group of nodes; or a single host is also acceptable, e.g., vm-vc1-0 ISONAME Name of an iso image at remote server ISOIDNAME Index or name of an iso image at the remote server. The index is based on the list from 'comet iso list'. PATHISOFILE The full path to the iso image file to be uploaded OLDNAMES The list of current node names to be renamed, in hostlist format. A single host is also acceptable. NEWNAMES The list of new names to rename to, in hostlist format. A single host is also acceptable. 
""" # back up of all the proposed commands/options """ comet status comet tunnel start comet tunnel stop comet tunnel status comet logon comet logoff comet ll [CLUSTERID] [--format=FORMAT] comet docs comet info [--user=USER] [--project=PROJECT] [--format=FORMAT] comet cluster [CLUSTERID][--name=NAMES] [--user=USER] [--project=PROJECT] [--hosts=HOSTS] [--start=TIME_START] [--end=TIME_END] [--hosts=HOSTS] [--format=FORMAT] comet computeset [COMPUTESETID] comet start ID comet stop ID comet power on CLUSTERID [NODESPARAM] [--allocation=ALLOCATION] [--walltime=WALLTIME] comet power (off|reboot|reset|shutdown) CLUSTERID [NODESPARAM] comet console CLUSTERID [COMPUTENODEID] comet delete [all] [--user=USER] [--project=PROJECT] [--name=NAMES] [--hosts=HOSTS] [--start=TIME_START] [--end=TIME_END] [--host=HOST] comet delete --file=FILE comet update [--name=NAMES] [--hosts=HOSTS] [--start=TIME_START] [--end=TIME_END] comet add [--user=USER] [--project=PROJECT] [--host=HOST] [--description=DESCRIPTION] [--start=TIME_START] [--end=TIME_END] NAME comet add --file=FILENAME Options: --user=USER user name --name=NAMES Names of the vcluster --start=TIME_START Start time of the vcluster, in YYYY/MM/DD HH:MM:SS format. [default: 1901-01-01] --end=TIME_END End time of the vcluster, in YYYY/MM/DD HH:MM:SS format. In addition a duratio can be specified if the + sign is the first sig The duration will than be added to the start time. [default: 2100-12-31] --project=PROJECT project id --host=HOST host name --description=DESCRIPTION description summary of the vcluster --file=FILE Adding multiple vclusters from one file --format=FORMAT Format is either table, json, yaml, csv, rest [default: table] --allocation=ALLOCATION Allocation to charge when power on node(s) --walltime=WALLTIME Walltime requested for the node(s) Arguments: FILENAME the file to open in the cwd if . is specified. If file in in cwd you must specify it with ./FILENAME Opens the given URL in a browser window. 
""" """ if not arguments["tunnel"] and Comet.tunnelled and not Comet.is_tunnel(): Console.error("Please establish a tunnel first with:") print print (" comet tunnel start") print return "" try: if not arguments["tunnel"]: logon = Comet.logon() if logon is False: Console.error("Could not logon") return "" except: Console.error("Could not logon") # pprint (arguments) output_format = arguments["--format"] or "table" if arguments["status"]: Comet.state() elif arguments["tunnel"] and arguments["start"]: Comet.tunnel(True) elif arguments["tunnel"] and arguments["stop"]: Comet.tunnel(False) elif arguments["tunnel"] and arguments["status"]: Comet.state() elif arguments["logon"]: if self.context.comet_token is None: if Comet.logon(): Console.ok("logging on") self.context.comet_token = Comet.token else: Console.error("could not logon") else: Console.error("already logged on") elif arguments["logoff"]: if self.context.comet_token is None: Console.error("not logged in") else: if Comet.logoff(): Console.ok("Logging off") self.context.comet_token = None else: Console.error( "some issue while logging off. Maybe comet not reachable") elif arguments["docs"]: Comet.docs() elif arguments["info"]: Console.error("not yet implemented") elif arguments["add"]: print ("add the cluster") elif arguments["start"]: cluster_id = arguments["ID"] print("start", cluster_id) Cluster.start(cluster_id) elif arguments["stop"]: cluster_id = arguments["ID"] print("stop", cluster_id) Cluster.stop(cluster_id) elif arguments["ll"]: """ if arguments["init"]: print ("Initializing the comet configuration file...") config = ConfigDict("cloudmesh.yaml") # for unit testing only. cometConf = config["cloudmesh.comet"] endpoints = [] # print (cometConf.keys()) if "endpoints" in cometConf.keys(): endpoints = cometConf["endpoints"].keys() if len(endpoints) < 1: Console.error("No service endpoints available. 
" "Please check the config template", traceflag=False) return "" if "username" in cometConf.keys(): default_username = cometConf['username'] # print (default_username) if 'TBD' == default_username: set_default_user = \ input("Set a default username (RETURN to skip): ") if set_default_user: config.data["cloudmesh"]["comet"]["username"] = \ set_default_user config.save() Console.ok("Comet default username set!") if "active" in cometConf.keys(): active_endpoint = cometConf['active'] set_active_endpoint = \ input("Set the active service endpoint to use. " "The availalbe endpoints are - %s [%s]: " % ("/".join(endpoints), active_endpoint) ) if set_active_endpoint: if set_active_endpoint in endpoints: config.data["cloudmesh"]["comet"]["active"] = \ set_active_endpoint config.save() Console.ok("Comet active service endpoint set!") else: Console.error("The provided endpoint does not match " "any available service endpoints. Try %s" % "/".join(endpoints), traceflag=False) if cometConf['active'] in endpoints: endpoint_url = cometConf["endpoints"] \ [cometConf['active']]["nucleus_base_url"] api_version = cometConf["endpoints"] \ [cometConf['active']]["api_version"] set_endpoint_url = \ input("Set the base url for the nucleus %s service [%s]: " \ % (cometConf['active'], endpoint_url) ) if set_endpoint_url: if set_endpoint_url != endpoint_url: config.data["cloudmesh"]["comet"]["endpoints"] \ [cometConf['active']]["nucleus_base_url"] \ = set_endpoint_url config.save() Console.ok("Service base url set!") set_api_version = \ input("Set the api version for the nucleus %s service [%s]: " \ % (cometConf['active'], api_version) ) if set_api_version: if set_api_version != api_version: config.data["cloudmesh"]["comet"]["endpoints"] \ [cometConf['active']]["api_version"] \ = set_api_version config.save() Console.ok("Service api version set!") print("Authenticating to the nucleus %s " \ "service and obtaining the apikey..." 
\ % cometConf['active']) Comet.get_apikey(cometConf['active']) return '' # Comet.get_apikey() if arguments["active"]: config = ConfigDict("cloudmesh.yaml") cometConf = config["cloudmesh.comet"] endpoint = arguments["ENDPOINT"] or None # parameter specified, intended to change if endpoint: if "endpoints" in cometConf.keys(): endpoints = cometConf["endpoints"].keys() if endpoint in endpoints: config.data["cloudmesh"] \ ["comet"] \ ["active"] = endpoint config.save() Console.ok("Comet active service endpoint set" " to: %s" % endpoint) else: Console.error("The provided endpoint does not match " "any available service endpoints. Try %s." % "/".join(endpoints), traceflag = False) else: Console.error("No available endpoint to set. " "Check config file!", traceflag=False) else: if "active" in cometConf.keys(): active_endpoint = cometConf['active'] Console.ok("Current active service endpoint is: %s" % active_endpoint) else: Console.error("Cannot set active endpoint. " "Check config file!", traceflag = False) try: endpoint = None config = ConfigDict("cloudmesh.yaml") cometConf = config["cloudmesh.comet"] if arguments["--endpoint"]: endpoint = arguments["--endpoint"] if "endpoints" in cometConf.keys(): endpoints = cometConf["endpoints"].keys() if endpoint not in endpoints: Console.error("The provided endpoint does not match " "any available service endpoints. Try %s." % "/".join(endpoints), traceflag = False) return '' logon = Comet.logon(endpoint=endpoint) if logon is False: Console.error("Could not logon. 
Please try first:\n" "cm comet init", traceflag = False) return "" except: Console.error("Could not logon", traceflag = False) output_format = arguments["--format"] or "table" if arguments["ll"]: cluster_id = arguments["CLUSTERID"] or None print(Cluster.simple_list(cluster_id, format=output_format)) elif arguments["cluster"]: view = "FULL" if arguments["--concise"]: view = "CONCISE" if arguments["--status"]: view = "STATE" cluster_id = arguments["CLUSTERID"] sortkey = arguments["--sort"] print(Cluster.list(cluster_id, format=output_format, sort=sortkey, view=view)) elif arguments["computeset"]: computeset_id = arguments["COMPUTESETID"] or None cluster = arguments["--cluster"] or None state = arguments["--state"] or None allocation = arguments["--allocation"] or None cluster = arguments["--cluster"] or None print (Cluster.computeset(computeset_id, cluster, state, allocation)) elif arguments["start"]: clusterid = arguments["CLUSTERID"] numnodes = arguments["--count"] or None computenodeids = arguments["COMPUTENODEIDS"] or None # check allocation information for the cluster cluster = Cluster.list(clusterid, format='rest') try: allocations = cluster[0]['allocations'] except: # print (cluster) Console.error("No allocation available for the specified cluster."\ "Please check with the comet help team", traceflag=False) return "" # checking whether the computesetids is in valid hostlist format if computenodeids: try: hosts_param = hostlist.expand_hostlist(computenodeids) except hostlist.BadHostlist: Console.error("Invalid hosts list specified!", traceflag=False) return "" elif numnodes: try: param = int(numnodes) except ValueError: Console.error("Invalid count value specified!", traceflag=False) return "" if param <= 0: Console.error("count value has to be greather than zero", traceflag=False) return "" numnodes = param else: Console.error("You have to specify either the count of nodes, " \ "or the names of nodes in hostlist format", traceflag=False) return "" walltime = 
arguments["--walltime"] or None allocation = arguments["--allocation"] or None reservation = arguments["--reservation"] or None # validating walltime and allocation parameters walltime = Cluster.convert_to_mins(walltime) if not walltime: print("No valid walltime specified. " \ "Using system default (2 days)") if not allocation: if len(allocations) == 1: allocation = allocations[0] else: allocation = Cluster.display_get_allocation(allocations) # issuing call to start a computeset with specified parameters print(Cluster.computeset_start(clusterid, computenodeids, numnodes, allocation, reservation, walltime) ) elif arguments["terminate"]: computesetid = arguments["COMPUTESETID"] print(Cluster.computeset_terminate(computesetid)) elif arguments["power"]: clusterid = arguments["CLUSTERID"] or None fuzzyparam = arguments["NODESPARAM"] or None # parsing nodesparam for proper action if fuzzyparam: try: param = int(fuzzyparam) subject = 'COMPUTESET' except ValueError: param = fuzzyparam try: hosts_param = hostlist.expand_hostlist(fuzzyparam) subject = 'HOSTS' except hostlist.BadHostlist: Console.error("Invalid hosts list specified!", traceflag=False) return "" else: subject = 'FE' param = None if arguments["on"]: action = "on" elif arguments["off"]: action = "off" elif arguments["reboot"]: action = "reboot" elif arguments["reset"]: action = "reset" elif arguments["shutdown"]: action = "shutdown" else: action = None print (Cluster.power(clusterid, subject, param, action) ) elif arguments["console"]: clusterid = arguments["CLUSTERID"] linkonly = False if arguments["--link"]: linkonly = True nodeid = None if 'COMPUTENODEID' in arguments: nodeid = arguments["COMPUTENODEID"] Comet.console(clusterid, nodeid, linkonly) elif arguments["iso"]: if arguments["list"]: isos = (Comet.list_iso()) idx = 0 for iso in isos: if iso.startswith("public/"): iso = iso.split("/")[1] idx += 1 print ("{}: {}".format(idx, iso)) if arguments["upload"]: isofile = arguments["PATHISOFILE"] isofile = 
os.path.abspath(isofile) if os.path.isfile(isofile): if arguments["--isoname"]: filename = arguments["--isoname"] else: filename = os.path.basename(isofile) else: print ("File does not exist - {}" \ .format(arguments["PATHISOFILE"])) return "" print(Comet.upload_iso(filename, isofile)) elif arguments["attach"]: isoidname = arguments["ISOIDNAME"] clusterid = arguments["CLUSTERID"] computenodeids = arguments["COMPUTENODEIDS"] or None print(Cluster.attach_iso(isoidname, clusterid, computenodeids)) elif arguments["detach"]: clusterid = arguments["CLUSTERID"] computenodeids = arguments["COMPUTENODEIDS"] or None print(Cluster.detach_iso(clusterid, computenodeids)) elif arguments["node"]: if arguments["info"]: clusterid = arguments["CLUSTERID"] nodeid = arguments["COMPUTENODEID"] print (Cluster.node_info(clusterid, nodeid=nodeid, format=output_format)) elif arguments["rename"]: clusterid = arguments["CLUSTERID"] oldnames = Parameter.expand(arguments["OLDNAMES"]) newnames = Parameter.expand(arguments["NEWNAMES"]) if len(oldnames) != len(newnames): Console.error("Length of OLDNAMES and NEWNAMES have to be the same", traceflag=False) return "" else: for newname in newnames: if newname.strip() == "": Console.error("Newname cannot be empty string", traceflag=False) return "" cluster_data = Cluster.list(clusterid, format="rest") if len(cluster_data) > 0: computes = cluster_data[0]["computes"] nodenames = [x["name"] for x in computes] else: Console.error("Error obtaining the cluster information", traceflag=False) return "" # check if new names ar not already taken # to be implemented # print (oldnames) # print (newnames) # print (nodenames) oldset = set(oldnames) newset = set(newnames) currentset = set(nodenames) # at least one OLDNAME does not exist if not oldset <= currentset: Console.error("Not all OLDNAMES are valid", traceflag=False) return "" else: # those unchanged nodes keptset = currentset - oldset # duplication between name of unchanged nodes and # the requested 
NEWNAMES if keptset & newset != set(): Console.error("Not proceeding as otherwise introducing "\ "duplicated names", traceflag=False) else: for i in range(0,len(oldnames)): oldname = oldnames[i] newname = newnames[i] print ("%s -> %s" % (oldname, newname)) confirm = input("Confirm batch renaming (Y/y to confirm, "\ "any other key to abort):") if confirm.lower() == 'y': print ("Conducting batch renaming") for i in range(0,len(oldnames)): oldname = oldnames[i] newname = newnames[i] print (Cluster.rename_node(clusterid, oldname, newname)) else: print ("Action aborted!") elif arguments["reservation"]: if arguments["create"] or \ arguments["update"] or \ arguments["delete"]: Console.info("Operation not supported. Please contact XSEDE helpdesk for help!") if arguments["list"]: if "hpcinfo" in cometConf: hpcinfourl = cometConf["hpcinfo"]["endpoint"] else: Console.error("Admin feature not configured for this client", traceflag = False) return "" ret = requests.get("%s/reservations/%s" % (hpcinfourl, cometConf['active']) ) jobs = ret.json() result = Printer.write(jobs) print (result) return ""
def do_vm(self, args, arguments):
    """
    ::

        Usage:
            vm default [--cloud=CLOUD][--format=FORMAT]
            vm refresh [all][--cloud=CLOUD]
            vm boot [--name=NAME] [--cloud=CLOUD] [--username=USERNAME] [--image=IMAGE] [--flavor=FLAVOR] [--group=GROUP] [--public] [--secgroup=SECGROUP] [--key=KEY] [--dryrun]
            vm boot [--n=COUNT] [--cloud=CLOUD] [--username=USERNAME] [--image=IMAGE] [--flavor=FLAVOR] [--group=GROUP] [--public] [--secgroup=SECGROUP] [--key=KEY] [--dryrun]
            vm ping [NAME] [N]
            vm console [NAME] [--group=GROUP] [--cloud=CLOUD] [--force]
            vm start [NAMES] [--group=GROUP] [--cloud=CLOUD] [--force]
            vm stop [NAMES] [--group=GROUP] [--cloud=CLOUD] [--force]
            vm terminate [NAMES] [--group=GROUP] [--cloud=CLOUD] [--force]
            vm delete [NAMES] [--group=GROUP] [--cloud=CLOUD] [--keep] [--dryrun]
            vm ip assign [NAMES] [--cloud=CLOUD]
            vm ip show [NAMES] [--group=GROUP] [--cloud=CLOUD] [--format=FORMAT] [--refresh]
            vm ip inventory [NAMES] [--header=HEADER] [--file=FILE]
            vm ssh [NAME] [--username=USER] [--quiet] [--ip=IP] [--cloud=CLOUD] [--key=KEY] [--command=COMMAND]
            vm rename [OLDNAMES] [NEWNAMES] [--force] [--dryrun]
            vm list [NAMES] [--cloud=CLOUDS|--active] [--group=GROUP] [--format=FORMAT] [--refresh]
            vm status [NAMES]
            vm wait [--cloud=CLOUD] [--interval=SECONDS]
            vm info [--cloud=CLOUD] [--format=FORMAT]
            vm check NAME
            vm username USERNAME [NAMES] [--cloud=CLOUD]

        Arguments:
            COMMAND        positional arguments, the commands you want to
                           execute on the server(e.g. ls -a) separated by ';',
                           you will get a return of executing result instead of login to
                           the server, note that type in -- is suggested before
                           you input the commands
            NAME           server name. By default it is set to the name of last vm from database.
            NAMES          server name. By default it is set to the name of last vm from database.
            KEYPAIR_NAME   Name of the openstack keypair to be used to create VM. Note this is
                           not a path to key.
            NEWNAMES       New names of the VM while renaming.
            OLDNAMES       Old names of the VM while renaming.

        Options:
            --username=USERNAME   the username to login into the vm. If not
                                  specified it will be guessed
                                  from the image name and the cloud
            --ip=IP          give the public ip of the server
            --cloud=CLOUD    give a cloud to work on, if not given, selected
                             or default cloud will be used
            --count=COUNT    give the number of servers to start
            --detail         for table print format, a brief version
                             is used as default, use this flag to print
                             detailed table
            --flavor=FLAVOR  give the name or id of the flavor
            --group=GROUP          give the group name of server
            --secgroup=SECGROUP    security group name for the server
            --image=IMAGE    give the name or id of the image
            --key=KEY        specify a key to use, input a string which
                             is the full path to the private key file
            --keypair_name=KEYPAIR_NAME   Name of the openstack keypair
                             to be used to create VM. Note this is not a path to key.
            --user=USER      give the user name of the server that you want
                             to use to login
            --name=NAME      give the name of the virtual machine
            --force          rename/ delete vms without user's confirmation
            --command=COMMAND
                             specify the commands to be executed

        Description:
            commands used to boot, start or delete servers of a cloud

            vm default [options...]
                Displays default parameters that are set for vm boot either
                on the default cloud or the specified cloud.

            vm boot [options...]
                Boots servers on a cloud, user may specify flavor, image
                .etc, otherwise default values will be used, see how to set
                default values of a cloud: cloud help

            vm start [options...]
                Starts a suspended or stopped vm instance.

            vm stop [options...]
                Stops a vm instance .

            vm delete [options...]
                Delete servers of a cloud, user may delete a server by its
                name or id, delete servers of a group or servers of a cloud,
                give prefix and/or range to find servers by their names.
                Or user may specify more options to narrow the search

            vm floating_ip_assign [options...]
                assign a public ip to a VM of a cloud

            vm ip show [options...]
                show the ips of VMs

            vm ssh [options...]
                login to a server or execute commands on it

            vm list [options...]
                same as command "list vm", please refer to it

            vm status [options...]
                Retrieves status of last VM booted on cloud and displays it.

        Tip:
            give the VM name, but in a hostlist style, which is very
            convenient when you need a range of VMs e.g. sample[1-3]
            => ['sample1', 'sample2', 'sample3']
            sample[1-3,18] => ['sample1', 'sample2', 'sample3', 'sample18']

        Quoting commands:
            cm vm login gvonlasz-004 --command=\"uname -a\"
    """
    # Dead string kept from the original as design notes for terminate/delete
    # semantics and a sketch of bulk rename; it is not executed.
    """
    # terminate
    #     issues a termination to the cloud, keeps vm in database
    # delete
    #     issues a terminate if not already done
    #     (remember you do not have to go to cloud if state is already
    #     terminated)
    #     deletes the vm from database
    #
    # bulk rename

    rename abc[0-1] def[3-4]
        renames the abc0,abc1 -> def3,def4

    if arguments["rename"]:
        oldnames = Parameter.expand(arguments["OLDNAME"])
        newnames = Parameter.expand(arguments["NEWNAME"])

        # check if new names ar not already taken
        # to be implemented

        if len(oldnames) == len(newnames):
            for i in range(0, len(oldnames)):
                oldname = oldnames[i]
                newname = newnames[i]
                if newname is None or newname == '':
                    print("New node name cannot be empty")
                else:
                    print(Cluster.rename_node(clusterid, oldname, newname))
    """
    cm = CloudmeshDatabase()

    # Table printer for vm records (id/name/status columns).
    def _print_dict(d, header=None, output='table'):
        return Printer.write(d,
                             order=["id", "name", "status"],
                             output=output,
                             sort_keys=True)

    # Table printer for ip records (network/version/addr columns).
    def _print_dict_ip(d, header=None, output='table'):
        return Printer.write(d,
                             order=["network", "version", "addr"],
                             output=output,
                             sort_keys=True)

    # Build a vm name "<user>-NNN" from the default counter when no
    # explicit name was given; otherwise return the given name unchanged.
    def get_vm_name(name=None, offset=0, fill=3):
        if name is None:
            count = Default.get_counter(name='name') + offset
            prefix = Default.user
            if prefix is None or count is None:
                Console.error("Prefix and Count could not be retrieved correctly.",
                              traceflag=False)
                return
            name = prefix + "-" + str(count).zfill(fill)
        return name

    # Refresh the vm records of one cloud, reporting success/failure.
    def _refresh_cloud(cloud):
        try:
            msg = "Refresh VMs for cloud {:}.".format(cloud)
            if Vm.refresh(cloud=cloud):
                Console.ok("{:} OK.".format(msg))
            else:
                Console.error("{:} failed".format(msg), traceflag=False)
        except Exception as e:
            Console.error("Problem running VM refresh", traceflag=False)

    # Resolve arguments["NAMES"] against the vm records in the database.
    # Returns (all vm records, resolved name list).
    def _get_vm_names():
        vm_list = cm.find(kind="vm")
        vms = [vm["name"] for vm in vm_list]
        names = pattern = arguments["NAMES"]
        if pattern is not None:
            if "*" in pattern:
                names = search(vms, pattern)
            else:
                names = Parameter.expand(names)
        if names == ['last'] or names is None:
            # FIX: was `names == [Default.vm]` (a no-op comparison); the
            # intent is clearly to fall back to the last used vm.
            names = [Default.vm]
        return vm_list, names

    cloud = arguments["--cloud"] or Default.cloud

    config = ConfigDict("cloudmesh.yaml")
    active_clouds = config["cloudmesh"]["active"]

    # Refresh either the single given cloud or, with "all", every active one.
    def _refresh(cloud):
        all = arguments["all"] or None
        if all is None:
            _refresh_cloud(cloud)
        else:
            for cloud in active_clouds:
                _refresh_cloud(cloud)

    # Collect commonly used options with defaults into a dotdict.
    arg = dotdict(arguments)
    arg.cloud = arguments["--cloud"] or Default.cloud
    arg.image = arguments["--image"] or Default.get(name="image", category=arg.cloud)
    arg.flavor = arguments["--flavor"] or Default.get(name="flavor", category=arg.cloud)
    arg.group = arguments["--group"] or Default.group
    arg.secgroup = arguments["--secgroup"] or Default.secgroup
    arg.key = arguments["--key"] or Default.key
    arg.dryrun = arguments["--dryrun"]
    arg.name = arguments["--name"]
    arg.format = arguments["--format"] or 'table'
    arg.refresh = Default.refresh or arguments["--refresh"]
    arg.count = int(arguments["--n"] or 1)
    arg.dryrun = arguments["--dryrun"]
    arg.verbose = not arguments["--quiet"]

    #
    # in many cases use NAMES
    # if arg.NAMES is not None:
    #    arg.names = Parameter.expand(arg.NAMES)
    #    gvonlasz[001-002] gives ["gvonlasz-001", "gvonlasz-002"]
    # else:
    #    arg.names = None
    #

    if arguments["boot"]:
        # Boot arg.count servers; names are either the given --name or
        # generated from the default counter.
        arg.username = arguments["--username"] or Image.guess_username(arg.image)
        is_name_provided = arg.name is not None
        arg.user = Default.user
        for index in range(0, arg.count):
            vm_details = dotdict({
                "cloud": arg.cloud,
                "name": get_vm_name(arg.name, index),
                "image": arg.image,
                "flavor": arg.flavor,
                "key": arg.key,
                "secgroup": arg.secgroup,
                "group": arg.group,
                "username": arg.username,
                "user": arg.user
            })
            # correct the username
            vm_details.username = Image.guess_username_from_category(
                vm_details.cloud,
                vm_details.image,
                username=arg.username)
            try:
                if arg.dryrun:
                    print(Printer.attribute(vm_details, output=arg.format))
                    msg = "dryrun info. OK."
                    Console.ok(msg)
                else:
                    vm_id = Vm.boot(**vm_details)
                    if vm_id is None:
                        msg = "info. failed."
                        Console.error(msg, traceflag=False)
                        return ""
                    # set name and counter in defaults
                    Default.set_vm(value=vm_details.name)
                    if is_name_provided is False:
                        # only auto-generated names advance the counter
                        Default.incr_counter("name")
                    # Add to group
                    if vm_id is not None:
                        Group.add(name=vm_details.group,
                                  species="vm",
                                  member=vm_details.name,
                                  category=vm_details.cloud)
                    msg = "info. OK."
                    Console.ok(msg)
            except Exception as e:
                Console.error("Problem booting instance {name}".format(**vm_details),
                              traceflag=False)
    elif arguments["username"]:
        # Set the login username for one or more vms.
        arg.username = arguments["--username"] or Image.guess_username(arg.image)
        cloud = arg.cloud
        username = arg.USERNAME
        if arg.NAMES is None:
            names = [Default.vm]
        else:
            names = Parameter.expand(arg.NAMES)
        if len(names) == 0:
            return
        for name in names:
            arg.name = name
            Console.ok("Set username for {cloud}:{name} to {USERNAME}".format(**arg))
            Vm.set_login_user(name=name, cloud=cloud, username=username)
    elif arguments["default"]:
        # Display the default boot parameters for the selected cloud.
        try:
            count = Default.get_counter()
            prefix = Username()
            if prefix is None or count is None:
                Console.error("Prefix and Count could not be retrieved correctly.",
                              traceflag=False)
                return
            vm_name = prefix + "-" + str(count).zfill(3)
            arg = {
                "name": vm_name,
                "cloud": arguments["--cloud"] or Default.cloud
            }
            for attribute in ["image", "flavor"]:
                arg[attribute] = Default.get(name=attribute, category=cloud)
            for attribute in ["key", "group", "secgroup"]:
                arg[attribute] = Default.get(name=attribute, category='general')
            output = arguments["--format"] or "table"
            print(Printer.attribute(arg, output=output))
            msg = "info. OK."
            Console.ok(msg)
            # NOTE(review): this ValueError is constructed but never raised in
            # the original; kept as-is since raising it would change behavior.
            ValueError("default command not implemented properly. Upon "
                       "first install the defaults should be read from yaml.")
        except Exception as e:
            # Error.traceback(e)
            Console.error("Problem listing defaults", traceflag=False)
    elif arguments["ping"]:
        # Ping a vm's floating ip; "vm ping 3" means ping the default vm 3 times.
        try:
            if arguments["NAME"] is None and arguments["N"] is None:
                name = arguments["NAME"] or Default.vm
                n = arguments["N"] or 1
            elif arguments["NAME"].isdigit():
                # a bare number is the count, the target is the default vm
                n = arguments["NAME"]
                name = Default.vm
            else:
                name = arguments["NAME"] or Default.vm
                n = arguments["N"] or 1
            print("Ping:", name, str(n))
            vm = dotdict(Vm.list(name=name, category=cloud, output="dict")["dict"])
            ip = vm.floating_ip
            result = Shell.ping(host=ip, count=n)
            print(result)
        except Exception as e:
            # FIX: was `e.message`, which does not exist on Python 3 exceptions.
            Console.error(str(e), traceflag=False)
    elif arguments["console"]:
        # Show the provider console output of a vm.
        try:
            name = arguments["NAME"] or Default.vm
            vm = dotdict(Vm.list(name=name, category=cloud, output="dict")["dict"])
            cloud_provider = CloudProvider(cloud).provider
            vm_list = cloud_provider.list_console(vm.uuid)
            print(vm_list)
            msg = "info. OK."
            Console.ok(msg)
        except Exception as e:
            # Error.traceback(e)
            Console.error("Problem retrieving status of the VM", traceflag=False)
    elif arguments["status"]:
        # Print the status of selected vms (or the first one if none given).
        try:
            cloud_provider = CloudProvider(cloud).provider
            vm_list = cloud_provider.list_vm(cloud)
            vms = [vm_list[i]["name"] for i in vm_list]
            print("V", vms)
            pattern = arguments["NAMES"]
            if pattern is not None:
                if "*" in pattern:
                    print("serach")
                    names = search(vms, pattern)
                else:
                    # FIX: was `Parameter.expand()` with no argument, which
                    # raises TypeError; the pattern must be expanded.
                    names = Parameter.expand(pattern)
                for i in vm_list:
                    if vm_list[i]["name"] in names:
                        print("{} {}".format(vm_list[i]["status"], vm_list[i]["name"]))
            else:
                print("{} {}".format(vm_list[0]["status"], vm_list[0]["name"]))
        except Exception as e:
            # Error.traceback(e)
            Console.error("Problem retrieving status of the VM", traceflag=True)
    elif arguments["wait"]:
        # Poll the cloud until the first vm becomes ACTIVE (at most 9 polls).
        interval = arguments["--interval"] or 5
        try:
            cloud_provider = CloudProvider(cloud).provider
            for i in range(1, 10):
                vm_list = cloud_provider.list_vm(cloud)
                # FIX: was `time.sleep(float(1))`, ignoring the parsed
                # --interval option.
                time.sleep(float(interval))
                d = {}
                for id in vm_list:
                    vm = vm_list[id]
                    d[vm["name"]] = vm["status"]
                print(d)
                print("{} {}".format(vm_list[0]["status"], vm_list[0]["name"]))
                if vm_list[0]["status"] in ['ACTIVE']:
                    return
        except Exception as e:
            # Error.traceback(e)
            Console.error("Problem retrieving status of the VM", traceflag=True)
    elif arguments["info"]:
        # Print the attributes of the first vm returned by the provider.
        try:
            cloud_provider = CloudProvider(cloud).provider
            vms = cloud_provider.list_vm(cloud)
            vm = vms[0]
            output_format = arguments["--format"] or "table"
            print(Printer.attribute(vm, output=output_format))
            msg = "info. OK."
            Console.ok(msg)
        except Exception as e:
            # Error.traceback(e)
            Console.error("Problem retrieving status of the VM", traceflag=False)
    elif arguments["check"]:
        # Placeholder health check: reports hard-coded values and flags
        # itself as not implemented.
        test = {}
        try:
            names = Parameter.expand(arguments["NAME"])
            id = 0
            for name in names:
                print("Not implemented: {}".format(name))
                # TODO: check the status of the vms
                status = "active"
                # TODO: check if they have a floating ip
                # TODO: get ip
                floating_ip = "127.0.0.1"
                ip = True
                # ping
                # TODO: ping the machine with the shell command
                ping = True
                # check if one can login and run a command
                check = False
                try:
                    r = Shell.execute("uname", "-a")
                    # do a real check
                    check = True
                except:
                    check = False
                test[name] = {
                    "id": id,
                    "name": name,
                    "status": status,
                    "ip": ip,
                    "ping": ping,
                    "login": check
                }
                id += 1
            pprint(test)
            print(Printer.write(test,
                                order=["id", "name", "status", "ip", "ping", "login"],
                                output="table",
                                sort_keys=True))
            msg = "not yet implemented. failed."
            Console.error(msg, traceflag=False)
        except Exception as e:
            # Error.traceback(e)
            Console.error("Problem retrieving status of the VM", traceflag=False)
    elif arguments["start"]:
        try:
            servers = Parameter.expand(arguments["NAMES"])
            # If names not provided, take the last vm from DB.
            # FIX: added the `servers is None` guard for consistency with the
            # stop branch; Parameter.expand may return None for missing NAMES.
            if servers is None or len(servers) == 0:
                last_vm = Default.vm
                if last_vm is None:
                    Console.error("No VM records in database. Please run vm refresh.",
                                  traceflag=False)
                    return ""
                # NOTE(review): assumes Default.vm is a record with a "name"
                # key — confirm; elsewhere it is used directly as a name.
                name = last_vm["name"]
                # print(name)
                servers = list()
                servers.append(name)
            group = arguments["--group"]
            force = arguments["--force"]
            # if default cloud not set, return error
            if not cloud:
                Console.error("Default cloud not set.", traceflag=False)
                return ""
            Vm.start(cloud=cloud, servers=servers)
            msg = "info. OK."
            Console.ok(msg)
        except Exception as e:
            # Error.traceback(e)
            Console.error("Problem starting instances", traceflag=False)
    elif arguments["stop"]:
        try:
            servers = Parameter.expand(arguments["NAMES"])
            # If names not provided, take the last vm from DB.
            if servers is None or len(servers) == 0:
                last_vm = Default.vm
                if last_vm is None:
                    Console.error("No VM records in database. Please run vm refresh.",
                                  traceflag=False)
                    return ""
                name = last_vm["name"]
                # print(name)
                servers = list()
                servers.append(name)
            group = arguments["--group"]
            force = arguments["--force"]
            # if default cloud not set, return error
            if not cloud:
                Console.error("Default cloud not set.", traceflag=False)
                return ""
            Vm.stop(cloud=cloud, servers=servers)
            msg = "info. OK."
            Console.ok(msg)
        except Exception as e:
            # Error.traceback(e)
            Console.error("Problem stopping instances", traceflag=False)
    elif arguments["refresh"]:
        _refresh(cloud)
    elif arguments["delete"]:
        # Delete vms one by one; --keep retains cloud-side resources.
        dryrun = arguments["--dryrun"]
        group = arguments["--group"]
        force = not arguments["--keep"]
        cloud = arguments["--cloud"]
        vms, servers = _get_vm_names()
        if servers in [None, []]:
            Console.error("No vms found.", traceflag=False)
            return ""
        for server in servers:
            if dryrun:
                Console.ok("Dryrun: delete {}".format(server))
            else:
                Vm.delete(servers=[server], force=force)
        return ""
    elif arguments["ip"] and arguments["assign"]:
        # Assign a floating ip to vms that do not have one yet.
        if arguments["NAMES"] is None:
            names = [Default.vm]
        else:
            names = Parameter.expand(arguments["NAMES"])
        for name in names:
            # ip = Network.get_floatingip(....)
            vm = dotdict(Vm.list(name=name, category=cloud, output="dict")["dict"])
            if vm.floating_ip is None:
                Console.ok("Assign IP to {}".format(name))
                try:
                    floating_ip = Network.find_assign_floating_ip(cloudname=cloud,
                                                                  instance_id=name)
                    Vm.refresh(cloud=cloud)
                    if floating_ip is not None:
                        print("Floating IP assigned to {:} is {:}".format(name, floating_ip))
                        msg = "info. OK."
                        Console.ok(msg)
                except Exception as e:
                    Console.error("Problem assigning floating ips.", traceflag=False)
            else:
                Console.error("VM {} already has a floating ip: {}".format(name, vm.floating_ip),
                              traceflag=False)
    elif arguments["ip"] and arguments["inventory"]:
        # Write an Ansible-style inventory file with the vms' floating ips.
        vms, names = _get_vm_names()
        if names in [None, []]:
            if str(Default.vm) in ['None', None]:
                Console.error("The default vm is not set.", traceflag=False)
                return ""
            else:
                names = [Default.vm]
        header = arguments["--header"] or "[servers]"
        filename = arguments["--file"] or "inventory.txt"
        try:
            vm_ips = []
            for vm in vms:
                if vm["name"] in names:
                    print(vm["name"])
                    vm_ips.append(vm["floating_ip"])
            result = header + "\n"
            result += '\n'.join(vm_ips)
            Console.ok("Creating inventory file: {}".format(filename))
            Console.ok(result)
            with open(filename, 'w') as f:
                f.write(result)
        except Exception as e:
            Console.error("Problem getting ip addresses for instance", traceflag=True)
    elif arguments["ip"] and arguments["show"]:
        # Show the ip addresses of the selected vms.
        if arguments["NAMES"] is None:
            if str(Default.vm) in ['None', None]:
                Console.error("The default vm is not set.", traceflag=False)
                return ""
            else:
                names = [Default.vm]
        else:
            names = Parameter.expand(arguments["NAMES"])
        group = arguments["--group"]
        output_format = arguments["--format"] or "table"
        refresh = arguments["--refresh"]
        try:
            ips = Ip.list(cloud=arg.cloud, output=output_format, names=names)
            print(ips)
        except Exception as e:
            Console.error("Problem getting ip addresses for instance", traceflag=False)
    elif arguments["ssh"]:
        # Open an ssh session (or run --command) on a vm.
        def _print(msg):
            if arg.verbose:
                Console.msg(msg)

        chameleon = "chameleon" in \
            ConfigDict(filename="cloudmesh.yaml")["cloudmesh"]["clouds"][arg.cloud]["cm_host"]
        if chameleon:
            arg.username = "******"
        elif arg.cloud == "azure":
            arg.username = \
                ConfigDict(filename="cloudmesh.yaml")["cloudmesh"]["clouds"]["azure"]["default"]["username"]
        else:
            if arg.username is None:
                Console.error("Could not guess the username of the vm", traceflag=False)
                return
            arg.username = arguments["--username"] or Image.guess_username(arg.image)
        arg.command = arguments["--command"]
        data = dotdict({
            'name': arguments["NAME"] or Default.vm,
            'username': arg.username,
            'cloud': arg.cloud,
            'command': arg.command
        })
        _print("login {cloud}:{username}@{name}".format(**data))
        vm = Vm.get(data.name, category=data.cloud)
        Vm.set_login_user(name=data.name, cloud=data.cloud, username=data.username)
        data.floating_ip = vm.floating_ip
        data.key = arguments["--key"] or Default.key
        _print(Printer.attribute(data))
        '''
        if vm.username is None:
            user_from_db = Vm.get_login_user(vm.name, vm.cloud)
            user_suggest = user_from_db or Default.user
            username = input("Username (Default: {}):".format(user_suggest)) or user_suggest
            Vm.set_login_user(name=data.name, cloud=cloud, username=data.username)
        '''
        ip = arguments["--ip"]
        commands = arguments["--command"]
        ip_addresses = []
        cloud_provider = CloudProvider(cloud).provider
        ip_addr = cloud_provider.get_ips(vm.name)
        ipaddr_dict = Vm.construct_ip_dict(ip_addr, cloud)
        for entry in ipaddr_dict:
            ip_addresses.append(ipaddr_dict[entry]["addr"])
        if len(ip_addresses) > 0:
            if ip is not None:
                if ip not in ip_addresses:
                    Console.error("IP Address specified does not match with the host.",
                                  traceflag=False)
                    return ""
            else:
                _print("Determining IP Address to use with a ping test.")
                # This part assumes that the ping is allowed to the machine.
                for ipadd in ip_addresses:
                    _print("Checking {:}...".format(ipadd))
                    try:
                        # Evading ping test, as ping is not enabled for VMs on
                        # Azure cloud
                        # socket.gethostbyaddr(ipadd)
                        # ip will be set if above command is successful.
                        ip = ipadd
                    except socket.herror:
                        _print("Cannot reach {:}.".format(ipadd))
            if ip is None:
                _print("Unable to connect to the machine")
                return ""
            else:
                _print("IP to be used is: {:}".format(ip))
            #
            # TODO: is this correctly implemented
            #
            if not cloud == 'azure':
                SecGroup.enable_ssh(cloud=cloud)
            if arg.verbose:
                Console.info("Connecting to Instance at IP:" + format(ip))
            # Constructing the ssh command to connect to the machine.
            sshcommand = "ssh"
            if arg.key is not None:
                sshcommand += " -i {:}".format(arg.key)
            sshcommand += " -o StrictHostKeyChecking=no"
            sshcommand += " {:}@{:}".format(data.username, ip)
            if commands is not None:
                sshcommand += " \"{:}\"".format(commands)
            # print(sshcommand)
            os.system(sshcommand)
        else:
            Console.error("No Public IPs found for the instance", traceflag=False)
    elif arguments["list"]:
        # List vms per cloud, filtered by names or group.
        # groups = Group.list(output="dict")
        arg = dotdict(arguments)
        arg.names = arguments["NAMES"]
        arg.group = arguments["--group"]
        if arg.group is None:
            arg.group = []
        else:
            arg.group = Parameter.expand(arguments["--group"])
        arg.refresh = arguments["--refresh"] or Default.refresh
        if arg.NAMES is not None:
            arg.names = Parameter.expand(arguments["NAMES"])
        else:
            arg.names = ["all"]
        _format = arguments["--format"] or "table"
        if arguments["--active"]:
            clouds = active_clouds
        else:
            if arguments["--cloud"]:
                clouds = Parameter.expand(arguments["--cloud"])
            else:
                clouds = [Default.cloud]
        try:
            d = ConfigDict("cloudmesh.yaml")
            for cloud in clouds:
                if arg.refresh:
                    _refresh(cloud)
                Console.ok("Listing VMs on Cloud: {:}".format(cloud))
                vms = Vm.list(category=cloud, output="raw")
                # print ("XXX", type(vms), vms)
                if vms is None:
                    break
                result = []
                if "all" in arg.names:
                    if result is None:
                        result = []
                    else:
                        result = vms
                elif arg.group is not None and len(arg.group) > 0:
                    for vm in vms:
                        if vm["group"] in arg.group:
                            result.append(vm)
                elif arg.names is not None and len(arg.names) > 0:
                    for vm in vms:
                        if vm["name"] in arg.names:
                            result.append(vm)
                if len(result) > 0:
                    # print(result)
                    (order, header) = CloudProvider(cloud).get_attributes("vm")
                    print(Printer.write(result,
                                        order=order,
                                        output=_format))
                else:
                    Console.error("No data found with requested parameters.",
                                  traceflag=False)
        except Exception as e:
            # Error.traceback(e)
            Console.error("Problem listing all instances", traceflag=False)
    elif arguments["rename"]:
        # Rename vms pairwise: OLDNAMES[i] -> NEWNAMES[i].
        try:
            oldnames = Parameter.expand(arguments["OLDNAMES"])
            newnames = Parameter.expand(arguments["NEWNAMES"])
            force = arguments["--force"]
            if oldnames is None or newnames is None:
                Console.error("Wrong VMs specified for rename", traceflag=False)
            elif len(oldnames) != len(newnames):
                # FIX: keyword was misspelled `traceflat`, which would raise
                # TypeError on this error path.
                Console.error("The number of VMs to be renamed is wrong",
                              traceflag=False)
            else:
                for i in range(0, len(oldnames)):
                    oldname = oldnames[i]
                    newname = newnames[i]
                    if arguments["--dryrun"]:
                        Console.ok("Rename {} to {}".format(oldname, newname))
                    else:
                        Vm.rename(cloud=cloud,
                                  oldname=oldname,
                                  newname=newname,
                                  force=force)
                msg = "info. OK."
                Console.ok(msg)
        except Exception as e:
            # Error.traceback(e)
            Console.error("Problem deleting instances", traceflag=False)
    return ""
def list_size(self, cloudname, **kwargs):
    """Return the sizes (flavors) offered by the libcloud provider.

    :param cloudname: name of the cloud (kept for interface parity with
        the other list_* methods; not used by the provider call)
    :param kwargs: additional options (currently ignored)
    :return: dict representation of the provider's size objects
    """
    Console.info("In list_sizes of libcloud")
    raw_sizes = self.provider.list_sizes()
    # echo the raw provider objects before converting them
    self._print(raw_sizes)
    return self._to_dict(raw_sizes)
def initialize(self, cloudname, user=None):
    """Initialize the libcloud EC2 driver for the given cloud.

    Reads the cloud's credentials from cloudmesh.yaml. For non-AWS
    clouds the EC2_URL is parsed into host/port/path and passed to the
    driver; for AWS the driver is created with only the key pair.

    :param cloudname: name of the cloud section in cloudmesh.yaml
    :param user: unused; kept for interface compatibility
    """
    Console.info("Initializing libcloud-ec2 for " + cloudname)
    cls = get_driver(Provider.EC2)
    d = ConfigDict("cloudmesh.yaml")
    self.config = d["cloudmesh"]["clouds"][cloudname]
    credentials = self.config["credentials"]
    # read (and thereby validate presence of) the cm_type entry
    cm_type = self.config["cm_type"]
    ec2_access_key = credentials['EC2_ACCESS_KEY']
    ec2_secret_key = credentials['EC2_SECRET_KEY']
    if not cloudname == "aws":
        auth_url = credentials["EC2_URL"]
        searchobj = re.match(r'^http[s]?://(.+):([0-9]+)/([a-zA-Z/]*)',
                             auth_url, re.M | re.I)
        path = None
        host = None
        port = None
        # FIX: extra_args was only assigned inside the `if searchobj:` branch;
        # on a non-matching URL the `cls(..., **extra_args)` call below raised
        # NameError. Default to an empty dict so the driver call is reached.
        extra_args = {}
        if searchobj:
            host = searchobj.group(1)
            port = searchobj.group(2)
            path = searchobj.group(3)
            Console.info("url : " + searchobj.group())
            Console.info("host: " + host)
            Console.info("port: " + port)
            Console.info("path: " + path)
            extra_args = {'path': path}
        else:
            Console.error("Authentication url incorrect: {}".format(auth_url))
        self.provider = cls(ec2_access_key,
                            ec2_secret_key,
                            host=host,
                            port=port,
                            **extra_args)
    else:
        Console.info("AWS INIT")
        self.provider = cls(ec2_access_key, ec2_secret_key)
def do_comet(self, args, arguments):
    """
    ::

      Usage:
         comet init
         comet active [ENDPOINT]
         comet ll [CLUSTERID] [--format=FORMAT] [--endpoint=ENDPOINT]
         comet cluster [--concise|--status] [CLUSTERID]
                       [--format=FORMAT]
                       [--sort=SORTKEY]
                       [--endpoint=ENDPOINT]
         comet computeset [COMPUTESETID]
                  [--allocation=ALLOCATION]
                  [--cluster=CLUSTERID]
                  [--state=COMPUTESESTATE]
                  [--endpoint=ENDPOINT]
         comet start CLUSTERID [--count=NUMNODES] [COMPUTENODEIDS]
                  [--allocation=ALLOCATION]
                  [--reservation=RESERVATION]
                  [--walltime=WALLTIME]
                  [--endpoint=ENDPOINT]
         comet terminate COMPUTESETID [--endpoint=ENDPOINT]
         comet power (on|off|reboot|reset|shutdown) CLUSTERID [NODESPARAM]
                  [--endpoint=ENDPOINT]
         comet console [--link] CLUSTERID [COMPUTENODEID]
                  [--endpoint=ENDPOINT]
         comet node info CLUSTERID [COMPUTENODEID] [--format=FORMAT]
                  [--endpoint=ENDPOINT]
         comet node rename CLUSTERID OLDNAMES NEWNAMES
                  [--endpoint=ENDPOINT]
         comet iso list [--endpoint=ENDPOINT]
         comet iso upload [--isoname=ISONAME] PATHISOFILE
                  [--endpoint=ENDPOINT]
         comet iso attach ISOIDNAME CLUSTERID [COMPUTENODEIDS]
                  [--endpoint=ENDPOINT]
         comet iso detach CLUSTERID [COMPUTENODEIDS]
                  [--endpoint=ENDPOINT]
         comet reservation (list|create|update|delete)

      Options:
         --endpoint=ENDPOINT    Specify the comet nucleus service
                                endpoint to work with, e.g., dev
                                or production
         --format=FORMAT        Format is either table, json, yaml,
                                csv, rest
                                [default: table]
         --sort=SORTKEY         Sorting key for the table view
         --count=NUMNODES       Number of nodes to be powered on.
                                When this option is used, the comet system
                                will find a NUMNODES number of arbitrary
                                nodes that are available to boot as a
                                computeset
         --allocation=ALLOCATION     Allocation to charge when power on
                                     node(s)
         --reservation=RESERVATION   Submit the request to an existing
                                     reservation
         --walltime=WALLTIME    Walltime requested for the node(s).
                                Walltime could be an integer value followed
                                by a unit (m, h, d, w, for minute, hour,
                                day, and week, respectively). E.g., 3h, 2d
         --isoname=ISONAME      Name of the iso image after being stored
                                remotely. If not specified, use the
                                original filename
         --state=COMPUTESESTATE List only computeset with the specified
                                state. The state could be submitted,
                                running, completed
         --link                 Whether to open the console url or just
                                show the link
         --concise              Concise table view for cluster info
         --status               Cluster table view displays only those
                                columns showing state of nodes

      Arguments:
         ENDPOINT        Service endpoint based on the yaml config file.
                         By default it's either dev or production.
         CLUSTERID       The assigned name of a cluster, e.g. vc1
         COMPUTESETID    An integer identifier assigned to a computeset
         COMPUTENODEID   A compute node name, e.g., vm-vc1-0
                         If not provided, the requested action will be taken
                         on the frontend node of the specified cluster
         COMPUTENODEIDS  A set of compute node names in hostlist format,
                         e.g., vm-vc1-[0-3]
                         One single node is also acceptable: vm-vc1-0
                         If not provided, the requested action will be taken
                         on the frontend node of the specified cluster
         NODESPARAM      Specifying the node/nodes/computeset to act on.
                         In case of integer, will be intepreted as a
                         computesetid; in case of a hostlist format, e.g.,
                         vm-vc1-[0-3], a group of nodes; or a single host
                         is also acceptable, e.g., vm-vc1-0
         ISONAME         Name of an iso image at remote server
         ISOIDNAME       Index or name of an iso image at the remote server.
                         The index is based on the list from 'comet iso
                         list'.
         PATHISOFILE     The full path to the iso image file to be uploaded
         OLDNAMES        The list of current node names to be renamed, in
                         hostlist format. A single host is also acceptable.
         NEWNAMES        The list of new names to rename to, in hostlist
                         format. A single host is also acceptable.
    """
    # back up of all the proposed commands/options
    """
    comet status
    comet tunnel start
    comet tunnel stop
    comet tunnel status
    comet logon
    comet logoff
    comet ll [CLUSTERID] [--format=FORMAT]
    comet docs
    comet info [--user=USER]
               [--project=PROJECT]
               [--format=FORMAT]
    comet cluster [CLUSTERID][--name=NAMES]
                  [--user=USER]
                  [--project=PROJECT]
                  [--hosts=HOSTS]
                  [--start=TIME_START]
                  [--end=TIME_END]
                  [--hosts=HOSTS]
                  [--format=FORMAT]
    comet computeset [COMPUTESETID]
    comet start ID
    comet stop ID
    comet power on CLUSTERID [NODESPARAM]
                   [--allocation=ALLOCATION]
                   [--walltime=WALLTIME]
    comet power (off|reboot|reset|shutdown) CLUSTERID [NODESPARAM]
    comet console CLUSTERID [COMPUTENODEID]
    comet delete [all]
                 [--user=USER]
                 [--project=PROJECT]
                 [--name=NAMES]
                 [--hosts=HOSTS]
                 [--start=TIME_START]
                 [--end=TIME_END]
                 [--host=HOST]
    comet delete --file=FILE
    comet update [--name=NAMES]
                 [--hosts=HOSTS]
                 [--start=TIME_START]
                 [--end=TIME_END]
    comet add [--user=USER]
              [--project=PROJECT]
              [--host=HOST]
              [--description=DESCRIPTION]
              [--start=TIME_START]
              [--end=TIME_END]
              NAME
    comet add --file=FILENAME

    Options:
        --user=USER        user name
        --name=NAMES       Names of the vcluster
        --start=TIME_START Start time of the vcluster, in
                           YYYY/MM/DD HH:MM:SS format. [default: 1901-01-01]
        --end=TIME_END     End time of the vcluster, in YYYY/MM/DD
                           HH:MM:SS format. In addition a duratio
                           can be specified if the + sign is the
                           first sig The duration will than be
                           added to the start time. [default: 2100-12-31]
        --project=PROJECT  project id
        --host=HOST        host name
        --description=DESCRIPTION  description summary of the vcluster
        --file=FILE        Adding multiple vclusters from one file
        --format=FORMAT    Format is either table, json, yaml, csv, rest
                           [default: table]
        --allocation=ALLOCATION     Allocation to charge when power on
                                    node(s)
        --walltime=WALLTIME         Walltime requested for the node(s)

    Arguments:
        FILENAME  the file to open in the cwd if . is
                  specified. If file in in cwd
                  you must specify it with ./FILENAME

    Opens the given URL in a browser window.
    """
    # Old, disabled dispatch logic kept for reference as a bare string.
    """
    if not arguments["tunnel"] and Comet.tunnelled and not Comet.is_tunnel():
        Console.error("Please establish a tunnel first with:")
        print
        print ("    comet tunnel start")
        print
        return ""

    try:
        if not arguments["tunnel"]:
            logon = Comet.logon()
            if logon is False:
                Console.error("Could not logon")
                return ""
    except:
        Console.error("Could not logon")
    # pprint (arguments)
    output_format = arguments["--format"] or "table"

    if arguments["status"]:
        Comet.state()
    elif arguments["tunnel"] and arguments["start"]:
        Comet.tunnel(True)
    elif arguments["tunnel"] and arguments["stop"]:
        Comet.tunnel(False)
    elif arguments["tunnel"] and arguments["status"]:
        Comet.state()
    elif arguments["logon"]:
        if self.context.comet_token is None:
            if Comet.logon():
                Console.ok("logging on")
                self.context.comet_token = Comet.token
            else:
                Console.error("could not logon")
        else:
            Console.error("already logged on")
    elif arguments["logoff"]:
        if self.context.comet_token is None:
            Console.error("not logged in")
        else:
            if Comet.logoff():
                Console.ok("Logging off")
                self.context.comet_token = None
            else:
                Console.error(
                    "some issue while logging off. Maybe comet not reachable")
    elif arguments["docs"]:
        Comet.docs()
    elif arguments["info"]:
        Console.error("not yet implemented")
    elif arguments["add"]:
        print ("add the cluster")
    elif arguments["start"]:
        cluster_id = arguments["ID"]
        print("start", cluster_id)
        Cluster.start(cluster_id)
    elif arguments["stop"]:
        cluster_id = arguments["ID"]
        print("stop", cluster_id)
        Cluster.stop(cluster_id)
    elif arguments["ll"]:
    """
    # --- `comet init`: interactive, one-time configuration of the
    # cloudmesh.yaml comet section (username, active endpoint, base url,
    # api version) followed by apikey retrieval. Returns early.
    if arguments["init"]:
        print("Initializing the comet configuration file...")
        config = ConfigDict("cloudmesh.yaml")
        # for unit testing only.
        cometConf = config["cloudmesh.comet"]
        endpoints = []
        # print (cometConf.keys())
        if "endpoints" in cometConf.keys():
            endpoints = cometConf["endpoints"].keys()
        if len(endpoints) < 1:
            Console.error("No service endpoints available. "
                          "Please check the config template",
                          traceflag=False)
            return ""
        if "username" in cometConf.keys():
            default_username = cometConf['username']
            # print (default_username)
            # 'TBD' is the placeholder value in the shipped template.
            if 'TBD' == default_username:
                set_default_user = \
                    input("Set a default username (RETURN to skip): ")
                if set_default_user:
                    config.data["cloudmesh"]["comet"]["username"] = \
                        set_default_user
                    config.save()
                    Console.ok("Comet default username set!")
        if "active" in cometConf.keys():
            active_endpoint = cometConf['active']
            set_active_endpoint = \
                input("Set the active service endpoint to use. "
                      "The availalbe endpoints are - %s [%s]: "
                      % ("/".join(endpoints), active_endpoint)
                      )
            if set_active_endpoint:
                if set_active_endpoint in endpoints:
                    config.data["cloudmesh"]["comet"]["active"] = \
                        set_active_endpoint
                    config.save()
                    Console.ok("Comet active service endpoint set!")
                else:
                    Console.error("The provided endpoint does not match "
                                  "any available service endpoints. Try %s"
                                  % "/".join(endpoints),
                                  traceflag=False)
        # Re-read 'active' so prompts below reflect any change just made.
        if cometConf['active'] in endpoints:
            endpoint_url = cometConf["endpoints"] \
                [cometConf['active']]["nucleus_base_url"]
            api_version = cometConf["endpoints"] \
                [cometConf['active']]["api_version"]
            set_endpoint_url = \
                input("Set the base url for the nucleus %s service [%s]: "
                      % (cometConf['active'], endpoint_url)
                      )
            if set_endpoint_url:
                if set_endpoint_url != endpoint_url:
                    config.data["cloudmesh"]["comet"]["endpoints"] \
                        [cometConf['active']]["nucleus_base_url"] \
                        = set_endpoint_url
                    config.save()
                    Console.ok("Service base url set!")
            set_api_version = \
                input("Set the api version for the nucleus %s service [%s]: "
                      % (cometConf['active'], api_version)
                      )
            if set_api_version:
                if set_api_version != api_version:
                    config.data["cloudmesh"]["comet"]["endpoints"] \
                        [cometConf['active']]["api_version"] \
                        = set_api_version
                    config.save()
                    Console.ok("Service api version set!")
            print("Authenticating to the nucleus %s "
                  "service and obtaining the apikey..."
                  % cometConf['active'])
            Comet.get_apikey(cometConf['active'])
        return ''
        # Comet.get_apikey()
    # --- `comet active [ENDPOINT]`: show or change the active endpoint.
    if arguments["active"]:
        config = ConfigDict("cloudmesh.yaml")
        cometConf = config["cloudmesh.comet"]
        endpoint = arguments["ENDPOINT"] or None
        # parameter specified, intended to change
        if endpoint:
            if "endpoints" in cometConf.keys():
                endpoints = cometConf["endpoints"].keys()
                if endpoint in endpoints:
                    config.data["cloudmesh"] \
                        ["comet"] \
                        ["active"] = endpoint
                    config.save()
                    Console.ok("Comet active service endpoint set"
                               " to: %s" % endpoint)
                else:
                    Console.error("The provided endpoint does not match "
                                  "any available service endpoints. Try %s."
                                  % "/".join(endpoints),
                                  traceflag=False)
            else:
                Console.error("No available endpoint to set. "
                              "Check config file!",
                              traceflag=False)
        else:
            # no parameter: just report the current active endpoint
            if "active" in cometConf.keys():
                active_endpoint = cometConf['active']
                Console.ok("Current active service endpoint is: %s"
                           % active_endpoint)
            else:
                Console.error("Cannot set active endpoint. "
                              "Check config file!",
                              traceflag=False)
    # --- All remaining subcommands require a logon to the nucleus
    # service (optionally against a --endpoint override).
    try:
        endpoint = None
        config = ConfigDict("cloudmesh.yaml")
        cometConf = config["cloudmesh.comet"]
        if arguments["--endpoint"]:
            endpoint = arguments["--endpoint"]
            if "endpoints" in cometConf.keys():
                endpoints = cometConf["endpoints"].keys()
                if endpoint not in endpoints:
                    Console.error("The provided endpoint does not match "
                                  "any available service endpoints. Try %s."
                                  % "/".join(endpoints),
                                  traceflag=False)
                    return ''
        logon = Comet.logon(endpoint=endpoint)
        if logon is False:
            Console.error("Could not logon. Please try first:\n"
                          "cm comet init",
                          traceflag=False)
            return ""
    except:
        # NOTE(review): broad except deliberately keeps the CLI alive on
        # any logon failure; the dispatch below may then still fail.
        Console.error("Could not logon", traceflag=False)
    output_format = arguments["--format"] or "table"
    # --- subcommand dispatch ---
    if arguments["ll"]:
        cluster_id = arguments["CLUSTERID"] or None
        print(Cluster.simple_list(cluster_id, format=output_format))
    elif arguments["cluster"]:
        view = "FULL"
        if arguments["--concise"]:
            view = "CONCISE"
        if arguments["--status"]:
            view = "STATE"
        cluster_id = arguments["CLUSTERID"]
        sortkey = arguments["--sort"]
        print(Cluster.list(cluster_id,
                           format=output_format,
                           sort=sortkey,
                           view=view))
    elif arguments["computeset"]:
        computeset_id = arguments["COMPUTESETID"] or None
        cluster = arguments["--cluster"] or None
        state = arguments["--state"] or None
        allocation = arguments["--allocation"] or None
        # NOTE(review): duplicate assignment of `cluster` kept as-is.
        cluster = arguments["--cluster"] or None
        print(Cluster.computeset(computeset_id, cluster, state, allocation))
    elif arguments["start"]:
        clusterid = arguments["CLUSTERID"]
        numnodes = arguments["--count"] or None
        computenodeids = arguments["COMPUTENODEIDS"] or None
        # check allocation information for the cluster
        cluster = Cluster.list(clusterid, format='rest')
        try:
            allocations = cluster[0]['allocations']
        except:
            # print (cluster)
            Console.error("No allocation available for the specified cluster."
                          "Please check with the comet help team",
                          traceflag=False)
            return ""
        # checking whether the computesetids is in valid hostlist format
        if computenodeids:
            try:
                hosts_param = hostlist.expand_hostlist(computenodeids)
            except hostlist.BadHostlist:
                Console.error("Invalid hosts list specified!",
                              traceflag=False)
                return ""
        elif numnodes:
            try:
                param = int(numnodes)
            except ValueError:
                Console.error("Invalid count value specified!",
                              traceflag=False)
                return ""
            if param <= 0:
                Console.error("count value has to be greather than zero",
                              traceflag=False)
                return ""
            numnodes = param
        else:
            Console.error("You have to specify either the count of nodes, "
                          "or the names of nodes in hostlist format",
                          traceflag=False)
            return ""
        walltime = arguments["--walltime"] or None
        allocation = arguments["--allocation"] or None
        reservation = arguments["--reservation"] or None
        # validating walltime and allocation parameters
        walltime = Cluster.convert_to_mins(walltime)
        if not walltime:
            print("No valid walltime specified. "
                  "Using system default (2 days)")
        if not allocation:
            if len(allocations) == 1:
                allocation = allocations[0]
            else:
                allocation = Cluster.display_get_allocation(allocations)
        # issuing call to start a computeset with specified parameters
        print(Cluster.computeset_start(clusterid,
                                       computenodeids,
                                       numnodes,
                                       allocation,
                                       reservation,
                                       walltime))
    elif arguments["terminate"]:
        computesetid = arguments["COMPUTESETID"]
        print(Cluster.computeset_terminate(computesetid))
    elif arguments["power"]:
        clusterid = arguments["CLUSTERID"] or None
        fuzzyparam = arguments["NODESPARAM"] or None
        # parsing nodesparam for proper action: an int is a computeset id,
        # a hostlist is a set of nodes, absent means the frontend node.
        if fuzzyparam:
            try:
                param = int(fuzzyparam)
                subject = 'COMPUTESET'
            except ValueError:
                param = fuzzyparam
                try:
                    hosts_param = hostlist.expand_hostlist(fuzzyparam)
                    subject = 'HOSTS'
                except hostlist.BadHostlist:
                    Console.error("Invalid hosts list specified!",
                                  traceflag=False)
                    return ""
        else:
            subject = 'FE'
            param = None
        if arguments["on"]:
            action = "on"
        elif arguments["off"]:
            action = "off"
        elif arguments["reboot"]:
            action = "reboot"
        elif arguments["reset"]:
            action = "reset"
        elif arguments["shutdown"]:
            action = "shutdown"
        else:
            action = None
        print(Cluster.power(clusterid, subject, param, action))
    elif arguments["console"]:
        clusterid = arguments["CLUSTERID"]
        linkonly = False
        if arguments["--link"]:
            linkonly = True
        nodeid = None
        if 'COMPUTENODEID' in arguments:
            nodeid = arguments["COMPUTENODEID"]
        Comet.console(clusterid, nodeid, linkonly)
    elif arguments["iso"]:
        if arguments["list"]:
            isos = (Comet.list_iso())
            idx = 0
            for iso in isos:
                # strip the "public/" prefix for display
                if iso.startswith("public/"):
                    iso = iso.split("/")[1]
                idx += 1
                print("{}: {}".format(idx, iso))
        if arguments["upload"]:
            isofile = arguments["PATHISOFILE"]
            isofile = os.path.abspath(isofile)
            if os.path.isfile(isofile):
                if arguments["--isoname"]:
                    filename = arguments["--isoname"]
                else:
                    filename = os.path.basename(isofile)
            else:
                print ("File does not exist - {}"
                       .format(arguments["PATHISOFILE"]))
                return ""
            print(Comet.upload_iso(filename, isofile))
        elif arguments["attach"]:
            isoidname = arguments["ISOIDNAME"]
            clusterid = arguments["CLUSTERID"]
            computenodeids = arguments["COMPUTENODEIDS"] or None
            print(Cluster.attach_iso(isoidname, clusterid, computenodeids))
        elif arguments["detach"]:
            clusterid = arguments["CLUSTERID"]
            computenodeids = arguments["COMPUTENODEIDS"] or None
            print(Cluster.detach_iso(clusterid, computenodeids))
    elif arguments["node"]:
        if arguments["info"]:
            clusterid = arguments["CLUSTERID"]
            nodeid = arguments["COMPUTENODEID"]
            print(Cluster.node_info(clusterid,
                                    nodeid=nodeid,
                                    format=output_format))
        elif arguments["rename"]:
            clusterid = arguments["CLUSTERID"]
            oldnames = Parameter.expand(arguments["OLDNAMES"])
            newnames = Parameter.expand(arguments["NEWNAMES"])
            if len(oldnames) != len(newnames):
                Console.error("Length of OLDNAMES and NEWNAMES "
                              "have to be the same",
                              traceflag=False)
                return ""
            else:
                for newname in newnames:
                    if newname.strip() == "":
                        Console.error("Newname cannot be empty string",
                                      traceflag=False)
                        return ""
                cluster_data = Cluster.list(clusterid, format="rest")
                if len(cluster_data) > 0:
                    computes = cluster_data[0]["computes"]
                    nodenames = [x["name"] for x in computes]
                else:
                    Console.error("Error obtaining the cluster information",
                                  traceflag=False)
                    return ""
                # check if new names ar not already taken
                # to be implemented
                # print (oldnames)
                # print (newnames)
                # print (nodenames)
                oldset = set(oldnames)
                newset = set(newnames)
                currentset = set(nodenames)
                # at least one OLDNAME does not exist
                if not oldset <= currentset:
                    Console.error("Not all OLDNAMES are valid",
                                  traceflag=False)
                    return ""
                else:
                    # those unchanged nodes
                    keptset = currentset - oldset
                    # duplication between name of unchanged nodes and
                    # the requested NEWNAMES
                    if keptset & newset != set():
                        Console.error("Not proceeding as otherwise introducing "
                                      "duplicated names",
                                      traceflag=False)
                    else:
                        for i in range(0, len(oldnames)):
                            oldname = oldnames[i]
                            newname = newnames[i]
                            print("%s -> %s" % (oldname, newname))
                        confirm = input("Confirm batch renaming (Y/y to confirm, "
                                        "any other key to abort):")
                        if confirm.lower() == 'y':
                            print("Conducting batch renaming")
                            for i in range(0, len(oldnames)):
                                oldname = oldnames[i]
                                newname = newnames[i]
                                print(Cluster.rename_node(clusterid,
                                                          oldname,
                                                          newname))
                        else:
                            print("Action aborted!")
    elif arguments["reservation"]:
        if arguments["create"] or \
                arguments["update"] or \
                arguments["delete"]:
            Console.info("Operation not supported. "
                         "Please contact XSEDE helpdesk for help!")
        if arguments["list"]:
            # hpcinfo is an admin-only endpoint; not in default config
            if "hpcinfo" in cometConf:
                hpcinfourl = cometConf["hpcinfo"]["endpoint"]
            else:
                Console.error("Admin feature not configured for this client",
                              traceflag=False)
                return ""
            ret = requests.get("%s/reservations/%s"
                               % (hpcinfourl, cometConf['active']))
            jobs = ret.json()
            result = Printer.write(jobs)
            print(result)
    return ""
def deploy(self, ips=None, name=None, user=None, playbooks=None,
           defines=None, ping_max=10, ping_sleep=10):
    """Deploy the big-data-stack to a previously stood up cluster located
    at `ips` with login user `user`.

    :param ips: the ip addresses of the cluster to deploy to
    :type ips: :class:`list` of :class:`str` IP addresses
    :param name: the name of the cluster
    :type name: :class:`str`
    :param user: the login username of the cluster
    :type user: :class:`str`
    :param playbooks: the list of playbooks to deploy. These are paths
        relative to the root directory of the BDS repository.
    :type playbooks: :class:`list` of :class:`str`
    :param defines: the overridden variables defined for each playbook
    :type defines: :class:`dict` from playbook name to :class:`dict` of
        variable name to value
    :param ping_max: the maximum number of time to attempt to ping
        the cluster during the verification step.
    :type ping_max: :class:`int`
    :param ping_sleep: the number of seconds to wait between each
        attempt to ping
    :type ping_sleep: :class:`int`
    :raises SanityCheckError: if the cluster cannot be reached via
        ansible ping within ``ping_max`` attempts
    """
    assert ips is not None
    assert ping_max > 0, ping_max
    assert ping_sleep > 0, ping_sleep

    name = name or os.getenv('USER') + '-' + os.path.basename(self.path)
    user = user or 'defaultuser'
    playbooks = playbooks or list()
    defines = defines or defaultdict(list)

    # Generate the ansible inventory for the target nodes and persist it.
    Console.debug_msg('Calling mk-inventory in {}'.format(self.path))
    cmd = ['python', 'mk-inventory', '-n', name] + ips
    inventory = Subprocess(cmd, cwd=self.path, env=self._env)
    Console.debug_msg('Writing inventory file')
    Console.debug_msg('\n    ' +
                      ('\n' + 4 * ' ').join(inventory.stdout.split('\n')))
    with open(os.path.join(self.path, 'inventory.txt'), 'w') as fd:
        fd.write(inventory.stdout)

    # Verify all nodes are reachable before running any playbook.
    Console.info('Waiting for cluster to be accessible')
    ping_ok = False
    # BUG FIX: was `xrange`, which does not exist on Python 3 and would
    # raise NameError; this file otherwise uses Python 3 semantics.
    for i in range(ping_max):
        Console.debug_msg('Attempt {} / {}'.format(i + 1, ping_max))
        try:
            Subprocess(['ansible', 'all', '-m', 'ping', '-u', user],
                       cwd=self.path, env=self._env,
                       stdout=None, stderr=None)
            ping_ok = True
            Console.debug_msg('Success!')
            break
        except SubprocessError:
            Console.debug_msg(
                'Failure, sleeping for {} seconds'.format(ping_sleep))
            time.sleep(ping_sleep)

    if not ping_ok:
        msg = 'Ping Failure'
        reason = 'Unable to connect to all nodes'
        Console.error(' '.join([msg, reason]))
        raise SanityCheckError(message=msg, reason=reason)

    # Run each playbook, passing any per-playbook variable overrides.
    basic_command = ['ansible-playbook', '-u', user]
    Console.debug_msg('Running playbooks {}'.format(playbooks))
    for play in playbooks:
        cmd = basic_command + [play]
        define = ['{}={}'.format(k, v) for k, v in defines[play]]
        if define:
            cmd.extend(['-e', ','.join(define)])
        Console.info('Running playbook {} with overrides {}'.format(
            play, define))
        Subprocess(cmd, cwd=self.path, env=self._env,
                   stdout=None, stderr=None)
def boot_vm(self, name, group=None, image=None, flavor=None, cloud=None,
            cert_thumbprint=None, pub_key_path=None, cert_path=None,
            pfx_path=None, secgroup=None, meta=None, nics=None, **kwargs):
    """
    Boots up a new VM Instance.
    Steps involved: creating a hosted(Cloud) Service, adding the PFX
    certificate file, get default storage name, creating a
    configuration set, adding an endpoint(SSH by default), and finally
    creating a VM deployment

    :param name: Hosted Service Name and VM instance name
    :param group:
    :param image:
    :param flavor:
    :param cloud:
    :param cert_thumbprint:
    :param pub_key_path:
    :param cert_path:
    :param pfx_path:
    :param secgroup:
    :param meta:
    :param nics:
    :param kwargs:
    :return: the service/VM name
    """
    # Deployment region from cloudmesh.yaml; falls back to 'Central US'.
    location = ConfigDict(filename="cloudmesh.yaml")["cloudmesh"]["clouds"]["azure"]["default"]["location"] or 'Central US'
    try:
        self.provider.create_hosted_service(service_name=name,
                                            label=name,
                                            location=location)
    except:
        # Best-effort: failure (e.g. service already exists) is logged,
        # not raised; the deployment below is still attempted.
        Console.error("Failed to create hosted service in Azure: {0}".format(traceback.format_exc()))
    try:
        Console.info("service name: " + name)
        Console.info("location name: " + location)
        Console.info("cert_thumbprint: " + cert_thumbprint)
        Console.info("pub_key_path: " + pub_key_path)
        Console.info("cert_path: " + cert_path)
        Console.info("pfx_path:" + pfx_path)
        Console.info("Image:" + image)
        Console.info("Flavor:" + flavor)
        #Console.info("Certificate adding")
        # Disabled - not required to start Virtual Machine
        #self.add_certificate(name, pfx_path)
        #Console.info("Certificate added")
    except Exception as e:
        # The string concatenations above raise TypeError when any
        # argument is None; swallowed so logging cannot abort the boot.
        Console.warning("Console.info error: {0}".format(traceback.format_exc()))
    # Lazily create the storage account on first use.
    storage_name = self._get_storage_name()
    if storage_name is None:
        self._create_storage_account()
        storage_name = self._get_storage_name()
    media_link = 'https://{0}.blob.core.windows.net/vhds/{1}.vhd'.format(
        storage_name, name)
    os_hd = OSVirtualHardDisk(image, media_link)
    username = ConfigDict(filename="cloudmesh.yaml")["cloudmesh"]["clouds"]["azure"]["default"]["username"]
    password = ConfigDict(filename="cloudmesh.yaml")["cloudmesh"]["clouds"]["azure"]["default"]["password"]
    # Auto-generated Password in case of TBD
    if username.lower() in ["tbd"]:
        username = "******";
    if password.lower() in ["tbd"]:
        password = generate_password(16)
    # NOTE(review): the following statement is corrupted by credential
    # redaction ("******" residue) and is not valid Python as written —
    # restore the original logging call from version control.
    Console.info("Username: "******"password: "******"blob storage location: {0} ".format(media_link))
    try:
        # NOTE(review): `linux_config` and `network` are not defined in
        # the visible code — their construction appears to have been
        # lost to the same redaction; confirm against VCS history.
        vm_create_result = self.provider.create_virtual_machine_deployment(service_name=name,
                                                                           deployment_name=name,
                                                                           deployment_slot='production',
                                                                           label=name,
                                                                           role_name=name,
                                                                           system_config=linux_config,
                                                                           os_virtual_hard_disk=os_hd,
                                                                           network_config=network,
                                                                           role_size=flavor)
        # pprint(vm_create_result)
        self.provider.wait_for_operation_status(vm_create_result.request_id,
                                                timeout=30)
        Console.info("{0} created successfully".format(name))
    except:
        Console.error("Failed to start Azure Virtual Machine: {0}".format(traceback.format_exc()))
    return name
def boot_vm(self, name, group=None, image=None, flavor=None, cloud=None,
            cert_thumbprint=None, pub_key_path=None, cert_path=None,
            pfx_path=None, secgroup=None, meta=None, nics=None, **kwargs):
    """
    Boots up a new VM Instance.
    Steps involved: creating a hosted(Cloud) Service, adding the PFX
    certificate file, get default storage name, creating a
    configuration set, adding an endpoint(SSH by default), and finally
    creating a VM deployment

    NOTE(review): this is a near-duplicate of the Console-based
    boot_vm variant elsewhere in this file, using pprint debugging and
    without the lazy storage-account creation — candidates for merging.

    :param name: Hosted Service Name and VM instance name
    :param group:
    :param image:
    :param flavor:
    :param cloud:
    :param cert_thumbprint:
    :param pub_key_path:
    :param cert_path:
    :param pfx_path:
    :param secgroup:
    :param meta:
    :param nics:
    :param kwargs:
    :return: the service/VM name
    """
    # Deployment region from cloudmesh.yaml; falls back to 'Central US'.
    location = ConfigDict(filename="cloudmesh.yaml")["cloudmesh"]["clouds"]["azure"]["default"]["location"] or 'Central US'
    try:
        self.provider.create_hosted_service(service_name=name,
                                            label=name,
                                            location=location)
    except:
        # Logged only; deployment below is still attempted.
        traceback.print_exc()
        pprint("Error creating hosted service")
    pprint("service name"+name)
    pprint("location name"+location)
    pprint("cert_thumbprint"+cert_thumbprint)
    pprint("pub_key_path"+pub_key_path)
    pprint("cert_path"+cert_path)
    pprint("pfx_path"+pfx_path)
    pprint("Image"+image)
    pprint("Flavor"+flavor)
    pprint("Certificate adding")
    # Unlike the sibling variant, the certificate upload is active here.
    self.add_certificate(name, pfx_path)
    pprint("Certificate added")
    storage_name = self._get_storage_name()
    media_link = 'https://{0}.blob.core.windows.net/vhds/{1}.vhd'.format(
        storage_name, name)
    os_hd = OSVirtualHardDisk(image, media_link)
    username = ConfigDict(filename="cloudmesh.yaml")["cloudmesh"]["clouds"]["azure"]["default"]["username"]
    password = ConfigDict(filename="cloudmesh.yaml")["cloudmesh"]["clouds"]["azure"]["default"]["password"]
    # NOTE(review): the following statement is corrupted by credential
    # redaction ("******" residue) and is not valid Python as written —
    # restore the original logging call from version control.
    pprint("Username:"******"password:"******"Starting the VM on ", media_link)
    try:
        # NOTE(review): `linux_config` and `network` are not defined in
        # the visible code — their construction appears to have been
        # lost to the same redaction; confirm against VCS history.
        vm_create_result = self.provider.create_virtual_machine_deployment(service_name=name,
                                                                           deployment_name=name,
                                                                           deployment_slot='production',
                                                                           label=name,
                                                                           role_name=name,
                                                                           system_config=linux_config,
                                                                           os_virtual_hard_disk=os_hd,
                                                                           network_config=network,
                                                                           role_size=flavor)
        # pprint(vm_create_result)
        self.provider.wait_for_operation_status(vm_create_result.request_id,
                                                timeout=30)
        Console.info("VM boot up successful.ok.")
    except:
        traceback.print_exc()
        pprint("Exception in starting the VM")
    return name