def test_provider_vm_terminate(self):
    """Destroy the test vm and verify, per cloud, that it is terminated.

    Polls ``provider.info`` for up to ``termination_timeout`` seconds,
    then asserts the cloud-specific post-condition for termination.
    """
    HEADING()
    name = str(Name())
    Benchmark.Start()
    data = provider.destroy(name=name)
    Benchmark.Stop()
    pprint(data)
    termination_timeout = 360
    elapsed = 0  # renamed from `time` to avoid shadowing the time module
    while elapsed <= termination_timeout:
        sleep(5)
        elapsed += 5
        if cloud == 'chameleon' and len(provider.info(name=name)) == 0:
            break
        elif cloud == 'google':
            break
        elif cloud == 'aws':
            # hoisted: query the provider once per poll instead of twice
            info = provider.info(name=name)
            if len(info) == 0 or info[0]["cm"]["status"] in ['TERMINATED']:
                break
        elif cloud == 'azure':
            try:
                provider.info(name=name)
            except Exception:
                # if there is an exception that means the group has been
                # deleted
                break
    # print(provider.info(name=name))
    if cloud == 'chameleon':
        assert len(provider.info(name=name)) == 0
    elif cloud == 'google':
        cm = CmDatabase()
        vm = cm.find_name(name, kind="vm")[0]
        assert 'status' in vm and vm['status'] == 'DELETED'
    elif cloud == 'aws':
        # BUG FIX: the original expression
        #     assert len(data) == 0 if data else True or (...)
        # parsed as `(len(data) == 0) if data else (True or ...)`, so the
        # status check was dead code and the assert failed for any
        # non-empty `data`.  Intended semantics: pass when nothing came
        # back, or when there is no 'cm' section, or when the status shows
        # the vm is (being) terminated.
        if data and data[0].get('cm', None) is not None:
            assert data[0]["cm"]["status"] in ['BOOTING', 'TERMINATED']
    elif cloud == 'azure':
        try:
            provider.info(name=name)
        except Exception:
            # if there is an exception that means the group has been
            # deleted
            pass
    elif cloud == 'oracle':
        info = provider.info(name)
        assert info is None or info[0]['_lifecycle_state'] in [
            'TERMINATED'
        ]
    else:
        raise NotImplementedError
def test_provider_vm_wait(self):
    """Wait until the test vm is reachable; fail if the wait times out."""
    HEADING()
    vm_name = str(Name())
    Benchmark.Start()
    database = CmDatabase()
    entry = database.find_name(vm_name, kind="vm")[0]
    assert provider.wait(vm=entry), "cms wait timed out ..."
    Benchmark.Stop()
def image(self, name=None):
    """
    Look up an image by name in the cloudmesh database.

    :param name: the name of the image
    :return: the matching image entries found by the database
             (presumably a list of dicts — TODO confirm against
             CmDatabase.find_name)
    """
    database = CmDatabase()
    return database.find_name(name, kind='image')
def test_provider_vm_ssh(self):
    """Run an echo command on the test vm via ssh and verify its output."""
    HEADING()
    vm_name = str(Name())
    Benchmark.Start()
    database = CmDatabase()
    entry = database.find_name(vm_name, kind="vm")[0]
    data = provider.ssh(vm=entry, command='\"echo IAmAlive\"')
    print(data)
    assert 'IAmAlive' in data
    Benchmark.Stop()
    VERBOSE(data)
def list(self, name=None):
    """
    List entries in the registry.

    :param name: name of a registered server; if None, all registered
                 servers are listed
    :return: list of matching registry entries
    """
    cm = CmDatabase()
    # BUG FIX: `name == None` replaced with the idiomatic identity check
    # `name is None` (PEP 8; `==` can be overridden by operand types).
    if name is None:
        entries = cm.find(cloud="local", kind="registry")
    else:
        entries = cm.find_name(name=name, kind="registry")
    return entries
def list(self, name=None):
    """
    list entries in the registry

    :param name: name of registered server. If not passed will list all
                 registered servers.
    :return: list of registered server(s)
    """
    cm = CmDatabase()
    # BUG FIX: `name == None` replaced with the idiomatic identity check
    # `name is None` (PEP 8; `==` can be overridden by operand types).
    if name is None:
        entries = cm.find(cloud="local", kind="registry")
    else:
        entries = cm.find_name(name=name, kind="registry")
    return entries
class TestMongo:
    """Smoke tests exercising CmDatabase name-lookup helpers."""

    def setup(self):
        # A fresh database handle and a deterministic Name for every test.
        self.database = CmDatabase()
        self.name = Name(experiment="exp",
                         group="grp",
                         user="******",
                         kind="vm",
                         counter=1)

    def test_10_find_in_collection(self):
        HEADING()
        found = self.database.find_name("CC-CentOS7")
        pprint(found)

    def test_11_find_in_collections(self):
        HEADING()
        found = self.database.find_names("CC-CentOS7,CC-CentOS7-1811")
        pprint(found)

    def test_12_find_in_collection(self):
        HEADING()
        found = self.database.name_count("CC-CentOS7")
        pprint(found)
class Provider(VolumeABC):
    """Cloudmesh volume provider for OpenStack clouds (e.g. Chameleon).

    Implements create / list / attach / detach / delete / tag operations
    for block-storage volumes through an ``openstack`` SDK connection.
    """

    kind = "openstack"

    # Sample cloudmesh.yaml configuration entry for this provider.
    # NOTE: runtime data consumed by the config machinery — do not reword.
    sample = """
    cloudmesh:
      volume:
        openstack:
          cm:
            active: true
            heading: Chameleon
            host: chameleoncloud.org
            label: chameleon
            kind: openstack
            version: train
            service: volume
          credentials:
            auth:
              username: TBD
              password: TBD
              auth_url: https://kvm.tacc.chameleoncloud.org:5000/v3
              project_id: TBD
              project_name: cloudmesh
              user_domain_name: Default
            region_name: KVM@TACC
            interface: public
            identity_api_version: '3'
            key_path: TBD/id_rsa.pub
          default:
            size: 1
            volume_type: __DEFAULT__
    """

    # Known OpenStack vm states (not consulted in this class's methods;
    # presumably used by callers — TODO confirm).
    vm_state = [
        'ACTIVE',
        'BUILDING',
        'DELETED',
        'ERROR',
        'HARD_REBOOT',
        'PASSWORD',
        'PAUSED',
        'REBOOT',
        'REBUILD',
        'RESCUED',
        'RESIZED',
        'REVERT_RESIZE',
        'SHUTOFF',
        'SOFT_DELETED',
        'STOPPED',
        'SUSPENDED',
        'UNKNOWN',
        'VERIFY_RESIZE'
    ]

    # Column layout used by the cloudmesh printer for volume listings.
    output = {
        "volume": {
            "sort_keys": ["cm.name"],
            "order": ["cm.name",
                      "cm.cloud",
                      "cm.kind",
                      "availability_zone",
                      "created_at",
                      "size",
                      "status",
                      "id",
                      "volume_type"
                      ],
            "header": ["Name",
                       "Cloud",
                       "Kind",
                       "Availability Zone",
                       "Created At",
                       "Size",
                       "Status",
                       "Id",
                       "Volume Type"
                       ],
        }
    }

    def __init__(self, name):
        """
        Initialize provider. The default parameters are read from the
        configuration file that is defined in yaml format.

        :param name: name of cloud
        """
        self.cloud = name
        self.config = Config()["cloudmesh.volume.openstack.credentials"]
        self.defaults = Config()["cloudmesh.volume.openstack.default"]
        self.cm = CmDatabase()

    def update_dict(self, results):
        """
        Add a cloudmesh "cm" dict to each dict in the list elements.
        Typically this method is used internally.

        :param results: the original dicts (each must carry a 'name' key)
        :return: the list with the modified dicts, or None if results is None
        """
        if results is None:
            return None
        d = []
        for entry in results:
            volume_name = entry['name']
            if "cm" not in entry:
                entry['cm'] = {}
            entry["cm"].update({
                "cloud": self.cloud,
                "kind": "volume",
                "name": volume_name,
            })
            d.append(entry)
        return d

    def status(self, volume_name):
        """
        Get the volume status, such as "in-use" or "available".

        :param volume_name: Volume name
        :return: list with a single cm-annotated volume dict
        """
        con = openstack.connect(**self.config)
        result = con.get_volume(name_or_id=volume_name)
        result = [result]
        result = self.update_dict(result)
        return result

    def list(self, **kwargs):
        """
        List volumes:
        if NAME (volume_name) is specified, return info for NAME only;
        otherwise return info for all volumes.

        :param kwargs: may contain NAME/NAMES (volume name(s)),
                       vm (vm name), refresh (bool)
        :return: list of cm-annotated volume dicts
        """
        try:
            if kwargs and kwargs['refresh'] is False:
                # refresh=False: serve the answer from the local database.
                result = self.cm.find(cloud=self.cloud, kind='volume')
                for key in kwargs:
                    if key == 'NAME' and kwargs['NAME']:
                        result = self.cm.find_name(name=kwargs['NAME'])
                    elif key == 'NAMES' and kwargs['NAMES']:
                        result = self.cm.find_names(names=kwargs['NAMES'])
            else:
                # Otherwise query the cloud directly.
                # NOTE(review): the kwargs['NAME'] / kwargs['vm'] lookups
                # raise KeyError when the key is absent; that lands in the
                # broad except below and surfaces as RuntimeError — confirm
                # callers always pass both keys.
                con = openstack.connect(**self.config)
                results = con.list_volumes()
                if kwargs and kwargs['NAME']:
                    result = con.get_volume(name_or_id=kwargs["NAME"])
                    result = [result]
                    result = self.update_dict(result)
                if kwargs and kwargs['vm']:
                    # Keep only the volumes attached to the given vm.
                    server_id = con.get_server_id(name_or_id=kwargs['vm'])
                    vol_list = []
                    for entry in results:
                        attach_list = entry['attachments']
                        if len(attach_list) != 0:
                            if attach_list[0]['server_id'] == server_id:
                                vol_list.append(entry)
                    result = self.update_dict(vol_list)
                else:
                    # NOTE(review): when NAME was given but vm was falsy,
                    # this branch overwrites the NAME result with the full
                    # listing — looks unintended; verify desired behavior.
                    result = self.update_dict(results)
        except Exception as e:
            Console.error("Problem listing volumes", traceflag=True)
            print(e)
            raise RuntimeError
        return result

    def create(self, **kwargs):
        """
        Create a new volume with default volume type __DEFAULT__.
        Default parameters are read from self.config.

        :param kwargs: contains volume NAME, size, (optional) volume_type
        :return: list with a single cm-annotated volume dict
        """
        try:
            con = openstack.connect(**self.config)
            arguments = dotdict(kwargs)
            # Fall back to configured defaults for anything unspecified.
            if arguments.volume_type is None:
                arguments.volume_type = self.defaults["volume_type"]
            if arguments.size is None:
                arguments.size = self.defaults["size"]
            r = con.create_volume(name=arguments.NAME,
                                  size=arguments.size,
                                  volume_type=arguments.volume_type
                                  )
            r = [r]
            result = self.update_dict(r)
        except Exception as e:
            Console.error("Problem creating volume", traceflag=True)
            print(e)
            raise RuntimeError
        return result

    def attach(self, names=None, vm=None):
        """
        Attach a given volume to a given instance.

        :param names: names of volumes (only names[0] is attached)
        :param vm: instance name
        :return: refreshed listing of the attached volume
        """
        try:
            con = openstack.connect(**self.config)
            server = con.get_server(vm)
            volume = con.get_volume(name_or_id=names[0])
            con.attach_volume(server,
                              volume,
                              device=None,
                              wait=True,
                              timeout=None)
        except Exception as e:
            Console.error("Problem attaching volume", traceflag=True)
            print(e)
            raise RuntimeError
        return self.list(NAME=names[0], refresh=True)

    def detach(self, name=None):
        """
        Detach a given volume from the instance it is attached to.

        :param name: volume name
        :return: dict with the volume's cm data and key attributes
        """
        try:
            con = openstack.connect(**self.config)
            volume = con.get_volume(name_or_id=name)
            # The server is discovered from the volume's first attachment.
            attachments = volume['attachments']
            server = con.get_server(attachments[0]['server_id'])
            con.detach_volume(server, volume, wait=True, timeout=None)
        except Exception as e:
            Console.error("Problem detaching volume", traceflag=True)
            print(e)
            raise RuntimeError
        # return of self.list(NAME=NAME)[0] throwing error:cm attribute
        # not found inside CmDatabase.py. So manipulating result as below
        # (a flat dict is rebuilt from the refreshed listing instead).
        t = self.list(NAME=name, refresh=True)[0]
        result = {}
        result.update(
            {"cm": t["cm"],
             "availability_zone": t["availability_zone"],
             "created_at": t["created_at"],
             "size": t["size"],
             "id": t["id"],
             "status": t["status"],
             "volume_type": t["volume_type"]
             }
        )
        return result

    def delete(self, name=None):
        """
        Delete one volume.

        :param name: volume name
        :return: cm-annotated listing of the remaining volumes
        """
        try:
            con = openstack.connect(**self.config)
            con.delete_volume(name_or_id=name)
            # Return the refreshed full listing rather than the deleted item.
            results = con.list_volumes()
            result = self.update_dict(results)
        except Exception as e:
            Console.error("Problem deleting volume", traceflag=True)
            print(e)
            raise RuntimeError
        return result

    def add_tag(self, **kwargs):
        """
        Add a tag (key/value metadata pair) to a volume.

        :param kwargs:
                    NAME: name of volume
                    key: name of tag
                    value: value of tag
        :return: dict with the volume's cm data and key attributes
        """
        try:
            con = openstack.connect(**self.config)
            name = kwargs['NAME']
            key = kwargs['key']
            value = kwargs['value']
            # Tags map onto OpenStack volume metadata.
            metadata = {key: value}
            con.update_volume(name_or_id=name, metadata=metadata)
        except Exception as e:
            Console.error("Problem in tagging volume", traceflag=True)
            print(e)
            raise RuntimeError
        # return of self.list(NAME=NAME)[0] throwing error:cm attribute
        # not found inside CmDatabase.py. So manipulating result as below
        t = self.list(NAME=name, refresh=True)[0]
        result = {}
        result.update(
            {"cm": t["cm"],
             "availability_zone": t["availability_zone"],
             "created_at": t["created_at"],
             "size": t["size"],
             "id": t["id"],
             "status": t["status"],
             "volume_type": t["volume_type"]
             }
        )
        return result

    def migrate(self,
                name=None,
                fvm=None,
                tvm=None,
                fregion=None,
                tregion=None,
                fservice=None,
                tservice=None,
                fcloud=None,
                tcloud=None,
                cloud=None,
                region=None,
                service=None):
        """
        Migrate volume from one vm to another vm. (Not implemented.)

        :param name: name of volume
        :param fvm: name of vm where volume will be moved from
        :param tvm: name of vm where volume will be moved to
        :param fregion: the region where the volume will be moved from
        :param tregion: region where the volume will be moved to
        :param fservice: the service where the volume will be moved from
        :param tservice: the service where the volume will be moved to
        :param fcloud: the provider where the volume will be moved from
        :param tcloud: the provider where the volume will be moved to
        :param cloud: the provider where the volume will be moved within
        :param region: the region where the volume will be moved within
        :param service: the service where the volume will be moved within
        :return: dict
        """
        raise NotImplementedError

    def sync(self, volume_id=None, zone=None, cloud=None):
        """
        Sync contents of one volume to another volume. (Not implemented.)

        :param volume_id: id of volume A
        :param zone: zone where new volume will be created
        :param cloud: the provider where volumes will be hosted
        :return: str
        """
        raise NotImplementedError
class Provider(VolumeABC):
    """Cloudmesh volume provider for multipass.

    A "volume" here is a host directory that is mounted into multipass
    vms; state is tracked in the cloudmesh (Mongo) database.
    """

    kind = "volume"

    # Sample cloudmesh.yaml configuration entry for this provider.
    # NOTE: runtime data consumed by the config machinery — do not reword.
    sample = """
    cloudmesh:
      volume:
        {name}:
          cm:
            active: '1'
            heading: multipass
            host: TBD
            kind: multipass
            version: TBD
            service: volume
          default:
            path: /Volumes/multipass
    """

    # Column layout used by the cloudmesh printer for volume listings.
    output = {
        "volume": {
            "sort_keys": ["cm.name"],
            "order": ["cm.name",
                      "cm.cloud",
                      "cm.kind",
                      "State",
                      "path",
                      'machine_path',
                      "AttachedToVm",
                      "tags",
                      "time"
                      ],
            "header": ["Name",
                       "Cloud",
                       "Kind",
                       "State",
                       "Path",
                       'Machine Path',
                       "AttachedToVm",
                       "Tags",
                       "Update Time"
                       ]
        }
    }

    def generate_volume_info(self, NAME, path):
        """
        Generate a fresh volume-info dict.

        info['AttachedToVm'] is a list of vm names where the volume is
        attached to (a volume can attach to multiple vms and a vm can have
        multiple attachments). info['machine_path'] is the volume path in
        the vm. info['time'] is the created time; it is later overwritten
        as the updated time.

        :param NAME: volume name
        :param path: volume path
        :return: dict
        """
        info = {
            'tags': [],
            'name': NAME,
            'path': path,
            'AttachedToVm': [],
            'State': 'available',
            'machine_path': None,
            'time': datetime.datetime.now()
        }
        return info

    def update_volume_after_attached_to_vm(self, info, vms):
        """
        Update volume info after it is attached to a vm.

        info['AttachedToVm'] is the list of vm names the volume is attached
        to, info['machine_path'] is the volume path inside the vm, and
        info['time'] is refreshed to the current time.

        :param info: volume info list (one dict) from the database
        :param vms: vms the volume is attached to
        :return: list of one dict
        """
        path = info[0]['path']
        path_list = path.split(sep='/')
        # multipass mounts host paths under ~/Home/... inside the vm;
        # path components after the third are reused for the vm-side path.
        machine_path_list = ["~", "Home"]
        machine_path_list.extend(path_list[3:])
        info[0]['machine_path'] = "/".join(machine_path_list)
        info[0]['AttachedToVm'] = vms
        info[0]['State'] = 'in-use'
        info[0]['time'] = datetime.datetime.now()
        return info

    def update_volume_after_detach(self, info, vms):
        """
        Update volume info after detaching from a vm.

        info['AttachedToVm'] is the remaining list of vm names; the state
        reverts to 'available' once no vm holds the volume; info['time']
        is refreshed to the current time.

        :param info: volume info list (one dict)
        :param vms: vms still attached
        :return: list of one dict
        """
        info[0]['AttachedToVm'] = vms
        if len(vms) == 0:
            info[0]['machine_path'] = None
            info[0]['State'] = 'available'
        info[0]['time'] = datetime.datetime.now()
        return info

    def update_volume_tag(self, info, key, value):
        """
        Update a volume tag.

        A tag is a {key: value} pair and a volume can have multiple tags.
        Giving an existing tag name updates its value; an empty value
        removes the tag; an unknown name appends a new tag.

        :param info: volume info list (one dict)
        :param key: tag name
        :param value: tag value ('' removes the tag)
        :return: list of one dict
        """
        keys = []
        for tag in info[0]['tags']:
            if key == list(tag.keys())[0]:
                if len(value) == 0:
                    # empty value means "delete this tag"
                    info[0]['tags'].remove(tag)
                    keys.append(list(tag.keys())[0])
                else:
                    tag.update({key: value})
                    keys.append(list(tag.keys())[0])
        if key not in keys:
            tag = {key: value}
            info[0]['tags'].append(tag)
        info[0]['time'] = datetime.datetime.now()
        return info

    def __init__(self, name):
        """
        Initialize provider. Set cloudtype to "multipass", read the
        default dict, and create a cloudmesh database object.

        :param name: name of cloud
        """
        self.cloud = name
        self.cloudtype = "multipass"
        config = Config()
        self.default = config[f"cloudmesh.volume.{self.cloud}.default"]
        self.cm = CmDatabase()

    def update_dict(self, elements, kind=None):
        """
        Add a cloudmesh "cm" dict to each dict in the list.

        :param elements: the list of original dicts. If elements is a
                         single dict a list with a single element is
                         returned.
        :param kind: "multipass" (currently unused)
        :return: the list with the modified dicts, or None if elements is
                 None
        """
        if elements is None:
            return None
        d = []
        for element in elements:
            if "cm" not in element.keys():
                element['cm'] = {}
            element["cm"].update({
                "kind": "volume",
                "cloud": self.cloud,
                "name": element['name'],
            })
            d.append(element)
        return d

    def create(self, **kwargs):
        """
        Create a new volume (a directory under the configured path).
        Default parameters come from self.default, such as:
        path="/Users/username/multipass".
        Note: Windows users should also use "/" in the file path.

        :param NAME (string): the name of volume
        :param path (string): path of volume
        :return: list of one cm-annotated dict, or None if mkdir failed
        """
        # Fill in anything missing or None from the configured defaults.
        for key in self.default.keys():
            if key not in kwargs.keys():
                kwargs[key] = self.default[key]
            elif kwargs[key] is None:
                kwargs[key] = self.default[key]
        name = kwargs['NAME']
        path = Path(kwargs['path'])
        new_path = Path(f'{path}/{name}')
        # os.system returns 0 on success; only then is info generated.
        result = os.system(f"mkdir {new_path}")
        if result == 0:
            result = self.generate_volume_info(NAME=name, path=kwargs['path'])
            result = self.update_dict([result])
        return result

    def delete(self, name):
        """
        Delete a volume (remove its directory and mark it 'deleted').

        :param name: volume name
        :return: list of one cm-annotated dict
        """
        result = self.cm.find_name(name)
        path = result[0]['path']
        delete_path = Path(f'{path}/{name}')
        # NOTE(review): os.system does not raise on failure — the except
        # below can only trigger on the subsequent dict operations, so a
        # failed rmdir still marks the volume 'deleted'. Confirm intent.
        try:
            os.system(f"rmdir {delete_path}")
            result[0]['State'] = 'deleted'
            result = self.update_dict(result)
        except:
            Console.error("volume is either not empty or not exist")
        return result

    def list(self, **kwargs):
        """
        List volumes:
        if NAME (volume name) is specified, return info of NAME;
        if NAME is not specified, return info of all volumes under the
        current cloud;
        if vm is specified, return all volumes attached to the vm;
        if region (path) is specified, return all volumes in that region,
        i.e. /Users/username/multipass.

        :param NAME: name of volume
        :param vm: name of vm
        :param region: for multipass, it is the same as "path"
        :return: list of dicts from the database
        """
        if kwargs:
            result = self.cm.find(cloud='multipass', kind='volume')
            for key in kwargs:
                if key == 'NAME' and kwargs['NAME']:
                    result = self.cm.find_name(name=kwargs['NAME'])
                elif key == 'NAMES' and kwargs['NAMES']:
                    result = self.cm.find_names(names=kwargs['NAMES'])
                elif key == 'vm' and kwargs['vm']:
                    result = self.cm.find(collection=f"{self.cloud}-volume",
                                          query={'AttachedToVm': kwargs['vm']})
                elif key == 'region' and kwargs['region']:
                    result = self.cm.find(collection=f"{self.cloud}-volume",
                                          query={'path': kwargs['region']})
        else:
            result = self.cm.find(cloud='multipass', kind='volume')
        return result

    def _get_vm_status(self, name=None) -> dict:
        """
        Get the status of a multipass vm via `multipass info`.

        :param name (string): vm name
        :return: dict with 'name' and 'status'
        """
        dict_result = {}
        result = Shell.run(f"multipass info {name} --format=json")
        if f'instance "{name}" does not exist' in result:
            dict_result = {
                'name': name,
                'status': "instance does not exist"
            }
        else:
            result = json.loads(result)
            dict_result = {
                'name': name,
                'status': result["info"][name]['State']
            }
        return dict_result

    def attach(self, names, vm):
        """
        Attach one or more volumes to a vm by mounting their paths.
        Returns the updated volume info; "AttachedToVm" lists the vms the
        volume is attached to.

        :param names (string): names of volumes
        :param vm (string): name of vm
        :return: dict (info of the first volume processed)
        """
        results = []
        for name in names:
            volume_info = self.cm.find_name(name)
            if volume_info and volume_info[0]['State'] != "deleted":
                vms = volume_info[0]['AttachedToVm']
                path = volume_info[0]['path']
                if vm in vms:
                    Console.error(f"{name} already attached to {vm}")
                else:
                    result = self.mount(path=f"{path}/{name}", vm=vm)
                    mounts = result['mounts']
                    # Only record the attachment if multipass reports the
                    # mount as present.
                    if f"{path}/{name}" in mounts.keys():
                        vms.append(vm)
                        result = self.update_volume_after_attached_to_vm(
                            info=volume_info, vms=vms)
                        results.append(result)
            else:
                Console.error(
                    "volume is not existed or volume had been deleted")
        # NOTE(review): raises IndexError when nothing was attached
        # (results empty) — confirm callers guard against this.
        return results[0]

    def mount(self, path=None, vm=None):
        """
        Mount a volume path into a vm.

        :param path (string): path of volume
        :param vm (string): name of vm
        :return: dict (mount status of the vm)
        """
        os.system(f"multipass mount {path} {vm}")
        dict_result = self._get_mount_status(vm=vm)
        return dict_result

    def _get_mount_status(self, vm=None):
        """
        Get the mount status of a vm via `multipass info`.

        :param vm (string): name of vm
        :return: dict with 'name', 'status' and (if the vm exists) 'mounts'
        """
        result = Shell.run(f"multipass info {vm} --format=json")
        if f'instance "{vm}" does not exist' in result:
            dict_result = {
                'name': vm,
                'status': "instance does not exist"
            }
        else:
            result = json.loads(result)
            dict_result = {
                'name': vm,
                'status': result["info"][vm]['state'],
                'mounts': result["info"][vm]['mounts']
            }
        return dict_result

    def unmount(self, path=None, vm=None):
        """
        Unmount a volume path from a vm.

        :param path (string): path of volume
        :param vm (string): name of vm
        :return: dict (mount status of the vm)
        """
        os.system(f"multipass unmount {vm}:{path}")
        dict_result = self._get_mount_status(vm=vm)
        return dict_result

    def detach(self, name):
        """
        Detach a volume from all vms it is attached to. The vms under
        "AttachedToVm" are removed if the volume is successfully detached.

        :param name: name of volume to be detached
        :return: dict (updated volume info)
        """
        volume_info = self.cm.find_name(name)
        if volume_info and volume_info[0]['State'] != "deleted":
            vms = volume_info[0]['AttachedToVm']
            path = volume_info[0]['path']
            if len(vms) == 0:
                Console.error(f"{name} is not attached to any vm")
            else:
                removed = []
                for vm in vms:
                    result = self.unmount(path=f"{path}/{name}", vm=vm)
                    mounts = result['mounts']
                    if f"{path}/{name}" not in mounts.keys():
                        removed.append(vm)
                # Remove after iterating: never mutate `vms` while looping
                # over it.
                for vm in removed:
                    vms.remove(vm)
                result = self.update_volume_after_detach(volume_info, vms)
                return result[0]
        else:
            Console.error("volume does not exist or volume had been deleted")

    def add_tag(self, **kwargs):
        """
        Add a tag to a volume.
        If volume name is not specified, the tag is added to the last
        volume.

        :param NAME: name of volume
        :param key: name of tag
        :param value: value of tag
        :return: dict (updated volume info)
        """
        key = kwargs['key']
        value = kwargs['value']
        volume_info = self.cm.find_name(name=kwargs['NAME'])
        volume_info = self.update_volume_tag(info=volume_info, key=key,
                                             value=value)
        return volume_info[0]

    def status(self, name=None):
        """
        Get the volume state, such as "in-use", "available", "deleted".

        :param name: volume name
        :return: list of one dict (volume info)
        """
        volume_info = self.cm.find_name(name)
        if volume_info:
            # NOTE(review): `status` is assigned but never used; the full
            # volume_info is returned instead — confirm intent.
            status = volume_info[0]['State']
        else:
            Console.error("volume is not existed")
        return volume_info

    def migrate(self, **kwargs):
        """
        Migrate a volume from one vm to another vm. "region" is the volume
        path. If vm and volume are in the same region (path), migrate
        within the same region; if they are in different regions, migrate
        between the two regions (paths).

        :param NAME (string): the volume name
        :param vm (string): the vm name
        :return: dict
        """
        volume_name = kwargs['NAME']
        vm = kwargs['vm']
        volume_info = self.cm.find_name(name=volume_name)
        volume_attached_vm = volume_info[0]['AttachedToVm']
        vm_info = Shell.run(f"multipass info {vm} --format=json")
        vm_info = json.loads(vm_info)
        vm_status = vm_info["info"][vm]['state']
        if vm_status == 'running':
            # NOTE(review): detach(**param) / attach(**param, vm=vm) pass
            # a 'NAME' keyword, but detach/attach take positional
            # parameters `name` / `names` — this looks like it raises
            # TypeError at runtime; verify against the call sites.
            param = {'NAME': volume_name}
            self.detach(**param)
            self.attach(**param, vm=vm)
        try:
            for old_vm in volume_attached_vm:
                volume_info[0]['AttachedToVm'].remove(old_vm)
        except:
            pass
        volume_info[0]['AttachedToVm'].append(vm)
        return volume_info

    def sync(self, **kwargs):
        """
        Sync contents of one volume to another volume (rsync the second
        named volume into the first, then tag the first with
        "sync_with").

        :param names (list): list of volume names
        :return: list of dict
        """
        volume_1 = kwargs['NAMES'][0]
        volume_2 = kwargs['NAMES'][1]
        path1 = f"{self.cm.find_name(name=volume_1)[0]['path']}/{volume_1}/"
        path2 = f"{self.cm.find_name(name=volume_2)[0]['path']}/{volume_2}/"
        os.system(f"rsync -avzh {path2} {path1}")
        kwargs1 = {'NAME': volume_1, 'key': "sync_with", 'value': volume_2}
        volume_info1 = self.add_tag(**kwargs1)
        result = [volume_info1]
        return result
class Provider(VolumeABC): kind = "volume" sample = """ cloudmesh: volume: {name}: cm: active: true heading: AWS host: aws.com label: VAWAS1 kind: aws version: TBD service: volume default: volume_type: gp2 size: TBD encrypted: False region_name: {region_name} region: {availability_zone} snapshot: "None" credentials: EC2_SECURITY_GROUP: default EC2_ACCESS_ID: {aws_access_key_id} EC2_SECRET_KEY: {aws_secret_access_key} EC2_PRIVATE_KEY_FILE_PATH: {private_key_file_path} EC2_PRIVATE_KEY_FILE_NAME: {private_key_file_name} """ volume_status = ['in-use', 'available', 'creating', 'deleting'] output = { "volume": { "sort_keys": ["cm.name"], "order": [ "cm.name", "cm.cloud", "cm.kind", "cm.region", # "AvailabilityZone", # "CreateTime", # "Encrypted", "Size", # "SnapshotId", "State", # "VolumeId", # "Iops", "cm.tags", "VolumeType", # "created", "AttachedToVm", # "UpdateTime" ], "header": [ "Name", "Cloud", "Kind", "Region", # "AvailabilityZone", # "Create Time", # "Encrypted", "Size(GB)", # "SnapshotId", "Status", # "VolumeId", # "Iops", "Tags", "Volume Type", # "Created", "Attached To Vm", # "Update Time" ], } } def __init__(self, name=None): """ Initialize provider, create boto3 ec2 client, get the default dict. :param name: name of cloud """ self.cloud = name config = Config() self.default = config[f"cloudmesh.volume.{self.cloud}.default"] self.cred = config[f'cloudmesh.volume.{self.cloud}.credentials'] self.client = boto3.client( 'ec2', region_name=self.default['region_name'], aws_access_key_id=self.cred['EC2_ACCESS_ID'], aws_secret_access_key=self.cred['EC2_SECRET_KEY']) self.cm = CmDatabase() def update_dict(self, results): """ This function adds a cloudmesh cm dict to each dict in the list elements. For aws, we make region = AvailabilityZone. :param results: the original dicts. :param kind: for volume special attributes are added. This includes cloud, kind, name, region. 
:return: The list with the modified dicts """ # {'Volumes': # [ # { # 'Attachments': # [ # { # 'AttachTime': datetime.datetime(2020, 3, 16, 20, 0, # 35, tzinfo=tzutc()), # 'Device': '/dev/sda1', # 'InstanceId': 'i-0765529fec90ba56b', # 'State': 'attached', # 'VolumeId': 'vol-09db404935694e941', # 'DeleteOnTermination': True # } # ], # 'AvailabilityZone': 'us-east-2c', # 'CreateTime': datetime.datetime(2020, 3, 16, 20, 0, 35, # 257000, tzinfo=tzutc()), # 'Encrypted': False, # 'Size': 8, # 'SnapshotId': 'snap-085c8383cc8833286', # 'State': 'in-use', # 'VolumeId': 'vol-09db404935694e941', # 'Iops': 100, # 'Tags': # [{'Key': 'Name', # 'Value': 'xin-vol-3'}], # 'VolumeType': 'gp2' # }, # {...} # ] # } if results is None: return None d = [] elements = results['Volumes'] for entry in elements: tags = "" try: tags = entry['Tags'].copy() for item in entry['Tags']: if item['Key'] == 'Name': if item['Value'] == "": # Console.error(f"Please name volume # {entry['VolumeId']}") volume_name = " " elif item['Value'] == " ": # Console.error(f"Please name volume # {entry['VolumeId']}") volume_name = " " else: volume_name = item['Value'] tags.remove(item) else: # Console.error(f"Please name volume # {entry['VolumeId']}") volume_name = " " except: # Console.error(f"Please name volume {entry['VolumeId']}") volume_name = " " if "cm" not in entry: entry['cm'] = {} entry["cm"].update({ "cloud": self.cloud, "kind": "volume", "name": volume_name, "region": entry["AvailabilityZone"], "tags": tags }) d.append(entry) return d def vm_info(self, vm): """ This function find vm info through given vm name. :param vm: the name of vm. :return: dict """ vm_info = self.client.describe_instances(Filters=[ { 'Name': 'tag:Name', 'Values': [ vm, ] }, ]) return vm_info def find_vm_info_from_volume_name(self, volume_name=None): """ This function find vm info which the volume attached to through given volume name. Only implemented circumstance when a volume can only attach to one vm. 
(type iol volume could attach to multiple vms, not implemented) :param volume_name: the name of volume. :return: string """ volume = self.client.describe_volumes(Filters=[ { 'Name': 'tag:Name', 'Values': [ volume_name, ] }, ], ) elements = volume['Volumes'] for i in range(len(elements)): try: for item in elements[i]['Attachments']: vm_id = item['InstanceId'] instance = client.describe_instances(InstanceIds=[vm_id]) for tag in instance['Reservations'][0]['Instances'][0][ 'Tags']: if tag['Key'] == 'Name': vm_name = tag['Value'] return vm_name except: Console.error(f"{volume_name} does not attach to any vm") def update_AttachedToVm(self, data): """ This function update returned volume dict with result['Volumes'][i]['AttachedToVm'] = vm_name. "i" chould be more than 0 if volume could attach to multiple vm, but for now, one volume only attach to one vm. Only IOPS io1 volumes can attach to multiple vms (creating of io1 volume is not implemented) :param data: volume dict :return: dict """ elements = data['Volumes'] for i in range(len(elements)): elements[i]['AttachedToVm'] = [] try: for item in elements[i]['Attachments']: vm_id = item['InstanceId'] instance = self.client.describe_instances( InstanceIds=[vm_id]) for tag in instance['Reservations'][0]['Instances'][0][ 'Tags']: if tag['Key'] == 'Name': vm_name = tag['Value'] elements[i]['AttachedToVm'].append(vm_name) except: pass return data def find_volume_id(self, volume_name): """ This function find volume_id through volume_name :param volume_name: the name of volume :return: string """ volume = self.client.describe_volumes(Filters=[ { 'Name': 'tag:Name', 'Values': [ volume_name, ] }, ], ) volume_id = volume['Volumes'][0]['VolumeId'] return volume_id def find_vm_id(self, vm_name): """ This function find vm_id through vm_name :param vm_name: the name of vom :return: string """ instance = self.client.describe_instances(Filters=[ { 'Name': 'tag:Name', 'Values': [ vm_name, ] }, ], ) vm_id = 
instance['Reservations'][0]['Instances'][0]['InstanceId'] return vm_id def wait(self, time=None): """ This function waiting for volume to be updated :param time: time to wait in seconds :return: False """ Console.info("waiting for volume to be updated") sleep(time) return False def status(self, name): """ This function get volume status, such as "in-use", "available", "deleting" :param name :return: dict """ result = self.client.describe_volumes(Filters=[ { 'Name': 'tag:Name', 'Values': [ name, ] }, ], ) result = self.update_dict(result) # volume_status = volume['Volumes'][0]['State'] return result def create(self, **kwargs): """ This function create a new volume, with defalt parameters in self.default. default: {volume_type: gp2 size: 2 encrypted: False region: us-east-2a snapshot: "None"} :param NAME (string): the name of volume :param size (int): the size of volume (GB) :param volume_type: volume type :param region (string): availability zone of volume :return: dict """ for key in self.default.keys(): if key not in kwargs.keys(): kwargs[key] = self.default[key] elif kwargs[key] is None: kwargs[key] = self.default[key] result = self._create(**kwargs) result = self.update_dict(result) return result def _create(self, **kwargs): """ Create a volume. :param name (string): name of volume :param region (string): availability-zone :param encrypted (boolean): True|False :param size (integer): size of volume. Minimum size for st1 and sc1 is 500 GB. :param volume_type (string): type of volume. This can be gp2 for General Purpose SSD, io1 for Provisioned IOPS SSD (not implemented), st1 for Throughput Optimized HDD, sc1 for Cold HDD, or standard for Magnetic volumes. 
        :param snapshot (string): snapshot id
        :return: dict
        """
        # Provisioned-IOPS volumes are not supported by this driver.
        if kwargs['volume_type'] == 'io1':
            raise NotImplementedError
        # sc1/st1 cold/throughput HDD volumes have an AWS minimum size of 500 GB.
        if kwargs['volume_type'] in ['sc1', 'st1']:
            if int(kwargs['size']) < 500:
                Console.error("minimum volume size for sc1 is 500 GB")
        # NOTE(review): 'snapshot' appears to arrive as the literal string
        # "None" when unset (string compare, not `is None`) — confirm with
        # the caller that builds kwargs.
        if kwargs['snapshot'] != "None":
            # Create the volume from an existing snapshot.
            r = self.client.create_volume(
                AvailabilityZone=kwargs['region'],
                Encrypted=kwargs['encrypted'],
                Size=int(kwargs['size']),
                SnapshotId=kwargs['snapshot'],
                VolumeType=kwargs['volume_type'],
                TagSpecifications=[
                    {
                        'ResourceType': 'volume',
                        'Tags': [
                            {
                                'Key': "Name",
                                'Value': kwargs['NAME']
                            },
                        ]
                    },
                ],
            )
        else:
            # Create a fresh, empty volume.
            r = self.client.create_volume(
                AvailabilityZone=kwargs['region'],
                Encrypted=kwargs['encrypted'],
                Size=int(kwargs['size']),
                VolumeType=kwargs['volume_type'],
                TagSpecifications=[
                    {
                        'ResourceType': 'volume',
                        'Tags': [
                            {
                                'Key': "Name",
                                'Value': kwargs['NAME']
                            },
                        ]
                    },
                ],
            )
        # Normalize the single create_volume response into the same
        # {'Volumes': [...]} shape that describe_volumes returns.
        r = [r]
        result = {'Volumes': r}
        result['Volumes'][0]['AttachedToVm'] = []
        return result

    def list(self, **kwargs):
        """
        This function list all volumes as following:
        If NAME (volume name) is specified, it will print out info of NAME.
        If NAME (volume name) is not specified, it will print out info of all
        volumes under current cloud.
        If vm is specified, it will print out all the volumes attached to vm.
        If region(availability zone) is specified, it will print out all the
        volumes in that region.

        :param NAME: name of volume
        :param vm: name of vm
        :param region: name of availability zone
        :return: dict of volume
        """
        # refresh=True: query AWS directly; otherwise read cached records
        # from the cloudmesh database.
        if kwargs and kwargs['refresh']:
            result = self.client.describe_volumes()
            # Each recognized filter key narrows the query; the last matching
            # key in kwargs wins since `result` is overwritten each time.
            for key in kwargs:
                if key == 'NAME' and kwargs['NAME']:
                    result = self.client.describe_volumes(
                        # DryRun=dryrun,
                        Filters=[
                            {
                                'Name': 'tag:Name',
                                'Values': [
                                    kwargs['NAME'],
                                ]
                            },
                        ],
                    )
                elif key == 'NAMES' and kwargs['NAMES']:
                    # Accept a single name as a plain string.
                    if type(kwargs['NAMES']) == str:
                        kwargs['NAMES'] = [kwargs['NAMES']]
                    result = self.client.describe_volumes(
                        # DryRun=dryrun,
                        Filters=[
                            {
                                'Name': 'tag:Name',
                                'Values': kwargs['NAMES'],
                            },
                        ],
                    )
                elif key == 'vm' and kwargs['vm']:
                    vm_id = self.find_vm_id(kwargs['vm'])
                    result = self.client.describe_volumes(
                        # DryRun=dryrun,
                        Filters=[
                            {
                                'Name': 'attachment.instance-id',
                                'Values': [
                                    vm_id,
                                ]
                            },
                        ],
                    )
                elif key == 'region' and kwargs['region']:
                    result = self.client.describe_volumes(
                        # DryRun=dryrun,
                        Filters=[
                            {
                                'Name': 'availability-zone',
                                'Values': [
                                    kwargs['region'],
                                ]
                            },
                        ],
                    )
            result = self.update_AttachedToVm(result)
            result = self.update_dict(result)
        elif kwargs and not kwargs['refresh']:
            # Serve the listing from the local cloudmesh database cache.
            result = self.cm.find(cloud=self.cloud, kind='volume')
            for key in kwargs:
                if key == 'NAME' and kwargs['NAME']:
                    result = self.cm.find_name(name=kwargs['NAME'])
                elif key == 'NAMES' and kwargs['NAMES']:
                    result = self.cm.find_names(names=kwargs['NAMES'])
                elif key == 'vm' and kwargs['vm']:
                    result = self.cm.find(collection=f"{self.cloud}-volume",
                                          query={'AttachedToVm': kwargs['vm']})
                elif key == 'region' and kwargs['region']:
                    result = self.cm.find(
                        collection=f"{self.cloud}-volume",
                        query={'AvailabilityZone': kwargs['region']})
        else:
            # No kwargs at all: full live listing.
            result = self.client.describe_volumes()
            result = self.update_AttachedToVm(result)
            result = self.update_dict(result)
        return result

    def delete(self, name):
        """
        This function delete one volume. It will return the info of volume
        with "state" updated as "deleted" and will show in Database.

        :param NAME (string): volume name
        :return: dict
        """
        result = self.client.describe_volumes(Filters=[
            {
                'Name': 'tag:Name',
                'Values': [name]
            },
        ],
        )
        volume_id = self.find_volume_id(name)
        # Only 'available' (unattached) volumes can be deleted.
        if result['Volumes'][0]['State'] == 'available':
            response = self.client.delete_volume(
                VolumeId=volume_id)  # noqa: F841
            # Poll (up to 360 s, every 5 s) until the volume no longer has a
            # status; the exception from self.status signals it is gone.
            stop_timeout = 360
            time = 0
            while time <= stop_timeout:
                sleep(5)
                time += 5
                try:
                    volume_status = self.status(
                        name=name)[0]['State']  # noqa: F841
                except:
                    break
            result['Volumes'][0]['State'] = 'deleted'
        else:
            Console.error("volume is not available")
        result = self.update_dict(result)
        return result

    def attach(self, names, vm, dryrun=False):
        """
        This function attach one or more volumes to vm. It returns self.list()
        to list the updated volume. The updated dict with "AttachedToVm"
        showing the name of vm where the volume attached to.

        :param names (string): names of volumes
        :param vm (string): name of vm
        :param dryrun (boolean): True|False
        :return: dict of volume
        """
        # Candidate device names; attach_volume fails for devices already in
        # use, so each device is tried in turn until one succeeds.
        devices = [
            "/dev/sdb",
            "/dev/sdd",
            "/dev/sde",
            "/dev/sdf",
            "/dev/sdg",
            "/dev/sdh",
        ]
        vm_id = self.find_vm_id(vm)
        for name in names:
            volume_id = self.find_volume_id(name)
            for device in devices:
                try:
                    response = self.client.attach_volume(
                        Device=device,
                        InstanceId=vm_id,
                        VolumeId=volume_id,
                        DryRun=dryrun)  # noqa: F841
                except:
                    # NOTE(review): bare except silently skips occupied
                    # devices AND any real API failure.
                    pass
        return self.list(NAMES=names, refresh=True)

    def detach(self, name):
        """
        This function detach a volume from vm. It returns volume dict of the
        updated volume. The vm under "AttachedToVm" will be removed if volume
        is successfully detached.

        :param name: name of volume to detach
        :return: dict of volume
        """
        volume_status = self.status(name=name)[0]['State']
        if volume_status == 'in-use':
            volume_id = self.find_volume_id(volume_name=name)
            rresponse = self.client.detach_volume(
                VolumeId=volume_id)  # noqa: F841
            # Poll until the volume reports 'available' again (max 360 s).
            stop_timeout = 360
            time = 0
            while time <= stop_timeout:
                sleep(5)
                time += 5
                volume_status = self.status(name=name)[0]['State']
                if volume_status == "available":
                    break
        return self.list(NAME=name, refresh=True)[0]

    def add_tag(self, **kwargs):
        """
        This function add tag to a volume. In aws Boto3, key for volume name
        is "Name". For example, key="Name", value="user-volume-1". It could
        also be used to rename or name a volume. If NAME is not specified,
        then tag will be added to the last volume.

        :param NAME: name of volume
        :param key: name of tag
        :param value: value of tag
        :return: dict
        """
        key = kwargs['key']
        value = kwargs['value']
        volume_id = self.find_volume_id(volume_name=kwargs['NAME'])
        # NOTE(review): local `re` shadows the stdlib re module name within
        # this method.
        re = self.client.create_tags(
            Resources=[
                volume_id,
            ],
            Tags=[
                {
                    'Key': key,
                    'Value': value
                },
            ],
        )
        # Tagging with key 'Name' renames the volume, so look it up by the
        # new name afterwards.
        if key == 'Name':
            result = self.list(NAME=value, refresh=True)[0]
        else:
            result = self.list(NAME=kwargs['NAME'], refresh=True)[0]
        return result

    def migrate(self, **kwargs):
        """
        Migrate volume from one vm to another vm.

        :param NAME (string): the volume name
        :param vm (string): the vm name
        :param region (string): the availability zone
        :return: dict of volume
        """
        volume_name = kwargs['NAME']
        vm = kwargs['vm']
        volume_status = self.status(name=volume_name)[0]['State']
        volume_region = self.list(NAME=volume_name,
                                  refresh=True)[0]['cm']['region']
        volume_id = self.find_volume_id(volume_name=volume_name)
        vm_info = self.vm_info(vm=vm)
        vm_status = vm_info['Reservations'][0]['Instances'][0]['State']['Name']
        vm_region = vm_info['Reservations'][0]['Instances'][0]['Placement'][
            'AvailabilityZone']
        # vm_id = self.find_vm_id(vm_name=vm)
        # migrate within same region:
        if vm_status == 'running':
            if volume_region == vm_region:
                # Same AZ: a simple detach (if needed) + attach suffices.
                if volume_status == "in-use":
                    self.detach(name=volume_name)
                    self.attach(names=[
                        volume_name,
                    ], vm=vm)
                elif volume_status == "available":
                    self.attach(names=[
                        volume_name,
                    ], vm=vm)
                return self.list(NAME=volume_name, refresh=True)
            else:
                # Cross-AZ: snapshot the volume, recreate it in the vm's AZ
                # from the snapshot, attach, then delete the original.
                snapshot_id = self.client.create_snapshot(
                    VolumeId=volume_id, )['SnapshotId']
                ec2 = boto3.resource('ec2')
                snapshot = ec2.Snapshot(snapshot_id)
                # Wait (max 360 s) for the snapshot to finish.
                start_timeout = 360
                time = 0
                while time <= start_timeout:
                    sleep(5)
                    time += 5
                    if snapshot.state == "completed":
                        break
                kwargs['snapshot'] = snapshot_id
                kwargs['region'] = vm_region
                new_volume = self.create(name=volume_name,
                                         **kwargs)  # noqa: F841
                # Wait (max 360 s) for the new volume to become available.
                start_timeout = 360
                time = 0
                while time <= start_timeout:
                    sleep(5)
                    time += 5
                    status = self.status(name=volume_name)[0]['State']
                    if status == "available":
                        break
                self.attach(names=[
                    volume_name,
                ], vm=vm)
                response = self.client.delete_volume(
                    VolumeId=volume_id)  # noqa: F841
        else:
            Console.error("vm is not available")
        result = self.list(NAME=kwargs['NAME'], refresh=True)[0]
        return result

    def sync(self, **kwargs):
        """
        sync contents of one volume to another volume

        :param NAMES (list): list of volume names
        :return: dict
        """
        volume_1 = kwargs['NAMES'][0]
        volume_1_region = self.list(NAME=volume_1,
                                    refresh=True)[0]['cm']['region']
        # FIXME(review): this should almost certainly be kwargs['NAMES'][1] —
        # as written, volume_2 is the same volume as volume_1, so the method
        # snapshots the volume it is about to delete rather than the source.
        volume_2 = kwargs['NAMES'][0]
        volume_2_id = self.find_volume_id(volume_name=volume_2)
        snapshot_id = self.client.create_snapshot(
            VolumeId=volume_2_id, )['SnapshotId']
        ec2 = boto3.resource('ec2')
        snapshot = ec2.Snapshot(snapshot_id)
        # Wait (max 360 s) for the snapshot to finish.
        start_timeout = 360
        time = 0
        while time <= start_timeout:
            sleep(5)
            time += 5
            if snapshot.state == "completed":
                break
        self.delete(name=volume_1)
        kwargs = {
            'region': volume_1_region,
            'snapshot': snapshot_id,
            'NAME': volume_1
        }
        # NOTE(review): the re-creation step is commented out, so after the
        # delete above the final list() likely finds no live volume.
        # new_volume = self.create(**kwargs)
        start_timeout = 360
        time = 0
        while time <= start_timeout:
            sleep(5)
            time += 5
            status = self.status(name=volume_1)[0]['State']
            if status == "available":
                break
        return self.list(NAME=volume_1, refresh=True)[0]
class SlurmCluster(object):
    """
    Manages batch jobs on remote Slurm clusters: creating job records in the
    cloudmesh database, submitting them over ssh/scp, fetching results back,
    and cleaning up remote workspaces.
    """

    def __init__(self):
        """
        Initializes the SlurmCluster class
        """
        # current_path = os.path.dirname(os.path.realpath(__file__))
        # self.workspace = os.path.join(current_path,
        #                               "batch_workspace/slurm_batch.yaml")
        # if not os.path.exists(os.path.dirname(self.workspace)):
        #     os.makedirs(os.path.dirname(self.workspace))
        self.cm_config = Config()
        # self.batch_config = GenericConfig(self.workspace)
        self.all_jobIDs = []   # collected remote job ids
        self.slurm_cluster = {}  # info of the cluster currently worked on
        # Template for a job record; populated by create().
        self.job = {
            'job_name' : None,
            'cluster_name': None,
            'script_path': None,
            'executable_path': None,
            'destination': None,
            'source': None,
            'experiment_name': None,
            'companion_file': None,
        }
        self.database = CmDatabase()

    @staticmethod
    def job_specification():
        """
        Return a hard-coded sample job record (used for debugging/seeding).

        :return: dict in the cloudmesh batch-job record format
        """
        # self.job_validator()
        data = {
            "cm": {
                "cloud": "karst_debug",
                "kind": "batch-job",
                "name": "job012",
            },
            "batch": {
                "source": "~/.cloudmesh/batch/dir",
                "destination": "~/.cloudmesh/dir/",
                "status": "running"
            }
        }
        return data

    # @DatabaseUpdate
    # def status(self,job_name):
    #     return {
    #         "cloud": self.job.cluster_name,
    #
    #     }

    # noinspection PyDictCreation
    @DatabaseUpdate()
    def create(self,
               job_name,
               cluster_name,
               script_path,
               executable_path,
               destination,
               source,
               experiment_name,
               companion_file):
        """
        This method is used to create a job for running on remote slurm
        cluster

        :param job_name: name of the job to create
        :param cluster_name: slurm cluster on which the job is gonna run
        :param script_path: path of the slurm script
        :param executable_path: path of the executable that is going to be
            run on the cluster via slurm script
        :param destination: path in the remotes on which the scripts is
            gonna be copied to and ran from
        :param source: local path to which the results are gonna be copied
        :param experiment_name: experiment name and suffix of the filenames
            in the job
        :param companion_file: path of the file that has to be passed to the
            file as an argument if any
        :param overwrite: if the job already exists, this flag overwrites
            the previous job with the same name
        :return:
        """
        # if self.batch_config.get('job-metadata') is not None and job_name in \
        #     list(self.batch_config.get('job-metadata').keys()) and overwrite is False:
        #     raise RuntimeError("The job {} exists in the configuration file, if you want to overwrite the job, \
        #     use --overwrite argument.".format(job_name))
        # tmp_cluster = {cluster_name: dict(slurm_cluster)}
        # slurm_cluster = self.cm_config.get('cloudmesh').get('cluster')[cluster_name]
        # self.batch_config.deep_set(['slurm_cluster'], tmp_cluster)
        # Build a deterministic uid from job name + experiment name.
        name = Name(order=["name","experiment_name"],
                    name=job_name,
                    experiment_name=experiment_name)
        uid = name.id(name=job_name, experiment_name=experiment_name)
        print(uid)
        # return
        # TODO: remove cloud and kind after fixing CmDatabased update
        self.job = {
            'uid': uid,
            "cloud": cluster_name,
            "kind": "batch-job",
            "name" :job_name,
            "cm": {
                "cloud": cluster_name,
                "kind": "batch-job",
                "name": job_name,
                "cluster": self.cm_config.get('cloudmesh').get('cluster')[cluster_name]
            },
            "batch": {
                "status": "pending",
                'script_path': script_path.as_posix(),
                'executable_path': executable_path.as_posix(),
                'destination': destination.as_posix(),
                'source': source.as_posix(),
                'experiment_name': experiment_name,
                'companion_file': companion_file.as_posix()
            }
        }
        # self.job = {
        #     "cloud": cluster_name,
        #     "kind": "batch-job",
        #     "name": job_name,
        #     "cluster": self.cm_config.get('cloudmesh').get('cluster')[
        #         cluster_name],
        #     "status": "pending",
        #     'script_path': script_path.as_posix(),
        #     'executable_path': executable_path.as_posix(),
        #     'destination': destination.as_posix(),
        #     'source': source.as_posix(),
        #     'experiment_name': experiment_name,
        #     'companion_file': companion_file.as_posix()
        # }
        # job['destination'] = os.path.join(job['remote_path'], job['script_name'])
        # job['remote_slurm_script_path'] = os.path.join(job['remote_path'], job['slurm_script_name'])
        # job_metadata = {job_name: job}
        # self.batch_config.deep_set(['job-metadata'], job_metadata)
        # data = self.job_specification()
        # Abort if a record with this spec already exists; returning the job
        # in a list lets @DatabaseUpdate persist it.
        if self.database.exists(self.job)[0]:
            Console.error("Job already exists")
            return
        return [self.job]

    @staticmethod
    def _execute_in_parallel(func_args):
        """
        This is a method used for running methods in parallel

        :param func_args: tuple of (instance, method-name, *args)
        :return: whatever the named method returns
        """
        target_class = func_args[0]
        method_to_call = getattr(target_class, func_args[1])
        args = list(func_args[2:])
        return method_to_call(*args)

    def _fetch_results_in_parallel(self, job_metadata, job_id, all_job_ids):
        """
        This method is used to fetch the results from remote nodes.

        :param job_metadata: the dictionary containing the information about
            the previously submitted job
        :param job_id: the tuple containing destination node, destination
            pid and destination node index when the job was submitted
        :param all_job_ids: shared list; the id is removed once collected
        :return:
        """
        dest_node_info = self.slurm_cluster
        path = path_expand(dest_node_info['credentials']['sshconfigpath'])
        dest_job_id = job_id
        ssh_caller = lambda *x: self._ssh(dest_node_info['name'], path, *x)
        scp_caller = lambda *x: self._scp(dest_node_info['name'], path, *x)
        #
        # use the qstat from cloudmesh, we have a whole library for that
        #
        ps_output = ssh_caller("qstat -u $USER | grep %s" % job_id)
        # Job is finished when it no longer shows in qstat or is in state 'c'.
        if len(ps_output) == 0 or ' c ' in ps_output.lower():
            if not os.path.exists(job_metadata['local_path']):
                os.makedirs(job_metadata['local_path'])
            # TODO: REPLACE WITH .format
            scp_caller('-r', '%s:%s' % (dest_node_info['name'],
                                        job_metadata['remote_path']),
                       os.path.join(job_metadata['local_path'], ''))
            # Remove the copied-back scripts; only results should remain.
            os.remove(os.path.join(job_metadata['local_path'],
                                   os.path.basename(os.path.normpath(
                                       job_metadata['remote_path'])),
                                   job_metadata['script_name']))
            os.remove(os.path.join(job_metadata['local_path'],
                                   os.path.basename(os.path.normpath(
                                       job_metadata['remote_path'])),
                                   job_metadata['slurm_script_name']))
            if job_metadata['input_type'] == 'params+file':
                os.remove(os.path.join(job_metadata['local_path'],
                                       os.path.basename(os.path.normpath(
                                           job_metadata['remote_path'])),
                                       job_metadata['argfile_name']))
            all_job_ids.remove(dest_job_id)
            # TODO: REPLACE WITH .format
            print("Results collected from %s for jobID %s" % (
                dest_node_info['name'], dest_job_id))

    @staticmethod
    def _ssh(hostname, sshconfigpath, *args):
        """
        This method is used to create remove ssh connections

        :param hostname: hostname
        :param sshconfigpath: path to sshconfig for connecting to remote node
        :param args: the argument to be submitted via ssh; a trailing bool
            True suppresses error printing
        :return: list/str of command output, or [] on error
        """
        hide_errors_flag = False
        # A trailing boolean argument is a flag, not part of the command.
        if type(args[-1]) == bool:
            hide_errors_flag = True
            args = args[:-1]
        #
        # should we use cloudmesh.common.Shell
        # shoudl we have a better version of that
        #
        # (stdout, stderr) = SimpleShell(...)
        #
        ssh = subprocess.Popen(["ssh", hostname, '-F', sshconfigpath, *args],
                               stdout=subprocess.PIPE,
                               stderr=subprocess.PIPE)
        result = ssh.stdout.readline()
        if not result:
            error = ssh.stderr.readlines()
            if len(error) > 0 and hide_errors_flag == False:
                # TODO: REPLACE WITH .format
                print("ERROR in host %s: %s" % (hostname, error))
            return []
        else:
            try:
                return ''.join([chr(x) for x in result])
            except AttributeError:
                return [result.decode('utf-8').strip('\n')]

    @staticmethod
    def _scp(hostname, sshconfigpath, *args):
        """
        This method is used for scp from and to remote

        :param hostname: hostname
        :param sshconfigpath: ssh config file
        :param args:arguments for using while copying
        :return:
        """
        ssh = subprocess.Popen(["scp", '-F', sshconfigpath, *args],
                               stdout=subprocess.PIPE,
                               stderr=subprocess.PIPE)
        middle_result = ssh.stdout.readlines()
        if not middle_result:
            error = ssh.stderr.readlines()
            if len(error) > 0:
                print("ERROR in host %s: %s" % (hostname, error))

    @staticmethod
    def add_suffix_to_path(path, suffix):
        """
        This method is used to add suffix to a path

        :param path: path
        :param suffix: suffix
        :return: the new path with the suffix inserted before the extension
        """
        dir_path = os.path.dirname(path)
        full_filename = os.path.basename(path)
        filename, fileextention = os.path.splitext(full_filename)
        full_filename_new = filename + suffix + fileextention
        new_path = os.path.join(dir_path, full_filename_new)
        return new_path

    def clean_remote(self, job_name):
        """
        This method is used to spawn processes for cleaning the remote nodes

        :param job_name: name of previously submitted job for which the
            nodes are going to be cleaned
        :return:
        """
        job_metadata = self.batch_config.get('job-metadata')[job_name]
        target_cluster_info = self.batch_config.get('slurm_cluster')[
            job_metadata['slurm_cluster_name']]
        remote_path = job_metadata['remote_path']
        # Last True argument suppresses error printing in _ssh.
        ssh_caller = lambda *x: self._ssh(target_cluster_info['name'],
                                          os.path.expanduser(
                                              target_cluster_info['credentials'] \
                                                  ['sshconfigpath']), *x, True)
        ssh_caller('rm -rf {}'.format(remote_path))
        if len(ssh_caller('ls {}'.format(remote_path))) == 0:
            print("Job {} cleaned successfully.".format(job_name))
        else:
            print("Error: Job {} could not be cleaned.".format(job_name))

    def connection_test(self, slurm_cluster_name):
        """
        This method is used for testing the connection to the slurm cluster
        connection node

        :param slurm_cluster_name: name of the slurm cluster which is going
            to be tested
        :return:
        """
        r = self.database.find_name("job_20190327_22265228")
        print(r)
        return
        # NOTE(review): everything below is unreachable because of the early
        # return above — presumably temporary debugging left in place.
        target_node_info = self.batch_config.get('slurm_cluster')[
            slurm_cluster_name]
        ssh_caller = lambda *x: self._ssh(target_node_info['name'],
                                          os.path.expanduser(
                                              target_node_info['credentials'] \
                                                  ['sshconfigpath']), *x)
        if len(ssh_caller('uname -a')) > 0:
            print("Slurm Cluster {} is accessible.".format(
                target_node_info['name']))
        else:
            print("Error: Slurm Cluster {} cannot be accessed.".format(
                target_node_info['name']))

    def remove(self, target, key):
        """
        Used to remove virtual clusters and runtime configs

        :param target: type of entity to be removed
        :param key: keyname of the entity to be removed
        :return:
        """
        if target == 'slurm-cluster':
            self.batch_config.remove(['slurm_cluster'], key)
            print("Slurm-cluster {} removed successfully.".format(key))
        elif target == 'job':
            self.batch_config.remove(['job-metadata'], key)
            print("Job {} removed successfully.".format(key))
        else:
            raise ValueError("Target to remove not found.")

    def fetch(self, job_name):
        """
        This method is used to fetch results from remote nodes

        :param job_name: the previously submitted job name
        :return:
        """
        job_metadata = self.batch_config.get('job-metadata')[job_name]
        self.slurm_cluster = self.batch_config.get('slurm_cluster')[
            job_metadata['slurm_cluster_name']]
        loaded_all_job_ids = [x for x in job_metadata['jobIDs']]
        # Shared list so worker processes can remove collected ids.
        all_job_ids = Manager().list()
        all_job_ids.extend(loaded_all_job_ids)
        pool = Pool(processes=1)
        print("collecting results")
        # Keep polling until every job id has been collected and removed.
        while len(all_job_ids) > 0:
            time.sleep(1)
            all_running_jobs = [(self, '_fetch_results_in_parallel',
                                 job_metadata, jobID, all_job_ids) for \
                                jobID in loaded_all_job_ids if
                                jobID in all_job_ids]
            pool.map(self._execute_in_parallel, all_running_jobs)
            print("waiting for other results if any...")
        print("All of the remote results collected.")

    '''
    @DatabaseUpdate
    def list(self, target, max_depth, current_depth=1, input_dict=None):
        """
        listing the target slurm clusters or job-metadata

        :param target: name of the virtual cluster to be listed
        :param max_depth: depth of information to be shown
        :param current_depth: current depth of printing information
        :param input_dict: used for recursion for depth of higher than 1
        :return:
        """
        if target == 'slurm-clusters' and input_dict is None:
            input_dict = self.batch_config.get('slurm_cluster')
        if target == 'jobs' and input_dict is None:
            input_dict = self.batch_config.get('job-metadata')
        elif input_dict is None:
            raise ValueError("Target of listing not found.")

        if max_depth >= current_depth:
            if type(input_dict) == dict:
                for key in input_dict:
                    key_to_print = key + ':' if max_depth >= current_depth else key
                    indent = current_depth if current_depth > 1 else current_depth - 1
                    print('\t' * indent, key_to_print)
                    if type(input_dict.get(key)) != dict:
                        print('\t' * (indent + 1), input_dict.get(key))
                    else:
                        for value in input_dict.get(key):
                            value_to_print = value + ':' if max_depth > current_depth else value
                            print('\t' * (indent + 1), value_to_print)
                            self.list(target, max_depth,
                                      input_dict=input_dict[key][value],
                                      current_depth=current_depth + 1)
            else:
                indent = current_depth if current_depth > 1 else current_depth - 1
                print('\t' * indent, input_dict)
        data = [{}, {}]
        return data
    '''

    def run(self, job_name):
        """
        This method is used to create a job, validate it and run it on
        remote nodes

        :param job_name: name of the job to create
        :return:
        """
        job_metadata = self.batch_config.get('job-metadata')[job_name]
        all_job_ids = Manager().list()
        cluster_name = job_metadata['slurm_cluster_name']
        slurm_cluster = self.batch_config.get('slurm_cluster').get(
            cluster_name)
        path = path_expand(slurm_cluster['credentials']['sshconfigpath'])
        ssh_caller = lambda *x: self._ssh(slurm_cluster['name'], path, *x)
        scp_caller = lambda *x: self._scp(slurm_cluster['name'], path, *x)
        # TODO replace with .format
        # Create the remote job directory, copy the slurm script and job
        # script over, then submit with qsub and capture the job id.
        ssh_caller('cd %s && mkdir job%s' % (job_metadata['raw_remote_path'],
                                             job_metadata['suffix']), True)
        scp_caller(job_metadata['slurm_script_path'],
                   '%s:%s' % (slurm_cluster['name'],
                              job_metadata['remote_slurm_script_path']))
        scp_caller(job_metadata['job_script_path'],
                   '%s:%s' % (slurm_cluster['name'],
                              job_metadata['remote_script_path']))
        ssh_caller('chmod +x', job_metadata['remote_script_path'])
        if job_metadata['input_type'].lower() == 'params+file':
            scp_caller(job_metadata['argfile_path'],
                       '%s:%s' % (slurm_cluster['name'],
                                  job_metadata['remote_path']))
        remote_job_id = ssh_caller(
            "cd %s && qsub %s && qstat -u $USER | tail -n 1 | awk '{print $1}'" %
            (job_metadata['remote_path'],
             job_metadata['remote_slurm_script_path']))
        remote_job_id = remote_job_id.strip('\n')
        all_job_ids.append(remote_job_id)
        print('Remote job ID: %s' % remote_job_id)
        self.batch_config.deep_set(['job-metadata', job_name, 'jobIDs'],
                                   [pid for pid in all_job_ids])

    def set_param(self, target, name, parameter, value):
        """
        Used to set a specific parameter in the configuration

        :param target: the entity type on which the parameter is going to be
            set, e.g. runtime-config
        :param name: the entity name on which the parameter is going to be
            set, e.g. test-config32
        :param parameter: name of the parameter to be set
        :param value: value of that parameter to be set
        :return:
        """
        # TODO: .format see if .format(**local) works
        if target == 'slurm-cluster':
            self.batch_config.deep_set(['slurm_cluster', name, parameter],
                                       value)
            print("slurm-cluster parameter {} set to {} successfully.".format(
                parameter, value))
        elif target == 'job-metadata':
            self.batch_config.deep_set(['job-metadata', name, parameter],
                                       value)
            print("Job-metadata parameter {} set to {} successfully.".format(
                parameter, value))
        else:
            raise ValueError("Target of variable set not found.")
def benchmark(self): #get current cloud and create provider var_list = Variables(filename="~/.cloudmesh/var-data") cloud = var_list['cloud'] name = var_list['vm'] newProvider = Provider(name=cloud) #get vm cm = CmDatabase() try: vm = cm.find_name(name, "vm")[0] except IndexError: Console.error(f"could not find vm {name}") # get file path of the benchmark filepath = path.dirname(path.dirname( path.abspath(__file__))) + '/api/benchmark.py' filepath = filepath.replace('\\', '/') # prepare command to run the file vmcom = VmCommand() try: Console.msg('waiting for vm to be reachable...') Console.msg('wait') newProvider.wait(vm=vm) except: Console.msg('could not reach vm for benchmark') return try: Console.msg(f'moving benchmark file to vm...') Console.msg(f'put ' + filepath + ' /home/ubuntu') vmcom.do_vm('put ' + filepath + ' /home/ubuntu') except: Console.msg( f'could not ssh into vm, make sure one is running and reachable' ) return try: Console.msg(f'executing the benchmark...') Console.msg( 'ssh --command=\"chmod +x benchmark.py;./benchmark.py;rm benchmark.py;exit\"' ) benchtime = newProvider.ssh( vm=vm, command= "chmod +x benchmark.py;./benchmark.py;rm benchmark.py;exit") except: Console.msg( f'could not ssh into vm, make sure one is running and reachable' ) return print("successfully benchmarked") benchtime = float(benchtime.strip()) #add the benchmark, cloud, vm, and time to db benchdict = {} benchdict['cloud'] = cloud benchdict['name'] = name benchdict['ImageId'] = vm['ImageId'] benchdict['flavor'] = vm['InstanceType'] benchdict['region'] = vm['Placement']['AvailabilityZone'] benchdict['BenchmarkTime'] = benchtime benchdict['updated'] = str(datetime.utcnow()) benchdict["cm"] = { "kind": 'frugal-benchmark', "driver": cloud, "cloud": cloud, "name": name, "updated": str(datetime.utcnow()), } cm.update(benchdict, progress=True) return ""
class Provider(VolumeABC):
    """
    Oracle Cloud Infrastructure (OCI) block-volume provider for cloudmesh.

    Wraps oci.core.BlockstorageClient / ComputeClient to create, list,
    attach, detach, delete and tag block volumes, caching results in the
    cloudmesh database.
    """

    kind = "oracle"

    # Sample configuration stanza inserted into cloudmesh.yaml for this
    # provider.
    sample = """
    cloudmesh:
      volume:
        {name}:
          cm:
            active: true
            heading: {name}
            host: TBD
            label: {name}
            kind: oracle
            version: TBD
            service: volume
          credentials:
            version: TBD
            user: TBD
            fingerprint: TBD
            key_file: oci_api_key.pem
            pass_phrase: TBD
            tenancy: TBD
            compartment_id: TBD
            region: TBD
            availability_domain: TBD
          default:
    """

    # Column layout used by provider.Print for the volume listing.
    output = {
        "volume": {
            "sort_keys": ["cm.name"],
            "order": ["cm.name",
                      "cm.cloud",
                      "cm.kind",
                      "availability_domain",
                      "time_created",
                      "size_in_gbs",
                      "lifecycle_state",
                      "id"
                      ],
            "header": ["Name",
                       "Cloud",
                       "Kind",
                       "Availability Zone",
                       "Created At",
                       "Size(Gb)",
                       "Status",
                       "Id"
                       ],
        }
    }

    def update_dict(self, results):
        """
        This function adds a cloudmesh cm dict to each dict in the list
        elements. Libcloud returns an object or list of objects With the dict
        method this object is converted to a dict. Typically this method is
        used internally.

        :param results: the original dicts.
        :return: The list with the modified dicts
        """
        if results is None:
            return None
        d = []
        for entry in results:
            # `entry` is an OCI model object; pull out the attributes we
            # expose, then replace it with a plain dict.
            display_name = entry.__getattribute__("display_name")
            availability_domain = entry.__getattribute__(
                "availability_domain")
            time_created = entry.__getattribute__("time_created")
            size_in_gbs = entry.__getattribute__("size_in_gbs")
            lifecycle_state = entry.__getattribute__("lifecycle_state")
            attribute_id = entry.__getattribute__("id")
            entry = {
                "availability_domain": availability_domain,
                "time_created": time_created,
                "size_in_gbs": size_in_gbs,
                "id": attribute_id,
                "lifecycle_state": lifecycle_state
            }
            if "cm" not in entry:
                entry['cm'] = {}
            entry["cm"].update({
                "cloud": self.cloud,
                "kind": "volume",
                "name": display_name,
            })
            d.append(entry)
        return d

    def __init__(self, name):
        """
        Initialize provider. The default parameters are read from the
        configuration file that is defined in yaml format.

        :param name: name of cloud
        """
        self.cloud = name
        self.config = Config()["cloudmesh.volume.oracle.credentials"]
        self.defaults = Config()["cloudmesh.volume.oracle.default"]
        self.cm = CmDatabase()

    def get_volume_id_from_name(self, block_storage, name):
        """
        This function get volume id from volume name

        :param block_storage: Block storage client object
        :param name: volume name
        :return: volume id, or None if no volume with that name is found
        """
        v = block_storage.list_volumes(self.config['compartment_id'])
        results = v.data
        volume_id = None
        for entry in results:
            display_name = entry.__getattribute__("display_name")
            if name == display_name:
                volume_id = entry.__getattribute__("id")
                break
        return volume_id

    def get_attachment_id_from_name(self, block_storage, name):
        """
        This function get attachment id from volume name

        :param block_storage: Block storage client object
        :param name: Name of the volume
        :return: Volume attachment id, or None if not found
        """
        v = block_storage.list_volumes(self.config['compartment_id'])
        results = v.data
        attachment_id = None
        for entry in results:
            display_name = entry.__getattribute__("display_name")
            if name == display_name:
                # The attachment id is stored as a freeform tag by attach().
                tags = entry.__getattribute__("freeform_tags")
                attachment_id = tags['attachment_id']
                break
        return attachment_id

    def status(self, name):
        """
        This function get volume status, such as "in-use", "available"

        :param name: Volume name
        :return: Volume_status
        """
        try:
            block_storage = oci.core.BlockstorageClient(self.config)
            v = block_storage.list_volumes(self.config['compartment_id'])
            volumes = v.data
            result = []
            entry = None
            for entry in volumes:
                display_name = entry.__getattribute__("display_name")
                if name == display_name:
                    break
            # NOTE(review): this appends the last iterated entry even when no
            # display_name matched (and fails if the volume list is empty) —
            # confirm whether a no-match case should return [] instead.
            result.append(entry)
            result = self.update_dict(result)
        except Exception as e:
            Console.error("Problem finding status", traceflag=True)
            print(e)
            raise RuntimeError
        return result

    def list(self, **kwargs):
        """
        This function list all volumes as following:
        If NAME (volume_name) is specified, it will print out info of NAME
        If NAME (volume_name) is not specified, it will print out info of all
        volumes

        :param kwargs: contains name of volume
        :return: Dictionary of volumes
        """
        try:
            # refresh=False: serve from the cloudmesh database cache.
            if kwargs and kwargs['refresh'] is False:
                result = self.cm.find(cloud=self.cloud, kind='volume')
                for key in kwargs:
                    if key == 'NAME' and kwargs['NAME']:
                        result = self.cm.find_name(name=kwargs['NAME'])
                    elif key == 'NAMES' and kwargs['NAMES']:
                        result = self.cm.find_names(names=kwargs['NAMES'])
            else:
                # Live query against OCI.
                block_storage = oci.core.BlockstorageClient(self.config)
                if kwargs and kwargs['NAME']:
                    v = block_storage.list_volumes(
                        self.config['compartment_id'])
                    results = v.data
                    entry = None
                    for entry in results:
                        display_name = entry.__getattribute__("display_name")
                        if kwargs["NAME"] == display_name:
                            break
                    result = [entry]
                    result = self.update_dict(result)
                else:
                    v = block_storage.list_volumes(
                        self.config['compartment_id'])
                    results = v.data
                    result = self.update_dict(results)
        except Exception as e:
            Console.error("Problem listing volume", traceflag=True)
            print(e)
            raise RuntimeError
        return result

    def create(self, **kwargs):
        """
        This function creates a new volume with default size of 50gb.
        Default parameters are read from self.config.

        :param kwargs: Contains Volume name
        :return: Volume dictionary
        """
        try:
            arguments = dotdict(kwargs)
            block_storage = oci.core.BlockstorageClient(self.config)
            result = block_storage.create_volume(
                oci.core.models.CreateVolumeDetails(
                    compartment_id=self.config['compartment_id'],
                    availability_domain=self.config['availability_domain'],
                    display_name=arguments.NAME
                ))
            # wait for availability of volume
            oci.wait_until(
                block_storage,
                block_storage.get_volume(result.data.id),
                'lifecycle_state',
                'AVAILABLE'
            ).data
            # Re-list so the returned dict reflects the final state.
            v = block_storage.list_volumes(self.config['compartment_id'])
            results = v.data
            result = self.update_dict(results)
        except Exception as e:
            Console.error("Problem creating volume", traceflag=True)
            print(e)
            raise RuntimeError
        return result

    def attach(self, names=None, vm=None):
        """
        This function attaches a given volume to a given instance

        :param names: Names of Volumes
        :param vm: Instance name
        :return: Dictionary of volumes
        """
        try:
            compute_client = oci.core.ComputeClient(self.config)
            # get instance id from VM name
            i = compute_client.list_instances(self.config['compartment_id'])
            instances = i.data
            instance_id = None
            for entry in instances:
                display_name = entry.__getattribute__("display_name")
                if vm == display_name:
                    instance_id = entry.__getattribute__("id")
                    break
            # get volumeId from Volume name; only the first name is used.
            block_storage = oci.core.BlockstorageClient(self.config)
            volume_id = self.get_volume_id_from_name(block_storage, names[0])
            # attach volume to vm
            a = compute_client.attach_volume(
                oci.core.models.AttachIScsiVolumeDetails(
                    display_name='IscsiVolAttachment',
                    instance_id=instance_id,
                    volume_id=volume_id
                )
            )
            # tag volume with attachment id. This needed during detach.
            block_storage.update_volume(
                volume_id,
                oci.core.models.UpdateVolumeDetails(
                    freeform_tags={'attachment_id': a.data.id},
                ))
            # wait until attached
            oci.wait_until(
                compute_client,
                compute_client.get_volume_attachment(
                    a.data.id),
                'lifecycle_state',
                'ATTACHED'
            )
            # return result after attach
            v = block_storage.list_volumes(self.config['compartment_id'])
            results = v.data
            results = self.update_dict(results)
        except Exception as e:
            Console.error("Problem attaching volume", traceflag=True)
            print(e)
            raise RuntimeError
        return results

    def detach(self, name=None):
        """
        This function detaches a given volume from an instance

        :param name: Volume name
        :return: Dictionary of volumes
        """
        try:
            compute_client = oci.core.ComputeClient(self.config)
            block_storage = oci.core.BlockstorageClient(self.config)
            # The attachment id was stored as a freeform tag by attach().
            attachment_id = self.get_attachment_id_from_name(block_storage,
                                                             name)
            compute_client.detach_volume(attachment_id)
            # wait for detachment
            oci.wait_until(
                compute_client,
                compute_client.get_volume_attachment(attachment_id),
                'lifecycle_state',
                'DETACHED'
            )
            # return result after detach
            v = block_storage.list_volumes(self.config['compartment_id'])
            results = v.data
            results = self.update_dict(results)
        except Exception as e:
            Console.error("Problem detaching volume", traceflag=True)
            print(e)
            raise RuntimeError
        # NOTE(review): detach returns a single dict while attach returns the
        # full list — confirm this asymmetry is intentional.
        return results[0]

    def delete(self, name=None):
        """
        This function delete one volume.

        :param name: Volume name
        :return: Dictionary of volumes
        """
        try:
            block_storage = oci.core.BlockstorageClient(self.config)
            volume_id = self.get_volume_id_from_name(block_storage, name)
            if volume_id is not None:
                block_storage.delete_volume(volume_id=volume_id)
                # wait for termination
                oci.wait_until(
                    block_storage,
                    block_storage.get_volume(volume_id),
                    'lifecycle_state',
                    'TERMINATED'
                ).data
            v = block_storage.list_volumes(self.config['compartment_id'])
            results = v.data
            result = self.update_dict(results)
        except Exception as e:
            Console.error("Problem deleting volume", traceflag=True)
            print(e)
            raise RuntimeError
        return result

    def add_tag(self, **kwargs):
        """
        This function add tag to a volume.

        :param kwargs:
                    NAME: name of volume
                    key: name of tag
                    value: value of tag
        :return: Dictionary of volume
        """
        try:
            name = kwargs['NAME']
            key = kwargs['key']
            value = kwargs['value']
            block_storage = oci.core.BlockstorageClient(self.config)
            volume_id = self.get_volume_id_from_name(block_storage, name)
            block_storage.update_volume(
                volume_id,
                oci.core.models.UpdateVolumeDetails(
                    freeform_tags={key: value},
                )
            )
            result = self.list(NAME=name, refresh=True)[0]
        except Exception as e:
            Console.error("Problem adding tag", traceflag=True)
            print(e)
            raise RuntimeError
        return result

    def migrate(self,
                name=None,
                fvm=None,
                tvm=None,
                fregion=None,
                tregion=None,
                fservice=None,
                tservice=None,
                fcloud=None,
                tcloud=None,
                cloud=None,
                region=None,
                service=None):
        """
        Migrate volume from one vm to another vm.

        :param name: name of volume
        :param fvm: name of vm where volume will be moved from
        :param tvm: name of vm where volume will be moved to
        :param fregion: the region where the volume will be moved from
        :param tregion: region where the volume will be moved to
        :param fservice: the service where the volume will be moved from
        :param tservice: the service where the volume will be moved to
        :param fcloud: the provider where the volume will be moved from
        :param tcloud: the provider where the volume will be moved to
        :param cloud: the provider where the volume will be moved within
        :param region: the region where the volume will be moved within
        :param service: the service where the volume will be moved within
        :return: dict
        """
        raise NotImplementedError

    def sync(self,
             volume_id=None,
             zone=None,
             cloud=None):
        """
        sync contents of one volume to another volume

        :param volume_id: id of volume A
        :param zone: zone where new volume will be created
        :param cloud: the provider where volumes will be hosted
        :return: str
        """
        raise NotImplementedError
def do_ip(self, args, arguments):
    """
    ::

        Usage:
            ip list [--cloud=CLOUD] [--output=OUTPUT]
            ip create [N] [--cloud=CLOUD]
            ip delete [IP] [--cloud=CLOUD]
            ip attach [NAME] [IP]
            ip detach [NAME] [IP]

        Options:
            -h                help message
            --cloud=CLOUD     Name of the cloud
            --output=OUTPUT   The output format [default: table]

        Arguments:
            N     Number of IPS to create
            IP    IP Address
            NAME  Name of the service

        Description:
            ip list floating [--cloud=CLOUD] [--output=OUTPUT]
                returns a list of all the floating IPS in the cloud

            ip add floating [--cloud=CLOUD]
                adds a floating ip to the pool of available floating ips

            ip delete floating [IP] [--cloud=CLOUD]
                deletes a floating ip to the pool of available floating ips

            ip add NAME [IP]
                add the ip to the named vm

            ip delete NAME [IP]
                deletes the ip from the vm
    """

    def get_ip(ip):
        # Return the ip given on the command line as-is; when none was
        # given, ask the provider (bound in the enclosing branch before
        # this is called) for a free floating ip. Returns "" on failure.
        if ip is not None:
            # BUGFIX: the original fell off the end here and returned
            # None, so an explicitly given IP was silently discarded.
            return ip
        try:
            return provider.find_available_public_ip()
        except Exception:
            Console.error("No free floating ip found")
            return ""

    map_parameters(arguments, "cloud", "output")
    arguments.vm = arguments.NAME
    variables = Variables()

    if arguments.list:
        # ip list [--cloud=CLOUD] [--output=OUTPUT]
        cloud = Parameter.find("cloud", arguments, variables)
        print(f"cloud {cloud}")
        provider = Provider(name=cloud)
        ips = provider.list_public_ips()
        provider.Print(ips, output=arguments.output, kind="ip")

    elif arguments.create:
        # ip create [N] [--cloud=CLOUD] -- allocate N (default 1) ips
        cloud = Parameter.find("cloud", arguments, variables)
        n = arguments.N or 1
        print(f"cloud {cloud}")
        provider = Provider(name=cloud)
        for i in range(0, int(n)):
            ips = provider.create_public_ip()
        ips = provider.list_public_ips()
        provider.Print(ips, output=arguments.output, kind="ip")

    elif arguments.delete:
        # ip delete [IP] [--cloud=CLOUD] -- release the given (or a free) ip
        cloud = Parameter.find("cloud", arguments, variables)
        print(f"cloud {cloud}")
        provider = Provider(name=cloud)
        ip = get_ip(arguments.IP)
        ips = provider.delete_public_ip(ip)
        ips = provider.list_public_ips()
        provider.Print(ips, output=arguments.output, kind="ip")

    elif arguments.attach:
        # ip attach [NAME] [IP] -- the cloud is taken from the vm record
        name = Parameter.find("vm", arguments, variables)
        cm = CmDatabase()
        vm = cm.find_name(name, kind="vm")[0]
        cloud = vm["cm"]["cloud"]
        print(f"cloud {cloud}")
        provider = Provider(name=cloud)
        ip = get_ip(arguments.IP)
        try:
            ips = provider.attach_public_ip(name=name, ip=ip)
        except Exception as e:
            print(e)
            Console.error("Could not assign public ip.")

    elif arguments.detach:
        # ip detach [NAME] [IP] -- detach the vm's current public ip
        name = Parameter.find("vm", arguments, variables)
        cm = CmDatabase()
        vm = cm.find_name(name, kind="vm")[0]
        cloud = vm["cm"]["cloud"]
        print(f"cloud {cloud}")
        provider = Provider(name=cloud)
        ip = provider.get_public_ip(name=name)
        print(name, ip)
        try:
            ips = provider.detach_public_ip(name=name, ip=ip)
        except Exception as e:
            print(e)
            Console.error("can not detach ip")
def do_ssh(self, args, arguments):
    """
    ::

        Usage:
            ssh
            ssh config list [--output=OUTPUT]
            ssh config add NAME IP [USER] [KEY]
            ssh config delete NAME
            ssh host delete NAME
            ssh host add NAME
            ssh [--name=VMs] [--user=USERs] [COMMAND]

        Arguments:
          NAME        Name or ip of the machine to log in
          list        Lists the machines that are registered and
                      the commands to login to them
          PARAMETERS  Register te resource and add the given
                      parameters to the ssh config file.  if the
                      resource exists, it will be overwritten. The
                      information will be written in /.ssh/config

        Options:
           -v                verbose mode
           --output=OUTPUT   the format in which this list is given
                             formats includes cat, table, json, yaml,
                             dict. If cat is used, it is just printed as
                             is. [default: table]
           --user=USERs      overwrites the username that is
                             specified in ~/.ssh/config
           --name=CMs        the names of the VMS to execute the
                             command on

        Description:
            ssh config list
                lists the hostsnames that are present in the
                ~/.ssh/config file

            ssh config add NAME IP [USER] [KEY]
                registers a host i ~/.ssh/config file
                Parameters are attribute=value pairs
                Note: Note yet implemented

            ssh [--name=VMs] [--user=USERs] [COMMAND]
                executes the command on the named hosts. If user is
                specified and is greater than 1, it must be specified for
                each vm. If only one username is specified it is used for
                all vms. However, as the user is typically specified in
                the cloudmesh database, you probably do not have to
                specify it as it is automatically found.

        Examples:
             ssh config add blue 192.168.1.245 blue

                 Adds the following to the !/.ssh/config file

                 Host blue
                     HostName 192.168.1.245
                     User blue
                     IdentityFile ~/.ssh/id_rsa.pub
    """
    map_parameters(arguments, "name", "user", "output")

    if arguments.config and arguments.list:
        # ssh config list [--output=OUTPUT]
        hosts = dict(ssh_config().hosts)
        print(
            Printer.dict_table(
                hosts,
                order=['host', 'HostName', 'User', 'IdentityFile']))

    elif arguments.config and arguments.add:
        # ssh config add NAME IP [USER] [KEY]
        variables = Variables()
        user = Parameter.find("user", arguments, variables.dict())
        key = Parameter.find("key", arguments, variables.dict(),
                             {"key": "~/.ssh/id_rsa.pub"})
        name = arguments.NAME or variables['vm']
        ip = arguments.IP
        hosts = ssh_config()
        if name in hosts.hosts:
            Console.error("Host already in ~/.ssh/config")
            return ""
        hosts.generate(host=name, hostname=ip, identity=key, user=user)

    elif arguments.config and arguments.delete:
        # ssh config delete NAME
        raise NotImplementedError

    elif arguments.host and arguments.add:
        # ssh host add NAME
        # BUGFIX: this branch tested arguments.config and was therefore
        # unreachable (shadowed by the identical "config add" condition
        # above). It belongs to the "host" sub-command.
        location = path_expand("~/.ssh/known_hosts")
        name = arguments.NAME
        # BUGFIX: missing f-prefix passed the literal "{name}" to the shell
        os.system(f"ssh-keygen -R {name}")
        os.system(f"ssh-keyscan -H {name} >> {location}")

    elif arguments.host and arguments.delete:
        # ssh host delete NAME
        # BUGFIX: same shadowed-condition and missing-f-prefix problems
        # as the "host add" branch.
        name = arguments.NAME
        os.system(f"ssh-keygen -R {name}")

    elif (arguments.name and arguments.COMMAND) or arguments.COMMAND:
        # ssh [--name=VMs] [--user=USERs] [COMMAND]
        variables = Variables()
        if arguments.name is None:
            name = arguments.NAME or variables['vm']
            names = [name]
        else:
            names = Parameter.expand(arguments.name)
        # BUGFIX: the mapped parameter is "user" (see map_parameters);
        # arguments.users was always None, crashing len(users) below.
        users = Parameter.expand(arguments.user)
        command = arguments.COMMAND

        if command is None and len(names) > 1:
            raise ValueError("For interactive shells the number of vms "
                             "must be 1")
        elif command is None and len(names) == 1:
            # find the cloud from the vm record
            cm = CmDatabase()
            vm = cm.find_name(names[0], kind="vm")[0]
            cloud = vm['cm']['cloud']
            # update the cloud and re-read the vm
            provider = Provider(name=cloud)
            provider.list()
            vm = cm.find_name(names[0], kind="vm")[0]
            # run an interactive ssh
            result = provider.ssh(vm=vm, command=command)
            print(result)
            return ""

        if users is not None:
            if len(names) > 1 and len(users) == 1:
                # BUGFIX: [users] * len(names) built a list of lists;
                # replicate the single username instead.
                users = users * len(names)
            if len(names) > 1 and len(users) > 1 \
                    and len(names) != len(users):
                raise ValueError("vms and users have different length")

        for name in names:
            cm = CmDatabase()
            try:
                vm = cm.find_name(name, kind="vm")[0]
            except IndexError:
                Console.error(
                    "VM not found, make sure the vm exists in the list below: "
                )
                os.system('cms vm list')
                return
            cloud = vm['cm']['cloud']
            provider = Provider(name=cloud)
            result = provider.ssh(vm=vm, command=command)
            print(result)

    else:
        # ssh with no argument: interactive session on the last used vm
        last_vm = Variables()['vm']
        cm = CmDatabase()
        vm = cm.find_name(last_vm, kind="vm")[0]
        cloud = vm['cm']['cloud']
        provider = Provider(name=cloud)
        provider.ssh(vm=vm)
def do_vm(self, args, arguments):
    """
    ::

        Usage:
            vm ping [NAMES] [--cloud=CLOUDS] [--count=N]
            vm check [NAMES] [--cloud=CLOUDS] [--username=USERNAME]
            vm status [NAMES] [--cloud=CLOUDS] [--output=OUTPUT]
            vm console [NAME] [--force]
            vm log [NAME] [--force]
            vm stop [NAMES] [--dryrun]
            vm start [NAMES] [--dryrun]
            vm terminate [NAMES] [--cloud=CLOUD] [--dryrun]
            vm delete [NAMES] [--cloud=CLOUD] [--dryrun]
            vm refresh [--cloud=CLOUDS]
            vm list [NAMES] [--cloud=CLOUDS] [--output=OUTPUT] [--refresh]
            vm boot [--n=COUNT] [--name=VMNAMES] [--cloud=CLOUD] [--username=USERNAME] [--image=IMAGE] [--flavor=FLAVOR] [--network=NETWORK] [--public] [--secgroup=SECGROUPs] [--group=GROUPs] [--key=KEY] [--dryrun] [-v]
            vm meta list [NAME]
            vm meta set [NAME] KEY=VALUE...
            vm meta delete [NAME] KEY...
            vm script [--name=NAMES] [--username=USERNAME] [--key=KEY] [--dryrun] [--dir=DESTINATION] SCRIPT
            vm ip assign [NAMES] [--cloud=CLOUD]
            vm ip show [NAMES] [--group=GROUP] [--cloud=CLOUD] [--output=OUTPUT] [--refresh]
            vm ip inventory [NAMES]
            vm ssh [NAMES] [--username=USER] [--quiet] [--ip=IP] [--key=KEY] [--command=COMMAND]
            vm put SOURCE DESTINATION [NAMES]
            vm get SOURCE DESTINATION [NAMES]
            vm rename [OLDNAMES] [NEWNAMES] [--force] [--dryrun]
            vm wait [--cloud=CLOUD] [--interval=INTERVAL] [--timeout=TIMEOUT]
            vm info [--cloud=CLOUD] [--output=OUTPUT]
            vm username USERNAME [NAMES] [--cloud=CLOUD]
            vm resize [NAMES] [--size=SIZE]

        Arguments:
            OUTPUT         the output format
            COMMAND        positional arguments, the commands you want to
                           execute on the server(e.g. ls -a) separated by
                           ';', you will get a return of executing result
                           instead of login to the server, note that type
                           in -- is suggested before you input the commands
            NAME           server name. By default it is set to the name of
                           last vm from database.
            NAMES          server name. By default it is set to the name of
                           last vm from database.
            KEYPAIR_NAME   Name of the vm keypair to be used to create VM.
                           Note this is not a path to key.
            NEWNAMES       New names of the VM while renaming.
            OLDNAMES       Old names of the VM while renaming.

        Options:
            -v                          verbose, prints the dict at the end
            --output=OUTPUT             the output format
            -H --modify-knownhosts      Do not modify ~/.ssh/known_hosts file
                                        when ssh'ing into a machine
            --username=USERNAME         the username to login into the vm. If
                                        not specified it will be guessed from
                                        the image name and the cloud
            --ip=IP                     give the public ip of the server
            --cloud=CLOUD               give a cloud to work on, if not given,
                                        selected or default cloud will be used
            --count=COUNT               give the number of servers to start
            --detail                    for table, a brief version is used as
                                        default, use this flag to print
                                        detailed table
            --flavor=FLAVOR             give the name or id of the flavor
            --group=GROUP               give the group name of server
            --secgroup=SECGROUP         security group name for the server
            --image=IMAGE               give the name or id of the image
            --key=KEY                   specify a key to use, input a string
                                        which is the full path to the private
                                        key file
            --keypair_name=KEYPAIR_NAME Name of the vm keypair to be used to
                                        create VM. Note this is not a path to
                                        key.
            --user=USER                 give the user name of the server that
                                        you want to use to login
            --name=NAME                 give the name of the virtual machine
            --force                     rename/ delete vms without user's
                                        confirmation
            --command=COMMAND           specify the commands to be executed

        Description:
            commands used to boot, start or delete servers of a cloud

            vm default [options...]
                Displays default parameters that are set for vm boot either
                on the default cloud or the specified cloud.

            vm boot [options...]
                Boots servers on a cloud, user may specify flavor, image
                .etc, otherwise default values will be used, see how to set
                default values of a cloud: cloud help

            vm start [options...]
                Starts a suspended or stopped vm instance.

            vm stop [options...]
                Stops a vm instance .

            vm delete [options...]
                Delete servers of a cloud, user may delete a server by its
                name or id, delete servers of a group or servers of a cloud,
                give prefix and/or range to find servers by their names.
                Or user may specify more options to narrow the search

            vm floating_ip_assign [options...]
                assign a public ip to a VM of a cloud

            vm ip show [options...]
                show the ips of VMs

            vm ssh [options...]
                login to a server or execute commands on it

            vm list [options...]
                same as command "list vm", please refer to it

            vm status [options...]
                Retrieves status of last VM booted on cloud and displays it.

            vm refresh [--cloud=CLOUDS]
                this command refreshes the data for virtual machines,
                images and flavors for the specified clouds.

            vm ping [NAMES] [--cloud=CLOUDS] [--count=N] [--processors=PROCESSORS]
                pings the specified virtual machines, while using at most N
                pings. The ping is executed in parallel. If names are
                specifies the ping is restricted to the given names in
                parameter format. If clouds are specified, names that are
                not in these clouds are ignored. If the name is set in the
                variables this name is used.

            cms vm ssh --command=\"uname -a\"
                executes the uname command on the last booted vm

            vm script [--name=NAMES] [--username=USERNAME] [--key=KEY] [--dryrun] [--dir=DESTINATION] [--shell=SHELL] SCRIPT
                The script command copies a shell script to the specified
                vms into the DESTINATION directory and than execute it. With
                SHELL you can set the shell for executing the command, this
                coudl even be a python interpreter. Examples for SHELL are
                /bin/sh, /usr/bin/env python

            vm put SOURCE DESTINATION [NAMES]
                puts the file defined by SOURCE into the DESINATION folder
                on the specified machines. If the file exists it is
                overwritten, so be careful.

            vm get SOURCE DESTINATION [NAMES]
                gets the file defined by SOURCE into the DESINATION folder
                on the specified machines. The SOURCE is on the remote
                machine. If one machine is specified, the SOURCE is the
                same name as on the remote machine. If multiple machines
                are specified, the name of the machine will be a prefix to
                the filename. If the filenames exists, they will be
                overwritten, so be careful.

        Tip:
            give the VM name, but in a hostlist style, which is very
            convenient when you need a range of VMs e.g. sample[1-3]
            => ['sample1', 'sample2', 'sample3']
            sample[1-3,18] => ['sample1', 'sample2', 'sample3', 'sample18']

        Quoting commands:
            cm vm login gregor-004 --command=\"uname -a\"

        Limitations:
            Azure: rename is not supported
    """
    # BUGFIX: the original list contained 'group' 'output' (missing
    # comma), which Python concatenates to the bogus name 'groupoutput',
    # so --output was not mapped here. Duplicates also removed.
    map_parameters(arguments,
                   'active', 'cloud', 'command', 'dryrun', 'flavor',
                   'force', 'group', 'output', 'image', 'interval',
                   'timeout', 'ip', 'key', 'modify-knownhosts', 'n',
                   'name', 'public', 'quiet', 'secgroup', 'size',
                   'username', 'count', 'network', 'refresh')
    variables = Variables()
    database = CmDatabase()
    arguments.output = Parameter.find("output", arguments, variables,
                                      "table")
    arguments.refresh = Parameter.find_bool("refresh", arguments,
                                            variables)

    def _find_vm_name():
        # Resolve the vm name from the positional NAME or the 'vm'
        # variable; returns None (after printing an error) if neither
        # is set.
        name = arguments.NAME
        if name is None:
            name = variables['vm']
        if name is None:
            Console.error("No vm specified")
        return name

    if arguments.meta and arguments.list:
        # vm meta list [NAME]
        name = _find_vm_name()
        if name is None:
            # BUGFIX: the original continued with name=None
            return ""
        cloud = "chameleon"
        # cloud = Parameter.find(arguments, variables)
        print(f"vm metadata for {name} on {cloud}")
        provider = Provider(name=cloud)
        r = provider.get_server_metadata(name)
        print(r)

    elif arguments.meta and arguments.set:
        # vm meta set [NAME] KEY=VALUE...
        metadata = {}
        pairs = arguments['KEY=VALUE']
        for pair in pairs:
            key, value = pair.split("=", 1)
            metadata[key] = value
        name = _find_vm_name()
        if name is None:
            return ""
        cloud = "chameleon"
        # cloud = Parameter.find(arguments, variables)
        print(f"cloud {cloud} {name}")
        provider = Provider(name=cloud)
        provider.set_server_metadata(name, **metadata)
        r = provider.get_server_metadata(name)
        pprint(r)

    elif arguments.meta and arguments.delete:
        # vm meta delete [NAME] KEY...
        keys = arguments['KEY']
        name = _find_vm_name()
        if name is None:
            return ""
        cloud = "chameleon"
        # cloud = Parameter.find(arguments, variables)
        print(f"cloud {cloud} {name}")
        provider = Provider(name=cloud)
        for key in keys:
            provider.delete_server_metadata(name, key)
        r = provider.get_server_metadata(name)
        pprint(r)

    elif arguments.list and arguments.refresh:
        # vm list ... --refresh : query the clouds and update the db
        clouds, names = Arguments.get_cloud_and_names(
            "list", arguments, variables)
        for cloud in clouds:
            print(f"cloud {cloud}")
            provider = Provider(name=cloud)
            vms = provider.list()
            provider.Print(vms, output=arguments.output, kind="vm")
        return ""

    elif arguments.list:
        # vm list : read the cached vm records from the database
        clouds, names = Arguments.get_cloud_and_names(
            "list", arguments, variables)
        try:
            for cloud in clouds:
                print(f"List {cloud}")
                p = Provider(cloud)
                collection = f"{cloud}-vm"
                db = CmDatabase()
                vms = db.find(collection=collection)
                p.Print(vms, output=arguments.output, kind="vm")
        except Exception as e:
            Console.error("Error in listing ", traceflag=True)
            VERBOSE(e)
        return ""

    elif arguments.ping:
        # vm ping [NAMES] [--cloud=CLOUDS] [--count=N]
        if arguments.NAMES:
            variables['vm'] = arguments.NAMES
        if arguments['--cloud']:
            variables['cloud'] = arguments['--cloud']
        clouds, names = Arguments.get_cloud_and_names(
            "status", arguments, variables)
        count = int(arguments.count) if arguments.count else 1

        def get_ips():
            # collect the distinct public ips of the named vms from the db
            ips = []
            for cloud in clouds:
                cursor = database.db[f'{cloud}-vm']
                for name in names:
                    for node in cursor.find({'name': name}):
                        ips.append(node['ip_public'])
            ips = list(set(ips))
            pprint(ips)
            return ips

        ips = get_ips()
        if len(ips) == 0:
            # nothing cached: refresh the clouds once and retry
            Console.warning("no public ip found.")
            for cloud in clouds:
                print(f"refresh for cloud {cloud}")
                provider = Provider(name=cloud)
                provider.list()
            ips = get_ips()
            if len(ips) == 0:
                Console.error("No vms with public IPS found.")
                Console.error(" Make sure to use cms vm list --refresh")
        for ip in ips:
            result = Shell.ping(host=ip, count=count)
            banner(ip)
            print(result)
            print()

    elif arguments.check:
        raise NotImplementedError
        # vm check [NAMES] [--cloud=CLOUDS] [--username=USERNAME]
        # THIS IS ALL WRONG AS PROVIDER DEPENDENT !!! -- original draft
        # implementation intentionally left out.

    elif arguments.status:
        # vm status [NAMES] [--cloud=CLOUDS] [--output=OUTPUT]
        if arguments.NAMES:
            variables['vm'] = arguments.NAMES
        if arguments['--cloud']:
            variables['cloud'] = arguments['--cloud']
        clouds, names = Arguments.get_cloud_and_names(
            "status", arguments, variables)
        # gets status from database
        for cloud in clouds:
            provider = Provider(cloud)
            status = []
            cursor = database.db[f'{cloud}-vm']
            print(cloud)
            for name in names:
                for node in cursor.find({'name': name}):
                    status.append(node)
            provider.Print(status, output=arguments.output, kind="status")
        return ""

    elif arguments.start:
        # TODO: not tested
        if arguments.NAMES:
            names = variables['vm'] = arguments.NAMES
        if arguments['--cloud']:
            variables['cloud'] = arguments['--cloud']
        clouds, names = Arguments.get_cloud_and_names(
            "stop", arguments, variables)
        cloud = clouds[0]
        print(cloud)
        print(names)
        for name in names:
            provider = Provider(cloud)
            if arguments['--dryrun']:
                print(f"start node {name}")
            else:
                vms = provider.start(name=name, cloud=cloud)
                # BUGFIX: Print moved inside else; under --dryrun 'vms'
                # was unbound and raised NameError.
                provider.Print(vms, output=arguments.output, kind="vm")
        return ""

    elif arguments.stop:
        # TODO: not tested
        if arguments.NAMES:
            variables['vm'] = arguments.NAMES
        if arguments['--cloud']:
            variables['cloud'] = arguments['--cloud']
        clouds, names = Arguments.get_cloud_and_names(
            "stop", arguments, variables)
        for cloud in clouds:
            provider = Provider(cloud)
            if arguments['--dryrun']:
                Console.ok(f"Dryrun stop: "
                           f"    {cloud}\n"
                           f"    {names}"
                           f"    {provider}")
            else:
                for name in names:
                    vms = provider.stop(name)
                # BUGFIX: guarded against unbound 'vms' under --dryrun
                provider.Print(vms, output=arguments.output, kind="vm")

    elif arguments.terminate:
        # TODO: not tested
        if arguments.NAMES:
            variables['vm'] = arguments.NAMES
        if arguments['--cloud']:
            variables['cloud'] = arguments['--cloud']
        clouds, names = Arguments.get_cloud_and_names(
            "stop", arguments, variables)
        for cloud in clouds:
            provider = Provider(cloud)
            if arguments['--dryrun']:
                Console.ok(f"Dryrun terminate: "
                           f"    {cloud}\n"
                           f"    {names}"
                           f"    {provider}")
            else:
                for name in names:
                    vms = provider.destroy(name)
                # BUGFIX: guarded against unbound 'vms' under --dryrun
                provider.Print(vms, output=arguments.output, kind="vm")

    elif arguments.delete:
        if arguments.NAMES:
            variables['vm'] = arguments.NAMES
        if arguments['--cloud']:
            variables['cloud'] = arguments['--cloud']
        clouds, names = Arguments.get_cloud_and_names(
            "stop", arguments, variables)
        if names is not None:
            pass
        elif clouds is not None:
            # no names: delete every vm on the given clouds
            for cloud in clouds:
                provider = Provider(cloud)
                vms = provider.list()
                for vm in vms:
                    r = provider.destroy(name=vm)
            return ""
        else:
            return ""
        # names given: delete only matching vms
        for cloud in clouds:
            provider = Provider(cloud)
            vms = provider.list()
            for vm in vms:
                name = vm["cm"]["name"]
                if name in names:
                    r = provider.destroy(name=name)

    # TODO: username, secgroup
    elif arguments.boot:
        # not everything works; see Usage for the full option list
        names = Parameter.expand(arguments.name)
        cloud = Parameter.find("cloud", arguments, variables.dict())
        defaults = Config()[f"cloudmesh.cloud.{cloud}.default"]
        groups = Parameter.find("group", arguments, variables.dict(),
                                {"group": "default"})

        parameters = dotdict()
        parameters.group = groups
        for attribute in ["image", "username", "flavor", "key",
                          "network", "secgroup"]:
            parameters[attribute] = Parameter.find(
                attribute, arguments, variables.dict(), defaults)
        if arguments.username is None:
            # NOTE(review): sets 'user', while the loop above sets
            # 'username' -- confirm which key provider.create expects
            parameters.user = Image.guess_username(parameters.image)
        provider = Provider(name=cloud)
        parameters.secgroup = arguments.secgroup or "default"

        #
        # determine names:
        #   --name="a[1,2]"   explicit names
        #   --name="a" --n=3  one base name, numbered copies
        #   --n=2             generated from the Name counter
        #   nothing           a single generated name
        #
        if names and arguments.n and len(names) > 1:
            Console.error(
                f"When using --n={arguments.n}, you can only specify one name"
            )
            return ""

        _names = []
        if not names:
            count = int(arguments.n) if arguments.n else 1
            for i in range(0, count):
                n = Name()
                n.incr()
                _names.append(str(n))
            names = _names
        elif len(names) == 1 and arguments.n:
            base = names[0]
            for i in range(0, int(arguments.n)):
                _names.append(f"{base}-{i}")
            names = _names

        for name in names:
            parameters.name = name
            if arguments['--dryrun']:
                banner("boot")
                pprint(parameters)
                Console.ok(f"Dryrun boot {name}: \n"
                           f"    cloud={cloud}\n"
                           f"    names={names}\n"
                           f"    provider={provider}")
                print()
                for attribute in parameters:
                    value = parameters[attribute]
                    Console.ok(f"  {attribute}={value}")
            else:
                try:
                    vms = provider.create(**parameters)
                except TimeoutError:
                    Console.error(
                        f"Timeout during vm creation. There may be a "
                        f"problem with the cloud {cloud}"
                    )
                except Exception as e:
                    Console.error("create problem", traceflag=True)
                    print(e)
                    return ""
                # BUGFIX: the original stored str(n), which is unbound
                # when names were given explicitly (NameError); record
                # the vm just booted instead.
                variables['vm'] = name
                if arguments["-v"]:
                    banner("Details")
                    pprint(vms)

    elif arguments.info:
        # vm info [--cloud=CLOUD] [--output=OUTPUT]
        print("info for the vm")
        cloud, names = Arguments.get_cloud_and_names(
            "info", arguments, variables)
        raise NotImplementedError

    elif arguments.rename:
        raise NotImplementedError
        # Not tested
        print("rename the vm")
        v = Variables()
        cloud = v["cloud"]
        p = Provider(cloud)
        try:
            oldnames = Parameter.expand(arguments["OLDNAMES"])
            newnames = Parameter.expand(arguments["NEWNAMES"])
            force = arguments["--force"]
            if oldnames is None or newnames is None:
                Console.error("Wrong VMs specified for rename",
                              traceflag=False)
            elif len(oldnames) != len(newnames):
                Console.error("The number of VMs to be renamed is wrong",
                              traceflag=False)
            else:
                print(oldnames)
                print(newnames)
                for i in range(0, len(oldnames)):
                    oldname = oldnames[i]
                    newname = newnames[i]
                    if arguments["--dryrun"]:
                        Console.ok(
                            "Rename {} to {}".format(oldname, newname))
                    else:
                        print(f"rename {oldname} -> {newname}")
                        p.rename(source=oldname, destination=newname)
            msg = "info. OK."
            Console.ok(msg)
        except Exception as e:
            Error.traceback(e)
            Console.error("Problem renaming instances", traceflag=True)

    elif arguments["ip"] and arguments["show"]:
        # vm ip show [NAMES] [--group=GROUP] [--cloud=CLOUD]
        #            [--output=OUTPUT] [--refresh]
        raise NotImplementedError

    elif arguments["ip"] and arguments["assign"]:
        # vm ip assign [NAMES] [--cloud=CLOUD]
        raise NotImplementedError

    elif arguments["ip"] and arguments["inventory"]:
        # vm ip inventory [NAMES]
        raise NotImplementedError

    elif arguments.default:
        # sets defaults for the vm
        raise NotImplementedError

    elif arguments.script:
        raise NotImplementedError
        # original draft kept for reference
        clouds, names = Arguments.get_cloud_and_names(
            "run", arguments, variables)
        username = arguments['--username']
        script = arguments.SCRIPT
        for cloud in clouds:
            provider = Provider(cloud)
            name_ips = {}
            cursor = database.db['{}-node'.format(cloud)]
            for name in names:
                for node in cursor.find({'name': name}):
                    name_ips[name] = node['public_ips']
            if arguments['--dryrun']:
                print("run script {} on vms: {}".format(script, names))
            else:
                provider.ssh(name_ips, username=username, script=script)

    elif arguments.username:
        # vm username USERNAME [NAMES] [--cloud=CLOUD]
        raise NotImplementedError

    elif arguments.resize:
        # vm resize [NAMES] [--size=SIZE]
        raise NotImplementedError

    elif arguments.ssh:
        # vm ssh [NAMES] [--username=USER] [--quiet] [--ip=IP]
        #        [--key=KEY] [--command=COMMAND]
        clouds, names, command = Arguments.get_commands(
            "ssh", arguments, variables)
        if arguments.command is None and len(names) > 1:
            Console.error("Interactive shell can only be done on one vm")
            return ""
        elif arguments.command is None and len(names) == 1:
            name = names[0]
            cloud = clouds[0]
            cm = CmDatabase()
            try:
                vm = cm.find_name(name, "vm")[0]
            except IndexError:
                Console.error(f"could not find vm {name}")
                return ""
            cloud = vm["cm"]["cloud"]
            provider = Provider(name=cloud)
            try:
                provider.ssh(vm=vm)
            except KeyError:
                # refresh the vm records once and retry
                vms = provider.list()
                provider.Print(vms, output=arguments.output, kind="vm")
                provider.ssh(vm=vm)
            return ""
        else:
            # run the command on all named vms
            if clouds is None or names is None or command is None:
                return ""
            for cloud in clouds:
                p = Provider(cloud)
                for name in names:
                    cm = CmDatabase()
                    try:
                        vm = cm.find_name(name, "vm")[0]
                    except IndexError:
                        Console.error(f"could not find vm {name}")
                        continue
                    r = p.ssh(vm=vm, command=command)
                    print(r)
            return ""

    elif arguments.console:
        # vm console [NAME] [--force]
        clouds, names, command = Arguments.get_commands(
            "ssh", arguments, variables)
        print(clouds)
        print(names)
        print(command)
        for cloud in clouds:
            p = Provider(cloud)
            for name in names:
                cm = CmDatabase()
                try:
                    vm = cm.find_name(name, "vm")[0]
                except IndexError:
                    Console.error(f"could not find vm {name}")
                    continue
                r = p.console(vm=vm)
                print(r)
        return ""

    elif arguments.log:
        # vm log [NAME] [--force]
        clouds, names, command = Arguments.get_commands(
            "ssh", arguments, variables)
        print(clouds)
        print(names)
        print(command)
        for cloud in clouds:
            p = Provider(cloud)
            for name in names:
                cm = CmDatabase()
                try:
                    vm = cm.find_name(name, "vm")[0]
                except IndexError:
                    Console.error(f"could not find vm {name}")
                    continue
                r = p.log(vm=vm)
                print(r)
        return ""

    elif arguments.wait:
        # vm wait [--cloud=CLOUD] [--interval=INTERVAL] [--timeout=TIMEOUT]
        clouds, names, command = Arguments.get_commands(
            "ssh", arguments, variables)
        for cloud in clouds:
            p = Provider(cloud)
            for name in names:
                cm = CmDatabase()
                try:
                    vm = cm.find_name(name, "vm")[0]
                except IndexError:
                    Console.error(f"could not find vm {name}")
                    continue
                r = p.wait(vm=vm,
                           interval=arguments.interval,
                           timeout=arguments.timeout)
                if r:
                    Console.ok("Instance available for SSH")
                else:
                    Console.error(
                        f"Instance unavailable after timeout of "
                        f"{arguments.timeout}"
                    )
        return ""

    elif arguments.put:
        # vm put SOURCE DESTINATION [NAMES]
        clouds, names, command = Arguments.get_commands(
            "ssh", arguments, variables)
        key = variables['key']
        source = arguments['SOURCE']
        destination = arguments['DESTINATION']
        for cloud in clouds:
            p = Provider(name=cloud)
            cm = CmDatabase()
            for name in names:
                try:
                    vms = cm.find_name(name, "vm")
                except IndexError:
                    Console.error(f"could not find vm {name}")
                    return ""
                for vm in vms:
                    # find a public ip: prefer the db record, fall back
                    # to asking the provider
                    try:
                        ip = vm['public_ips']
                    except:
                        try:
                            ip = p.get_public_ip(name=name)
                        except:
                            Console.error(
                                f"could not find a public ip for vm {name}",
                                traceflag=True)
                            return
                        # BUGFIX: the original unconditionally errored
                        # and returned here even when the fallback lookup
                        # succeeded; that dead duplicate was removed.
                    # get the username: username not in vm...guessing
                    try:
                        imagename = list(
                            cm.collection(cloud + '-image').find(
                                {'ImageId': vm['ImageId']}))[0]['name']
                        print(imagename)
                        user = Image.guess_username(image=imagename,
                                                    cloud=cloud)
                    except:
                        try:
                            user = vm['os_profile']['admin_username']
                        except:
                            Console.error(
                                f"could not find a valid username for "
                                f"{name}, try refreshing the image list",
                                traceflag=True)
                            return
                        # BUGFIX: same dead duplicate removed here.
                    cmd = f'scp -i {key} {source} {user}@{ip}:{destination}'
                    print(cmd)
                    os.system(cmd)
    return ""
class Provider(VolumeABC):
    """Cloudmesh volume provider for Google Cloud persistent disks.

    Wraps the Google Compute Engine v1 REST API (through the discovery
    client) to create, delete, list, attach, detach and tag persistent
    disks, and to translate the returned resources into cloudmesh's
    ``cm``-annotated dict format.
    """

    kind = "google"

    # cloudmesh.yaml template registering this provider under
    # cloudmesh.volume.{name}
    sample = """
    cloudmesh:
      volume:
        {name}:
          cm:
            active: true
            heading: {name}
            host: cloud.google.com
            label: {name}
            kind: google
            version: v1
            service: volume
          default:
            zone: us-central1-a
            type: projects/{project_id}/zones/{zone}/diskTypes/pd-standard
            sizeGb: 10
          credentials:
            project_id: {project_id}
            path_to_service_account_json: ~/.cloudmesh/service_account.json
    """

    # column layout consumed by cloudmesh's Printer for "volume list"
    output = {
        "volume": {
            "sort_keys": ["cm.name"],
            "order": ["cm.name", "cm.kind", "cm.cloud", "status", "sizeGb",
                      "type", "creationTimestamp", "zone", "users",
                      "description", "labels"],
            "header": ["Name", "Kind", "Cloud", "Status", "Size", "Type",
                       "Created", "Zone", "Attached to VMs", "Description",
                       "Tags"]
        }
    }

    def __init__(self, name):
        """
        Get Google Cloud credentials and defaults from cloudmesh.yaml and
        set the OAuth scopes needed for Google Compute Engine.

        :param name: name of the cloud provider in the cloudmesh.yaml file
                     under cloudmesh.volume
        """
        self.cloud = name
        config = Config()
        self.cm = CmDatabase()
        self.default = config[f"cloudmesh.volume.{name}.default"]
        self.credentials = config[f"cloudmesh.volume.{name}.credentials"]
        self.compute_scopes = [
            'https://www.googleapis.com/auth/compute',
            'https://www.googleapis.com/auth/cloud-platform',
            'https://www.googleapis.com/auth/compute.readonly']

    def _wait(self, time=None):
        """
        Sleep while waiting for a cloud-side volume operation to settle.

        :param time: time to wait in seconds
        """
        sleep(time)

    def update_dict(self, elements):
        """
        Add a cloudmesh ``cm`` dict to each dict in the list of elements and
        shorten URL-valued fields (type, zone, users) to their final path
        component. Typically this method is used internally.

        :param elements: the list of original dicts; a single dict is
                         treated as a one-element list
        :return: the list of modified dicts, or None if elements is None
        """
        if elements is None:
            return None
        _elements = elements if isinstance(elements, list) else [elements]
        d = []
        for entry in _elements:
            # keep only the trailing component of resource URLs
            if '/' in entry['type']:
                entry['type'] = entry['type'].rsplit('/', 1)[1]
            if '/' in entry['zone']:
                entry['zone'] = entry['zone'].rsplit('/', 1)[1]
            if 'targetLink' in entry:
                name = entry['targetLink'].rsplit('/', 1)[1]
            else:
                name = entry['name']
            if 'users' in entry:
                # bug fix: the old loop read the stale variable `user`
                # instead of the current item and reset the accumulator on
                # every iteration, so only the last attached instance's
                # name survived; keep every attached instance.
                entry['users'] = [
                    user.rsplit('/', 1)[1] if '/' in user else user
                    for user in entry['users']
                ]
            if 'labels' in entry:
                # labels arrive as a dict; the table only shows the keys
                entry['labels'] = list(entry['labels'])
            if "cm" not in entry:
                entry['cm'] = {}
            entry["cm"].update({
                "kind": 'volume',
                "cloud": self.cloud,
                "name": name,
                "status": entry['status']
            })
            d.append(entry)
        return d

    def _get_credentials(self, path_to_service_account_file, scopes):
        """
        Get credentials from the Service Account JSON file.

        :param path_to_service_account_file: Service Account JSON file path
        :param scopes: scopes needed to provision
        :return: credentials used to get the compute service
        """
        _credentials = service_account.Credentials.from_service_account_file(
            filename=path_to_service_account_file,
            scopes=scopes)
        return _credentials

    def _get_compute_service(self):
        """
        Get the Google Compute Engine v1 service client.

        :return: Google Compute Engine API client
        :raises ValueError: if no credentials could be loaded
        """
        service_account_credentials = self._get_credentials(
            self.credentials['path_to_service_account_json'],
            self.compute_scopes)
        # Authenticate using service account.
        if service_account_credentials is None:
            print('Credentials are required')
            raise ValueError('Cannot Authenticate without Credentials')
        compute_service = build('compute', 'v1',
                                credentials=service_account_credentials)
        return compute_service

    def _get_disk(self, zone, disk):
        """
        Get the specified persistent disk from the cloud.

        :param zone: name of the zone in which the disk is located
        :param disk: name of the disk
        :return: a dict representing the disk
        """
        compute_service = self._get_compute_service()
        disk = compute_service.disks().get(
            project=self.credentials["project_id"],
            zone=zone,
            disk=disk).execute()
        return disk

    def _list_instances(self, instance=None):
        """
        Get a list of available VM instances, optionally filtered by name.

        :param instance: if given, only instances with this name are
                         returned
        :return: list of dicts representing VM instances
        """
        compute_service = self._get_compute_service()
        instance_list = compute_service.instances().aggregatedList(
            project=self.credentials["project_id"],
            orderBy='creationTimestamp desc').execute()
        found_instances = []
        items = instance_list["items"]
        for item in items:
            if "instances" in items[item]:
                for vm in items[item]["instances"]:
                    # bug fix: the old code compared the whole instance
                    # dict to the name string, so the filter never matched
                    if instance is None or vm['name'] == instance:
                        found_instances.append(vm)
        return found_instances

    def list(self, **kwargs):
        """
        Retrieve an aggregated list of persistent disks with the most
        recently created disks listed first.

        Recognized kwargs (all optional): ``refresh`` (bool; False serves
        from the local cloudmesh database where possible), ``region``
        (zone to list), ``NAME``/``NAMES`` (filter by disk name(s)),
        ``vm`` (filter by attached instance).

        :return: a list of dicts representing the disks
        """
        compute_service = self._get_compute_service()
        # bug fix: the old code indexed kwargs['refresh'] etc. directly and
        # raised KeyError when a key was absent
        refresh = kwargs.get('refresh')
        region = kwargs.get('region')
        names = kwargs.get('NAMES')
        vm = kwargs.get('vm')

        if kwargs and refresh is False:
            # serve from the local database first
            result = self.cm.find(cloud=self.cloud, kind='volume')
            if kwargs.get('NAME'):
                result = self.cm.find_name(name=kwargs['NAME'])
            elif names:
                result = self.cm.find_names(names=names)
            found = []
            if region is not None:
                disk_list = compute_service.disks().list(
                    project=self.credentials['project_id'],
                    zone=region,
                    orderBy='creationTimestamp desc').execute()
                if 'items' in disk_list:
                    for disk in disk_list['items']:
                        found.append(disk)
                result = self.update_dict(found)
            if names is not None or vm is not None:
                disk_list = compute_service.disks().aggregatedList(
                    project=self.credentials["project_id"],
                    orderBy='creationTimestamp desc').execute()
                items = disk_list["items"]
                for item in items:
                    if "disks" in items[item]:
                        for disk in items[item]["disks"]:
                            # bug fix: was `disk in NAMES` which compared a
                            # dict to a list of strings and never matched;
                            # a dangling else also appended ALL disks
                            if names is not None \
                                    and disk['name'] in names:
                                found.append(disk)
                            if vm is not None and 'users' in disk:
                                for user in disk['users']:
                                    if user.rsplit('/', 1)[1] == vm:
                                        found.append(disk)
                result = self.update_dict(found)
            return result

        elif kwargs and refresh is True:
            found = []
            if region is not None:
                disk_list = compute_service.disks().list(
                    project=self.credentials['project_id'],
                    zone=region,
                    orderBy='creationTimestamp desc').execute()
                if 'items' in disk_list:
                    for disk in disk_list['items']:
                        found.append(disk)
            elif names is not None or vm is not None:
                disk_list = compute_service.disks().aggregatedList(
                    project=self.credentials["project_id"],
                    orderBy='creationTimestamp desc').execute()
                items = disk_list["items"]
                if names is not None:
                    for item in items:
                        if "disks" in items[item]:
                            for disk in items[item]["disks"]:
                                # bug fix: same dict-in-list comparison
                                if disk['name'] in names:
                                    found.append(disk)
                elif vm is not None:
                    for item in items:
                        if "disks" in items[item]:
                            for disk in items[item]["disks"]:
                                if 'users' in disk:
                                    for user in disk['users']:
                                        if user.rsplit('/', 1)[1] == vm:
                                            found.append(disk)
            else:
                disk_list = compute_service.disks().aggregatedList(
                    project=self.credentials["project_id"],
                    orderBy='creationTimestamp desc').execute()
                items = disk_list["items"]
                for item in items:
                    if "disks" in items[item]:
                        for disk in items[item]["disks"]:
                            found.append(disk)
            result = self.update_dict(found)
            return result

        else:
            # no filters given: list everything
            disk_list = compute_service.disks().aggregatedList(
                project=self.credentials["project_id"],
                orderBy='creationTimestamp desc').execute()
            found = []
            items = disk_list["items"]
            for item in items:
                if "disks" in items[item]:
                    for disk in items[item]["disks"]:
                        found.append(disk)
            result = self.update_dict(found)
            return result

    def create(self, **kwargs):
        """
        Create a persistent disk in the specified project using the data
        in the request, then wait until it reaches status READY.

        Recognized kwargs: ``NAME`` (disk name), ``volume_type``, ``size``,
        ``description``, ``region`` (zone); missing values fall back to the
        provider defaults from cloudmesh.yaml.

        :return: a list containing the newly created disk
        """
        compute_service = self._get_compute_service()
        volume_type = kwargs['volume_type']
        size = kwargs['size']
        description = kwargs['description']
        zone = kwargs['region']
        if volume_type is None:
            volume_type = self.default["type"]
        if size is None:
            size = self.default["sizeGb"]
        if zone is None:
            zone = self.default['zone']
        # bug fix: the insert and the first poll were hard-coded to the
        # default zone, so creating a disk in any other requested zone
        # created it in the wrong place and then failed to find it
        compute_service.disks().insert(
            project=self.credentials["project_id"],
            zone=zone,
            body={'type': volume_type,
                  'name': kwargs['NAME'],
                  'sizeGb': str(size),
                  'description': description}).execute()
        new_disk = self._get_disk(zone, kwargs['NAME'])
        # wait for disk to finish being created
        while new_disk['status'] != 'READY':
            self._wait(1)
            new_disk = self._get_disk(zone, kwargs['NAME'])
        update_new_disk = self.update_dict(new_disk)
        return update_new_disk

    def delete(self, name=None):
        """
        Delete the specified persistent disk. Deleting a disk removes its
        data permanently and is irreversible.

        :param name: name of the disk to delete
        """
        compute_service = self._get_compute_service()
        disk_list = self.list()
        # find disk in list and get zone
        zone = None
        for disk in disk_list:
            if disk['name'] == name:
                zone = str(disk['zone'])
        if zone is None:
            banner(f'{name} was not found')
            return
        compute_service.disks().delete(
            project=self.credentials["project_id"],
            zone=zone,
            disk=name).execute()
        # attempt to call disk from cloud; a 404 (HttpError) means the
        # deletion already completed
        try:
            deleted_disk = self._get_disk(zone, name)
            # wait for disk to be deleted if found in cloud
            while deleted_disk['status'] == 'DELETING':
                self._wait(1)
                try:
                    deleted_disk = self._get_disk(zone, name)
                except HttpError:
                    break
        except HttpError:
            pass

    def _get_instance(self, zone, instance):
        """
        Get the specified instance from the cloud.

        :param zone: zone in which the instance is located
        :param instance: name of the instance
        :return: a dict representing the instance
        """
        compute_service = self._get_compute_service()
        vm = compute_service.instances().get(
            project=self.credentials["project_id"],
            zone=zone,
            instance=instance).execute()
        return vm

    def _stop_instance(self, name=None, zone=None):
        """
        Stop the instance with the given name and wait until it reports
        status TERMINATED.

        :param name: name of the instance
        :param zone: zone in which the instance is located
        """
        compute_service = self._get_compute_service()
        compute_service.instances().stop(
            project=self.credentials['project_id'],
            zone=zone,
            instance=name).execute()
        vm = self._get_instance(zone, name)
        # Wait for the instance to stop
        while vm['status'] != 'TERMINATED':
            self._wait(1)
            vm = self._get_instance(zone, name)

    def _start_instance(self, name=None, zone=None):
        """
        Start the instance with the given name and wait until it reports
        status RUNNING.

        :param name: name of the instance
        :param zone: zone in which the instance is located
        """
        compute_service = self._get_compute_service()
        compute_service.instances().start(
            project=self.credentials['project_id'],
            zone=zone,
            instance=name).execute()
        vm = self._get_instance(zone, name)
        # Wait for the instance to start
        while vm['status'] != 'RUNNING':
            self._wait(1)
            vm = self._get_instance(zone, name)

    def attach(self, names, vm=None):
        """
        Attach one or more disks to an instance. GCP requires that the
        instance be stopped when attaching a disk. If the instance is
        running when the attach function is called, the function will stop
        the instance and then restart it after attaching the disk(s).

        :param names: name(s) of disk(s) to attach
        :param vm: instance name which the volume(s) will be attached to
        :return: updated disks with current status
        """
        compute_service = self._get_compute_service()
        instance_list = self._list_instances()
        zone_url = None
        instance_status = None
        for instance in instance_list:
            if instance['name'] == vm:
                zone_url = instance['zone']
                instance_status = instance['status']
        if zone_url is None:
            # robustness fix: previously crashed with AttributeError when
            # the VM did not exist; mirror delete()'s graceful handling
            banner(f'{vm} was not found')
            return
        zone = zone_url.rsplit('/', 1)[1]
        # Stop the instance if necessary
        if instance_status == 'RUNNING':
            banner(f"Stopping VM {vm}")
            self._stop_instance(vm, zone)
        # get URL source to disk(s) from list of disks
        disk_list = self.list()
        for name in names:
            source = None
            for disk in disk_list:
                if disk['name'] == name:
                    source = disk['selfLink']
            banner(f"Attaching {name}")
            compute_service.instances().attachDisk(
                project=self.credentials['project_id'],
                zone=zone,
                instance=vm,
                body={'source': source, 'deviceName': name}).execute()
        new_attached_disks = []
        for name in names:
            get_disk = self._get_disk(zone, name)
            # wait for disk to finish attaching
            while 'users' not in get_disk:
                self._wait(1)
                get_disk = self._get_disk(zone, name)
            new_attached_disks.append(get_disk)
        # update newly attached disks
        result = self.update_dict(new_attached_disks)
        # Restart the instance if previously running
        if instance_status == 'RUNNING':
            banner(f"Restarting VM {vm}")
            self._start_instance(vm, zone)
        return result

    def detach(self, name=None):
        """
        Detach a disk from all instances. GCP requires that the instance be
        stopped when detaching a disk. If the instance is running when the
        detach function is called, the function will stop the instance and
        then restart it after detaching the disk.

        :param name: name of disk to detach
        :return: dict representing the updated status of the detached disk,
                 or None if the disk was not found
        """
        compute_service = self._get_compute_service()
        instances = []
        zone = None
        disk_list = self.list()
        for disk in disk_list:
            if disk['name'] == name:
                zone = disk['zone']
                for user in disk.get('users', []):
                    instances.append(user)
        if zone is None:
            banner(f'{name} was not found')
            return None
        # detach disk from all instances
        result = None
        for instance in instances:
            vm = self._get_instance(zone, instance)
            instance_status = vm['status']
            # Stop the instance if necessary
            if instance_status == 'RUNNING':
                banner(f"Stopping VM {instance}")
                self._stop_instance(instance, zone)
            banner(f"Detaching {name}")
            compute_service.instances().detachDisk(
                project=self.credentials['project_id'],
                zone=zone,
                instance=instance,
                deviceName=name).execute()
            # Wait for disk to detach
            detached_disk = self._get_disk(zone, name)
            if 'users' in detached_disk:
                while instance in detached_disk['users']:
                    self._wait(1)
                    detached_disk = self._get_disk(zone, name)
            # Restart the instance if necessary
            if instance_status == 'RUNNING':
                banner(f"Restarting VM {instance}")
                self._start_instance(instance, zone)
            # update newly detached disk
            result = self.update_dict(detached_disk)
        if result is None:
            # bug fix: a disk with no attached instances previously left
            # result as None and `result[0]` raised TypeError; report the
            # disk's current state instead
            result = self.update_dict(self._get_disk(zone, name))
        return result[0]

    def add_tag(self, **kwargs):
        """
        Add a key:value label to the disk. (Google Cloud does not allow
        renaming a disk, so only labels can be changed.)

        :param kwargs: ``NAME`` of the disk plus ``key`` and ``value`` for
                       the label
        :return: dict of the disk with the new label
        """
        compute_service = self._get_compute_service()
        disk_list = self.list()
        # find disk in list and get zone and current label fingerprint
        zone = None
        label_fingerprint = None
        for disk in disk_list:
            if disk['name'] == kwargs['NAME']:
                zone = str(disk['zone'])
                label_fingerprint = disk['labelFingerprint']
        compute_service.disks().setLabels(
            project=self.credentials['project_id'],
            zone=zone,
            resource=kwargs['NAME'],
            body={'labelFingerprint': label_fingerprint,
                  'labels': {kwargs['key']: str(kwargs['value'])}}).execute()
        # bug fix: the poll previously used the default zone even though
        # the disk's real zone had just been looked up above
        tagged_disk = self._get_disk(zone, kwargs['NAME'])
        # wait for tag to be applied
        while 'labels' not in tagged_disk:
            self._wait(1)
            tagged_disk = self._get_disk(zone, kwargs['NAME'])
        updated_disk = self.update_dict(tagged_disk)
        return updated_disk[0]

    def status(self, name=None):
        """
        Get the status of the specified disk, such as 'READY'.

        :param name: name of disk
        :return: list containing a dict representing the disk (empty if
                 the disk is not found)
        """
        disk_list = self.list()
        vol = []
        for disk in disk_list:
            if disk['name'] == name:
                vol.append(disk)
                break
        result = self.update_dict(vol)
        return result

    def migrate(self, name=None, from_vm=None, to_vm=None):
        """
        Migrate a volume from one vm to another vm.

        NOTE: this only works when the disk and both instances are in the
        same zone; cross-zone/region migration is not implemented, so this
        method still raises NotImplementedError after the same-zone moves.

        :param name: name of volume
        :param from_vm: name of vm where volume will be moved from
        :param to_vm: name of vm where volume will be moved to
        :raises NotImplementedError: always (see note above)
        """
        self.detach(name)
        # bug fix: attach() expects a list of names; passing a bare string
        # made it iterate over the characters of the name
        self.attach([name], vm=to_vm)
        result = self.status(name)
        # this only would work when disk and instances are all in the same
        # zone; include how to migrate disks between zones and regions
        raise NotImplementedError

    def sync(self, from_volume=None, to_volume=None):
        """
        Sync contents of one volume to another volume. It is a copy of all
        changed content from one volume to the other.

        :param from_volume: name of the from volume
        :param to_volume: name of the to volume
        :raises NotImplementedError: not implemented for Google Cloud
        """
        # delete to_volume then recreate from source of from_volume?
        raise NotImplementedError