def destroy(self, name=None):
    """
    Terminates all instances matching the given name tag and waits
    for each successfully-terminated instance to reach the
    'terminated' state.

    :param name: the name of the node
    :return: None
    """
    instances = self._get_instance_id(self.ec2_resource, name)
    for each_instance in instances:
        try:
            self.ec2_client.terminate_instances(
                InstanceIds=[each_instance.instance_id],
            )
            self.add_server_metadata(name=name, tags=[{
                'Key': 'cm.status',
                'Value': "TERMINATED"
            }])
        except ClientError:
            Console.error(
                "Currently instance cant be terminated...Please try again")
            # BUG FIX: termination was not accepted, so waiting on the
            # 'instance_terminated' waiter would block indefinitely
            continue
        Console.msg("Terminating Instance..Please wait...")
        waiter = self.ec2_client.get_waiter('instance_terminated')
        waiter.wait(Filters=[{
            'Name': 'instance-id',
            'Values': [each_instance.instance_id]
        }])
        Console.ok(f"Instance having Tag:{name} and "
                   f"Instance-Id:{each_instance.instance_id} terminated")
def do_login(self, args, arguments):
    """
    ::

      Usage:
            login [KEY]

      This command does some useful things.

      Arguments:
          KEY   a file name

      Options:
          -f      specify the file
    """
    # fall back to the default public key when none is given
    default_key = "~/.ssh/id_rsa.pub"
    if arguments.KEY is None:
        arguments.KEY = default_key
    pprint(arguments)
    key = path_expand(arguments.KEY)
    Console.msg("Login with", key)
    Console.error("not implemented")
    return ""
def do_echo(self, args, arguments):
    """
    ::

      Usage:
            echo [-r COLOR] TEXT

      Arguments:
          TEXT   The text message to print
          COLOR  the color

      Options:
          -r COLOR  The color of the text. [default: BLACK]

      Prints a text in the given color
    """
    color = arguments["-r"] or "black"
    color = color.upper()
    text = arguments["TEXT"]
    # BUG FIX: the original used `color is "black"` -- an identity
    # comparison that is never True here (color has just been
    # upper-cased), so the plain-message branch was unreachable.
    # Compare by value against the upper-cased default instead.
    if color == "BLACK":
        Console.msg(text)
    else:
        Console.cprint(color, "", text)
    return ""
def __init__(self, service=None, config="~/.cloudmesh/.cloudmesh.yaml"):
    """
    Instantiates the storage provider implementation that matches the
    configured kind for the given service.

    :param service: the name of the storage service in the configuration
    :param config: path to the cloudmesh yaml configuration file
    :raises NotImplementedError: if the configured kind is unknown
    """
    super(Provider, self).__init__(service=service, config=config)
    self.config = Config()
    # BUG FIX: `config` is the path *string*; indexing it with a str
    # key raises TypeError. The lookup must go through the Config
    # object loaded above.
    self.kind = self.config[f"cloudmesh.storage.{service}.cm.kind"]
    self.cloud = service
    self.service = service
    Console.msg("FOUND Kind", self.kind)
    if self.kind in ["awsS3"]:
        self.p = AwsStorageProvider(service=service, config=config)
    elif self.kind in ["box"]:
        self.p = BoxStorageProvider(service=service, config=config)
    elif self.kind in ["gdrive"]:
        self.p = GdriveStorageProvider(service=service, config=config)
    elif self.kind in ["azureblob"]:
        self.p = AzureblobStorageProvider(service=service, config=config)
    else:
        raise NotImplementedError
def stop(self, name=None, hibernate=False):
    """
    stops the node with the given name

    :param name: the instance name
    :param hibernate: stop or hibernate
    :return: The dict representing the node including updated status
    """
    if name is None:
        Console.error("Please provide instance id...")
        return
    instances = self._get_instance_id(self.ec2_resource, name)
    for each_instance in instances:
        try:
            self.ec2_client.stop_instances(
                InstanceIds=[each_instance.instance_id],
                Hibernate=hibernate)
            self.add_server_metadata(name=name, tags=[
                {'Key': 'cm.status', 'Value': "STOPPED"}])
        except ClientError:
            Console.error(
                "Currently instance cant be stopped...Please try again")
        Console.msg("Stopping Instance..Please wait...")
        waiter = self.ec2_client.get_waiter('instance_stopped')
        waiter.wait(Filters=[
            {'Name': 'instance-id',
             'Values': [each_instance.instance_id]}])
        # BUG FIX: the second string was missing its f prefix, so the
        # literal text "{each_instance.instance_id}" was printed
        Console.ok(
            f"Instance having Tag:{name} and "
            f"Instance-Id:{each_instance.instance_id} stopped")
def install(self, clean=False, pull=True):
    """
    Creates the Mongo image and its data/log directories.

    :param clean: if True, remove existing data and log directories first
    :param pull: if True, pull the mongo docker image for self.version
    :return: None
    """
    Console.msg(f"Version: {self.version}")
    if pull:
        script = f"docker pull mongo:{self.version}"
        self.run(script)
    if clean:
        # ignore_errors replaces the former bare 'except: pass', which
        # also swallowed KeyboardInterrupt/SystemExit
        shutil.rmtree(self.mongo_path, ignore_errors=True)
        shutil.rmtree(self.mongo_log, ignore_errors=True)
    try:
        os.mkdir(self.mongo_path)
    except FileExistsError:
        Console.info(f"Folder {self.mongo_path} already exists")
    try:
        os.mkdir(self.mongo_log)
    except FileExistsError:
        Console.info(f"Folder {self.mongo_log} already exists")
def importAsFile(self, data, collection, db):
    """
    Imports the given records into a MongoDB collection via the
    'mongoimport' command-line tool.

    The records are first written, one JSON document per line, to a
    temporary file under ~/.cloudmesh/tmp; mongoimport then loads that
    file, dropping any existing collection of the same name.

    :param data: iterable of JSON-serializable records
    :param collection: the target collection name
    :param db: the target database name
    :raises NotImplementedError: when running in docker mode
    """
    if self.data['MODE'] == 'docker':
        Console.error("ImportasFile: Docker is not yet supported")
        raise NotImplementedError
    self.start_if_not_running()
    tmp_folder = path_expand('~/.cloudmesh/tmp')
    if not os.path.exists(tmp_folder):
        os.makedirs(tmp_folder)
    tmp_file = path_expand('~/.cloudmesh/tmp/tmp_import_file.json')
    Console.msg("Saving the data to file ")
    with open(tmp_file, 'w') as handle:
        handle.writelines(json.dumps(record) + '\n' for record in data)
    username = self.config["cloudmesh.data.mongo.MONGO_USERNAME"]
    password = self.config["cloudmesh.data.mongo.MONGO_PASSWORD"]
    cmd = f'mongoimport --db {db}' \
          f' --collection {collection} ' \
          f' --authenticationDatabase admin ' \
          f' --username {username}' \
          f' --password {password} ' \
          f' --drop' \
          f' --file {tmp_file}'
    Console.msg("Importing the saved data to database")
    print(Shell.run2(cmd))
def boot(self, order='price', refresh=False, cloud=None):
    """
    Boots a VM with the cheapest flavor on a reachable provider.

    Probes which supported clouds are reachable, ranks their flavors
    by the given order, prints the top 5 candidates, and boots a VM
    with the first (cheapest) one.

    :param order: column to sort flavors by (default 'price')
    :param refresh: if True, refresh the cached flavor information
    :param cloud: restrict the search to this single cloud, if given
    :return: ""
    """
    clouds = ['aws', 'azure', 'gcp']
    if cloud in clouds:
        clouds = [cloud]
    Console.msg("Checking to see which providers are bootable ...")
    reachdict = {}
    for cloud in clouds:
        try:
            temp_provider = Provider(
                name=cloud,
                configuration="~/.cloudmesh/cloudmesh.yaml")
            Console.msg(cloud + " reachable ...")
            reachdict[cloud] = temp_provider
        except Exception:
            # narrowed from a bare 'except:', which would also swallow
            # KeyboardInterrupt/SystemExit
            Console.msg(cloud + " not available ...")
    flavorframe = self.list(order, 10000000, refresh, printit=False)
    keysya = list(reachdict.keys())
    flavorframe = flavorframe[flavorframe['provider'].isin(keysya)]
    Console.msg("Showing top 5 options, booting first option now...")
    converted = flavorframe.head(5).to_dict('records')
    print(Printer.write(converted))
    cheapest = converted[0]
    var_list = Variables(filename="~/.cloudmesh/var-data")
    var_list['cloud'] = cheapest['provider']
    # single f-string replaces the former mixed f-string/+ concatenation
    Console.msg(f"new cloud is {var_list['cloud']}, booting up the vm "
                f"with flavor {cheapest['machine-name']}")
    vmcom = VmCommand()
    vmcom.do_vm('boot --flavor=' + cheapest['machine-name'])
    return ""
def do_echo(self, args, arguments):
    """
    ::

      Usage:
            echo [-r COLOR] TEXT

      Arguments:
          TEXT   The text message to print
          COLOR  the color

      Options:
          -r COLOR  The color of the text. [default: NORMAL]

      Prints a text in the given color
    """
    color = (arguments["-r"] or "normal").upper()
    text = arguments["TEXT"]
    if color == "NORMAL":
        # no color requested: plain message
        Console.msg(text)
    else:
        Console.cprint(color=color, prefix="", message=text)
    return ""
def start(self, name=None):
    """
    Starts the node(s) matching the given name tag and waits until
    each is running.

    :param name: the unique instance name
    :return: The dict representing the node
    """
    for instance in self._get_instance_id(self.ec2_resource, name):
        instance_id = instance.instance_id
        try:
            self.ec2_client.start_instances(InstanceIds=[instance_id])
            tags = [{'Key': 'cm.status', 'Value': "ACTIVE"}]
            self.add_server_metadata(name=name, tags=tags)
        except ClientError:
            Console.error(
                "Currently instance cant be started...Please try again")
        Console.msg("Starting Instance..Please wait...")
        waiter = self.ec2_client.get_waiter('instance_running')
        waiter.wait(
            Filters=[{'Name': 'instance-id', 'Values': [instance_id]}])
        Console.ok(f"Instance having Tag:{name} and "
                   f"Instance-Id:{instance_id} started")
def debug(cls, msg, debug=True):
    """
    prints a debug message.

    :param msg: the message
    :param debug: only print when True; lets callers pass a debug flag
                  straight through
    :return: None
    """
    if debug:
        Console.msg(msg)
def search_run(self, specification):
    """
    Searches an S3 bucket for a file and prints its metadata.

    :param specification: dict with keys 'path' (directory), 'filename'
                          and 'recursive'; 'status' is set to
                          'completed' before returning
    :return: the updated specification dict
    """
    directory = specification['path']
    filename = specification['filename']
    recursive = specification['recursive']
    len_dir = len(massage_path(directory))
    if len_dir > 0:
        # BUG FIX: the prefix must contain the searched file name; the
        # original embedded a literal placeholder instead of {filename},
        # so a rooted search could never match
        file_path = f"{massage_path(directory)}/{filename}"
    else:
        file_path = filename
    self.s3_resource, self.s3_client = self.get_s3_resource_client()
    info_list = []
    bucket = self.s3_resource.Bucket(self.container_name)
    if recursive:
        if len_dir > 0:
            # scan everything under the directory
            objs = list(
                bucket.objects.filter(Prefix=massage_path(directory)))
        else:
            # no directory given: scan the whole bucket
            objs = list(bucket.objects.all())
    else:
        # non-recursive: the original's two branches were identical,
        # both filtering by the full file path prefix
        objs = list(bucket.objects.filter(Prefix=file_path))
    for obj in objs:
        if os.path.basename(obj.key) == filename:
            metadata = self.s3_client.head_object(
                Bucket=self.container_name, Key=obj.key)
            headers = metadata['ResponseMetadata']['HTTPHeaders']
            info_list.append({
                "fileName": obj.key,
                "lastModificationDate": headers['last-modified'],
                "contentLength": headers['content-length']
            })
    if len(info_list) == 0:
        Console.warning("File not found")
    else:
        Console.msg("File found")
        self.pretty_print(data=info_list, data_type="files",
                          output="table")
    specification['status'] = 'completed'
    return specification
def clean(self):
    """
    Removes the database and the log files, then recreates an empty
    database directory.

    :return: None
    """
    # ignore_errors makes clean idempotent: a missing directory no
    # longer raises FileNotFoundError
    shutil.rmtree(self.parameters['dbpath'], ignore_errors=True)
    shutil.rmtree(self.parameters['logpath'], ignore_errors=True)
    r = Shell.mkdir(self.parameters['dbpath'])
    # NOTE(review): logpath is removed but not recreated here;
    # presumably mongod recreates the log file on start -- confirm
    Console.msg(r)
def start(self):
    """starts the mongo service.

    Creates the database directory if needed, then forks a mongod
    process with the configured port, dbpath, bind_ip and logpath.
    """
    # BUG FIX: mongod long options require a double dash; the original
    # passed '-dbpath' and '-bind_ip' with a single dash, which mongod
    # rejects as unrecognised options
    command = 'ulimit -n 1024; mongod --port {port} --dbpath {dbpath} --bind_ip {bind_ip} --fork --logpath {logpath}' \
        .format(**self.parameters)
    r = Shell.mkdir(self.parameters['dbpath'])
    Console.msg(r)
    Console.msg(command)
    os.system(command)
    Console.ok('started')
    self.status()
def _list(org, match=None, verbose=False):
    """
    Prints name and description of an organization's repositories.

    :param org: an organization object providing get_repos()
    :param match: if given, only print repositories whose name or
                  description contains this substring
    :param verbose: if True, print the organization first
    """
    if verbose:
        Console.msg(f"Organization: {org}")
    for repo in org.get_repos():
        if match is None:
            print(repo.name, repo.description)
        elif match in (repo.name or "") or match in (repo.description or ""):
            # None name/description is treated as the empty string
            print(repo.name, repo.description)
def f(test):
    """
    Demo / smoke test of cloudmesh.common utilities: VERBOSE, Console,
    Variables, Shell, and StopWatch.

    :param test: interpolated into the first message string (the value
                 is never printed; see note below)
    """
    # NOTE(review): msg is overwritten before it is ever used, so this
    # first formatted value is discarded
    msg = "This is a test {test}".format(**locals())
    print(" jj ", locals())
    from cloudmesh.common.debug import VERBOSE
    d = {'test': 'Gergor'}
    VERBOSE(d, "a", "RED", 100)
    from cloudmesh.common.console import Console
    msg = 'my message'
    Console.ok(msg)  # prints a green message
    Console.error(msg)  # prints a red message preceded with ERROR
    Console.msg(msg)  # prints a regular black message
    from cloudmesh.common.variables import Variables
    variables = Variables()
    variables['debug'] = True
    variables['trace'] = True
    variables['verbose'] = 10
    m = {'key': 'value'}
    VERBOSE(m)
    a = {'p': "ac"}
    print(a['p'])
    from cloudmesh.common.Shell import Shell
    # several equivalent ways of invoking shell commands
    result = Shell.execute('pwd')
    print(result)
    result = Shell.execute('ls', ['-l', '-a'])
    print(result)
    result = Shell.execute('ls', '-l -a')
    print(result)
    result = Shell.ls('-aux')
    print(result)
    result = Shell.ls('-a')
    print(result)
    result = Shell.pwd()
    print(result)
    from cloudmesh.common.StopWatch import StopWatch
    from time import sleep
    # time a 1-second sleep and print the measured duration
    StopWatch.start('test')
    sleep(1)
    StopWatch.stop('test')
    print(StopWatch.get('test'))
def key_delete(self, name=None):
    """
    Deletes the key with the given name from the current cloud.

    :param name: The name of the key
    :return: the result of the cloud's delete_keypair call
    """
    Console.msg(f"delete the key: {name} -> {self.cloud}")
    return self.cloudman.delete_keypair(name)
def key_delete(self, name=None):
    """
    Deletes the named key pair on EC2.

    :param name: The name of the key
    :return: the dict of the key
    """
    Console.msg(f"deleting the key: {name} -> {self.cloud}")
    return self.ec2_client.delete_key_pair(KeyName=name)
def images(self, **kwargs):
    """
    Lists the images on the cloud and imports them into the database.

    :return: the dict of the images
    """
    Console.msg(
        f"Getting the list of images for {self.cloud} cloud, this might take a few minutes ...")
    images = self.ec2_client.describe_images()
    Console.ok(f"Images list for {self.cloud} cloud retrieved successfully")
    data = self.update_dict(images['Images'], kind="image")
    self.get_images_and_import(data)
    # BUG FIX: the docstring promises the dict of the images, but the
    # function implicitly returned None; return the updated dicts
    return data
def __init__(self, name=None, configuration="~/.cloudmesh/.cloudmesh4.yaml"):
    """
    Looks up the provider kind for the named cloud in the configuration
    and instantiates the matching provider implementation.

    :param name: the name of the cloud entry in the configuration
    :param configuration: path to the cloudmesh yaml configuration
    """
    cloud_entry = Config(configuration)["cloudmesh"]["cloud"][name]
    self.kind = cloud_entry["cm"]["kind"]
    self.name = name
    Console.msg("FOUND Kind", self.kind)
    if self.kind in ["openstack"]:
        self.p = LibCloudProvider(name=name, configuration=configuration)
        print(self.p)
    print(self.kind)
def key_upload(self, key=None):
    """
    uploads the key specified in the yaml configuration to the cloud

    :param key: dict describing the key; must contain 'name' and
                'public_key'
    :return: the result of the cloud's create_keypair call
    :raises ValueError: if a key with that name already exists
    """
    name = key["name"]
    cloud = self.cloud
    Console.msg(f"upload the key: {name} -> {cloud}")
    try:
        r = self.cloudman.create_keypair(name, key['public_key'])
    except Exception as e:  # e.g. openstack.exceptions.ConflictException
        # narrowed from a bare 'except:', which would also swallow
        # KeyboardInterrupt/SystemExit; chain the original cause
        raise ValueError(f"key already exists: {name}") from e
    return r
def resume(self, name=None):
    """
    resume the named node (implemented as a reboot of each matching
    instance)

    :param name: the name of the node
    :return: the dict of the node
    """
    instances = self._get_instance_id(self.ec2_resource, name)
    for each_instance in instances:
        instance = self.ec2_resource.Instance(each_instance.instance_id)
        instance.reboot()
        Console.msg("Rebooting Instance..Please wait...")
        # BUG FIX: the second string was missing its f prefix, so the
        # literal text "{each_instance.instance_id}" was printed
        Console.ok(f"Instance having Tag:{name} and "
                   f"Instance-Id:{each_instance.instance_id} rebooted")
def detach_public_ip(self, node, ip):
    """
    Disassociates a public (elastic) IP from the named node.

    :param node: the instance name
    :param ip: the public IP address to detach
    :raises ValueError: if the node name is unknown or the IP is not
                        in the available pool
    """
    instances = self._get_instance_id(self.ec2_resource, node)
    instance_id = []
    for each_instance in instances:
        instance_id.append(each_instance.instance_id)
    if not instance_id:
        raise ValueError("Invalid instance name provided...")
    if ip not in self.find_available_public_ip():
        raise ValueError("IP address is not in pool")
    try:
        response = self.ec2_client.disassociate_address(
            AssociationId=self._get_allocation_ids(
                self.ec2_client, ip).get('AssociationId'),
        )
    except ClientError as e:
        Console.error(e)
        # BUG FIX: on failure 'response' is unbound; falling through to
        # Console.msg(response) raised NameError. Log and return.
        return
    Console.msg(response)
def bucket_create(self, name=None):
    """
    Creates a private S3 bucket with the given name and, when the
    bucket is empty, writes a directory-marker object into it.

    gets the source name from the put function

    :param name: the bucket name which needs to be created
    :return: dict of the marker object(s) as produced by update_dict
    :raises Exception: if the S3 client reports an error while creating
                       the bucket
    """
    try:
        self.s3_client.create_bucket(
            ACL='private',
            Bucket=name,
        )
        Console.msg("Bucket Created:", name)
        file_content = ""
        file_path = massage_path(name)
        # record the action for the provider's bookkeeping dict
        self.storage_dict['action'] = 'bucket_create'
        self.storage_dict['bucket'] = name
        dir_files_list = []
        self.container_name = name
        obj = list(
            self.s3_resource.Bucket(self.container_name).objects.filter(
                Prefix=file_path + '/'))
        if len(obj) == 0:
            # bucket is empty: create the directory-marker object
            marker_object = self.s3_resource.Object(
                self.container_name,
                self.dir_marker_file_name).put(Body=file_content)
            # make head call to extract meta data
            # and derive obj dict
            metadata = self.s3_client.head_object(
                Bucket=self.container_name,
                Key=self.dir_marker_file_name)
            dir_files_list.append(
                extract_file_dict(massage_path(name), metadata))
        self.storage_dict['message'] = 'Bucket created'
        self.storage_dict['objlist'] = dir_files_list
        VERBOSE(self.storage_dict)
        dict_obj = self.update_dict(self.storage_dict['objlist'])
        return dict_obj
    except botocore.exceptions.ClientError as e:
        if e:
            message = "One or more errors occurred while creating the " \
                      "bucket: {}".format(e)
            raise Exception(message)
def start_local(self, port, dbpath, ip):
    """
    Starts a local mongod instance with the given settings.

    :param port: port mongod listens on (defaults to 27017)
    :param dbpath: database directory (defaults to /home/pi/data/db)
    :param ip: bind address (defaults to 127.0.0.1)
    :return: None
    """
    # Setting defaults if no argument is provided
    if port is None:
        port = 27017
    if dbpath is None:
        dbpath = "/home/pi/data/db"
    if ip is None:
        ip = "127.0.0.1"
    # BUG FIX: the original concatenated port onto a str, which raises
    # TypeError when the default integer 27017 is used; an f-string
    # formats any type
    Console.msg(f"mongod instance started on IP={ip} PORT={port} "
                f"\nwith DBPATH={dbpath}")
    command = f"sudo mongod --config=/etc/mongodb.conf --dbpath={dbpath} --port={port} --bind_ip={ip}"
    output = subprocess.run(command.split(" "), shell=False,
                            capture_output=True)
    Console.msg(output.stdout.decode('utf-8'))
    banner("MongoDB service started succesfully")
    return
def kill(self, name=None):
    """
    Kills (stops and removes) the container(s) matching the name filter.

    :param name: the container name filter; defaults to self.NAME
    :return: None
    """
    try:
        if name is None:
            name = self.NAME
        script = f"docker container ls -aq --filter name={name}"
        container_id = self.run(script, verbose=False)
        if container_id != "":
            Console.msg(f"Kill container with id: {container_id}")
            # BUG FIX: the two commands were implicitly concatenated
            # without a separator, producing the broken command
            # "docker stop <id>docker rm <id>"; run them as two lines
            script = f"docker stop {container_id}\n" \
                     f"docker rm {container_id}"
            self.run(script, verbose=False)
    except Exception:
        # narrowed from a bare 'except:'
        Console.ok("No container found.")
def __init__(self, service=None, config="~/.cloudmesh/.cloudmesh.yaml"):
    """
    Instantiates the storage provider implementation that matches the
    configured kind for the given service, importing the provider
    module lazily.

    :param service: the name of the storage service in the configuration
    :param config: path to the cloudmesh yaml configuration file
    :raises NotImplementedError: if the configured kind is unknown
    """
    super(Provider, self).__init__(service=service, config=config)
    self.config = Config()
    # BUG FIX: `config` is the path *string*; indexing it with a str
    # key raises TypeError. The lookup must go through the Config
    # object loaded above.
    self.kind = self.config[f"cloudmesh.storage.{service}.cm.kind"]
    self.cloud = service
    self.service = service
    Console.msg("FOUND Kind", self.kind)
    if self.kind in ["awsS3"]:
        from cloudmesh.storage.provider.awss3 import \
            Provider as AwsStorageProvider
        self.p = AwsStorageProvider(service=service, config=config)
    elif self.kind in ["parallelawsS3"]:
        from cloudmesh.storage.provider.parallelawss3 import \
            Provider as ParallelAwsStorageProvider
        self.p = ParallelAwsStorageProvider(service=service, config=config)
    elif self.kind in ["box"]:
        from cloudmesh.storage.provider.box import \
            Provider as BoxStorageProvider
        self.p = BoxStorageProvider(service=service, config=config)
    elif self.kind in ["gcpbucket"]:
        from cloudmesh.google.storage.Provider import \
            Provider as GCPStorageProvider
        self.p = GCPStorageProvider(service=service, config=config)
    elif self.kind in ["gdrive"]:
        from cloudmesh.storage.provider.gdrive import \
            Provider as GdriveStorageProvider
        self.p = GdriveStorageProvider(service=service, config=config)
    elif self.kind in ["azureblob"]:
        from cloudmesh.storage.provider.azureblob import \
            Provider as AzureblobStorageProvider
        self.p = AzureblobStorageProvider(service=service, config=config)
    elif self.kind in ["oracle"]:
        from cloudmesh.oracle.storage.Provider import \
            Provider as OracleStorageProvider
        self.p = OracleStorageProvider(service=service, config=config)
    else:
        raise NotImplementedError
def run(self, script, verbose=True, terminate=False):
    """
    Executes the given shell script via Script.run.

    :param script: the script text to execute
    :param verbose: if True, echo the script and its output/errors
    :param terminate: if True, exit the process when the script fails
    :return: the script output, "dryrun" in dryrun mode, or "error"
             on failure
    """
    if verbose:
        Console.msg(script)
    if self.dryrun:
        # dryrun mode never executes anything
        return "dryrun"
    try:
        output = Script.run(script, debug=False)
    except Exception as e:
        if verbose:
            Console.error("Script returned with error")
            print(e)
        if terminate:
            sys.exit()
        return "error"
    if verbose:
        print(output)
    return output
def key_upload(self, key=None):
    """
    uploads the key specified in the yaml configuration to the cloud

    The key is stored in the database; we do not create a new keypair,
    we import our local public key (e.g. ~/.ssh/id_rsa.pub) into aws.

    :param key: dict describing the key; must contain 'name' and
                'public_key'
    :return: the dict of the key
    :raises ValueError: if a key with that name already exists
    """
    key_name = key["name"]
    cloud = self.cloud
    Console.msg(f"uploading the key: {key_name} -> {cloud}")
    try:
        r = self.ec2_client.import_key_pair(
            KeyName=key_name,
            PublicKeyMaterial=key['public_key'])
    except ClientError as e:
        VERBOSE(e)
        # ValueError is raised because key.py catches it; now it also
        # carries a message naming the offending key and chains the
        # original AWS error
        raise ValueError(f"key already exists: {key_name}") from e
    return r
def get_google_pricing(refresh=False):
    """
    Returns the 'gcp-frugal' collection of GCP flavor/price records.

    If the collection already contains documents and refresh is False,
    the cached collection is returned unchanged. Otherwise the public
    GCP pricing calculator JSON is downloaded, flattened into one
    record per (machine, location) pair, written back to the database,
    and the refreshed collection is returned.

    :param refresh: if True, re-download the price list even if cached
    :return: the cm database collection 'gcp-frugal'
    """
    # connect to cm db and check for Google info
    cm = CmDatabase()
    googleinfo = cm.collection('gcp-frugal')
    if googleinfo.estimated_document_count() > 0 and not refresh:
        Console.msg(f"Using local db gcp flavors...")
        return googleinfo
    else:
        Console.msg(f"Pulling gcp flavor price information...")
        googleinfo = requests.get(
            'https://cloudpricingcalculator.appspot.com/static/data/pricelist.json?v=1570117883807'
        ).json()['gcp_price_list']
        google_list = []
        for machine, locations in googleinfo.items():
            # only machine entries carry both 'cores' and 'memory';
            # other keys of the price list are skipped
            if type(
                    locations
            ) is dict and 'cores' in locations and 'memory' in locations:
                cores = locations['cores']
                # shared-core machine types carry no numeric core count
                if cores == 'shared':
                    continue
                memory = locations['memory']
                for location in locations:
                    # 'cores' is end of regions, so stop if found
                    if location == 'cores':
                        break
                    else:
                        if type(locations[location]) is str:
                            print(locations[location])
                        google_list.append(
                            np.array([
                                'gcp', machine, location,
                                float(cores),
                                float(memory),
                                float(locations[location])
                            ]))
        # stack the per-row arrays into one matrix for formatting
        googleinforeturn = np.stack(google_list, axis=0)
        googleinfo = np.stack(googleinforeturn, axis=0)
        googleinfo = helpers.format_mat(googleinfo)
        # convert to list of dicts
        googleinfo = googleinfo.to_dict('records')
        # write back to cm db
        for entry in googleinfo:
            entry["cm"] = {
                "kind": 'frugal',
                "driver": 'gcp',
                "cloud": 'gcp',
                "name": str(entry['machine-name'] + '-' + entry['location']),
                "updated": str(datetime.utcnow()),
            }
        Console.msg(f"Writing back to db ...")
        cm.update(googleinfo, progress=True)
        return cm.collection('gcp-frugal')