def copy(self, service="aws", sourceObj="abcd.txt", target="local", targetObj=None, debug=True):
    """
    copy method copies files/directories from local storage to target CSP

    :param service: "aws" for this provider
    :param sourceObj: A file/directory to be copied
    :param target: target kind label; the body branches on self.target_kind
                   ('local' or 'azureblob') rather than this argument
    :param targetObj: Name of the target object; defaults to sourceObj
    :param debug: Boolean indicating debug mode (not consulted in this body)
    :return: None
    """
    # To copy the whole cmStorage directory, pls provide sourceObj=None
    # Validating if target S3 bucket/blob container exists before any transfer
    if self.target_kind == 'local':
        if Path(self.target_container).expanduser().is_dir():
            Console.ok(f"local storage {self.target_container} exists.")
        else:
            Console.error(
                f"AWS local storage {self.target_container} does "
                f"not exist.")
            return
    elif self.target_kind == 'azureblob':
        if self.blob_service.exists(self.target_container):
            Console.ok(f"Azure blob container {self.target_container} "
                       f"exists")
        else:
            Console.error(f"Azure blob container {self.target_container} "
                          f"does not exist.")
            return
    else:
        # Only 'local' and 'azureblob' target kinds are handled so far.
        raise NotImplementedError

    # TODO : Check CLI option
    # CLI option
    # aws s3 cp C:\Users\kpimp\cmStorage
    # s3://bucket-iris.json/cmStorage --recursive
    # TODO: Check if source file/dir exists in S3 bucket

    # Deciding the source location of file/directory to be copied
    # if sourceObj:
    #     self.local_location = Path(self.yaml_content_source['default'][
    #         'directory'], sourceObj)
    #     print("=====> local location ", self.local_location)
    # else:
    #     sourceObj = "cmStorage"
    #     self.local_location = self.yaml_content_source['default'][
    #         'directory']

    # Default the target object name to the source object name.
    if targetObj is None:
        targetObj = sourceObj
    source_path = Path(self.local_location)  # NOTE(review): unused below
    object_queue = queue.Queue()  # NOTE(review): unused placeholder (see TODO)
    # Creating a queue with all the objects to be copied to local
    # TODO: implement queue
    if self.target_kind == "local":
        # constructing targetObj as a string of absolute path of target
        # location
        targetObj = str(
            Path(self.target_container).expanduser().joinpath(targetObj))
        try:
            s3_resource = boto3.resource('s3')
            print(self.container, sourceObj, targetObj)  # debug output
            s3_resource.meta.client.download_file(self.container, sourceObj, targetObj)
            # NOTE(review): Console.ok is given two positional arguments here,
            # while other call sites pass one formatted string -- confirm the
            # Console API accepts this.
            Console.ok("Downloaded file: ", sourceObj)
        except Exception as e:
            # best-effort: report the boto3 failure and continue
            print(e)
    elif self.target_kind == "azureblob":
        # S3 -> Azure blob transfer is not implemented yet
        raise NotImplementedError
def create(self):
    """
    Create the mongo service: in docker mode initialize the container,
    otherwise start mongod without auth, create the admin user, and stop
    the service again so it can be restarted with authentication enabled.

    Reads self.data['MODE'] ('docker' or anything else for a native run).

    :return: None
    """
    mode = self.data['MODE']
    if mode == 'docker':
        from cloudmesh.mongo.MongoDocker import MongoDocker
        mongo = MongoDocker()
        mongo.initialize()
        return
    # BUG FIX: the former `if platform.lower() == 'win32': ... else: ...`
    # branches were token-for-token identical, so the platform check was
    # removed; the sequence below is what both branches executed.
    # (The original comment noted windows cannot do start-service and
    # set_auth in the same cms execution -- hence start/auth/stop.)
    Console.info("Starting mongo without authentication ... ")
    self.start(security=False)
    Console.info("Creating admin user ... ")
    self.set_auth()
    Console.info("Stopping the service ... ")
    self.stop()
def install(self, sudo=True):
    """
    Check where MongoDB is installed in the mongo location and, if it is
    not installed and MONGO_AUTOINSTALL is enabled, install it for the
    current platform (linux/darwin/win32) or via docker.

    :param sudo: install with elevated privileges
                 (not consulted in this body -- presumably used by the
                 per-platform helpers; TODO confirm)
    :return: "" on the early-exit paths, otherwise None
    """
    if self.dryrun:
        # dryrun only reports the path; note it does NOT return early
        print(self.mongo_path)
    # pprint(self.data)
    mode = self.data['MODE']
    Console.msg(f"Installing mongo in mode: {mode}")
    if mode == 'docker':
        Console.ok(
            "Installing mongoDB in a docker container cloudmesh-mongo")
        from cloudmesh.mongo.MongoDocker import MongoDocker
        mongo = MongoDocker()
        # clean install: remove any existing container, pull a fresh image
        mongo.kill()
        mongo.install(clean=True, pull=True)
        Console.ok("Shutting mongoDB down")
        Console.msg("")
        Console.ok("Start the mongodb service with")
        Console.msg("")
        Console.msg("    cms admin mongo create")
        Console.msg("    cms admin mongo start")
        Console.msg("")
        return ""
    if not self.data["MONGO_AUTOINSTALL"]:
        # auto install disabled: tell the user how to enable it and bail out
        Console.error("Mongo auto install is off")
        print("You can set it with")
        print()
        Console.ok(
            "    cms config set cloudmesh.data.mongo.MONGO_AUTOINSTALL=True"
        )
        print()
        if self.machine == 'darwin':
            print("To install it with brew you need to set also")
            print()
            Console.ok(
                "    cms config set cloudmesh.data.mongo.MONGO_BREWINSTALL=True"
            )
            print()
        return ""
    #
    # the path test may be wrong as we need to test for mongo and mongod
    #
    # print ('OOO', os.path.isdir(path), self.data["MONGO_AUTOINSTALL"] )
    # install when forced, or when the mongo home is missing and
    # auto install is enabled
    if self.force or (not os.path.isdir(self.mongo_home) and self.data["MONGO_AUTOINSTALL"]):
        print(f"MongoDB is not installed in {self.mongo_home}")
        #
        # ask if you like to install and give info where it is being installed
        #
        # use cloudmesh yes no question see cloudmesh 3
        #
        # print(f"Auto-install the MongoDB into {mongo_path}")
        self.local = self.data["LOCAL"]
        # dispatch to the platform-specific installer
        if self.machine == 'linux':
            self.linux()
        elif self.machine == 'darwin':
            self.darwin()
        elif self.machine == 'win32':
            # Replaced windows with win32
            self.windows()
        else:
            print("platform not found", platform)
    elif os.path.isdir(self.mongo_home):
        # not forced and the install directory already exists
        Console.error(f"Folder {self.mongo_home} already exists")
def _configure_worker_interfaces(cls, worker, user='******'):
    """
    Configure the network interface of the worker to use the master as an
    internet gateway: read the master's gateway/netmask from ifconfig,
    fetch the worker's /etc/network/interfaces via scp, rewrite the
    private-interface stanza with gateway/netmask/dns, and push it back.

    :param worker: A single string hostname for the worker
                   (ie. --hostname option from cm-pi-burn)
    :param user: The user we will use to ssh/scp into the worker
    :return: None (exits the process on unrecoverable errors)
    """
    if cls.dryrun:
        Console.info("Configuring worker info.")
        Console.info(f"scp /etc/network/interfaces from {cls.master} to {user}@{worker}")
        # BUG FIX: the f prefix was missing here, so the placeholders were
        # printed literally instead of being interpolated.
        Console.info(f"Configure default gateway and DNS for {cls.priv_interface} on {user}@{worker}")
    else:
        # Get gateway and netmask for worker from the master's ifconfig output
        conf = cls._system('ifconfig')
        conf = conf.split('\n')
        # Search for priv_interface; the address line follows the header line
        info = None
        for i in range(len(conf)):
            if cls.priv_interface in conf[i]:
                info = conf[i + 1].split()
        if info is None:  # BUG FIX: was `== None`
            Console.error(f"Interface {cls.priv_interface} not found")
            sys.exit(1)
        elif info[0] != 'inet':
            Console.error(f"Interface {cls.priv_interface} found, but there appears to be no iPv4 connection")
            sys.exit(1)
        else:
            # info ex: ['inet', '192.168.1.34', 'netmask', '255.255.255.0',
            #           'broadcast', '192.168.1.255']
            gateway = info[1]
            netmask = info[3]

            # Use scp to get /etc/network/interfaces from worker
            cls._system('mkdir -p ~/.cloudmesh/tmp')
            tmp = f'~/.cloudmesh/tmp/{worker}-interfaces.tmp'
            ignore_setting = '-o "StrictHostKeyChecking no"'
            StopWatch.start(f'Talking to {user}@{worker}')
            exit_code = cls._system(f'scp {ignore_setting} {user}@{worker}:/etc/network/interfaces {tmp}', exitcode=True)
            StopWatch.stop(f'Talking to {user}@{worker}')
            StopWatch.status(f'Talking to {user}@{worker}', exit_code == 0)

            # Write new interfaces file
            try:
                interfaces = readfile(tmp).rstrip().split('\n')
            except Exception:  # narrowed from a bare except
                Console.error(f"Could not open {tmp}")
                sys.exit(1)

            try:
                ind = interfaces.index(f'auto {cls.priv_interface}')
            except ValueError:  # list.index raises ValueError when absent
                Console.error(f"Could not find {cls.priv_interface} configuration in interfaces file")
                # BUG FIX: previously fell through with `ind` unbound,
                # causing a NameError on the next line
                sys.exit(1)

            # Keep the 3-line stanza (auto/iface/address) and append the
            # routing information for the private interface.
            interface_config = [line.lstrip() for line in interfaces[ind: ind + 3]]
            interface_config.append(f'gateway {gateway}')
            interface_config.append(f'netmask {netmask}')
            dnss = " ".join(cls.dns) + "\n"
            interface_config.append(f'dns-nameservers {dnss}')
            new_config = interfaces[:ind] + interface_config
            writefile(tmp, '\n'.join(new_config))

            # New config file now written on local machine.
            # Move to worker in tmp directory, then install it with sudo.
            remote_cmd1 = 'mkdir -p ~/.cloudmesh/tmp'
            remote_path = '~/.cloudmesh/tmp/interface.tmp'
            cls._system(f'ssh {ignore_setting} {user}@{worker} {remote_cmd1}')
            cls._system(f'scp {ignore_setting} {tmp} {user}@{worker}:{remote_path}')
            remote_cmd2 = 'sudo cp ~/.cloudmesh/tmp/interface.tmp /etc/network/interfaces'
            cls._system(f'ssh {ignore_setting} {user}@{worker} {remote_cmd2}')
def run_os(self):
    """
    Start an openapi server by writing a physical flask/connexion script
    and launching it in a background subprocess.

    Side effects: writes {name}_cmsoaserver.py and a dated log file into
    self.directory, registers the server in the Registry.

    :return: pid for started server ("" if the launch failed)
    """
    Console.ok("starting server")

    # For windows convert backslashes to forward slashes to be python
    # compatible
    if sys.platform == 'win32':
        self.spec = "/".join(self.spec.replace('C:', '').split('\\'))
        self.directory = "/".join(
            self.directory.replace('C:', '').split('\\'))

    today_dt = date.today().strftime("%m%d%Y")
    VERBOSE("spec path: ", self.spec)

    # The generated stand-alone server script; dedent strips the template
    # indentation so the emitted file is valid top-level python.
    flask_script = textwrap.dedent(f'''
        import connexion

        # Create the application instance
        app = connexion.App(__name__, specification_dir='{self.directory}')

        # Read the yaml file to configure the endpoints
        app.add_api('{self.spec}')

        if __name__ == '__main__':
            app.run(host='{self.host}',
                    port={self.port},
                    debug={self.debug},
                    server='{self.server}')
        ''')

    VERBOSE("server script: ", f"{self.directory}/{self.name}_cmsoaserver.py")

    # Write out flask python script to file so that it can be run in
    # background.
    # BUG FIX: the old code did `version = open(...).write(...)`, leaking
    # the file handle and binding a useless character count; use a context
    # manager so the file is always closed.
    try:
        with open(f"{self.directory}/{self.name}_cmsoaserver.py", 'w') as script_file:
            script_file.write(flask_script)
    except IOError:
        Console.error("Unable to write server file")
    except Exception as e:
        print(e)

    # Run python flask script in background
    pid = ""
    try:
        # TODO: need to write log somewhere else or use a logger to write to
        # common log. Tried devnull and that does not work.
        logname = f"{self.directory}/{self.name}_server.{today_dt}.log"
        # This handle is deliberately left open: the subprocess inherits it
        # as its stdout/stderr for the lifetime of the server.
        f = open(logname, "w")
        # f = open(os.devnull, "w")
        process = subprocess.Popen(
            [sys.executable, f"{self.directory}/{self.name}_cmsoaserver.py"],
            stdout=f,
            stderr=subprocess.STDOUT,
            shell=False)
        pid = process.pid
    except Exception as e:
        Console.error("Unable to start server")
        print(e)

    # Read the spec back to discover the advertised server URL.
    with open(self.spec, "r") as stream:
        try:
            details = yaml.safe_load(stream)
        except yaml.YAMLError as e:
            print(e)
            assert False, "Yaml file has syntax error"

    url = details["servers"][0]["url"]

    print()
    print(" Starting:", self.name)
    print(" PID: ", pid)
    print(" Spec: ", self.spec)
    print(" URL: ", url)
    print()

    registry = Registry()
    registry.add_form_file(details,
                           pid=pid,
                           spec=self.spec,
                           directory=self.directory,
                           port=self.port,
                           host=self.host,
                           url=url)

    return pid
def set(cls, workers=None, addresses=None):
    """
    Assigns static IP addresses to the workers by rewriting the
    dhcp-host entries in /etc/dnsmasq.conf.

    :param workers: List of worker hostnames
    :param addresses: List of IP addresses (same length as workers)
    :return: None (exits the process on configuration errors)
    """
    if cls.dryrun:
        Console.info(f"DRYRUN: Setting {workers} to {addresses}")
    else:
        if not Path('/etc/dnsmasq.conf').is_file():
            Console.error('This Pi is not configured as a bridge yet. See "cms bridge create" ')
            sys.exit(1)
        elif len(workers) != len(addresses):
            Console.error('The number of hostnames and addresses given do not match')
            sys.exit(1)
        else:
            # Derive the permissible IP range from the dhcp-range directive
            conf_line = cls._system('cat /etc/dnsmasq.conf | grep dhcp-range')
            interm = conf_line.split(',')
            upper = interm[1]  # 10.1.1.20
            lower = interm[0].split('=')[1]  # 10.1.1.2
            cls.ip_range = lower, upper

            # BUG FIX: read the config ONCE before the loop. It was
            # previously re-read for every worker, which discarded the
            # in-place `conf[key] = line` overwrites made for earlier
            # workers -- only the last overwrite survived.
            conf = sudo_readfile('/etc/dnsmasq.conf')

            to_add = []
            for host, ip in zip(workers, addresses):
                Console.info(f"Setting {host} to {ip}")
                if not cls._in_range(ip):
                    Console.warning(f"Suitable ip range is {cls.ip_range[0]} - {cls.ip_range[1]}")
                    Console.error(f'Address {ip} for {host} is out of range. See "cms bridge create --range=RANGE" to reconfigure the IP range')
                    sys.exit(1)

                # Check if static IP assignment already exists
                start = f'dhcp-host={host}'
                line = f'{start},{ip}'  # dhcp-host=red001,10.1.1.1
                ipPattern = re.compile(f'dhcp-host=.*{ip}')
                hostPattern = re.compile(f'{start}.*')
                # see if ip is already assigned
                ipResults = list(filter(ipPattern.search, conf))
                # see if host is already assigned
                hostResults = list(filter(hostPattern.search, conf))

                # If ip already assigned
                if ipResults:
                    Console.error(f'{ip} is already assigned. Please try a different IP')
                    sys.exit(1)

                # If new host
                if not hostResults:
                    to_add.append(line)
                else:
                    Console.warning(f"Previous IP assignment for {host} found. Overwriting.")
                    if len(hostResults) > 1:
                        Console.warning(f"Found too many assignments for {host}. Overwriting first one")
                    key = conf.index(hostResults[0])
                    conf[key] = line

            conf += to_add
            sudo_writefile('/etc/dnsmasq.conf', '\n'.join(conf) + '\n')
            Console.ok("Added IP's to dnsmasq")
def restart(cls, priv_iface='eth0', workers=None, user='******', nohup=False):
    """
    Restart the bridge services on the master (dhcpcd and dnsmasq) and,
    optionally, the DHCP service on each worker.

    :param priv_iface: private interface used to verify dhcpcd is active
    :param workers: List of workers to restart if needed (None to skip)
    :param user: ssh user for the workers
    :param nohup: when True, skip restarting dhcpcd on the master
    :return: None (exits the process when a service fails to restart)
    """
    banner("Restarting bridge on master...", color='CYAN')

    # Clear lease log so workers re-acquire their (possibly changed) leases
    if Path('/var/lib/misc/dnsmasq.leases').is_file():
        Console.info("Clearing leases file...")
        sudo_writefile('/var/lib/misc/dnsmasq.leases', "\n")

    # If nohup is true, do not restart dhcpcd
    if not nohup:
        Console.info("Restarting dhcpcd please wait...")
        status = cls._system('sudo service dhcpcd restart', exitcode=True)
        if status != 0:
            # typo fixes in messages below; stray f prefixes on
            # placeholder-free strings removed
            Console.error('Did not restart master networking service correctly')
            sys.exit(1)
        Console.info("Restarted dhcpcd")
        Console.info("Verifying dhcpcd status...")
        # Give the service a chance to adjust
        time.sleep(2)
        if not cls._dhcpcd_active(iface=priv_iface):
            Console.error('Timeout: Could not boot dhcpcd in the allotted amount of time. Use `sudo service dhcpcd status` for more info.')
            sys.exit(1)
        Console.ok("Verified dhcpcd status successfully")

    Console.info("Restarting dnsmasq please wait...")
    status = cls._system('sudo service dnsmasq restart', exitcode=True)
    if status != 0:
        Console.error('Did not restart master dnsmasq service correctly')
        sys.exit(1)

    Console.ok("Restarted dnsmasq successfully")
    Console.ok("Restarted bridge service on master")

    if workers is not None:
        banner("Restart networking service on workers...", color='CYAN')
        ignore_setting = '-o "StrictHostKeyChecking no"'
        for worker in workers:
            Console.info(f"Restarting dhcpcd on {worker} please wait...")
            status = cls._system(f'ssh {ignore_setting} {user}@{worker} sudo service dhcpcd restart', exitcode=True)
            if status != 0:
                Console.error(f'Did not restart {worker} DHCP service correctly')
                sys.exit(1)
            Console.ok(f"Restarted dhcpcd on {worker}")
        Console.ok("Restarted DHCP service on workers")
def list(self, source=None, dir_only=False, recursive=False):
    """
    lists all files specified in the source

    :param source: this can be a file or directory
    :param dir_only: boolean, enlist only directories
                     (currently not consulted in this body)
    :param recursive: in case of directory the recursive refers to all
                      subdirectories in the specified source
    :return: dict of the matched blobs, or None after Console.error
    """
    HEADING()
    blob_file, blob_folder = self.cloud_path(source)
    print("File : ", blob_file)
    print("Folder: ", blob_folder)
    obj_list = []
    fold_list = []
    file_list = []

    if blob_folder is None:
        # SOURCE specified is File only
        if not recursive:
            if self.storage_service.exists(self.container, blob_file):
                # BUG FIX: a second, identical get_blob_properties call
                # whose content_length was bound to an unused variable has
                # been removed (one redundant service round-trip).
                blob_prop = self.storage_service.get_blob_properties(
                    self.container, blob_file)
                obj_list.append(blob_prop)
            else:
                return Console.error(
                    "File does not exist: {file}".format(file=blob_file))
        else:
            # search every blob whose basename matches the requested file
            file_found = False
            srch_gen = self.storage_service.list_blobs(self.container)
            for blob in srch_gen:
                if os.path.basename(blob.name) == blob_file:
                    obj_list.append(blob)
                    file_found = True
            if not file_found:
                return Console.error(
                    "File does not exist: {file}".format(file=blob_file))
    else:
        if blob_file is None:
            # SOURCE specified is Directory only
            if not recursive:
                file_found = False
                srch_gen = self.storage_service.list_blobs(self.container)
                for blob in srch_gen:
                    # direct children of the requested folder
                    if os.path.dirname(blob.name) == blob_folder:
                        obj_list.append(blob)
                        file_list.append(os.path.basename(blob.name))
                        file_found = True
                    if blob_folder == '':
                        # at the container root, collect top-level folders
                        if re.search('/', blob.name):
                            srch_fold = \
                                os.path.dirname(blob.name).split('/')[0]
                            file_found = True
                            if srch_fold not in fold_list:
                                fold_list.append(srch_fold)
                    else:
                        # collect immediate sub-folders of blob_folder
                        if blob not in obj_list:
                            if len(os.path.dirname(
                                    blob.name).split('/')) == len(
                                    blob_folder.split('/')) + 1:
                                fold_match = 'Y'
                                for e in os.path.dirname(
                                        blob.name).split('/')[:-1]:
                                    if e not in blob_folder.split('/'):
                                        fold_match = 'N'
                                if fold_match == 'Y':
                                    srch_fold = \
                                        os.path.dirname(blob.name).split(
                                            '/')[
                                            len(blob_folder.split('/'))]
                                    file_found = True
                                    if srch_fold not in fold_list:
                                        fold_list.append(srch_fold)
                if not file_found:
                    return Console.error(
                        "Directory does not exist: {directory}".format(
                            directory=blob_folder))
            else:
                # recursive: everything at or below blob_folder
                file_found = False
                srch_gen = self.storage_service.list_blobs(self.container)
                for blob in srch_gen:
                    if (os.path.dirname(blob.name) == blob_folder) or \
                            (os.path.commonpath(
                                [blob.name, blob_folder]) == blob_folder):
                        obj_list.append(blob)
                        file_list.append(blob.name)
                        file_found = True
                if not file_found:
                    return Console.error(
                        "Directory does not exist: {directory}".format(
                            directory=blob_folder))
        else:
            # SOURCE is specified with Directory and file
            if not recursive:
                if self.storage_service.exists(self.container, source[1:]):
                    # (same redundant-size fetch removed here as above)
                    blob_prop = self.storage_service.get_blob_properties(
                        self.container, source[1:])
                    obj_list.append(blob_prop)
                else:
                    return Console.error(
                        "File does not exist: {file}".format(
                            file=source[1:]))
            else:
                return Console.error(
                    "Invalid arguments, recursive not applicable")

    dict_obj = self.update_dict(obj_list)
    # pprint(dict_obj)

    if len(file_list) > 0:
        hdr = '#' * 90 + '\n' + 'List of files in the folder ' + '/' + blob_folder + ':'
        Console.cprint("BLUE", "", hdr)
        print(file_list)
        if len(fold_list) == 0:
            trl = '#' * 90
            Console.cprint("BLUE", "", trl)
    if len(fold_list) > 0:
        hdr = '#' * 90 + '\n' + 'List of Sub-folders under the folder ' + '/' + blob_folder + ':'
        Console.cprint("BLUE", "", hdr)
        print(fold_list)
        trl = '#' * 90
        Console.cprint("BLUE", "", trl)

    return dict_obj
def get(self, source=None, destination=None, recursive=False):
    """
    Downloads file from Destination(Service) to Source(local)

    :param source: the source can be a directory or file (service side)
    :param destination: the destination can be a directory or file (local)
    :param recursive: in case of directory the recursive refers to all
                      subdirectories in the specified source
    :return: dict of the downloaded blobs, or None after Console.error
    """
    HEADING()
    # Determine service path - file or folder
    # blob_file, blob_folder = self.cloud_path(destination)
    blob_file, blob_folder = self.cloud_path(source)
    print("File : ", blob_file)
    print("Folder: ", blob_folder)
    # Determine local path i.e. download-to-folder
    src_path = self.local_path(destination)
    err_flag = 'N'
    rename = 'N'
    if os.path.isdir(src_path):
        rename = 'N'
    else:
        if os.path.isfile(src_path):
            Console.msg("WARNING: A file already exists with same name, "
                        "overwrite issued")
            rename = 'Y'
        else:
            # target file name given: parent dir must exist
            if os.path.isdir(os.path.dirname(src_path)):
                rename = 'Y'
            else:
                err_flag = 'Y'

    if err_flag == 'Y':
        # BUG FIX: the f prefix was missing (the message was built from two
        # adjacent plain strings), so the literal text "{src_path}" was
        # printed instead of the path.
        return Console.error(
            f"Local directory not found or file already exists: {src_path}")
    else:
        obj_list = []
        if blob_folder is None:
            # file only specified
            if not recursive:
                if self.storage_service.exists(self.container, blob_file):
                    if rename == 'Y':
                        # download beside the target, then rename over it
                        download_path = os.path.join(
                            os.path.dirname(src_path), blob_file)
                    else:
                        download_path = os.path.join(src_path, blob_file)
                    obj_list.append(
                        self.storage_service.get_blob_to_path(
                            self.container, blob_file, download_path))
                    if rename == 'Y':
                        rename_path = src_path
                        os.rename(download_path, rename_path)
                else:
                    return Console.error(
                        f"File does not exist: {blob_file}")
            else:
                # download every blob whose basename matches
                file_found = False
                get_gen = self.storage_service.list_blobs(self.container)
                for blob in get_gen:
                    if os.path.basename(blob.name) == blob_file:
                        download_path = os.path.join(src_path, blob_file)
                        obj_list.append(
                            self.storage_service.get_blob_to_path(
                                self.container, blob.name, download_path))
                        file_found = True
                if not file_found:
                    return Console.error(
                        "File does not exist: {file}".format(
                            file=blob_file))
        else:
            if blob_file is None:
                # Folder only specified
                if not recursive:
                    file_found = False
                    get_gen = self.storage_service.list_blobs(
                        self.container)
                    for blob in get_gen:
                        if os.path.dirname(blob.name) == blob_folder:
                            download_path = os.path.join(
                                src_path, os.path.basename(blob.name))
                            obj_list.append(
                                self.storage_service.get_blob_to_path(
                                    self.container, blob.name,
                                    download_path))
                            file_found = True
                    if not file_found:
                        return Console.error(
                            "Directory does not exist: {directory}".format(
                                directory=blob_folder))
                else:
                    # recursive: recreate the folder tree locally
                    file_found = False
                    srch_gen = self.storage_service.list_blobs(
                        self.container)
                    for blob in srch_gen:
                        if (os.path.dirname(blob.name) == blob_folder) or \
                                (os.path.commonpath([blob.name,
                                                     blob_folder])
                                 == blob_folder):
                            cre_path = os.path.join(
                                src_path, os.path.dirname(blob.name))
                            if not os.path.isdir(cre_path):
                                os.makedirs(cre_path, 0o777)
                            download_path = os.path.join(
                                src_path, blob.name)
                            obj_list.append(
                                self.storage_service.get_blob_to_path(
                                    self.container, blob.name,
                                    download_path))
                            file_found = True
                    if not file_found:
                        return Console.error(
                            "Directory does not exist: {directory}".format(
                                directory=blob_folder))
            else:
                # SOURCE is specified with Directory and file
                if not recursive:
                    if self.storage_service.exists(self.container,
                                                   source[1:]):
                        if rename == 'Y':
                            download_path = os.path.join(
                                os.path.dirname(src_path), blob_file)
                        else:
                            download_path = os.path.join(
                                src_path, blob_file)
                        obj_list.append(
                            self.storage_service.get_blob_to_path(
                                self.container, source[1:], download_path))
                        if rename == 'Y':
                            rename_path = src_path
                            os.rename(download_path, rename_path)
                    else:
                        return Console.error(
                            "File does not exist: {file}".format(
                                file=source[1:]))
                else:
                    return Console.error(
                        "Invalid arguments, recursive not applicable")

    dict_obj = self.update_dict(obj_list)
    # pprint(dict_obj)
    return dict_obj
def copy(self, source=None, destination=None, recursive=False):
    """
    Copies object(s) from source to destination

    :param source: "awss3:source.txt" the source is combination of source
                   CSP name and source object name which either can be a
                   directory or file
    :param destination: "azure:target.txt" the destination is combination
                        of destination CSP and destination object name
                        which either can be a directory or file
    :param recursive: in case of directory the recursive refers to all
                      subdirectories in the specified source
    :return: dict (result of the final put/get), or None after
             Console.error
    """
    # Fetch CSP names and object names
    if source:
        source, source_obj = source.split(':')
    else:
        source, source_obj = None, None
    if destination:
        target, target_obj = destination.split(':')
    else:
        target, target_obj = None, None

    # BUG FIX: str.replace returns a new string; the original call
    # discarded its result, making the colon-stripping a no-op.
    source_obj = source_obj.replace(":", "")

    # oracle provider expects a target name
    if target_obj is None or \
            len(target_obj.strip()) == 0:
        # print("DEBUG:", Path(source_obj).parts)
        target_obj = Path(source_obj).parts[-1]

    source_obj = str(Path(source_obj).expanduser())
    target_obj = str(Path(target_obj).expanduser())

    print("DEBUG Provider: values= ", source, source_obj, target,
          target_obj)

    if source == "local":
        print(f"CALL PUT METHOD OF {self.kind} PROVIDER.")
        result = self.provider.put(source=source_obj,
                                   destination=target_obj,
                                   recursive=recursive)
        return result
    elif target == "local":
        print(f"CALL GET METHOD OF {self.kind} PROVIDER.")
        result = self.provider.get(source=source_obj,
                                   destination=target_obj,
                                   recursive=recursive)
        return result
    else:
        # CSP -> CSP: stage through the local provider
        # VERBOSE(f"Copy from {source} to {destination}.")
        target_kind = self.kind
        target_provider = self.provider
        config = "~/.cloudmesh/cloudmesh.yaml"
        if source:
            super().__init__(service=source, config=config)
            source_kind = self.kind
            source_provider = self.get_source_provider(source_kind, source,
                                                       config)
            # get local storage directory
            super().__init__(service="local", config=config)
            local_storage = self.config[
                "cloudmesh.storage.local.default.directory"]
            local_target_obj = str(Path(local_storage).expanduser())
            source_obj = str(Path(source_obj).expanduser())

            try:
                # fetch from the source CSP into local staging
                result = source_provider.get(source=source_obj,
                                             destination=local_target_obj,
                                             recursive=recursive)
                # BUG FIX: `result and len(result) == 0` could never be
                # true (an empty result is falsy); `not result` catches
                # both None and empty.
                if not result:
                    return Console.error(
                        f"{source_obj} could not be found "
                        f"in {source} CSP. Please check.")
                Console.ok(f"Fetched {source_obj} from {source} CSP")
            except Exception as e:
                return Console.error(
                    f"Error while fetching {source_obj} from "
                    f"{source} CSP. Please check. {e}")
            else:
                # push the staged copy to the target CSP
                source_obj = str(Path(local_target_obj) / source_obj)
                try:
                    result = target_provider.put(source=source_obj,
                                                 destination=target_obj,
                                                 recursive=recursive)
                    if result is None:
                        return Console.error(
                            f"{source_obj} couldn't be copied"
                            f" to {target} CSP.")
                    Console.ok(f"Copied {source_obj} to {target} CSP")
                    return result
                except Exception as e:
                    return Console.error(
                        f"Error while copying {source_obj} to "
                        f"{target} CSP. Please check,{e}")
                finally:
                    # always clean up the staged intermediate copy
                    Console.info("\nRemoving local intermediate file.")
                    from cloudmesh.storage.provider.local.Provider import \
                        Provider as LocalProvider
                    local_provider = LocalProvider(service="local",
                                                   config=config)
                    try:
                        local_provider.delete(source_obj,
                                              recursive=recursive)
                    except Exception as e:
                        Console.error(
                            f"{source_obj} could not be deleted from "
                            f"local storage.")
def put(self, source=None, destination=None, recursive=False):
    """
    Uploads file from Source(local) to Destination(Service)

    :param source: the source can be a directory or file
    :param destination: the destination can be a directory or file
    :param recursive: in case of directory the recursive refers to all
                      subdirectories in the specified source
    :return: dict of uploaded blob properties, or None after Console.error
    """
    HEADING()
    # Determine service path - file or folder
    # NOTE(review): this errors when the destination blob EXISTS, yet the
    # message says "does not exist" -- the condition and/or message look
    # inverted; confirm intended semantics before changing.
    if self.storage_service.exists(self.container, destination[1:]):
        return Console.error(
            "Directory does not exist: {directory}".format(
                directory=destination))
    else:
        # destination (leading '/' stripped) is treated as the blob folder
        blob_folder = destination[1:]
        blob_file = None

    # Determine local path i.e. upload-from-folder
    src_path = self.local_path(source)
    if os.path.isdir(src_path) or os.path.isfile(src_path):
        obj_list = []
        if os.path.isfile(src_path):
            # File only specified: upload a single file into blob_folder
            upl_path = src_path
            if blob_folder == '':
                upl_file = os.path.basename(src_path)
            else:
                upl_file = blob_folder + '/' + os.path.basename(src_path)
            self.storage_service.create_blob_from_path(
                self.container, upl_file, upl_path)
            obj_list.append(
                self.storage_service.get_blob_properties(
                    self.container, upl_file))
        else:
            # Folder only specified - Upload all files from folder
            if recursive:
                # walk the tree; ctr==1 marks the top-level directory,
                # deeper levels mirror the structure via create_dir
                ctr = 1
                old_root = ""
                new_dir = blob_folder
                for (root, folder, files) in os.walk(src_path,
                                                     topdown=True):
                    if ctr == 1:
                        # files directly in the source folder go into
                        # blob_folder itself
                        if len(files) > 0:
                            for base in files:
                                upl_path = os.path.join(root, base)
                                if blob_folder == '':
                                    upl_file = base
                                else:
                                    upl_file = blob_folder + '/' + base
                                self.storage_service.create_blob_from_path(
                                    self.container, upl_file, upl_path)
                                obj_list.append(
                                    self.storage_service.
                                    get_blob_properties(
                                        self.container, upl_file))
                    else:
                        # NOTE(review): blob_folder/new_dir bookkeeping
                        # assumes os.walk sibling ordering -- confirm it
                        # nests correctly for trees deeper than two levels.
                        if os.path.dirname(old_root) != os.path.dirname(
                                root):
                            blob_folder = new_dir
                        new_dir = os.path.join(blob_folder,
                                               os.path.basename(root))
                        self.create_dir(service=None,
                                        directory='/' + new_dir)
                        if len(files) > 0:
                            for base in files:
                                upl_path = os.path.join(root, base)
                                upl_file = new_dir + '/' + base
                                self.storage_service.create_blob_from_path(
                                    self.container, upl_file, upl_path)
                                obj_list.append(
                                    self.storage_service.
                                    get_blob_properties(
                                        self.container, upl_file))
                    old_root = root
                    ctr += 1
            else:
                return Console.error(
                    "Source is a folder, recursive expected in arguments")
    else:
        return Console.error(
            "Directory or File does not exist: {directory}".format(
                directory=src_path))

    dict_obj = self.update_dict(obj_list)
    pprint(dict_obj)
    return dict_obj
def do_key(self, args, arguments):
    """
    ::

       Usage:
         key -h | --help
         key list --cloud=CLOUDS [--output=OUTPUT]
         key list --source=ssh [--dir=DIR] [--output=OUTPUT]
         key list --source=git [--output=OUTPUT] [--username=USERNAME]
         key list [--output=OUTPUT]
         key init
         key add NAME --filename=FILENAME [--output=OUTPUT]
         key add [NAME] [--source=FILENAME]
         key add [NAME] [--source=git]
         key add [NAME] [--source=ssh]
         key delete NAMES [--cloud=CLOUDS] [--dryrun]
         key upload [NAMES] [--cloud=CLOUDS] [--dryrun]
         key upload [NAMES] [VMS] [--dryrun]
         key group upload [NAMES] [--group=GROUPNAMES] [--cloud=CLOUDS] [--dryrun]
         key group add [--group=GROUPNAMES] [--cloud=CLOUDS] [--dryrun]
         key group add --file=FILENAME
         key group delete [--group=GROUPNAMES] [NAMES] [--dryrun]
         key group list [--group=GROUPNAMES] [--output=OUTPUT]
         key group export --group=GROUNAMES --filename=FILENAME
         key gen (rsa | ssh) [--filename=FILENAME] [--nopass] [--set_path]
         key verify (ssh | pem) --filename=FILENAME [--pub]

       Arguments:
         VMS        Parameterized list of virtual machines
         CLOUDS     The clouds
         NAME       The name of the key.
         SOURCE     db, ssh, all
         KEYNAME    The desired full path name to the key file
         OUTPUT     The format of the output (table, json, yaml)
         FILENAME   The filename with full path in which the key
                    is located

       Options:
          --dir=DIR             the directory with keys [default: ~/.ssh]
          --filename=FILENAME   the name and full path to the file
          --nopass              Flag indicating if the key has no password
          --output=OUTPUT       the format of the output [default: table]
          --pub                 Indicates that the public key is passed in
          --set_path            Sets the security key paths to KEYNAME
          --source=SOURCE       the source for the keys
          --username=USERNAME   the source for the keys [default: none]

       Description:

           Please note that some values are read from the cloudmesh.yaml
           file. One such value is cloudmesh.profile.user

           Manages public keys is an essential component of accessing
           virtual machine sin the cloud. There are a number of sources
           where you can find public keys. This includes teh ~/.ssh
           directory and for example github.

           Keys will be uploaded into cloudmesh database with the add
           command under the given NAME. If the name is not specified the
           name cloudmesh.profile.user is assumed.

           key add NAME  --source=ssh
               adds the default key in ~/.ssh/id_rsa.pub
           key add NAME --source=FILENAME
               adds the key specified by the filename with the given
               name
           key add NAME --git --username=username
               adds a named github key from a user with the given github
               username.

           key set
               adds the ~/.ssh/id_rsa.pub key with the name specified in
               cloudmesh.profile.user. It also sets the variable key to
               that user.

           Once the keys are uploaded to github, they can be listed

           To list these keys the following list functions are provided.

           key list --source=git  [--username=USERNAME]
               lists all keys in git for the specified user. If the
               name is not specified it is read from cloudmesh.yaml
           key list --source=ssh  [--dir=DIR] [--output=OUTPUT]
               lists all keys in the directory. If the directory is not
               specified the default will be ~/.ssh

           key list NAMES
               lists all keys in the named virtual machines.

           List command can use the [--output=OUTPUT] option

               list the keys loaded to cloudmesh in the given format:
               json, yaml, table. table is default.

           The NAME can be specified and if omitted the name
           cloudmesh.profile.user is assumed.

           To get keys from the cloudmesh database the following commands
           are available:

           key delete NAMES
               deletes the Named keys. This may also have an impact on
               groups

           key rename NAME NEW
               renames the key from NAME to NEW in the cloudmesh database.

           Group management of keys is an important concept in cloudmesh,
           allowing multiple users to be added to virtual machines
           while managing the keys associated with them.

           The keys must be uploaded to cloudmesh database with a name so
           they can be used in a group. The --dryrun option executes the
           command without uploading the information to the clouds.

           If no group name is specified the group name default is
           assumed. If no cloudnamesh are specified, all active clouds
           are assumed. active clouds can be set in the cloudmesh.yaml
           file.

           key group delete [GROUPNAMES] [NAMES] [--dryrun]
               deletes the named keys from the named groups.

           key group list [GROUPNAMES] [--output=OUTPUT]
               list the key names and details in the group.

           key group upload [GROUPNAMES] [CLOUDS] [--dryrun]
               uploads the named groups to the specified clouds.

           In some cases you may want to store the public keys in files.
           For this reason we support the following commands.

           key group add --group=GROUPNAME --file=FILENAME
               the command adds the keys to the given group. The keys are
               written in the files in yaml format.

           key group export --group=GROUNAMES --filename=FILENAME
               the command exports the keys to the given group. The keys
               are written in the files in yaml format.

           The yaml format is as follows:

           cloudmesh:
             keys:
               NAMEOFKEY:
                 name: NAMEOFKEY
                 key: ssh-rsa AAAA..... comment
                 group:
                 - GROUPNAME
                 ...

           If a key is included in multiple groups they will be added
           to the grouplist of the key
    """

    def print_keys(keys):
        # Helper: render a list of keys in the requested output format.
        print(
            Printer.write(
                keys,
                sort_keys=["name"],
                order=["name", "type", "fingerprint", "comment"],
                header=["Name", "Type", "Fingerprint", "Comment"],
                output=arguments.output))

    map_parameters(arguments,
                   'cloud',
                   'dir',
                   'dryrun',
                   'filename',
                   'name',
                   'nopass',
                   'output',
                   'pub',
                   'pwd',
                   'set_path',
                   'source')

    variables = Variables()

    if arguments.list and arguments.source == "git":
        config = Config()
        username = config["cloudmesh.profile.github"]
        keys = SSHkey().get_from_git(username)
        print_keys(keys)
        return ""

    elif arguments.list and arguments.source == "ssh":
        # this is much simpler
        sshkey = SSHkey()
        print_keys([sshkey])
        return ""

    elif arguments.list and arguments.cloud:
        clouds = Parameter.expand(arguments.cloud)
        if len(clouds) == 0:
            variables = Variables()
            cloudname = variables['cloud']
            clouds = [cloudname]
        keys = []
        for cloud in clouds:
            print(f"cloud {cloud}")
            provider = Provider(name=cloud)
            keys = provider.keys()
            provider.Print(keys, output=arguments.output, kind="key")
        return ""

    elif arguments.list:
        cloud = "local"
        db = CmDatabase()
        keys = db.find(collection=f"{cloud}-key")
        print_keys(keys)
        return ""

    elif arguments.add:
        """
        key add [NAME] [--source=FILENAME]  # NOT IMPLEMENTED YET
        key add [NAME] [--source=git]
        key add [NAME] [--source=ssh]
        """
        key = Key()
        if arguments["--source"] == "ssh":
            name = arguments.NAME or "ssh"
            key.add(name, "ssh")
        elif arguments["--source"] == "git":
            name = arguments.NAME or "git"
            # FIX: the computed name was ignored; the key was always
            # stored under the literal name "git".
            key.add(name, "git")
        else:
            config = Config()
            name = config["cloudmesh.profile.user"]
            kind = "ssh"
            key.add(name, kind)

    elif arguments.init:
        """
        key init
        """
        config = Config()
        username = config["cloudmesh.profile.user"]
        if username == "TBD":
            Console.error(
                "Please set cloudmesh.profile.user in ~/.cloudmesh.yaml")
            u = os.environ["USER"].lower().replace(" ", "")
            Console.msg(
                f"To change it you can use the command. Define a NAME such as '{u}' e.g."
            )
            Console.msg("")
            Console.msg(f"    cms config set cloudmesh.profile.user={u}")
            Console.msg("")
            return ""
        key = Key()
        key.add(username, "ssh")
        variables['key'] = username

    elif arguments.upload:
        """
        key upload [NAMES] [--cloud=CLOUDS] [--dryrun]
        key upload [NAMES] [VMS] [--dryrun]
        """
        names = Parameter.expand(arguments.NAMES)
        # this may have a bug if NAMES is omitted

        #
        # Step 0. Set keyname to variable
        #
        if names is None or len(names) == 0:
            config = Config()
            username = config["cloudmesh.profile.user"]
            names = [username]
        if len(names) == 1:
            name = names[0]
            variables = Variables()
            if "key" in variables:
                old = variables["key"]
                if old != name:
                    # FIX: message typo "defualt" -> "default"
                    Console.msg(
                        f"Changing default key from {old} to {name}")
                    variables["key"] = name

        #
        # Step 1. keys = find keys to upload
        #
        cloud = "local"
        db = CmDatabase()
        db_keys = db.find(collection=f"{cloud}-key")
        keys = []
        for key in db_keys:
            if key["name"] in names:
                keys.append(key)
        if len(keys) == 0:
            Console.error(
                f"No keys with the names {names} found in cloudmesh. \n"
                "       Use the command 'key add' to add the key.")

        #
        # Step 2. iterate over the clouds to upload
        #
        clouds, names = Arguments.get_cloud_and_names(
            "list", arguments, variables)
        for cloud in clouds:
            print(f"cloud {cloud}")
            provider = Provider(name=cloud)
            for key in db_keys:
                name = key['name']
                if name in names:
                    try:
                        r = provider.key_upload(key)
                        # FIX: closing quote was misplaced inside the
                        # message, quoting half the sentence.
                        Console.ok(f"upload key '{name}' successful. ")
                    except ValueError as e:
                        Console.error(
                            f"key '{name}' already exists in {cloud}.")
        return ""

    elif arguments.delete and arguments.cloud and arguments.NAMES:
        # key delete NAMES --cloud=CLOUDS [--dryrun]
        names = Parameter.expand(arguments.NAMES)
        clouds = Parameter.expand(arguments.cloud)
        for cloud in clouds:
            provider = Provider(name=cloud)
            for name in names:
                if arguments.dryrun:
                    Console.ok(f"Dryrun: delete {name} in {cloud}")
                else:
                    images = provider.key_delete(name)
        return ""

    elif arguments.gen:
        """
        key gen (rsa | ssh) [--filename=FILENAME] [--nopass] [--set_path]
        Generate an RSA key pair with pem or ssh encoding for the
        public key. The private key is always encoded as a PEM file.
        """
        config = Config()

        # Check if password will be requested
        ap = not arguments.nopass
        if not ap:
            Console.warning("Private key will NOT have a password")
            cnt = yn_choice(message="Continue, despite risk?", default="N")
            if not cnt:
                sys.exit()

        # Discern the name of the public and private keys
        rk_path = None
        uk_path = None
        if arguments.filename:
            # FIX: the original sliced the wrong variable
            # (arguments.name) and used [-4:]/[-5:], which keeps the
            # SUFFIX instead of stripping it. Strip the suffix from
            # arguments.filename instead.
            if arguments.filename[-4:] == ".pub":
                rk_path = path_expand(arguments.filename[:-4])
                uk_path = path_expand(arguments.filename)
            elif arguments.filename[-5:] == ".priv":
                rk_path = path_expand(arguments.filename)
                uk_path = path_expand(arguments.filename[:-5] + ".pub")
            else:
                rk_path = path_expand(arguments.filename)
                uk_path = rk_path + ".pub"
        else:
            rk_path = path_expand(config['cloudmesh.security.privatekey'])
            uk_path = path_expand(config['cloudmesh.security.publickey'])

        # Set the path if requested
        if arguments.set_path and arguments.filename:
            config['cloudmesh.security.privatekey'] = rk_path
            config['cloudmesh.security.publickey'] = uk_path
            config.save()

        Console.msg(f"\nPrivate key: {rk_path}")
        Console.msg(f"Public  key: {uk_path}\n")

        # Generate the Private and Public keys
        kh = KeyHandler()
        r = kh.new_rsa_key()
        u = kh.get_pub_key(priv=r)

        # Serialize and write the private key to the path
        sr = kh.serialize_key(key=r,
                              key_type="PRIV",
                              encoding="PEM",
                              format="PKCS8",
                              ask_pass=ap)
        kh.write_key(key=sr, path=rk_path)

        # Determine the public key format and encoding
        enc = None
        forma = None
        if arguments.ssh:
            enc = "SSH"
            forma = "SSH"
        elif arguments.rsa:
            enc = "PEM"
            forma = "SubjectInfo"

        # Serialize and write the public key to the path
        su = kh.serialize_key(key=u,
                              key_type="PUB",
                              encoding=enc,
                              format=forma,
                              ask_pass=False)
        kh.write_key(key=su, path=uk_path)
        Console.ok("Success")

    elif arguments.verify:
        """
        key verify (ssh | pem) --filename=FILENAME --pub
        Verifies the encoding (pem or ssh) of the key (private or public)
        """
        kh = KeyHandler()
        fp = arguments.filename
        kt = None
        enc = None
        # Discern key type
        if arguments.pub:
            kt = "public"
            # Discern public key encoding
            if arguments.ssh:
                enc, e = "OpenSSH", "SSH"
            elif arguments.pem:  # PEM encoding
                enc = e = "PEM"
            # Load the public key, if no error occurs formatting is correct
            u = kh.load_key(path=fp, key_type="PUB",
                            encoding=e, ask_pass=False)
        else:
            kt, enc = "private", "PEM"
            # Load the private key to verify the formatting and password of
            # the key file. If no error occurs the format and pwd are correct
            r = kh.load_key(path=fp, key_type="PRIV",
                            encoding=enc, ask_pass=True)
        m = f"Success the {kt} key {fp} has proper {enc} format"
        Console.ok(m)

    elif arguments.delete and arguments.NAMES:
        # key delete NAMES [--dryrun]
        names = Parameter.expand(arguments.NAMES)
        cloud = "local"
        db = CmDatabase()
        db_keys = db.find(collection=f"{cloud}-key")
        error = []
        for key in db_keys:
            name = key['name']
            if name in names:
                if arguments.dryrun:
                    Console.ok(f"Dryrun: delete {name}")
                else:
                    db.delete(collection="local-key", name=name)
                    Console.ok(f"delete {name}")
        return ""

    elif arguments.group:
        raise NotImplementedError

    return ""
def get(self, service=None, source=None, destination=None, recursive=False):
    """Download the Google Drive file or folder named *destination*
    into the local directory *source*.

    :param service: unused; kept for interface compatibility
    :param source: local directory to download into (created if missing)
    :param destination: name of the Drive file/folder to fetch
    :param recursive: kept for interface compatibility; the original
        recursive and non-recursive branches were byte-identical, so a
        single shared implementation is used
    :return: dict of the downloaded files' metadata (via update_dict)
    """
    if not os.path.exists(source):
        os.makedirs(source)

    query_params = "name='" + destination + "' and trashed=false"
    sourceid = self.driveService.files().list(
        q=query_params,
        fields="nextPageToken, files(id, name, mimeType, parents,"
               "size,modifiedTime,createdTime)").execute()
    print(sourceid)

    # FIX: the API response is a dict that always carries keys, so
    # `len(sourceid) == 0` never triggered; check the 'files' list.
    files = sourceid.get('files', [])
    if len(files) == 0:
        Console.error('No files found')
        sys.exit(1)

    file_id = files[0]['id']
    file_name = files[0]['name']
    mime_type = files[0]['mimeType']
    tempres = []

    if mime_type == 'application/vnd.google-apps.folder':
        items = self.driveService.files().list(
            pageSize=self.limitFiles,
            fields="nextPageToken, files(id, name, mimeType, parents,"
                   "size,modifiedTime,createdTime)").execute()
        # FIX: iterating the response dict yields its string keys and
        # `item['mimeType']` raised TypeError; iterate the 'files' list.
        for item in items.get('files', []):
            if item['mimeType'] != 'application/vnd.google-apps.folder':
                self.download_file(source, item['id'], item['name'],
                                   item['mimeType'])
                tempres.append(item)
    else:
        self.download_file(source, file_id, file_name, mime_type)
        tempres.append(files[0])

    return self.update_dict(tempres)
def do_volume(self, args, arguments):
    """
    ::

      Usage:
        volume register which
        volume register [NAME] [--cloud=CLOUD] [ARGUMENTS...]
        volume list [NAMES] [--vm=VM] [--region=REGION] [--cloud=CLOUD] [--refresh] [--dryrun] [--output=FORMAT]
        volume create [NAME] [--size=SIZE] [--volume_type=TYPE] [--description=DESCRIPTION] [--dryrun] [ARGUMENTS...]
        volume status [NAMES] [--cloud=CLOUD]
        volume attach [NAMES] [--vm=VM]
        volume detach [NAMES]
        volume delete NAMES
        volume migrate NAME FROM_VM TO_VM
        volume sync FROM_VOLUME TO_VOLUME

      This command manages volumes accross different clouds

      Arguments:
          NAME   the name of the volume
          vm     the name of the vm

      Options:
          --vm=VMNAME        The name of the virtual machine
          --region=REGION    The name of the region
          --cloud=CLOUD      The name of the cloud
          --refresh          If refresh the information is taken from the cloud
          --volume_type=TYPE The type of the volume
          --output=FORMAT    Output format [default: table]

      Description:
         TBD
    """
    VERBOSE(arguments)
    variables = Variables()

    def get_last_volume():
        # Find the most recently stored volume in the database for the
        # selected cloud and return its Name tag.
        cm = CmDatabase()
        cloud = arguments['--cloud'] or variables["cloud"]
        # how to get updated cloud names? or only search the last
        # volume in --cloud?
        last_entry = cm.find(cloud=cloud, kind='volume')[-1]
        cm.close_client()
        for tag in last_entry['Tags']:
            if tag['key'] == 'Name':
                name = tag['Value']
            else:
                # FIX: raising a plain string is a TypeError in
                # Python 3; raise a real exception instead.
                raise ValueError("Please name the volume!")
        return name

    def create_name():
        """
        Gregor suggests to use

        from cloudmesh.management.configuration.Name import Name as VolumeName

        config = Config()
        n = VolumeName(
            user=config["cloudmesh.profile.username"],
            kind="volume",
            path=f"{config.location}/volume.yaml",
            schema="{user}-volume-{counter}",
            counter=1)
        n.incr()
        counter = n.get()

        :return: the generated volume name
        """
        # please edit ~/.cloudmesh/volume.yaml as following:
        # counter: 1
        # kind: volume
        config = Config()
        directory = f"{config.location}"
        volume_yaml = path_expand(f"{directory}/volume.yaml")
        try:
            with open(volume_yaml) as file:
                dic = yaml.load(file, Loader=yaml.FullLoader)
            counter = dic["counter"]
            user = dic["user"]
            created_name = f"{user}-{cloud}-{counter}"
            dic["counter"] += 1
            with open(volume_yaml, 'w') as file:
                documents = yaml.dump(dic, file)
        except Exception:
            Console.error("the volume.yaml file does not exist."
                          "You need to implement "
                          "the case so it gets autogenerated")
            raise NotImplementedError
        return created_name

    map_parameters(arguments,
                   "cloud",
                   "vm",
                   "region",
                   "refresh",
                   "dryrun",
                   "output",
                   "size",
                   "volume_type",
                   "description",
                   )

    arguments.output = Parameter.find("output",
                                      arguments,
                                      variables,
                                      )

    arguments.NAME = arguments.NAME or variables[
        "volume"]  # or get_last_volume()
    # path = arguments.PATH
    cloud = variables['cloud']

    if arguments.list:
        if arguments.NAMES:
            # TODO: listing by explicit NAMES is not implemented yet;
            # the code below this raise is currently unreachable.
            raise NotImplementedError
            names = Parameter.expand(arguments.NAMES)
            for name in names:
                # kind = cm.kind
                provider = Provider(name=name)
                # result = provider.list(???)
                result = provider.list()
        elif arguments.cloud:
            provider = Provider(name=arguments.cloud)
            result = provider.list(**arguments)
            print(provider.Print(result,
                                 kind='volume',
                                 output=arguments.output))
        return ""

    elif arguments.create:
        # parameters = Parameter.arguments_to_dict(arguments.ARGUMENTS)
        # print("parameters", parameters)
        if arguments.cloud is None:
            arguments['cloud'] = cloud  # cloud from variable['volume']
        if arguments.NAME is None:
            arguments.NAME = str(create_name())
        provider = Provider(name=arguments.cloud)
        result = provider.create(**arguments)
        print(provider.Print(result,
                             kind='volume',
                             output=arguments.output))

    elif arguments.delete:
        names = Parameter.expand(arguments["NAMES"])
        config = Config()
        clouds = list(config["cloudmesh.volume"].keys())
        for cloud in clouds:
            active = config[f"cloudmesh.volume.{cloud}.cm.active"]
            if active:
                p = Provider(name=cloud)
                deleted = []
                for name in names:
                    result = p.delete(
                        name)  # returns None if it is not in the cloud
                    if result:
                        deleted.append(name)
                # FIX: `del names[name]` indexed the list with a string
                # (TypeError); remove the deleted names by value.
                for name in deleted:
                    names.remove(name)

    elif arguments.attach:
        # cloud from variable['volume']
        arguments.cloud = arguments.cloud or cloud
        names = arguments.NAMES or variables["volume"]
        vm = arguments.vm or variables["vm"]
        if names is None:
            Console.error("No Volume specified or found")
            return ""
        if vm is None:
            Console.error("No vm specified or found")
            return ""
        names = Parameter.expand(names)
        banner(f"Attach {arguments.NAMES} to {arguments.vm}")
        provider = Provider(name=arguments.cloud)
        for name in names:
            result = provider.attach(name, vm)
            print(provider.Print(result,
                                 kind='volume',
                                 output=arguments.output))

    elif arguments.detach:
        #
        # this has a bug as the general provider needs a search that finds
        # the vm given a volume name to find the vm it is attached to
        # this way you can write just a loop such as
        #
        # for volume in volumes:
        #    v = provider.search(volume)
        #    if v['vm'] and v['cm.cloud'] == v['cm.cloud']:
        #        result = provider.detach(name)
        #
        # or something similar dependent on how you defined the
        # datastructure cm. for a volume
        #
        volumes = arguments.NAMES or variables["volume"]
        if volumes is None:
            Console.error("No volumes specified or found")
            return ""
        volumes = Parameter.expand(volumes)
        banner(f"Detach {volumes}")
        for name in volumes:
            volume = Provider.search(name=name)
            cloud = volume["cm"]["cloud"]
            provider = Provider(name=cloud)
            result = provider.detach(name=name)
            print(provider.Print(result,
                                 kind='volume',
                                 output=arguments.output))
def version(self):
    """Determine the installed versions of vagrant and virtualbox.

    Both probes are best-effort: if a tool is missing or its output
    cannot be parsed, the corresponding entry simply stays None.

    :return: A dict with the information

    Description:

        The output looks like this

        {'vagrant': '2.2.4',
         'virtualbox': {'extension': {
                            'description': 'USB 2.0 and USB 3.0 Host '
                                           'Controller, Host Webcam, '
                                           'VirtualBox RDP, PXE ROM, Disk '
                                           'Encryption, NVMe.',
                            'extensionpack': True,
                            'revision': '128413',
                            'usable': 'true',
                            'version': '6.0.4'},
                        'version': '6.0.4'}}
    """
    version = {
        "vagrant": None,
        "virtualbox": {
            "version": None,
            "extension": None
        }
    }

    try:
        # "Vagrant 2.2.4" -> keep only the version part
        result = Shell.execute("vagrant --version", shell=True)
        txt, version["vagrant"] = result.split(" ")
        if "A new version of Vagrant is available" in result:
            # FIX: message typo "doenload" -> "download"
            Console.error(
                "Vagrant is outdated. Please download a new version of vagrant"
            )
            raise NotImplementedError
    except Exception:
        # best effort: vagrant may not be installed
        pass

    try:
        result = Shell.execute(self.vboxmanage, shell=True)
        txt, version["virtualbox"]["version"] = \
            result.split("\n")[0].split("Version ")
        result = Shell.execute(self.vboxmanage + " list -l extpacks",
                               shell=True)
        # Parse the extension-pack listing line by line.
        extension = {}
        for line in result.split("\n"):
            if "Oracle VM VirtualBox Extension Pack" in line:
                extension["extensionpack"] = True
            elif "Revision:" in line:
                extension["revision"] = line.split(":")[1].strip()
            elif "Usable:" in line:
                extension["usable"] = line.split(":")[1].strip()
            elif "Description:" in line:
                extension["description"] = line.split(":")[1].strip()
            elif "Version:" in line:
                extension["version"] = line.split(":")[1].strip()
        version["virtualbox"]["extension"] = extension
    except Exception:
        # best effort: virtualbox may not be installed
        pass

    return version
def create(self,
           name=None,
           image=None,
           network=None,
           size=None,
           location=None,
           timeout=360,
           key=None,
           secgroup=None,
           ip=None,
           user=None,
           public=True,
           group=None,
           metadata=None,
           cloud=None,
           label=None,
           **kwargs):
    """
    creates a named node

    :param group: the list of groups the vm belongs to
    :param name: the name of the node
    :param image: the image used
    :param size: the size (flavor) of the image
    :param timeout: a timeout in seconds that is invoked in case the
                    image does not boot. The default is set to 3 minutes.
    :param kwargs: additional arguments passed along at time of boot
    :return: dict of the created server (via update_dict)
    """
    image_use = None
    flavor_use = None

    # keyname = Config()["cloudmesh"]["profile"]["user"]
    # ex_keyname has to be the registered keypair name in cloud
    """
    https://docs.openstack.org/openstacksdk/latest/user/connection.html#openstack.connection.Connection.create_server
    """

    # accept flavor as an alias for size
    if 'flavor' in kwargs and size is None:
        size = kwargs['flavor']

    # resolve the network: explicit arg > kwargs > provider default
    if network is not None:
        pass
    elif 'network' in kwargs:
        network = kwargs['network']
    elif 'network' in self.default:
        network = self.default['network']

    # Guess user name
    if user is None:
        user = Image.guess_username(image)

    # get IP
    if not ip and public:
        ip = self.find_available_public_ip()
        # pprint(entry)
    elif ip is not None:
        entry = self.list_public_ips(ip=ip, available=True)
        if len(entry) == 0:
            print("ip not available")
            raise ValueError(f"The ip can not be assigned {ip}")

    # FIX: `groups` was only bound when group was a str; the banner
    # below references it unconditionally, which raised NameError for
    # list or None groups.
    if isinstance(group, str):
        groups = Parameter.expand(group)
    else:
        groups = group

    vm_label = label or name

    banner("Create Server")
    Console.msg(f"    Cloud:    {self.cloud}")
    Console.msg(f"    Name:     {name}")
    Console.msg(f"    Label:    {vm_label}")
    Console.msg(f"    User:     {user}")
    Console.msg(f"    IP:       {ip}")
    Console.msg(f"    Image:    {image}")
    Console.msg(f"    Size:     {size}")
    Console.msg(f"    Network:  {network}")
    Console.msg(f"    Public:   {public}")
    Console.msg(f"    Key:      {key}")
    Console.msg(f"    Location: {location}")
    Console.msg(f"    Timeout:  {timeout}")
    Console.msg(f"    Secgroup: {secgroup}")
    Console.msg(f"    Group:    {group}")
    Console.msg(f"    Groups:   {groups}")
    Console.msg("")

    # image = self.cloudman.compute.find_image(image)
    # flavor = self.cloudman.compute.find_flavor(size)
    # network = self.cloudman.network.find_network(network)

    try:
        server = self.cloudman.create_server(name,
                                             network=network,
                                             flavor=size,
                                             image=image,
                                             key_name=key,
                                             security_groups=[secgroup],
                                             timeout=timeout
                                             # tags=groups,
                                             # wait=True
                                             )
        """
        server = self.cloudman.create_server(name,
                                             networks=[
                                                 {"uuid": "0fa8824d-8a3f-4890-90e1-c3596b3511c6"}],
                                             flavor=size,
                                             image=image,
                                             key_name=key,
                                             security_groups=[secgroup],
                                             timeout=timeout
                                             # tags=groups,
                                             # wait=True
                                             )
        """
        server['user'] = user
        r = self.cloudman.wait_for_server(server)
        s = self.cloudman.add_ips_to_server(server, ips=ip)
        variables = Variables()
        variables['vm'] = name
        if metadata is None:
            metadata = {}

        #
        # due to metadata limitation in openstack do not add the
        # creation time
        #
        if 'created' in metadata:
            del metadata['created']

        metadata['image'] = image
        metadata['flavor'] = size
        metadata['label'] = vm_label

        self.cloudman.set_server_metadata(server, metadata)

        self.add_secgroup(name=secgroup)

        # server = self.cloudman.compute.wait_for_server(server)

        # print("ssh -i {key} root@{ip}".format(
        #     key=PRIVATE_KEYPAIR_FILE,
        #     ip=server.access_ipv4))

    except openstack.exceptions.ResourceTimeout:
        Console.error("Problem starting vm in time.")
        raise TimeoutError

    except Exception as e:
        Console.error("Problem starting vm", traceflag=True)
        print(e)
        raise RuntimeError

    return self.update_dict(server, kind="vm")[0]
def do_test(self, args, arguments):
    """
    ::

      Usage:
            test

      This command is intended to check if your windows set up
      is correctly done.

      Bugs:
          This program is supposed to be implemented. It is at this
          time just a template

      Description:

        Checks we do

         1. are you running python 3.8.1
         2. are you having the newest version of pip
         3. is cl installed
         4. is nmake installed
         5. is the username without spaces
         6. are you running in a vnenv
         7. is the default mongo port used
         8. do you have docker installed
         9. do you have vbox installed
         10. how much memory do you have
         11. do you have free diskspace

        Checks that are missing or need implemented

         12. is hyperv switched on or off
         13. are containers running
         14. .... other tyings that can help us debug your environment
    """
    tester = CloudmeshTest()

    #
    # Python setup
    #
    tester.check_venv()
    tester.check_python()
    tester.check_command("pip --version", test="20.0.2")

    #
    # command tool setup
    #
    if platform.system() == "Windows":
        if not tester.check_windows():
            Console.error(" THIS VERSION OF WINDOWS IS NOT SUPPORTED.")
            return ""
        # probe each required executable on the PATH
        for executable in ("cl",
                           "nmake",
                           "git",
                           "ssh",
                           "ssh-keygen",
                           "docker",
                           "yamllint"):
            tester.which(executable)
    else:
        # probe each tool by running it and matching expected output
        probes = [
            ("git --version", {"test": "git version"}),
            ("ssh", {"test": "usage", "show": False}),
            ("ssh-keygen --help", {"test": "usage", "show": False}),
            ("docker --version", {"test": "Docker version"}),
            ("VirtualBox --help",
             {"test": "Oracle VM VirtualBox VM Selector", "show": False}),
            ("yamllint", {"test": "usage: yamllint", "show": False}),
        ]
        for command, expectations in probes:
            tester.check_command(command, **expectations)

    #
    # environment checks
    #
    tester.is_user_name_valid()
    tester.check_mongo()
    tester.usage()
    tester.check_yaml()

    return ""
def do_storage(self, args, arguments):
    """
    ::

      Usage:
          storage [--storage=SERVICE] put FILENAME SOURCEDIR [DESTDIR]
          storage [--storage=SERVICE] get FILENAME DESTDIR
          storage [--storage=SERVICE] delete FILENAME
          storage [--storage=SERVICE] info FILENAME
          storage [--storage=SERVICE] search FILENAME
          storage [--storage=SERVICE] create dir DIRNAME [DESTDIR]
          storage [--storage=SERVICE] list dir [DIRNAME]
          storage [--storage=SERVICE] delete dir DIRNAME

      This command creates, deletes, and returns information on
      directories and files in cloud storage services.

      Arguments:
          FILE       a file name
          DESTDIR    destination directory for uploads and downloads
          SOURCEDIR  source directory for syncing directories
          DIRNAME    name of new folder

      Example:
          set storage=box
          storage put FILENAME SOURCEDIR DESTDIR

          is the same as

          storage --storage=box put FILENAME SOURCEDIR DESTDIR
    """
    pprint(arguments)

    m = Manager()

    # Resolve the service: CLI option first, then the 'storage' variable.
    service = None
    try:
        service = arguments["--storage"]
    except Exception as e:
        try:
            v = Variables()
            service = v['storage']
        except Exception as e:
            service = None

    if service is None:
        Console.error("storage service not defined")
        # FIX: previously execution fell through and invoked the manager
        # with service=None; bail out instead.
        return ""

    if arguments.get:
        m.get(service, arguments.FILENAME, arguments.DESTDIR)

    elif arguments.put:
        if arguments.DESTDIR is not None:
            m.put(service, arguments.FILENAME, arguments.SOURCEDIR,
                  arguments.DESTDIR)
        else:
            m.put(service, arguments.FILENAME, arguments.SOURCEDIR)

    # FIX: plain delete used to shadow "delete dir" in the elif chain,
    # making the delete_dir branch unreachable; exclude the dir case.
    elif arguments.delete and not arguments.dir:
        m.delete(service, arguments.FILENAME)

    elif arguments.info:
        m.info(service, arguments.FILENAME)

    elif arguments.search:
        m.search(service, arguments.FILENAME)

    elif arguments.create and arguments.dir:
        if arguments.DESTDIR is not None:
            m.create_dir(service, arguments.DIRNAME, arguments.DESTDIR)
        else:
            m.create_dir(service, arguments.DIRNAME)

    elif arguments.list and arguments.dir:
        if arguments.DIRNAME is not None:
            m.list_dir(service, arguments.DIRNAME)
        else:
            m.list_dir(service)

    elif arguments.delete and arguments.dir:
        m.delete_dir(service, arguments.DIRNAME)

    else:
        print("Command not recognized.")
def test(cls, hosts=None):
    """
    Command to test the connectivity to specified hostnames.

    First checks to see if the device has even registered with the
    bridge in ~/.cloudmesh/bridge/info which is a directory created
    when the bridge is first created.

    :param hosts: List of hostnames to check for connectivity.
    :return: None
    """
    # FIX: hosts defaults to None; len(None) below raised TypeError.
    hosts = hosts or []

    try:
        info = sudo_readfile('~/.cloudmesh/bridge/info')
        index = info.index(cls.lease_bookmark)
        leases = info[index + 1:]
    except Exception:
        Console.error(
            'Could not find information on bridge. '
            'Has the bridge been created yet?')
        sys.exit(1)

    # Hostnames are the 5th field of each DHCP lease line.
    known_hosts = []
    for lease in leases:
        lease_info = lease.split()
        host = lease_info[4]
        known_hosts.append(host)

    count = 0
    hosts_to_check = len(hosts)

    Console.info("Beginning ping checks...")
    for host in hosts:
        if host not in known_hosts:
            Console.warning(
                f'{host} is not among the known devices of the bridge. '
                f'No connection from {host} has been received before. '
                f'Skipping test')
            hosts_to_check -= 1
        else:
            Console.info(f'Pinging Host {host}. Please wait ...')
            status, stdout = cls._system(f'ping -c 1 {host}',
                                         warnuser=False, both=True)
            # A nonzero exit status from ping means the host is
            # unreachable.
            if status != 0:
                # FIX: message typo "successfuly" -> "successfully"
                message = textwrap.dedent(f"""
                Could not ping {host} successfully.
                Rebooting {host} may fix the problem.
                Manually ping {host} for more details.
                """)
                Console.warning(message)
            else:
                count += 1
                Console.ok(f"Successfully pinged {host}")
    Console.ok(
        f'Completed checks. {count} out of {hosts_to_check} checks succeeded.')
def execute(cls,
            cmd,
            arguments="",
            shell=False,
            cwd=None,
            traceflag=True,
            witherror=True):
    """Run a shell command and return its captured output.

    :param witherror: if set to False the error will not be printed
    :param traceflag: if set to True the trace is printed in case of an error
    :param cwd: the working directory in which the command is executed;
                defaults to the current directory when None
    :param shell: if set to True the subprocess is called as part of a shell
    :param cmd: command to run
    :param arguments: arguments for the command; may be a str, list or tuple
    :return: decoded, stripped command output, or None on failure
    """
    # print "--------------"
    result = None
    terminal = cls.terminal_type()
    # print cls.command
    os_command = [cmd]
    if terminal in ['linux', 'windows']:
        os_command = [cmd]
    elif 'cygwin' in terminal:
        # On cygwin the command must be translated through the per-OS
        # command table; bail out if it is unknown.
        if not cls.command_exists(cmd):
            print("ERROR: the command could not be found", cmd)
            return
        else:
            os_command = [cls.command[cls.operating_system()][cmd]]

    # Normalize the arguments into a flat argv list.
    if isinstance(arguments, list):
        os_command = os_command + arguments
    elif isinstance(arguments, tuple):
        os_command = os_command + list(arguments)
    elif isinstance(arguments, str):
        os_command = os_command + arguments.split()
    else:
        print("ERROR: Wrong parameter type", type(arguments))

    if cwd is None:
        cwd = os.getcwd()

    try:
        if shell:
            if platform.lower() == 'win32':
                import ctypes

                # Context manager that temporarily disables WOW64
                # file-system redirection so a 32-bit Python can reach
                # the native system32 binaries.
                class disable_file_system_redirection:
                    _disable = ctypes.windll.kernel32.Wow64DisableWow64FsRedirection
                    _revert = ctypes.windll.kernel32.Wow64RevertWow64FsRedirection

                    def __enter__(self):
                        self.old_value = ctypes.c_long()
                        self.success = self._disable(
                            ctypes.byref(self.old_value))

                    def __exit__(self, type, value, traceback):
                        if self.success:
                            self._revert(self.old_value)

                # A single-element command is split on spaces so each
                # token becomes its own argv entry.
                if len(os_command) == 1:
                    os_command = os_command[0].split(' ')
                with disable_file_system_redirection():
                    result = subprocess.check_output(
                        os_command,
                        stderr=subprocess.STDOUT,
                        shell=True,
                        cwd=cwd)
            else:
                result = subprocess.check_output(os_command,
                                                 stderr=subprocess.STDOUT,
                                                 shell=True,
                                                 cwd=cwd)
        else:
            result = subprocess.check_output(
                os_command,
                # shell=True,
                stderr=subprocess.STDOUT,
                cwd=cwd)
    except:
        # NOTE(review): bare except silently swallows every failure and
        # the function then returns None -- callers must cope with that.
        if witherror:
            Console.error("problem executing subprocess",
                          traceflag=traceflag)
    if result is not None:
        # check_output returns bytes; decode to str for callers.
        result = result.strip().decode()
    return result
def create(cls,
           masterIP='10.1.1.1',
           ip_range=['10.1.1.2', '10.1.1.122'],
           master=None,
           workers=None,
           priv_interface='eth0',
           ext_interface='eth1',
           purge=False,
           dryrun=False):
    """
    Configure the master as a network bridge for the workers.

    If worker(s) is missing the master is set up only.

    :param masterIP: IP address assigned to the master on the private net
    :param ip_range: [first, last] of the DHCP range handed to workers
    :param master: expected to be a single string
    :param workers: list of worker names (optional)
    :param ext_interface: the external interface through which the master
                          connects to the internet
    :param priv_interface: the private interface through which the master
                           and workers communicate
    :param purge: if True, uninstall dnsmasq and its files first
    :param dryrun: if True, the helpers only simulate their actions
    :return: None
    """
    # NOTE: ip_range has a mutable default; it is only read, never
    # mutated, so sharing the default object is safe here.
    cls.masterIP = masterIP
    cls.ip_range = ip_range
    cls.master = master
    cls.workers = workers
    cls.dryrun = dryrun
    cls.ext_interface = ext_interface
    cls.priv_interface = priv_interface

    # Master configuration
    StopWatch.start('Master Configuration')

    # Configure dhcpcd.conf of master. No restart
    StopWatch.start('dhcpcd.conf configuration')
    cls._dhcpcd_conf()
    StopWatch.stop('dhcpcd.conf configuration')
    StopWatch.status('dhcpcd.conf configuration', True)

    if purge:
        # Uninstall dnsmasq and all files
        StopWatch.start('dnsmasq purge')
        cls._purge_dnsmasq()
        # BUG FIX: the original called StopWatch.start a second time
        # here, so the purge timer was never stopped.
        StopWatch.stop('dnsmasq purge')
        StopWatch.status('dnsmasq purge', True)

    # Install dnsmasq if it is not already installed
    if not cls._dnsmasq_exists():
        StopWatch.start('dnsmasq installation')
        cls._install_dnsmasq()
        StopWatch.stop('dnsmasq installation')
        StopWatch.status('dnsmasq installation', True)
    else:
        Console.info("dnsmasq already installed. Skipping installation")

    # Configure dnsmasq
    StopWatch.start('dnsmasq config')
    cls._config_dnsmasq()
    StopWatch.stop('dnsmasq config')
    StopWatch.status('dnsmasq config', True)

    # iPv4 forwarding
    StopWatch.start('Enable iPv4 forwarding')
    cls._set_ipv4()
    StopWatch.stop('Enable iPv4 forwarding')
    StopWatch.status('Enable iPv4 forwarding', True)

    # iptables configuration
    StopWatch.start('iptables configuration')
    cls._set_iptables()
    StopWatch.stop('iptables configuration')
    StopWatch.status('iptables configuration', True)

    StopWatch.stop('Master Configuration')
    StopWatch.status('Master Configuration', True)

    Console.info("Finished configuration of master")
    cls._completion_message()
    Console.ok("Process completed")
def NotImplementedInWindows(f=None):
    """
    Abort with an error message when running on Windows.

    BUG FIX: the original message was a plain (non-f) string literally
    containing '{f.__name__}', which was never interpolated and referred
    to an undefined name 'f'. An optional callable can now be passed
    (backward-compatible default None) so its name appears in the
    message; without it a generic message is printed.

    :param f: optional callable whose __name__ is named in the error
    :return: None on non-Windows platforms; exits the process on Windows
    """
    if sys.platform == "win32":
        name = f.__name__ if f is not None else "called"
        Console.error(f"The method {name} is not implemented in Windows,"
                      " please implement, and/or submit an issue.")
        sys.exit()
def start(self, name=None, spec=None, foreground=False):
    """
    Start up an OpenApi server.

    :param name: server name
    :param spec: openapi spec yaml file name
    :param foreground: flag to run server in foreground. Default is False.
    :return: started server PID, or None when a server with this name
             is already running
    """
    name = Server.get_name(name, spec)

    # Refuse to start a second server under the same name.
    for active_server in Server.ps():
        if active_server['name'] == name and os.getpid(
        ) != active_server['pid']:
            Console.error(
                f'Server {name} already running on PID {active_server["pid"]}'
            )
            return None

    pid = ""
    if foreground:
        self._run_app()
    else:
        # We need the if condition below for Windows machines, where
        # os.fork() (used by _run_deamon) does not work. Do not remove
        # this condition.
        if sys.platform == 'win32':
            pid = self.run_os()
        else:
            # BUG FIX: the original discarded the return value of
            # _run_deamon() and then referenced an undefined name
            # 'daemonpid', raising NameError. Capture it here.
            daemonpid = self._run_deamon()
            if daemonpid is not None:
                pid = daemonpid
            else:
                pid = Server.ps(name=name)[1]["pid"]

    _spec = Server.ps(name=name)[1]["spec"]
    with open(_spec, "r") as stream:
        try:
            details = yaml.safe_load(stream)
        except yaml.YAMLError as e:
            print(e)
            assert False, "Yaml file has syntax error"

    url = details["servers"][0]["url"]
    print()
    print(" Starting:", name)
    print(" PID: ", pid)
    print(" Spec: ", _spec)
    print(" URL: ", url)
    print(" Cloudmesh UI: ", url + '/ui')
    print()
    try:
        details["info"]["title"] = name
    except KeyError as e:
        Console.info("Issue setting name of server. Using spec instead")

    # Record the running server in the registry so ps()/stop() find it.
    registry = Registry()
    registry.add_form_file(details,
                           pid=pid,
                           spec=_spec,
                           directory=self.directory,
                           port=self.port,
                           host=self.host,
                           url=url)
    return pid
def create_user(self):
    """
    Create the 'cloudmesh' IAM user, or if the user already exists
    create another access key, by driving the AWS console with Selenium.

    BUG FIX: the login line had been corrupted -- the getpass call and
    the driver.get call were fused into one statement; restored as two.

    :return: the name of the downloaded file containing the access key
    """
    email = input("Enter your email: ")
    passw = getpass.getpass("Enter your password: ")
    self.driver.get("https://console.aws.amazon.com/iam/home#/users")
    assert "Amazon Web Services" in self.driver.title, \
        "Unexpected login page, aborting"
    sleep(1.5)

    # Sign in (email, captcha, password, captcha).
    self.driver.find_element_by_id("resolving_input").send_keys(email)
    self.driver.find_element_by_id("next_button").click()
    sleep(1.5)
    self.check_captcha()
    self.driver.find_element_by_id("password").send_keys(passw)
    self.driver.find_element_by_id("signin_button").click()
    sleep(1.5)
    self.check_captcha()

    # If the cloudmesh user already exists, just add a fresh access key.
    if self.user_exists():
        self.create_accesskey_on_current_account()
        return "accessKeys.csv"

    # Otherwise walk through the 'Add user' wizard.
    self.driver.find_element_by_link_text("Add user").click()
    sleep(1.5)
    self.driver.find_element_by_id("awsui-textfield-17").send_keys(
        "cloudmesh")
    self.driver.find_element_by_name("accessKey").click()
    sleep(1.5)
    self.driver.find_element_by_class_name("wizard-next-button").click()
    sleep(1.5)
    self.driver.find_element_by_class_name("awsui-util-pt-s").click()
    sleep(1.5)
    self.driver.find_element_by_xpath(
        "//awsui-textfield[@ng-model='createGroupModal.groupName']/input"
    ).send_keys("cloudmesh")
    sleep(1.5)
    self.driver.find_element_by_xpath(
        '//*/policies-table//table-search//search/div/input').send_keys(
        "AmazonEC2FullAccess")
    sleep(1.5)
    self.driver.find_element_by_xpath(
        '//div[@data-item-id="arn:aws:iam::aws:policy/AmazonEC2FullAccess"]//awsui-checkbox//div'
    ).click()
    sleep(1.5)
    self.driver.find_element_by_xpath(
        "//awsui-button[@click-tracker='CreateGroup']").click()
    sleep(1.5)
    Console.info("'cloudmesh' group created")
    self.driver.find_element_by_class_name("wizard-next-button").click()
    sleep(1.5)
    self.driver.find_element_by_class_name("wizard-next-button").click()
    sleep(1.5)
    Console.info("'cloudmesh' user created")
    self.driver.find_element_by_class_name("wizard-next-button").click()
    sleep(1.5)
    self.driver.find_element_by_xpath(
        "//awsui-button[@text='Download .csv']").click()
    Console.info("credentials.csv downloaded")
    sleep(2)
    self.driver.find_element_by_xpath(
        "//awsui-button[@text='Close']").click()
    sleep(4)
    return "credentials.csv"
def __init__(self,
             name=None,
             spec=None,
             directory=None,
             host="127.0.0.1",
             server="flask",
             port=8080,
             debug=True):
    """
    Construct a Server object from an openapi specification.

    :param name: server name; derived from the spec when omitted
    :param spec: openapi spec yaml file (required)
    :param directory: directory for the input file; defaults to the
                      directory containing the spec
    :param host: host ip or dns name used to start the service
    :param server: type of service to start. Default is flask.
    :param port: port to use for the service. Default is 8080.
    :param debug: flag to turn on debug logging. Default is True.
    :return: server object
    :raises FileNotFoundError: when no spec file is given
    """
    # A spec file is mandatory; bail out early without one.
    if spec is None:
        Console.error("No service specification file defined")
        raise FileNotFoundError

    self.spec = path_expand(spec)
    self.name = Server.get_name(name, self.spec)
    self.directory = os.path.dirname(self.spec) if directory is None \
        else directory

    # Fall back to the documented defaults for any falsy value.
    self.host = host or "127.0.0.1"
    self.port = port or 8080
    self.debug = debug or False
    self.server = server or "flask"
    self.server_command = ""

    VERBOSE(dict(self.__dict__), label="Server parameters")

    if server == "tornado":
        # Tornado is optional; verify it is importable before use.
        try:
            import tornado  # noqa: F401
        except Exception as e:
            print(e)
            Console.error(
                "tornado not install. Please use `pip install tornado`")
            sys.exit(1)
        if self.debug:
            Console.error("Tornado does not support --verbose")
            sys.exit(1)

    Console.ok(self.directory)
def register(self, cloud='aws'):
    """
    Obtain AWS credentials through the AWS web console (via Selenium)
    and store them in cloudmesh.yaml.

    Per platform, checks that Google Chrome and the chromedriver are
    available, creates (or reuses) the 'cloudmesh' IAM user, moves the
    downloaded access-key CSV into ~/.cloudmesh, and updates the
    configuration.

    BUG FIXES relative to the original:
    - the "moved to ~/.cloudmesh folder" message and the read_csv path
      used format strings whose {filename} placeholder had been lost
      ("(unknown)"), so the wrong path was read
    - config.save() ran only on the Linux branch, so the updated
      credentials were never persisted on macOS/Windows
    - the triplicated move/update tail is factored into a helper

    :param cloud: name of the cloud section to register (default "aws")
    :return: None
    """
    # The platform-independent head of the chromedriver help message.
    driver_help = (
        "Chrome geckodriver not installed. Follow these steps for installation: \n"
        "1) Download the driver from the following link: \n\t "
        "https://sites.google.com/a/chromium.org/chromedriver/downloads \n")

    if platform == "linux" or platform == "linux2":
        # Check whether google chrome is installed at all.
        chrome_ver1 = subprocess.getoutput('google-chrome-stable --version')
        chrome_ver2 = subprocess.getoutput('google-chrome --version')
        if 'not found' in chrome_ver1.lower() \
                or 'not found' in chrome_ver2.lower():
            Console.error("google chrome is not installed")
            return
        try:
            self.driver = webdriver.Chrome()
        except WebDriverException as e:
            Console.error(e)
            Console.error(
                driver_help +
                "2) Copy the `chromedriver` to '/usr/bin' \n"
                "3) Set the permission using:\n\t"
                "'sudo chmod +x /usr/bin/chromedriver'")
            return
    elif platform == "darwin":
        chrome = Path(
            "/Applications/Google Chrome.app/Contents/MacOS/Google Chrome")
        if not chrome.is_file():
            Console.error("Google chrome is not installed")
            return
        try:
            self.driver = webdriver.Chrome()
        except WebDriverException as e:
            Console.error(e)
            Console.error(
                driver_help +
                "2) Copy the `chromedriver` to '/usr/local/bin' \n"
                "3) Set the permission using:\n\t"
                "'chmod +x /usr/local/bin/chromedriver'")
            return
    elif platform == "win32":
        chrome = Path(
            "C:\\Program Files (x86)\\Google\\Chrome\\Application\\Chrome.exe")
        if not chrome.is_file():
            Console.error("Google chrome is not installed")
            return
        try:
            self.driver = webdriver.Chrome()
        except WebDriverException as e:
            Console.error(e)
            Console.error(
                driver_help +
                "2) Copy the `chromedriver` to path, for instance you can add "
                "it to the followtin path: "
                "\n\t %USERPROFILE%\\AppData\\Local\\Microsoft\\WindowsApps")
            return
    else:
        # Unsupported platform; nothing to do.
        return

    self._install_credentials()

def _install_credentials(self):
    """Move the downloaded key CSV to ~/.cloudmesh and update cloudmesh.yaml."""
    credentials_file_name = self.create_user()
    credentials_csv_path = Path.home().joinpath('Downloads').joinpath(
        credentials_file_name).resolve()
    cloudmesh_folder = Path.home().joinpath('.cloudmesh').resolve()
    os.rename(
        credentials_csv_path,
        cloudmesh_folder.joinpath(credentials_file_name).resolve())
    Console.info(f"{credentials_file_name} moved to ~/.cloudmesh folder")
    creds = pandas.read_csv(f"{cloudmesh_folder}/{credentials_file_name}")
    self.set_credentials(creds)
    self.config.save()
    Console.info(
        "AWS 'Access Key ID' and 'Secret Access Key' in the cloudmesh.yaml updated"
    )
def start(self, security=True):
    """
    Start the MongoDB server.

    Depending on self.data['MODE'] this either does nothing (mode
    "running"), delegates to a docker container (mode "docker"), or
    launches mongod directly -- on Windows through a hidden-window
    vbs/bat helper pair, elsewhere with --fork.

    :param security: when True mongod is started with --auth
    :return: None
    """
    mode = self.data['MODE']

    if mode == "running":
        # Server already up; nothing to do.
        return

    if mode == 'docker':
        from cloudmesh.mongo.MongoDocker import MongoDocker
        mongo = MongoDocker()
        mongo.start(auth=security)
        # mongo.wait()
        # mongo.ps()
        return

    auth = ""
    if security:
        auth = "--auth"
    mongo_host = self.data['MONGO_HOST']

    if platform.lower() == 'win32':
        try:
            mongo_runner = f"mongod {auth} " \
                           f"--bind_ip {mongo_host}" \
                           f" --dbpath \"{self.mongo_path}\" --logpath \"{self.mongo_log}\\mongod.log\""
            print(mongo_runner)
            # The vbs helper runs the bat file without opening a console
            # window; both scripts are created once and then reused.
            if not os.path.isfile(f'{self.mongo_path}\\invisible.vbs'):
                with open(f'{self.mongo_path}\\invisible.vbs', 'w') as f:
                    f.write(
                        'CreateObject("Wscript.Shell").Run """" & WScript.Arguments(0) & """", 0, False'
                    )
            if not os.path.isfile(f'{self.mongo_path}\\mongo_starter.bat'):
                with open(f'{self.mongo_path}\\mongo_starter.bat', 'w') as f:
                    f.write(mongo_runner)
            script = f'wscript.exe "{self.mongo_path}\\invisible.vbs" "{self.mongo_path}\\mongo_starter.bat"'
            print(script)
            p = subprocess.Popen(script,
                                 shell=True,
                                 stdout=subprocess.PIPE,
                                 stderr=subprocess.PIPE)
            result = "mongod child process should be started successfully."
        except Exception as e:
            result = "Mongo in windows could not be started: \n\n" + str(e)
    else:
        try:
            script = f"mongod {auth} --bind_ip {mongo_host}" \
                     f" --dbpath {self.mongo_path} --logpath {self.mongo_log}/mongod.log --fork"
            result = Script.run(script)
        except Exception as e:
            # BUG FIX: '+ str(e)' was a dangling statement on its own
            # line, so the exception text was silently dropped.
            result = "Mongo could not be started." + str(e)

    # BUG FIX: Console.ok/Console.error already print and return None,
    # so wrapping them in print() emitted a spurious 'None' line.
    if "successfully" in result:
        Console.ok(result)
    else:
        Console.error(result)
def do_init(self, args, arguments):
    """
    ::

      Usage:
        init [CLOUD] [--debug]
        init yaml

      Description:
        Initializes cloudmesh while using data from
        ~/.cloudmesh/cloudmesh.yaml.
        If no cloud is specified a number of local collections are
        created. If a cloud is specified it also uploads the
        information about images, flavors, vms. It also uploads the
        security groups defined by default to the cloud.

      Bug:
        cms init
        cms init

        On Windows you have to run the cms init command twice upon
        first installation
    """
    if arguments.CLOUD == "yaml":
        # 'init yaml': refresh the local cloudmesh.yaml from Github,
        # after asking for confirmation when the file already exists.
        config = Config()
        location = path_expand("~/.cloudmesh/cloudmesh.yaml")
        path = Path(location)
        if path.is_file():
            print()
            if yn_choice(
                    "The file ~/.cloudmesh/cloudmesh.yaml exists, do you wnat to overwrite it",
                    default='n'):
                config.fetch()
                print()
                Console.ok("File cloudmesh.yaml downloaded from Github")
            else:
                print()
                Console.warning("Download canceled")
                print()
    else:
        variables = Variables()
        config = Config()

        # Stop a possibly running MongoDB before wiping its data folder.
        try:
            print("MongoDB stop")
            MongoDBController().stop()
        except:
            Console.ok("MongoDB is not running. ok")
        machine = platform.lower()
        location = path_expand(config[
            f'cloudmesh.data.mongo.MONGO_DOWNLOAD.{machine}.MONGO_PATH'])
        try:
            shutil.rmtree(location)
            print("MongoDB folder deleted")
        except:
            # On Windows the folder is frequently still locked right
            # after the stop; a second run usually succeeds.
            Console.error(f"Could not delete {location}")
            if platform == 'win32':
                Console.error(f"Please try to run cms init again ... ")
                exit(1)

        config = Config()
        user = config["cloudmesh.profile.user"]
        secgroup = "flask"
        print("Set key")
        if user == "TBD":
            Console.error(
                "the user is not set in the yaml file for cloudmesh.profile.user"
            )
            sys.exit()

        # Recreate the local Mongo database and load the defaults via
        # the cms command line.
        print("MongoDB create")
        os.system("cms admin mongo create")
        os.system("cms admin mongo start")
        os.system("cms sec load")
        os.system(f"cms key add {user} --source=ssh")

        if arguments.CLOUD is not None:
            # A cloud was named: upload the key and refresh the cloud
            # inventories (flavors, images, vms, security groups).
            cloud = arguments.CLOUD
            variables['cloud'] = cloud
            os.system(f"cms key upload {user} --cloud={cloud}")
            os.system(f"cms flavor list --refresh")
            os.system(f"cms image list --refresh")
            os.system(f"cms vm list --refresh")
            os.system(f"cms sec group load {secgroup} --cloud={cloud}")
            os.system(f"cms set secgroup={secgroup}")

        if arguments.debug:
            variables['debug'] = True
            variables['timer'] = 'on'
            variables['trace'] = True
            variables['verbose'] = '10'

        print()
        print("Variables")
        print()
        for name in variables:
            value = variables[name]
            print(f" {name}={value}")
else: color = "green" choice = [gui.Checkbox(cloud, text_color=color, default=active)] layout.append(choice) layout.append([gui.Text('_' * 100, size=(65, 1))]) layout.append([gui.Submit(), gui.Cancel()]) window = gui.Window('Cloudmesh Configuration', layout, font=("Helvetica", 12)) event, values = window.Read() selected = [] for i in range(0, len(clouds)): cloud = clouds[i] if values[i]: selected.append(cloud) Console.ok(f"Activate Cloud {cloud}") for cloud in clouds: active = False if cloud in selected: active = True config[f"cloudmesh.cloud.{cloud}.cm.active"] = str(active) config.save()
def __init__(self,
             service=None,
             config="~/.cloudmesh/cloudmesh.yaml",
             **kwargs):
    """
    Initialize the AWS S3 storage provider.

    Reads the source ("aws") section from cloudmesh.yaml and opens an
    S3 client. When a --target is supplied (only the "copy" command
    does this) a connection to the target CSP is prepared as well:
    either a local directory or an Azure blob container.

    BUG FIX: on a configuration error the original executed
    'return ()' inside __init__, which raises
    'TypeError: __init__() should return None'; it now returns plainly.

    :param service: source storage service name (e.g. "aws")
    :param config: path to the cloudmesh.yaml configuration file
    :param kwargs: optional keys: debug, sourceObj, target, targetObj
    :raises NotImplementedError: for unsupported target kinds
    """
    super().__init__(service=service, config=config)
    if kwargs.get("debug"):
        print("Inside init of local provider")
        print(self.kind)
        print(kwargs.get('sourceObj'))
        print(kwargs.get('target'))
        print(kwargs.get('targetObj'))
        print(self.credentials)

    # Processing --source/service and --target arguments separately.
    # This is a provider class for aws storage, hence --source/service
    # will always be "aws".
    self.sourceCSP = self.service
    try:
        self.config = Config(config_path=config)
        self.yaml_content_source = self.config["cloudmesh.storage."
                                               f"{self.sourceCSP}"]
        self.source_kind = self.yaml_content_source["cm"]["kind"]
        self.source_credentials = self.yaml_content_source["credentials"]
        self.container = self.source_credentials["container"]
        print("Accessing AWS S3 details:\n")
        if 'TBD' == self.source_credentials["access_key_id"] \
                or 'TBD' == self.source_credentials["secret_access_key"] \
                or 'TBD' == self.source_credentials["region"]:
            Console.error("Critical details missing from .yaml file. "
                          "TBD not allowed. Please check.")
        try:
            self.s3_client = boto3.client(
                's3',
                aws_access_key_id=self.source_credentials["access_key_id"],
                aws_secret_access_key=self.
                source_credentials["secret_access_key"],
                region_name=self.source_credentials["region"])
            Console.ok(f"Successful connection to {self.kind} is "
                       f"made.")
        except ClientError as e:
            Console.error(e, prefix=True, traceflag=True)

        # Source location: a specific object below the default
        # directory, or the default directory itself.
        if kwargs.get('sourceObj'):
            self.local_location = Path(
                self.yaml_content_source['default']['directory'],
                kwargs.get('sourceObj'))
        else:
            self.local_location = self.yaml_content_source['default'][
                'directory']
        if kwargs.get("debug"):
            print(f"\nLocal location to access {self.local_location}")

        if kwargs.get('target'):
            self.targetCSP = kwargs.get('target')
            self.yaml_content_target = self.config["cloudmesh.storage."
                                                   f"{self.targetCSP}"]
            self.target_kind = self.yaml_content_target["cm"]["kind"]
            self.target_credentials = self.yaml_content_target[
                "credentials"]
            # For local storage the default directory acts as the
            # target container.
            if self.target_kind == "local":
                self.target_container = self.yaml_content_target[
                    'default']['directory']
            else:
                self.target_container = self.target_credentials[
                    "container"]
            banner("self.target_container: ", self.target_container)
    except Exception as e:
        Console.error(f"Couldn't access cloudmesh.yaml. Error - {e}")
        return

    if kwargs.get("debug"):
        VERBOSE(self.yaml_content_source)
        if kwargs.get('target'):
            VERBOSE(self.yaml_content_target)
    banner(f"Source CSP: {self.source_kind}")
    if kwargs.get('target'):
        banner(f"Target CSP: {self.target_kind}")

    # Creating a connection with the target CSP. This is done only if
    # the --target argument is provided; only the "copy" command is
    # expected to pass --target.
    if kwargs.get('target'):
        if self.target_kind == "local":
            print("Local storage connected.")
        elif self.target_kind == "azureblob":
            banner("Connecting to Azure Blob Storage.")
            self.azure_acct_name = self.target_credentials['account_name']
            self.azure_acct_key = self.target_credentials['account_key']
            self.target_container = self.target_credentials['container']
            try:
                self.blob_service = BlockBlobService(
                    self.azure_acct_name, self.azure_acct_key)
                print("Successfully connected to Azure Blob storage.")
            except Exception as e:
                print("Connection to Azure failed:", e)
        else:
            raise NotImplementedError