def info(cls):
    """
    Print and persist the bridge status banner.

    Reads the saved bridge info up to and including the lease bookmark
    line, appends the current dnsmasq leases (rendering the epoch lease
    expiration time of each row as a human-readable timestamp), rewrites
    ~/.cloudmesh/bridge/info with the combined text, and displays it.

    Exits the program when the bridge info file cannot be read.
    """
    try:
        info = readfile('~/.cloudmesh/bridge/info').split('\n')
        # keep everything up to and including the lease bookmark line
        info = info[:info.index(cls.lease_bookmark) + 1]
    except Exception:  # narrowed from bare except: keep SystemExit/KeyboardInterrupt alive
        Console.error("Cannot execute info command. Has the bridge been made yet?")
        sys.exit(1)
    try:
        curr_leases = sudo_readfile('/var/lib/misc/dnsmasq.leases')
        # If cur_leases is not empty, then the first element of each row is
        # the epoch time of the lease expiration date
        for i in range(len(curr_leases)):
            curr_leases[i] = curr_leases[i].split()
            curr_leases[i][0] = time.strftime(
                '%Y-%m-%d %H:%M:%S',
                time.localtime(int(curr_leases[i][0])))
            curr_leases[i] = ' '.join(curr_leases[i])
        curr_leases = '\n' + '\n'.join(curr_leases)
    except Exception:  # best effort: a missing leases file just means no devices yet
        Console.warning("dnsmasq.leases file not found. No devices have been connected yet")
        curr_leases = "\n"
    toWrite = '\n'.join(info) + curr_leases
    sudo_writefile('~/.cloudmesh/bridge/info', toWrite)
    banner(toWrite, color='CYAN')
def check_venv(self):
    """
    Report whether the current interpreter runs inside a Python venv.

    On Windows additionally verifies that the venv directory and its
    Scripts\\activate.bat script exist on disk.
    """
    # banner(f"checking python venv")
    if self.is_venv():
        Console.ok("OK. you are running in a venv")
        print(" VIRTUAL_ENV=", os.environ.get("VIRTUAL_ENV"), sep="")
    else:
        Console.error("You are not running in a venv")
    # BUG FIX: os.environ.get("VIRTUAL_ENV") returns None when the
    # variable is unset, making `"ENV3" not in None` raise TypeError;
    # default to "" so we warn instead of crashing.
    if "ENV3" not in (os.environ.get("VIRTUAL_ENV") or ""):
        Console.warning("your venv is not called ENV3. That may be ok")
    if platform.system() == "Windows":
        venv = os.environ.get("VIRTUAL_ENV")
        where = path_expand(venv)
        activate_path = f"{where}\\Scripts\\activate.bat"
        # check if the dir exists at where
        if os.path.isdir(where):
            Console.ok("OK. ENV3 directory exists")
        else:
            Console.error("ENV3 directory does not exists")
        # check if activate exists in ~\ENV3\Scripts\activate.bat
        if os.path.isfile(activate_path):
            Console.ok(f"OK. Activate exists in {activate_path}")
        else:
            Console.error(f"Could not find {activate_path}")
def _set_ipv4(cls):
    """
    Turns on iPv4 Forwarding on the system and saves rules upon eventual reboot

    :return:
    """
    if cls.dryrun:
        Console.info("DRYRUN: Turning on iPv4")
    else:
        new_line = 'net.ipv4.ip_forward=1'
        # First turn on ipv4 forwarding for the running system
        cls._system(f'sudo sysctl -w {new_line}')
        # Save for next boot
        old_conf = sudo_readfile('/etc/sysctl.conf')
        if new_line not in old_conf:
            # The default sysctl has the new_line commented out.
            # Try to uncomment it in place.
            try:
                old_conf[old_conf.index('#' + new_line)] = new_line
            except ValueError:
                Console.warning("Could not find iPv4 setting. Perhaps /etc/sysctl.conf has been changed from default. Process continues by adding iPv4 setting")
                # consistency: append the same value as new_line rather
                # than a duplicated literal
                old_conf.append(new_line)
            except Exception:  # narrowed from bare except
                Console.error("Could not set iPv4 forwarding. Unknown error occurred")
            finally:
                # always persist whatever state old_conf is now in
                sudo_writefile('/etc/sysctl.conf', '\n'.join(old_conf))
        else:
            Console.info("iPv4 forwarding already set. Skipping iPv4 setup")
def get(self, storage_provider, storage_bucket_name):
    '''Loads the lifecycle configuration defined for a bucket.

    :param storage_provider: Name of the cloud service provider
    :param storage_bucket_name: Name of the storage bucket
    :exception: Exception
    :returns: Result of operation as string
    '''
    try:
        # Invoke service
        result = self.s3_client.get_bucket_lifecycle_configuration(
            Bucket=storage_bucket_name)
        # Debug
        Console.ok(json.dumps(result, indent=4, sort_keys=True))
    except ClientError as error:
        if error.response['Error'][
                'Code'] == 'NoSuchLifecycleConfiguration':
            # no lifecycle rules configured: treated as an empty rule
            # set rather than an error
            Console.warning(error.response['Error']['Code'])
            return []
        else:
            # e.response['Error']['Code'] == 'NoSuchBucket', etc.
            Console.error(error, prefix=True, traceflag=True)
            return None
    # NOTE(review): despite the docstring, the success path returns the
    # list of lifecycle rules, not a string.
    return result['Rules']
def __getitem__(self, item):
    """
    gets an item from the dict. The key is . separated
    use it as follows get("a.b.c")
    :param item: dot separated key path, e.g. "a.b.c"
    :type item: str
    :return: the value stored under the key path
    :raises KeyError: if any component of the path is missing
    """
    try:
        if "." in item:
            keys = item.split(".")
        else:
            # plain key: single dictionary lookup, no traversal needed
            return self.data[item]
        # walk down the nested dictionaries one path component at a time
        element = self.data[keys[0]]
        for key in keys[1:]:
            element = element[key]
    except KeyError:
        # **locals() supplies both 'item' and 'path' to the template
        path = self.config_path
        Console.warning(
            "The key '{item}' could not be found in the yaml file '{path}'".format(
                **locals()))
        raise KeyError(item)
        # sys.exit(1)
    except Exception as e:
        # any other failure (e.g. indexing into a non-dict value) aborts
        print(e)
        sys.exit(1)
    # if element.lower() in ['true', 'false']:
    #     element = element.lower() == 'true'
    return element
def __init__(self):
    """
    Choose which Registry protocol to use: mongo or pickle.
    Check config for configured protocol; when the setting is missing,
    "pickle" is written into the config as the default.
    """
    if Registry.PROTOCOL_NAME is None:
        try:
            # NOTE(review): RESGISTRY_CONFIG looks like a misspelling of
            # REGISTRY_CONFIG; the attribute is defined elsewhere, so the
            # name is kept as-is here.
            Registry.PROTOCOL_NAME = Config().get(
                Registry.RESGISTRY_CONFIG)
        except KeyError as e:
            Console.warning("No provider setting found in config")
            # default to the pickle protocol and persist that choice
            config = Config()
            config.set(Registry.RESGISTRY_CONFIG, "pickle")
            Registry.PROTOCOL_NAME = Config().get(
                Registry.RESGISTRY_CONFIG)
    if Registry.PROTOCOL_NAME == "mongo":
        self.protocol = RegistryMongoDB()
    elif Registry.PROTOCOL_NAME == "pickle":
        self.protocol = RegistryPickle()
    else:
        Console.error(
            f"Unsupported Registry Type {Registry.PROTOCOL_NAME}")
        raise ValueError(
            f"Unsupported Registry Type {Registry.PROTOCOL_NAME}")
    Console.ok(f"INIT: Using {Registry.PROTOCOL_NAME} Protocol")
def get(self, key, default=None):
    """
    A helper function for reading values from the config without
    a chain of `get()` calls.

    Usage:
        mongo_conn = conf.get('db.mongo.MONGO_CONNECTION_STRING')
        default_db = conf.get('default.db')
        az_credentials = conf.get('data.service.azure.credentials')

    :param default: value returned when the key is missing; when it is
                    None a missing key raises KeyError instead
    :param key: A string representing the value's path in the config.
    :raises KeyError: when the key is missing and no default was given
    """
    try:
        return self.__getitem__(key)
    except KeyError:
        if default is None:
            # no fallback supplied: warn and propagate the KeyError
            path = self.config_path
            Console.warning(
                "The key '{key}' could not be found in the yaml file '{path}'".format(
                    **locals()))
            # sys.exit(1)
            raise KeyError(key)
        return default
    except Exception as e:
        # unexpected failure while resolving the key path
        print(e)
        sys.exit(1)
def new_func2(*args, **kwargs):
    # Wrapper produced by a deprecation decorator: announce that the
    # wrapped func2 is deprecated, then delegate to it unchanged.
    # Temporarily force DeprecationWarning to always display so the
    # message is not suppressed by the default warning filters.
    warnings.simplefilter('always', DeprecationWarning)
    warnings.warn(fmt2.format(name=func2.__name__),
                  category=DeprecationWarning,
                  stacklevel=2)
    # restore the default filtering behavior afterwards
    warnings.simplefilter('default', DeprecationWarning)
    # also surface the deprecation (with its reason) on the console
    Console.warning(fmt2.format(name=func2.__name__, reason=reason))
    return func2(*args, **kwargs)
def warning(cls, msg, debug=True):
    """
    prints a warning message, unless debug output is disabled

    :param msg: the message to show
    :param debug: when False the message is suppressed
    :return:
    """
    if not debug:
        return
    Console.warning(msg)
def search_run(self, specification):
    """
    Search the bucket for a file named specification['filename'] under
    specification['path'] (optionally recursive) and print the matches
    as a table.

    :param specification: dict with 'path', 'filename' and 'recursive'
    :return: the specification with its status set to 'completed'
    """
    directory = specification['path']
    filename = specification['filename']
    recursive = specification['recursive']
    len_dir = len(massage_path(directory))
    if len_dir > 0:
        # FIX: the search prefix must include the requested file name;
        # the placeholder previously used here could never match a key
        # (mirrors the bare-filename branch below).
        file_path = f"{massage_path(directory)}/{filename}"
    else:
        file_path = filename
    self.s3_resource, self.s3_client = self.get_s3_resource_client()
    info_list = []
    objs = []
    # choose the object listing strategy based on path and recursion
    if (len_dir > 0) and recursive is False:
        objs = list(
            self.s3_resource.Bucket(
                self.container_name).objects.filter(Prefix=file_path))
    elif (len_dir == 0) and recursive is False:
        objs = list(
            self.s3_resource.Bucket(
                self.container_name).objects.filter(Prefix=file_path))
    elif (len_dir > 0) and recursive is True:
        objs = list(
            self.s3_resource.Bucket(self.container_name).objects.filter(
                Prefix=massage_path(directory)))
    elif (len_dir == 0) and recursive is True:
        objs = list(
            self.s3_resource.Bucket(self.container_name).objects.all())
    if len(objs) > 0:
        for obj in objs:
            if os.path.basename(obj.key) == filename:
                # fetch per-object metadata for the result table
                metadata = self.s3_client.head_object(
                    Bucket=self.container_name, Key=obj.key)
                info = {
                    "fileName": obj.key,
                    "lastModificationDate":
                        metadata['ResponseMetadata']['HTTPHeaders']
                        ['last-modified'],
                    "contentLength":
                        metadata['ResponseMetadata']['HTTPHeaders']
                        ['content-length']
                }
                info_list.append(info)
    if len(info_list) == 0:
        Console.warning("File not found")
    else:
        Console.msg("File found")
        self.pretty_print(data=info_list, data_type="files",
                          output="table")
    specification['status'] = 'completed'
    return specification
def get_from_diskutil(device=None):
    """
    Query macOS `diskutil list -plist external` and return a list of
    partition-detail dicts, optionally filtered to devices whose /dev
    path contains *device*.
    """
    import plistlib
    external = subprocess.check_output(
        "diskutil list -plist external".split(" "))
    r = dict(plistlib.loads(external))
    details = []
    if len(r['AllDisksAndPartitions']) == 0:
        Console.error("No partition found")
        return ""
    # index of the disk currently being inspected
    no = 0
    for cards in r['AllDisksAndPartitions']:
        try:
            for partition in r['AllDisksAndPartitions'][no]['Partitions']:
                if 'MountPoint' not in partition:
                    partition['MountPoint'] = None
                # normalize diskutil content labels to filesystem names
                if partition['Content'] == 'Linux':
                    partition['Content'] = 'ext4'
                elif partition['Content'] == 'Windows_FAT_32':
                    partition['Content'] = 'FAT32'
                info = partition['MountPoint']
                entry = {
                    "dev": f"/dev/{partition['DeviceIdentifier']}",
                    "active": info is not None,
                    "info": info,
                    "readable": info is not None,
                    "formatted": partition['Content'],
                    "empty": partition['Size'] == 0,
                    "size": humanize.naturalsize(partition['Size']),
                    "direct-access": True,
                    "removable": True,
                    "writeable": 'VolumeName' in partition
                }
                if device is None or device in entry["dev"]:
                    details.append(entry)
            no = no + 1
        except KeyError as e:  # noqa: F841
            # disk exposes no 'Partitions' key: report the bare disk
            # NOTE(review): `no` is not advanced on this path, so the
            # next loop iteration re-reads the same disk index — confirm
            # whether that is intended.
            Console.warning("No partitions found for device")
            partition = r['AllDisksAndPartitions'][no]
            entry = {
                "dev": f"/dev/{partition['DeviceIdentifier']}",
                "active": False,
                "info": "Not Formatted",
                "readable": False,
                "formatted": False,
                "empty": partition['Size'] == 0,
                "size": partition['Size'],
                "direct-access": True,
                "removable": True,
                "writeable": 'VolumeName' in partition
            }
            details.append(entry)
    return details
def install(force=False):
    """
    Install the rpi-imager package on Linux / Raspberry Pi systems.

    :param force: install even when the imager is already present
    """
    if os_is_mac():
        # imager installation is not handled on macOS
        return
    if not Imager.installed() or force:
        if os_is_linux() or os_is_pi():
            Sudo.password()
            # FIX: "apt uninstall" is not a valid apt subcommand, and
            # this function's purpose is to install the imager.
            os.system("sudo apt install -y rpi-imager")
        else:
            Console.warning("Installation is not supported")
def test(cls, hosts=None):
    """
    Command to test the connectivity to specified hostnames. First checks
    to see if the device has even registered with the bridge in
    ~/.cloudmesh/bridge/info which is a directory created when the bridge
    is first created.

    :param hosts: List of hostnames to check for connectivity.
    :return:
    """
    try:
        info = sudo_readfile('~/.cloudmesh/bridge/info')
        index = info.index(cls.lease_bookmark)
        leases = info[index + 1:]
    except Exception:  # narrowed from bare except
        Console.error(
            'Could not find information on bridge. Has the bridge been created yet?'
        )
        sys.exit(1)
    # hosts that have obtained a lease from the bridge at least once
    known_hosts = []
    for lease in leases:
        lease_info = lease.split()
        host = lease_info[4]
        known_hosts.append(host)
    # FIX: guard against hosts=None (the default), which previously
    # crashed on len(None)
    hosts = hosts or []
    count = 0
    hosts_to_check = len(hosts)
    Console.info("Beginning ping checks...")
    for host in hosts:
        if host not in known_hosts:
            Console.warning(
                f'{host} is not among the known devices of the bridge. No connection from {host} has been received before. Skipping test'
            )
            hosts_to_check -= 1
        else:
            Console.info(f'Pinging Host {host}. Please wait ...')
            status, stdout = cls._system(f'ping -c 1 {host}',
                                         warnuser=False,
                                         both=True)
            # a non-zero ping exit code means the host is unreachable
            if status != 0:
                message = textwrap.dedent(f"""
                Could not ping {host} successfully.
                Rebooting {host} may fix the problem.
                Manually ping {host} for more details.
                """)
                Console.warning(message)
            else:
                count += 1
                Console.ok(f"Successfully pinged {host}")
    Console.ok(
        f'Completed checks. {count} out of {hosts_to_check} checks succeeded.'
    )
def mkdir_run(self, specification): """ function to create a directory the function will first check if the bucket exists or not, if the bucket doesn't exist it will create the bucket and it will create the directory specified. the name of the bucket will come from YAML specifications and the directory name comes from the arguments. :param specification: :return: """ # cm: # number: {self.number} # kind: storage # id: {uuid_str} # cloud: {self.name} # name: {path} # collection: {self.collection} # created: {date} # # action: mkdir # path: {path} # status: waiting directory = specification['path'] self.s3_resource, self.s3_client = self.get_s3_resource_client() file_content = "" file_path = massage_path(directory) dir_files_list = [] bucket = self.container_name if not self.bucket_exists(name=bucket): self.bucket_create(name=bucket) obj = list( self.s3_resource.Bucket( self.container_name).objects.filter(Prefix=file_path + '/')) if len(obj) == 0: self.s3_resource.Object( self.container_name, f"{file_path}/{self.dir_marker_file_name}").put( Body=file_content) # make head call to extract meta data # and derive obj dict metadata = self.s3_client.head_object( Bucket=self.container_name, Key=f"{file_path}/{self.dir_marker_file_name}") dir_files_list.append(extract_file_dict(f"{file_path}/", metadata)) else: Console.warning('Directory already present') specification['status'] = 'completed' return specification
def add_to_file(filename, line, warning=None):
    """
    adds a line to a file if it is not already in it

    :param filename: the file to modify
    :param line: the exact line to ensure is present
    :param warning: message shown when the line already exists
    :return:
    """
    lines = readfile(filename)
    # FIX: compare against whole lines; the previous substring test
    # skipped the append when `line` merely occurred inside a longer line
    if line in lines.splitlines():
        Console.warning(warning)
        return
    lines += f"\n{line}\n"
    writefile(filename, lines)
def get_names(arguments, variables): names = arguments.get("NAME") or arguments.get( "NAMES") or arguments.get("--name") or variables["vm"] # TODO: this only works for vm, but not images and so on if names is None: # this is a temporary patch for "image list --cloud=XX --refresh" so not to print the error if arguments.cloud and arguments.refresh and arguments.list: return None Console.warning( "you need to specify a vm to use automatic vm name completion." ) return None else: return Parameter.expand(names)
def _set_iptables(cls, flush=True):
    """
    Sets up routing in iptables and saves rules for eventual reboot

    :param flush: Remove all rules for related iptables before adding
                  the new ones
    :return:
    """
    # forward traffic from the private to the external interface
    cmd1 = f"sudo iptables -A FORWARD -i {cls.priv_interface} -o {cls.ext_interface} -j ACCEPT"
    # allow replies for established connections back to the private side
    cmd2 = f"sudo iptables -A FORWARD -i {cls.ext_interface} -o {cls.priv_interface} -m state --state ESTABLISHED,RELATED -j ACCEPT"
    # NAT the private network behind the external interface
    cmd3 = f"sudo iptables -t nat -A POSTROUTING -o {cls.ext_interface} -j MASQUERADE"
    if cls.dryrun:
        Console.info("DRYRUN: Setting iptables")
        print(f"DRYRUN: {cmd1}")
        print(f"DRYRUN: {cmd2}")
        print(f"DRYRUN: {cmd3}")
    else:
        if flush:
            cls._system('sudo iptables --flush')
            cls._system('sudo iptables -t nat --flush')
        cls._system(cmd1)
        cls._system(cmd2)
        cls._system(cmd3)
        # Save rules
        cls._system('sudo sh -c "iptables-save > /etc/iptables.ipv4.nat"')
        # Restore rules if reboot
        old_conf = sudo_readfile('/etc/rc.local', trim=True)
        # Exit 0 should be in the last entry of old_conf
        # Add ip table restoration lines just above
        restore_command = "iptables-restore < /etc/iptables.ipv4.nat"
        if old_conf[-1] != 'exit 0':
            Console.error(
                'rc.local does not have exit 0 in last line. Contingency not handled in this version. Cannot enable iPv4 forwarding at this time'
            )
            raise NotImplementedError
        if restore_command not in old_conf:
            old_conf.append(restore_command)
            # swap the last two entries so 'exit 0' stays the final line
            old_conf[-1], old_conf[-2] = old_conf[-2], old_conf[
                -1]  # Places 'exit 0' below our restore_command
            sudo_writefile('/etc/rc.local', '\n'.join(old_conf) + '\n')
        else:
            Console.warning(f"iptables restoration already in rc.local")
def find_by_KeyValue(self, collection_name, KeyValue=None):
    """
    Return a cursor over documents of *collection_name* matching the
    *KeyValue* filter dict (all documents when None), or [] when the
    collection is empty or no document matches.
    """
    collection = self.db[collection_name]
    # NOTE(review): collection.count() / cursor.count() are deprecated
    # in newer pymongo releases (count_documents) — confirm the driver
    # version this project pins.
    if collection.count() == 0:
        Console.error("Collection {collection} not found".format(
            collection=collection_name))
        return []
    entries = collection.find(KeyValue)
    if entries.count() > 1:
        # multiple matches are allowed, but flagged as suspicious
        Console.warning("More than one instance with the same name was "
                        "found in the Database")
    elif entries.count() == 0:
        Console.error("Entry not found: {KeyVal}".format(KeyVal=KeyValue))
        return []
    return entries
def ssh_config_add(self, label, host, user):
    """
    Append a Host entry for *label* to ~/.ssh/config unless an entry
    with exactly that label already exists.

    :param label: the Host alias to add
    :param host: the Hostname the alias resolves to
    :param user: the User for the connection
    """
    config = readfile("~/.ssh/config")
    # FIX: exact-line comparison; the previous substring test reported
    # "Host foo" as present when only "Host foobar" existed.
    if any(line.strip() == f"Host {label}"
           for line in config.splitlines()):
        Console.warning(f"{label} is already in ~/.ssh/config")
    else:
        entry = textwrap.dedent(f"""
        Host {label}
             Hostname {host}
             User {user}
             IdentityFile ~/.ssh/id_rsa
        """)
        Console.info(f"adding {label} to ~/.ssh/config\n" +
                     textwrap.indent(entry, prefix="    "))
        config = config + entry
        writefile("~/.ssh/config", config)
def _dhcpcd_active(cls, iface='eth0', timeout=10, time_interval=5): """ Returns True if dhcpcd is active else False :param iface: the interface that is connected to the private network. Default eth0 :return boolean: """ # It's possible dhcpcd isn't fully started up yet after restarting. This is tricky as it says active even if it may fail # after probing all interfaces # Usually, dhcpcd is working once we see f'{interface}: no IPv6 Routers available' somewhere in the status message pattern = re.compile(f'{iface}: no IPv6 Routers available*') # Loop if necessary restartCount = 1 count = 1 while True: Console.info(f'Checking if dhcpcd is up - Attempt {count}') code, full_status = cls._system('sudo service dhcpcd status', warnuser=False, both=True) if pattern.search(full_status): Console.info('dhcpcd is done starting') status_line = cls._system( 'sudo service dhcpcd status | grep Active') return 'running' in status_line # Occassionally dhcpcd fails to start when using WiFi. # Unresolved bug as it works after a few restarts elif code != 0: if restartCount >= 5: return False else: Console.warning( f'dhcpcd failed to restart. Retrying in 5 seconds... Restart number {restartCount} - Maximum 5 restarts' ) time.sleep(time_interval) cls._system('sudo service dhcpcd restart') restartCount += 1 count = 1 continue if count >= timeout: status_line = cls._system( 'sudo service dhcpcd status | grep Active') return 'running' in status_line count += 1 Console.info('dhcpcd is not ready. Checking again in 5 seconds...') time.sleep(time_interval)
def sudo_writefile(filename, content, append=False):
    """
    Write *content* to a root-owned file by staging it in a user temp
    file and copying it into place with sudo.

    :param filename: destination path (may require root to write)
    :param content: the text to write
    :param append: when True, prepend the file's current content
    :return: the output of the sudo cp command
    """
    os.system('mkdir -p ~/.cloudmesh/tmp')
    tmp = "~/.cloudmesh/tmp/tmp.txt"
    if append:
        content = sudo_readfile(filename, split=False) + content
    writefile(tmp, content)
    # FIX: copy to the requested destination; the command previously
    # contained a placeholder instead of the filename parameter.
    result = subprocess.getstatusoutput(f"sudo cp {tmp} {filename}")
    # If exit code is not 0
    if result[0] != 0:
        Console.warning(f"{filename} was not created correctly -> {result[1]}")
    return result[1]
def add_secgroup(self, name=None, description=None):
    """
    Adds a security group to the cloud.

    :param name: Name of the group
    :param description: The description; defaults to the name
    :return:
    :raises ValueError: when the cloud connection is not initialized
    """
    if self.cloudman:
        if description is None:
            description = name
        try:
            self.cloudman.create_security_group(name, description)
        except Exception:  # narrowed from bare except
            # creation fails when the group already exists; treat as benign
            Console.warning(f"secgroup {name} already exists in cloud. "
                            f"skipping.")
    else:
        raise ValueError("cloud not initialized")
def _dhcpcd_conf(cls):
    """
    Configures manager with static ip masterIP on interface
    priv_interface in dhcpcd.conf. Considered as the IP address of the
    "default gateway" for the cluster network

    Note: Does not restart dhcpcd.service

    :return:
    """
    if cls.dryrun:
        Console.info(
            f"DRYRUN: Setting ip on {cls.priv_interface} to {cls.masterIP}"
        )
    else:
        banner(f"""
        Writing to dhcpcd.conf. Setting static IP of manager to {cls.masterIP} on {cls.priv_interface}
        """)
        iface = f'interface {cls.priv_interface}'
        static_ip = f'static ip_address={cls.masterIP}'
        curr_config = sudo_readfile('/etc/dhcpcd.conf')
        if iface in curr_config:
            Console.warning("Found previous settings. Overwriting")
            # If setting already present, replace it and the static ip line
            index = curr_config.index(iface)
            try:
                if 'static ip_address' not in curr_config[index + 1]:
                    Console.warning(
                        "Missing static ip_address assignment. Overwriting line"
                    )
                # replace the line after the interface stanza with ours
                curr_config[index + 1] = static_ip
            except IndexError:
                Console.error('/etc/dhcpcd.conf ends abruptly. Aborting')
                sys.exit(1)
        else:
            # no previous setting: append a fresh interface stanza
            curr_config.append(iface)
            curr_config.append(static_ip)
            curr_config.append('nolink\n')
        sudo_writefile('/etc/dhcpcd.conf', '\n'.join(curr_config))
        Console.ok('Successfully wrote to /etc/dhcpcd.conf')
def download(source, destination, force=False):
    """
    Downloads the file from source to destination

    :param source: The http source
    :param destination: The destination in the file system
    :param force: If True the file will be downloaded even if it already exists
    """
    if os.path.isfile(destination) and not force:
        Console.warning(f"File {destination} already exists. "
                        "Skipping download ...")
    else:
        # make sure the target directory exists
        directory = os.path.dirname(destination)
        Path(directory).mkdir(parents=True, exist_ok=True)
        r = requests.get(source, allow_redirects=True)
        # FIX: use a context manager so the file handle is closed
        # promptly instead of leaking until garbage collection
        with open(destination, 'wb') as f:
            f.write(r.content)
def _system(cls, command, exitcode=False, warnuser=True, both=False):
    """
    Run *command* through the shell and return its output.

    :param command: the shell command to execute
    :param exitcode: True if we only want exitcode
    :param warnuser: True if we want to warn the user of command errors
    :param both: True if we want both the exit code and the stdout.
        Takes precedent over exitcode
    :return: stdout of command (or exit code / tuple per the flags)
    """
    rc, output = subprocess.getstatusoutput(command)
    # A non-zero exit status is surfaced as a console warning on request.
    if warnuser and rc != 0:
        Console.warning(f'Warning: "{command}" did not execute properly -> {output} :: exit code {rc}')
    if both:
        return rc, output
    return rc if exitcode else output
def volume_attach(self, node_id, volume_id):
    """
    Function will attached the mentioned volume (by volume id) to the node

    :param node_id: id of the node to attach to; '' selects the first
                    available node
    :param volume_id: id of the volume to attach
    :returns: None
    :rtype: NoneType
    """
    driver = self._get_driver()
    node = ''
    volume = ''
    nodes = self.node_refresh(False)
    if len(nodes) == 0:
        # No Node available to attache volume
        # print("pass -No Node")
        Console.warning("No Node available to attache volume")
    else:
        # FIX: previously compared against the undefined global NODE_ID
        # instead of the node_id parameter (NameError at runtime)
        if node_id == '':
            # get the default 0th node from list
            node = nodes[0]
        else:
            for nd in nodes:
                if nd.id == node_id:
                    # attache the default/ 0th location node
                    node = nd
                    break
    volumes = self.volume_refresh(False)
    if len(volumes) == 0:
        # No Volume available to attach
        Console.warning("No Volumes available")
    else:
        for vol in volumes:
            if vol.id == volume_id:
                volume = vol
                break
    if node and volume:
        isVolumeAttached = driver.attach_volume(node, volume, device=None)
        Console.ok("Is volume attached - ", isVolumeAttached)
    else:
        Console.info("Unable to attached volume to node, please verify your input")
    return
def add_secgroup(self, name=None, description=None, vcn_id=None):
    """
    Adds a network security group to the given VCN.

    :param name: Name of the group
    :param description: The description; defaults to the name
                        (NOTE(review): currently not passed to OCI —
                        confirm whether that is intended)
    :param vcn_id: id of the virtual cloud network for the group
    :return: the created security group data, or None when creation
             fails (e.g. the group already exists)
    """
    if description is None:
        description = name
    try:
        details = oci.core.models.CreateNetworkSecurityGroupDetails(
            compartment_id=self.compartment_id,
            display_name=name,
            vcn_id=vcn_id)
        secgroup = self.virtual_network.create_network_security_group(
            details)
        return secgroup.data
    except Exception:  # narrowed from bare except
        Console.warning(f"secgroup {name} already exists in cloud. "
                        f"skipping.")
def bucket_exists(self, name=None):
    """
    Check whether the named S3 bucket exists and is reachable.

    :param name: the bucket name which needs to be checked for existence
    :return: Boolean — True when the bucket exists (even if access is
             forbidden), False otherwise
    """
    try:
        self.s3_client.head_bucket(Bucket=name)
        return True
    except botocore.exceptions.ClientError as e:
        # If a client error is thrown, then check that it was a 404 error.
        # If it was a 404 error, then the bucket does not exist.
        error_code = int(e.response['Error']['Code'])
        if error_code == 403:
            # the bucket exists but we lack permission to access it
            Console.warning(f"Bucket {name} is private. Access forbidden!")
            return True
        elif error_code == 404:
            Console.warning(f"Bucket {name} does not exist")
            return False
        # FIX: other error codes previously fell through and returned
        # None implicitly; report them explicitly as not available
        return False
def remove(service, name):
    """
    Remove the named entry from a service section of cloudmesh.yaml.

    :param service: the cloudmesh config section to edit
    :param name: the registered entry to remove
    :return: the removed entry, or None when nothing was removed
    """
    removed_item = None
    try:
        # Update the google cloud section of cloudmesh.yaml config file.
        config = Config()
        config_service = config["cloudmesh"][service]
        if name in config_service:
            removed_item = config_service.pop(name, None)
            # persist the modified configuration back to disk
            config.save()
            Console.ok(f"Removed {name} from {service} service.")
        else:
            Console.warning(
                f"{name} is not registered for cloudmesh.{service}")
    except Exception as se:
        # best effort: report failure but still return None
        Console.error(f"Error removing {service}-{name} :: {se}")
    return removed_item
def get_images(self):
    """
    Downloads all tags found in self.configs
    """
    # collect the unique set of image tags across all host configs
    tags = set()
    for config in self.configs.values():
        try:
            tags.add(config['tag'])
        except KeyError as e:
            # hosts without a tag entry are skipped
            # NOTE(review): config["host"] may itself be missing and
            # raise inside this handler — confirm the config schema.
            Console.warning(
                f'Could not find tag for {config["host"]}. Skipping')
    banner("Downloading Images", figlet=True)
    image = Image()
    for tag in tags:
        Console.info(f'Attempting to download {tag}')
        res = image.fetch(tag=[tag])
        if not res:
            # abort the whole run on the first failed download
            Console.error('Failed Image Fetch.')
            raise Exception('Failed Image Fetch')