def ubuntu(self):
    """Install MongoDB on a Linux (Ubuntu) system.

    Verifies that openssl and curl are available, then creates the
    mongo directories, downloads and unpacks the MongoDB tarball, and
    appends the bin directory to PATH via ~/.bashrc.
    """
    # Fail early if the prerequisites are missing.
    chk_script = "openssl version && curl --version"
    Script.run(chk_script)
    script = f"""
mkdir -p {self.mongo_path}
mkdir -p {self.mongo_home}
mkdir -p {self.mongo_log}
wget -q -O /tmp/mongodb.tgz {self.mongo_code}
tar -zxvf /tmp/mongodb.tgz -C {self.local}/mongo --strip 1
echo \"export PATH={self.mongo_home}/bin:$PATH\" >> ~/.bashrc
"""
    if self.dryrun:
        print(script)
    else:
        # Result of the installer run was never used; drop the binding.
        Script.run(script)
        Console.info("MongoDB installation successful!")
        print()
        Console.info("Activate it with \n\n"
                     f"export PATH={self.mongo_home}/bin:$PATH\n\n"
                     "We also added this to ~/.bashrc\n")
        print()
def install(self, clean=False, pull=True):
    """Creates the Mongo image.

    :param clean: if True, remove existing data and log directories first
    :param pull: if True, pull the mongo docker image for self.version
    :return: None
    """
    Console.msg(f"Version: {self.version}")
    if pull:
        script = f"docker pull mongo:{self.version}"
        self.run(script)
    if clean:
        # Best-effort removal: ignore_errors replaces the original
        # bare `except: pass` blocks, which could hide real bugs.
        shutil.rmtree(self.mongo_path, ignore_errors=True)
        shutil.rmtree(self.mongo_log, ignore_errors=True)
    try:
        os.mkdir(self.mongo_path)
    except FileExistsError:
        Console.info(f"Folder {self.mongo_path} already exists")
    try:
        os.mkdir(self.mongo_log)
    except FileExistsError:
        Console.info(f"Folder {self.mongo_log} already exists")
def _set_ipv4(cls):
    """Turn on IPv4 forwarding and persist the setting across reboots.

    Enables net.ipv4.ip_forward immediately via sysctl, then ensures the
    setting is present (uncommented) in /etc/sysctl.conf for next boot.

    :return: None
    """
    if cls.dryrun:
        Console.info("DRYRUN: Turning on iPv4")
        return
    new_line = 'net.ipv4.ip_forward=1'
    # First turn on ipv4 forwarding for the running system
    cls._system(f'sudo sysctl -w {new_line}')
    # Save for next boot
    old_conf = sudo_readfile('/etc/sysctl.conf')
    if new_line in old_conf:
        Console.info("iPv4 forwarding already set. Skipping iPv4 setup")
        return
    # The default sysctl has the new_line commented out.
    # Try to uncomment it in place; otherwise append it.
    try:
        old_conf[old_conf.index('#' + new_line)] = new_line
    except ValueError:
        Console.warning("Could not find iPv4 setting. Perhaps /etc/sysctl.conf has been changed from default. Process continues by adding iPv4 setting")
        old_conf.append('net.ipv4.ip_forward=1')
    except Exception:
        # Narrowed from a bare except; report but still write what we have.
        Console.error("Could not set iPv4 forwarding. Unknown error occurred")
    finally:
        sudo_writefile('/etc/sysctl.conf', '\n'.join(old_conf))
def darwin(self, brew=False):
    """Install MongoDB on a Darwin system (Mac).

    :param brew: if True install via Homebrew, otherwise download and
                 unpack the tarball referenced by self.mongo_code
    """
    if brew:
        print("mongo installer via brew")
        if not self.dryrun:
            Brew.install("mongodb")
            path = Shell.which("mongod")
            SystemPath.add(f"{path}")
    else:
        script = f"""
mkdir -p {self.mongo_path}
mkdir -p {self.mongo_home}
mkdir -p {self.mongo_log}
curl -o /tmp/mongodb.tgz {self.mongo_code}
tar -zxvf /tmp/mongodb.tgz -C {self.local}/mongo --strip 1
"""
        # BUG FIX: the script was printed unconditionally AND again in the
        # dryrun branch; print it once.
        print(script)
        if not self.dryrun:
            Script.run(script)
            # BUG FIX: the original chained .format(**self.data) onto an
            # already-interpolated f-string, which is a no-op at best.
            SystemPath.add(f"{self.mongo_home}/bin")
            # THIS IS BROKEN AS ITS A SUBPROCESS? '. ~/.bashrc'
            Console.info("MongoDB installation successful!")
def network_list(self, kwargs=None):
    """List docker networks known to the REST service.

    :param kwargs: unused; kept for interface compatibility
    :returns: None
    :rtype: NoneType
    """
    try:
        scode, networks = Rest.get('Network')
    except docker.errors.APIError as e:
        Console.error(e.explanation)
        return
    if len(networks) == 0:
        Console.info("No network exist")
        return
    # Build a 1-indexed dict of rows for the table printer.
    e = {}
    for n, network in enumerate(networks, start=1):
        d = {}
        d['Ip'] = network['Ip']
        d['Id'] = network['Id']
        d['Name'] = network['Name']
        d['Containers'] = network['Containers']
        e[n] = d
    Console.ok(str(Printer.dict_table(e, order=['Ip', 'Id', 'Name', 'Containers'])))
def backup(self, device=None, to_file=None, blocksize="4m"):
    """Back up a block device to an image file with dd, showing progress.

    :param device: source block device, e.g. /dev/disk2
    :param to_file: destination image file path (will be expanded)
    :param blocksize: dd block size
    """
    if device is None:
        Console.error("Device must have a value")
        # BUG FIX: previously fell through and ran dd with if=None
        return
    if to_file is None:
        Console.error("To file must have a value")
        return
    Sudo.password()
    to_file = path_expand(to_file)  # was expanded twice in the original
    size = SDCard.size(device)
    #
    # speed up burning on MacOS
    #
    if device.startswith("/dev/disk"):
        device = device.replace("/dev/disk", "/dev/rdisk")
    command = f"sudo dd if={device} bs={blocksize} |" \
              f' tqdm --bytes --total {size} --ncols 80|' \
              f"dd of={to_file} bs={blocksize}"
    print()
    Console.info(command)
    print()
    os.system(command)
def add_secgroup(self, name=None, description=None):
    """
    Adds named security group

    :param name: name of the security group to be added/created
    :param description: description of the security group; if None a
        default description with user name and time of creation is used
    :return: None
    """
    response = self.ec2_client.describe_vpcs()
    vpc_id = response.get('Vpcs', [{}])[0].get('VpcId', '')
    if description is None:
        # typo fix: "crated" -> "created"
        description = f'security group created at {str(datetime.utcnow())} by {self.user}'
    if self._is_group_name_valid(name):
        try:
            response = self.ec2_client.create_security_group(
                GroupName=name,
                Description=description,
                VpcId=vpc_id)
            security_group_id = response['GroupId']
            Console.ok(
                f'Security Group Created {security_group_id} in vpc {vpc_id}'
            )
        except ClientError as e:
            # surface the underlying reason instead of discarding it
            Console.info(f"Security group can't be added: {e}")
def images_list(self, kwargs=None):
    """List docker images known to the REST service.

    :param kwargs: unused; kept for interface compatibility
    :returns: None
    :rtype: NoneType
    """
    try:
        scode, images = Rest.get('Image')
    except docker.errors.APIError as e:
        Console.error(e.explanation)
        return
    if len(images) == 0:
        Console.info("No images exist")
        return
    e = {}
    for n, image in enumerate(images, start=1):
        d = {}
        d['Ip'] = image['Ip']
        d['Id'] = image['Id']
        # Untagged images only carry a digest.
        if image['RepoTags'] is None:
            d['Repository'] = image['RepoDigests'][0]
        else:
            d['Repository'] = image['RepoTags'][0]
        # Converting the size to GB
        d['Size(GB)'] = round(image['Size'] / float(1 << 30), 2)
        e[n] = d
    Console.ok(str(Printer.dict_table(e, order=['Ip', 'Id', 'Repository', 'Size(GB)'])))
def register(self, cloud='azure'):
    """Register Azure credentials in the cloudmesh.yaml file.

    Opens a browser for `az login`, reads the subscription and tenant
    ids, creates a service principal, and stores the resulting ids and
    secret via self.set_credentials.

    :param cloud: cloud name (kept for interface compatibility)
    """
    # Opens web browser and prompts user to login.
    # BUG FIX: Popen returned immediately, so `az account show` could run
    # before the login completed; run() waits for the login to finish.
    subprocess.run('az login', shell=True)
    # once user has logged in, collects account information,
    # such as subscription id
    accountInfo = subprocess.getoutput('az account show')
    print(accountInfo)
    azoutput = self.azString2Dict(accountInfo)
    AZURE_SUBSCRIPTION_ID = azoutput['id']
    AZURE_TENANT_ID = azoutput['tenantId']
    # WARNING: FOLLOWING CODE WILL RENDER OLD SECRET KEY INVALID
    azAppKeyStr = subprocess.getoutput(
        'az ad sp create-for-rbac --name http://cloudmesh')
    azAppKeyDict = self.azString2Dict(azAppKeyStr)
    AZURE_APPLICATION_ID = azAppKeyDict['appId']
    AZURE_SECRET_KEY = azAppKeyDict['password']
    creds = {
        'AZURE_SUBSCRIPTION_ID': AZURE_SUBSCRIPTION_ID,
        'AZURE_TENANT_ID': AZURE_TENANT_ID,
        'AZURE_APPLICATION_ID': AZURE_APPLICATION_ID,
        'AZURE_SECRET_KEY': AZURE_SECRET_KEY
    }
    self.set_credentials(creds)
    Console.info(
        "Azure Tenant, Subscription, Application, and Secret Key have been added to the cloudmesh.yaml file."
    )
def remove_secgroup(self, name=None):
    """Remove the named security group.

    :param name: name of the security group to delete
    :return: None
    """
    try:
        response = self.ec2_client.delete_security_group(GroupName=name)
        VERBOSE(response)
    except ClientError:
        # the expected failure mode is a non-existent group
        Console.info(
            "Security group couldn't be removed as it doesn't exist")
def wait(self, vm=None, interval=None, timeout=None):
    """Wait until the vm is reachable over ssh.

    :param vm: dict describing the vm; vm['name'] is used for reporting
    :param interval: seconds between probes (default 10)
    :param timeout: overall limit in seconds (default 360)
    :return: True once the vm answers, False on timeout
    """
    name = vm['name']
    if interval is None:
        # if interval is too low, OS will block your ip (I think)
        interval = 10
    if timeout is None:
        timeout = 360
    Console.info(f"waiting for instance {name} to be reachable: Interval: {interval}, Timeout: {timeout}")
    timer = 0
    while timer < timeout:
        sleep(interval)
        timer += interval
        try:
            self.list()  # refresh provider state; result intentionally unused
            r = self.ssh(vm=vm, command='echo IAmReady').strip()
            if 'IAmReady' in r:
                return True
        except Exception:
            # narrowed from a bare except; ssh simply may not be up yet
            pass
    return False
def node_refresh(self):
    """Refresh the swarm node table from every manager host.

    :returns: None
    :rtype: NoneType
    """
    filter = {}
    filter['Swarmmode'] = 'Manager'
    scode, hosts = Rest.get('Host', filter)
    # Accumulate nodes across ALL managers; the original reset this list
    # per host so only the last manager's nodes were posted.
    data = []
    for host in hosts:
        os.environ["DOCKER_HOST"] = host['Ip'] + ":" + str(host['Port'])
        manager_ip = os.environ["DOCKER_HOST"].split(':')[0]
        self.client = docker.from_env()
        try:
            nodes = self.client.nodes.list()
        except docker.errors.APIError as err:
            Console.error(err.explanation)
            return
        if len(nodes) == 0:
            # BUG FIX: original concatenated a str with a list (TypeError)
            Console.info("No nodes exist for manager " + manager_ip)
            continue
        n = 1
        e = {}
        for node in nodes:
            d = {}
            node_dict = node.__dict__['attrs']
            d['Id'] = node_dict['ID']
            data.append(node_dict)
            d['Role'] = node_dict['Spec']['Role']
            d['Status'] = node_dict['Status']['State']
            if d['Role'] == 'manager':
                d['Ip'] = node_dict['ManagerStatus']['Addr'].split(':')[0]
                d['Manager Ip'] = ''
            else:
                d['Ip'] = node_dict['Status']['Addr']
                d['Manager Ip'] = manager_ip
            d['Host Name'] = node_dict['Description']['Hostname']
            e[n] = d
            n = n + 1
        Console.ok(
            str(
                Printer.dict_table(e, order=[
                    'Ip', 'Host Name', 'Id', 'Role', 'Status', 'Manager Ip'
                ])))
    Rest.delete('Node')
    Rest.post('Node', data)
def install(repos, dev=False):
    """pip install each repository, timing each install with StopWatch.

    :param repos: iterable of repository names; duplicates removed while
        preserving order
    :param dev: if True run an editable install (pip install -e .) from
        inside each repository directory
    """
    repos = OrderedSet(repos)
    for repo in repos:
        StopWatch.start("install " + repo)
        if dev:
            banner(f"dev install -> {repo}")
            Console.info(f"pip install -e .: {repo}")
            print()
            os.chdir(repo)
            os.system("pip install -e .")
            os.chdir("../")
            StopWatch.status("install " + repo, True)
        else:
            banner(f"install -> {repo}")
            Console.info(f"pip install: {repo}")
            print()
            # f-string for consistency with the rest of the function
            os.system(f"pip install {repo}")
            StopWatch.status("install " + repo, True)
        StopWatch.stop("install " + repo)
def write_key(self, key=None, path=None, mode="wb", force=False):
    """Write a key to a file, creating parent directories as needed.

    @param key: The data being written, a key instance
    @param path: Full path including file name
    @param mode: The mode for writing to the file
    @param force: Automatically overwrite file if it exists
    """
    # Refuse to proceed without data or a destination.
    if key is None:
        Console.error("Key is empty")
        sys.exit()
    if path is None:
        Console.error("Path is empty")
        sys.exit()
    # Make sure the target directory exists.
    parent = os.path.dirname(path)
    if not os.path.exists(parent):
        Shell.mkdir(parent)
    # Unless forced, ask the user before clobbering an existing file.
    if not force and os.path.exists(path):
        Console.info(f"{path} already exists")
        ovwr_r = yn_choice(message=f"overwrite {path}?", default="N")
        if not ovwr_r:
            Console.info(f"Not overwriting {path}. Quitting")
            sys.exit()
    writefd(filename=path, content=key, mode=mode)
def add_secgroup_rule(self,
                      name=None,  # group name
                      port=None,
                      protocol=None,
                      ip_range=None):
    """
    Add rule to named security group

    :param name: name of the security group to which the rule is added
    :param port: "min:max" port range for the TCP and UDP protocols
    :param protocol: the ip protocol, e.g. tcp
    :param ip_range: CIDR range the rule applies to
    :return: None
    """
    try:
        portmin, portmax = port.split(":")
    except (ValueError, AttributeError):
        # ValueError: string has no ":"; AttributeError: port is None
        # (the default) — the original crashed on that case.
        # -1 means "all ports" to EC2.
        portmin = -1
        portmax = -1
    try:
        data = self.ec2_client.authorize_security_group_ingress(
            GroupName=name,
            IpPermissions=[
                {'IpProtocol': protocol,
                 'FromPort': int(portmin),
                 'ToPort': int(portmax),
                 'IpRanges': [{'CidrIp': ip_range}]},
            ])
        Console.ok(f'Ingress Successfully Set as {data}')
    except ClientError:
        Console.info("Rule couldn't be added to security group")
def load(self, name):
    """
    Load cached model

    :param name: model name, used as a regex against the cache entries
    :return: dict with model_name, model_object and deserialization
        duration
    :raises ValueError: if no cache entry matches name
    """
    cm = CmDatabase()
    # USER env variable is required by StopWatch
    if os.getenv('USER'):
        # Do nothing
        VERBOSE("USER env variable is already defined")
    else:
        os.environ['USER'] = '******'
    test = cm.find(cloud="local",
                   kind="cache",
                   query={"name": {'$regex': name}})
    if not test:
        # BUG FIX: previously an opaque IndexError; report the miss.
        Console.error(f"No cache entry found for model: {name}")
        raise ValueError(f"No cache entry found for model: {name}")
    cached_file = test[0]['cached_file']
    Console.info(f"Loading serialized model: {cached_file}")
    StopWatch.start(f"Load pickle {name}")
    deserialized_model = self._load_pickle(cached_file)
    StopWatch.stop(f"Load pickle {name}")
    time_taken = StopWatch.get(f"Load pickle {name}")
    # TODO: figure out how useful the duration is and return to client if required
    deserialized_model_dict = {
        "model_name": name,
        "model_object": deserialized_model,
        "duration": time_taken  # duration of deserialization function
    }
    return deserialized_model_dict
def check_exists(path):
    """Ask the user whether an existing file may be overwritten.

    Exits the program if the file exists and the user declines;
    returns normally otherwise.
    """
    if not os.path.exists(path):
        return
    Console.info(f"{path} already exists")
    if not yn_choice(message=f"overwrite {path}?", default="N"):
        Console.info(f"Not overwriting {path}. Quitting")
        sys.exit()
def info(cls, msg, debug=True):
    """
    prints an info msg.

    :param msg: the message
    :param debug: only print when True
    :return: None
    """
    if not debug:
        return
    Console.info(msg)
def _purge_dnsmasq(cls):
    """Remove dnsmasq via apt-get with --purge and --auto-remove."""
    Console.info("Purging dnsmasq. Please wait...")
    # os.system (rather than cls._system) streams apt-get's output to
    # the user while the purge runs.
    os.system('sudo apt-get --purge --auto-remove remove -y dnsmasq')
    Console.info("Removed dnsmasq")
def test(cls, hosts=None):
    """
    Command to test the connectivity to specified hostnames.

    First checks to see if the device has even registered with the
    bridge in ~/.cloudmesh/bridge/info which is a directory created
    when the bridge is first created.

    :param hosts: List of hostnames to check for connectivity.
    :return: None
    """
    try:
        info = sudo_readfile('~/.cloudmesh/bridge/info')
        index = info.index(cls.lease_bookmark)
        leases = info[index + 1:]
    except Exception:  # narrowed from a bare except
        Console.error(
            'Could not find information on bridge. Has the bridge been created yet?'
        )
        sys.exit(1)
    # Hosts that have obtained a lease from the bridge at some point.
    known_hosts = []
    for lease in leases:
        lease_info = lease.split()
        host = lease_info[4]
        known_hosts.append(host)
    count = 0
    hosts_to_check = len(hosts)
    Console.info("Beginning ping checks...")
    for host in hosts:
        if host not in known_hosts:
            Console.warning(
                f'{host} is not among the known devices of the bridge. No connection from {host} has been received before. Skipping test'
            )
            hosts_to_check -= 1
        else:
            Console.info(f'Pinging Host {host}. Please wait ...')
            status, stdout = cls._system(f'ping -c 1 {host}',
                                         warnuser=False,
                                         both=True)
            # A zero exit status from ping means the host answered.
            if status != 0:
                # typo fix: "successfuly" -> "successfully"
                message = textwrap.dedent(f"""
                Could not ping {host} successfully.
                Rebooting {host} may fix the problem.
                Manually ping {host} for more details.
                """)
                Console.warning(message)
            else:
                count += 1
                Console.ok(f"Successfully pinged {host}")
    Console.ok(
        f'Completed checks. {count} out of {hosts_to_check} checks succeeded.'
    )
def __init__(self, inventory=None, names=None, ssid=None,
             wifipassword=None, force_inv=False, country=None):
    """Build or load the cluster inventory and collect burn settings.

    :param inventory: path to an inventory yaml; if None one is derived
        from the expanded names and created when missing
    :param names: parameter-expandable host names (manager + workers)
    :param ssid: wifi SSID; auto-detected when not given
    :param wifipassword: wifi password; prompted for when an SSID is set
    :param force_inv: recreate the inventory even if it exists
    :param country: wifi country code; defaults to the system locale
    """
    # Get inventory
    self.ssid = ssid
    self.wifipasswd = wifipassword
    if inventory is None:
        names = Parameter.expand(names)
        manager, workers = Host.get_hostnames(names)
        if workers:
            worker_base_name = ''.join(
                [i for i in workers[0] if not i.isdigit()])
        cluster_name = manager or worker_base_name
        inventory = path_expand(
            f'~/.cloudmesh/inventory-{cluster_name}.yaml')
        if not os.path.exists(inventory) or force_inv:
            if not manager:
                Console.error("No inventory found. Can not create an "
                              "inventory without a "
                              "manager.")
                # BUG FIX: __init__ must return None; `return ""` raises
                # TypeError at construction time.
                return
            Inventory.build_default_inventory(filename=inventory,
                                              manager=manager,
                                              workers=workers)
        inv = Inventory(filename=inventory)
    else:
        inv = Inventory(filename=inventory)
    self.inventory = inv
    # Find managers and workers
    managers = inv.find(service='manager')
    if len(managers) > 0:
        if not self.ssid:
            self.ssid = get_ssid()
            if self.ssid == "":
                Console.info('Could not determine SSID, skipping wifi '
                             'config')
                self.ssid = None
        if not self.wifipasswd and self.ssid:
            self.wifipasswd = getpass(f"Using --SSID={self.ssid}, please "
                                      f"enter wifi password:")
    workers = inv.find(service='worker')
    # No inherenet need to distinguish the configs by service
    configs = managers + workers
    # Create dict for them for easy lookup
    self.configs = dict((config['host'], config) for config in configs)
    self.get_images()
    self.country = country if country else Shell.locale().upper()
def write(self, filename=None):
    """
    Writes the user-data content to filename. Safe write for files on
    mounted partitions: the content is staged in a temp file and copied
    into place with sudo tee.

    :param filename: destination path (required)
    :raises Exception: if filename is None
    """
    if filename is None:
        raise Exception('filename arg supplied is None')
    tmp_location = path_expand('~/.cloudmesh/user-data.tmp')
    writefile(tmp_location, str(self))
    # BUG FIX: the filename argument was validated but never used.
    Console.info(f'Writing to {filename}')
    Shell.run(f'cat {tmp_location} | sudo tee {filename}')
def wait(self, time=None):
    """
    This function waiting for volume to be updated

    :param time: time to wait in seconds; None is treated as 0
    :return: False
    """
    Console.info("waiting for volume to be updated")
    # BUG FIX: sleep(None) raises TypeError with the default argument.
    sleep(time or 0)
    return False
def service_refresh(self, kwargs=None):
    """Refresh the docker swarm service table from every manager host.

    :param kwargs: optional filters forwarded to docker services.list
    :returns: None
    :rtype: NoneType
    """
    # BUG FIX: services.list(**kwargs) with the default None raises
    # TypeError ("argument after ** must be a mapping").
    kwargs = kwargs or {}
    filter = {}
    filter['Swarmmode'] = 'Manager'
    scode, hosts = Rest.get('Host', filter)
    n = 1
    e = {}
    data = []
    for host in hosts:
        os.environ["DOCKER_HOST"] = host['Ip'] + ":" + str(host['Port'])
        host_ip = os.environ["DOCKER_HOST"].split(':')[0]
        self.client = docker.from_env()
        try:
            services = self.client.services.list(**kwargs)
        except docker.errors.APIError as err:
            Console.error(err.explanation)
            return
        if len(services) == 0:
            Console.info("No service exist on host" + host['Ip'])
            continue
        for servicem in services:
            d = {}
            service = servicem.__dict__['attrs']
            service['Ip'] = host_ip
            data.append(service)
            d['Ip'] = service['Ip']
            d['Id'] = service['ID']
            d['Name'] = service['Spec']['Name']
            d['Image'] = service['Spec']['TaskTemplate']['ContainerSpec'][
                'Image']
            d['Replicas'] = service['Spec']['Mode']['Replicated'][
                'Replicas']
            e[n] = d
            n = n + 1
    Rest.delete('Service')
    if len(data) == 0:
        Console.info("No service exist ")
        return
    Rest.post('Service', data)
    Console.ok(
        str(
            Printer.dict_table(
                e, order=['Ip', 'Id', 'Name', 'Image', 'Replicas'])))
def images_refresh(self, kwargs=None):
    """Refresh the docker image table for all registered hosts.

    :param kwargs: optional filters forwarded to docker images.list
    :returns: None
    :rtype: NoneType
    """
    # BUG FIX: images.list(**kwargs) with the default None raises
    # TypeError ("argument after ** must be a mapping").
    kwargs = kwargs or {}
    scode, hosts = Rest.get('Host', {})
    n = 1
    e = {}
    data = []
    filter = {}
    for host in hosts:
        os.environ["DOCKER_HOST"] = host['Ip'] + ":" + str(host['Port'])
        filter = {}
        filter['Ip'] = os.environ["DOCKER_HOST"].split(':')[0]
        # Workers do not hold the images we track; clear their records.
        if host['Swarmmode'] == 'Worker':
            Rest.delete('Image', filter)
            continue
        self.client = docker.from_env()
        try:
            images = self.client.images.list(**kwargs)
        except docker.errors.APIError as err:
            Console.error(err.explanation)
            return
        if len(images) == 0:
            Console.info("No images exist on host " + host['Ip'])
            continue
        for imagem in images:
            image = imagem.__dict__['attrs']
            image['Ip'] = filter['Ip']
            data.append(image)
            d = {}
            d['Ip'] = filter['Ip']
            d['Id'] = image['Id']
            # Untagged images only carry a digest.
            if image['RepoTags'] is None:
                d['Repository'] = image['RepoDigests'][0]
            else:
                d['Repository'] = image['RepoTags'][0]
            # Converting the size to GB
            d['Size(GB)'] = round(image['Size'] / float(1 << 30), 2)
            e[n] = d
            n = n + 1
        # Replace this host's records before posting the fresh data.
        Rest.delete('Image', filter)
    Rest.post('Image', data)
    Console.ok(
        str(
            Printer.dict_table(
                e, order=['Ip', 'Id', 'Repository', 'Size(GB)'])))
def usage(self):
    """Print disk usage of / and current memory availability."""
    hdd = psutil.disk_usage('/')
    gib = 2 ** 30
    Console.info("Disk Space")
    print(f" Total: {hdd.total / gib:.0f} GiB")
    print(f" Used: {hdd.used / gib:.0f} GiB")
    print(f" Free: {hdd.free / gib:.0f} GiB")
    mem = psutil.virtual_memory()
    total = mem.total >> 30
    available = mem.available >> 30
    print(f" Memory: {available}GB free from {total}GB")
def sha256sum(filename=None):
    """Return the sha256 hex digest of the given file.

    The file is read in BUF_SIZE chunks so arbitrarily large files can
    be verified without loading them into memory.
    """
    Console.info("Verifying sha256")
    digest = hashlib.sha256()
    with open(filename, 'rb') as f:
        # iter() with a b'' sentinel yields chunks until EOF.
        for chunk in iter(lambda: f.read(BUF_SIZE), b''):
            digest.update(chunk)
    return digest.hexdigest()
def _set_iptables(cls, flush=True):
    """
    Sets up routing in iptables and saves rules for eventual reboot

    :flush: Remove all rules for related iptables
    :return:
    """
    # Forward traffic between the private and external interfaces, and
    # masquerade outbound packets on the external interface (NAT).
    cmd1 = f"sudo iptables -A FORWARD -i {cls.priv_interface} -o {cls.ext_interface} -j ACCEPT"
    cmd2 = f"sudo iptables -A FORWARD -i {cls.ext_interface} -o {cls.priv_interface} -m state --state ESTABLISHED,RELATED -j ACCEPT"
    cmd3 = f"sudo iptables -t nat -A POSTROUTING -o {cls.ext_interface} -j MASQUERADE"
    if cls.dryrun:
        Console.info("DRYRUN: Setting iptables")
        print(f"DRYRUN: {cmd1}")
        print(f"DRYRUN: {cmd2}")
        print(f"DRYRUN: {cmd3}")
    else:
        if flush:
            # Start from a clean slate so repeated runs don't stack rules.
            cls._system('sudo iptables --flush')
            cls._system('sudo iptables -t nat --flush')
        cls._system(cmd1)
        cls._system(cmd2)
        cls._system(cmd3)
        # Save rules
        cls._system('sudo sh -c "iptables-save > /etc/iptables.ipv4.nat"')
        # Restore rules if reboot
        old_conf = sudo_readfile('/etc/rc.local', trim=True)
        # Exit 0 should be in the last entry of old_conf
        # Add ip table restoration lines just above
        restore_command = "iptables-restore < /etc/iptables.ipv4.nat"
        if old_conf[-1] != 'exit 0':
            Console.error(
                'rc.local does not have exit 0 in last line. Contingency not handled in this version. Cannot enable iPv4 forwarding at this time'
            )
            raise NotImplementedError
        if restore_command not in old_conf:
            # Append then swap the last two lines so 'exit 0' remains final.
            old_conf.append(restore_command)
            old_conf[-1], old_conf[-2] = old_conf[-2], old_conf[
                -1]  # Places 'exit 0' below our restore_command
            sudo_writefile('/etc/rc.local', '\n'.join(old_conf) + '\n')
        else:
            Console.warning(f"iptables restoration already in rc.local")
def network_refresh(self, kwargs=None):
    """Refresh the docker network table for all registered hosts.

    :param kwargs: optional filters forwarded to docker networks.list
    :returns: None
    :rtype: NoneType
    """
    # BUG FIX: networks.list(**kwargs) with the default None raises
    # TypeError ("argument after ** must be a mapping").
    kwargs = kwargs or {}
    scode, hosts = Rest.get('Host', {})
    n = 1
    e = {}
    data = []
    filter = {}
    for host in hosts:
        os.environ["DOCKER_HOST"] = host['Ip'] + ":" + str(host['Port'])
        filter = {}
        filter['Ip'] = os.environ["DOCKER_HOST"].split(':')[0]
        # Workers do not hold the networks we track; clear their records.
        if host['Swarmmode'] == 'Worker':
            Rest.delete('Network', filter)
            continue
        self.client = docker.from_env()
        try:
            networks = self.client.networks.list(**kwargs)
        except docker.errors.APIError as err:
            Console.error(err.explanation)
            return
        if len(networks) == 0:
            # message fix: missing space before the host ip
            Console.info("No network exist " + host['Ip'])
            continue
        for networkm in networks:
            network = networkm.__dict__['attrs']
            network['Ip'] = filter['Ip']
            data.append(network)
            d = {}
            d['Ip'] = filter['Ip']
            d['Id'] = network['Id']
            d['Name'] = network['Name']
            d['Containers'] = network['Containers']
            e[n] = d
            n = n + 1
        # Replace this host's records before posting the fresh data.
        Rest.delete('Network', filter)
    Rest.post('Network', data)
    Console.ok(
        str(Printer.dict_table(e, order=['Ip', 'Id', 'Name', 'Containers'])))
def ssh_config_add(self, label, host, user):
    """Append a Host entry to ~/.ssh/config unless the label exists.

    :param label: the Host alias to add
    :param host: the hostname or ip for the alias
    :param user: the login user for the alias
    """
    config = readfile("~/.ssh/config")
    if f"Host {label}" in config:
        Console.warning(f"{label} is already in ~/.ssh/config")
        return
    entry = textwrap.dedent(f"""
    Host {label}
         Hostname {host}
         User {user}
         IdentityFile ~/.ssh/id_rsa
    """)
    Console.info(f"adding {label} to ~/.ssh/config\n" +
                 textwrap.indent(entry, prefix="    "))
    writefile("~/.ssh/config", config + entry)