def setup(self, master, hosts):
    """Install and configure Spark on the master and the worker hosts.

    The master is set up via named install scripts. Each worker then
    receives the setup script, the spark tarball, and the bashrc fragment
    via scp (run serially from this machine), after which the setup shell
    script is executed on all workers in parallel over ssh.

    :param master: host name of the master node
    :param hosts: list of worker host names
    :return: None
    """
    #
    # SETUP MASTER
    #
    if self.master:
        banner(f"Setting up master {master}")
        self.run_script(name="spark.prereqs", hosts=master)
        self.run_script(name="spark.download.spark", hosts=master)
        self.run_script(name="spark.install", hosts=master)
        self.run_script(name="spark.bashrc.master", hosts=master)
    #
    # SETUP WORKER
    #
    if self.workers:
        # Copy setup files to each worker and execute the shell program
        # in parallel
        workers = ', '.join(hosts)
        banner(f"Setup Workers: {workers}")
        command = "sh ~/spark-setup-worker.sh"
        jobSet = JobSet("spark_worker_install", executor=JobSet.ssh)
        for host in hosts:
            banner(f"Setting up worker {host}")
            # NOTE(review): paths assume the cloudmesh checkout lives in
            # /home/pi on the machine running this command -- TODO confirm.
            command1 = f"scp /home/pi/cm/cloudmesh-pi-cluster/cloudmesh/pi/cluster/spark/bin/spark-setup-worker.sh pi@{host}:"
            print(command1)
            os.system(command1)
            # sparkout.tgz is expected to exist from the master download step
            command2 = f"scp ~/sparkout.tgz pi@{host}:"
            print(command2)
            os.system(command2)
            command3 = f"scp /home/pi/cm/cloudmesh-pi-cluster/cloudmesh/pi/cluster/spark/bin/spark-bashrc.txt pi@{host}:"
            print(command3)
            os.system(command3)
            # Queue the actual setup script execution for this worker.
            jobSet.add({"name": host, "host": host, "command": command})
            # Register the worker in the master's slaves configuration.
            self.update_slaves(host)
        jobSet.run(parallel=len(hosts))
        jobSet.Print()
    return
def run(self, beginLine=None, endLine=None):
    """Execute each executable line of self.script on all hosts in parallel.

    The script is dedented and optionally windowed between beginLine and
    endLine. Blank lines and comment lines are skipped; every remaining
    line is run on every host as its own JobSet.

    :param beginLine: optional marker of the first line to execute
    :param endLine: optional marker of the last line to execute
    """
    # Normalize the script text and split it into individual lines.
    self.script = textwrap.dedent(str(self.script))
    lines = self.script.splitlines()

    # Narrow the line window if begin and/or end markers were given.
    if beginLine is not None and endLine is not None:
        lines = Shell.find_lines_between(lines, beginLine, endLine)
    elif beginLine is not None:
        lines = Shell.find_lines_from(lines, beginLine)
    elif endLine is not None:
        lines = Shell.find_lines_to(lines, endLine)

    for line in lines:
        stripped = line.strip()
        # Guard clause: nothing to execute on blanks or comments.
        if not stripped or stripped.startswith("#"):
            continue
        # One JobSet per executable line.
        job = JobSet(self.name, executor=self.executor)
        # Split off an optional trailing "# tag:" annotation.
        if "# tag:" in line:
            line, tag = line.split("# tag:", 1)
            tag = tag.strip()
            line = line.strip()
        # Fan the command out to every host and run them in parallel.
        for host in self.hosts:
            job.add({"name": host, "host": host, "command": line})
        job.run(parallel=len(self.hosts))
        job.Print()
def uninstall(self, hosts):
    """Remove MongoDB and the pymongo driver from all hosts.

    Removes the mongodb package (including a purge of its configuration),
    autoremoves unused dependencies, and uninstalls pymongo, on every host
    in parallel over ssh.

    :param hosts: list of host names to uninstall from
    :return: None
    """
    # Renamed from "mongo_install" so the job label matches the action.
    job_set = JobSet("mongo_uninstall", executor=JobSet.ssh)
    # BUG FIX: pip uninstall needs -y, otherwise it prompts for
    # confirmation and hangs when run non-interactively over ssh.
    command = """
    sudo apt-get -y remove mongodb
    sudo apt-get -y remove --purge mongodb
    sudo apt-get autoremove
    python3 -m pip uninstall -y pymongo
    """
    for host in hosts:
        job_set.add({"name": host, "host": host, "command": command})
    job_set.run(parallel=len(hosts))
    job_set.Print()
    # typo fixed: "Succesfully" -> "Successfully"
    banner("MongoDB Removed Successfully")
    return
def stop(self, master, workers):
    """Stop the MongoDB service on the master and on all workers.

    :param master: master host name; when set, the service is stopped on
        the local machine (this code is assumed to run on the master)
    :param workers: parameter expression for the worker host names
    :return: 1 on completion (legacy success indicator kept for callers)
    """
    hosts = Parameter.expand(workers)
    command = "sudo service mongodb stop"
    # Stop mongo on the master.
    # NOTE(review): os.system runs locally, so this only stops the
    # master's service when executed on the master itself -- TODO confirm.
    if master is not None:
        os.system(command)
    # Stop mongo on each worker in parallel over ssh.
    if hosts is not None:
        job_set = JobSet("mongo_stop", executor=JobSet.ssh)
        for host in hosts:
            job_set.add({"name": host, "host": host, "command": command})
        job_set.run(parallel=len(hosts))
        # job_set.Print()
    # typo fixed: "succesfully" -> "successfully"
    banner("MongoDB service stopped successfully")
    return 1
def run(self, script=None, name="script", host=None, executor=JobSet.ssh,
        **kwargs):
    """Run a multi-line script line by line on a single host.

    The script is dedented and formatted with the supplied keyword
    parameters (plus the resolved host). Blank lines and comments are
    skipped; each remaining line becomes a job, optionally labeled by a
    trailing "# tag:" annotation, and the jobs run sequentially.

    :param script: the script text; an error is reported if None
    :param name: label recorded with each job
    :param host: target host; defaults to the local host name
    :param executor: the JobSet executor to use (default: ssh)
    :param kwargs: substitution parameters for the script's placeholders
    :return: the JobSet job dict, or None if no script was given
    """
    # Prepare parameters
    if script is None:
        Console.error("The script is not defined, found None as content")
        return
    host = host or os.uname()[1]
    parameters = dotdict(**kwargs) if kwargs else dotdict({})
    parameters.host = host

    # Prepare script.
    self.script = textwrap.dedent(str(script))
    # BUG FIX: format with the assembled parameters (which include the
    # resolved host) rather than the raw kwargs, so a "{host}" placeholder
    # in the script is substituted instead of raising KeyError.
    self.script = self.script.format(**parameters)
    lines = self.script.splitlines()

    # Add script to jobset and run.
    jobs = JobSet("onejob", executor=executor)
    counter = 1
    for line in lines:
        stripped = line.strip()
        if stripped.startswith("#") or stripped == "":
            continue
        # Default tag is the line counter; a "# tag:" suffix overrides it.
        tag = counter
        if "# tag:" in line:
            line, tag = line.split("# tag:", 1)
            tag = tag.strip()
            line = line.strip()
        jobs.add({
            "script": name,
            "name": tag,
            "tag": tag,
            "line": counter,
            "host": host,
            "counter": counter,
            "command": line
        })
        counter = counter + 1
    jobs.run(parallel=1)
    self.job = jobs.job
    return self.job
def ssh_keygen(hosts=None,
               filename="~/.ssh/id_rsa",
               username=None,
               processors=3,
               dryrun=False,
               verbose=True):
    """Generate ssh key pairs on the specified hosts.

    Runs ssh-keygen on every host in parallel over ssh and prints the
    results. NOTE: despite the "<<< y" answer piped into it, ssh-keygen
    may still ask before overwriting an existing key.

    :param hosts: parameter expression for the host names
    :param filename: path of the key file to generate on each host
    :param username: currently unused
    :param processors: currently unused
    :param dryrun: currently unused
    :param verbose: currently unused
    :return: None
    """
    hosts = Parameter.expand(hosts)
    # BUG FIX: the key filename was missing from the command; without -f
    # pointing at the requested file, ssh-keygen prompts for a location.
    command = f'ssh-keygen -q -N "" -f {filename} <<< y'
    jobSet = JobSet("ssh_keygen", executor=JobSet.ssh)
    for host in hosts:
        jobSet.add({"name": host, "host": host, "command": command})
    jobSet.run(parallel=len(hosts))
    jobSet.Print()
def delete(self, master=None, hosts=None):
    """Drain and delete worker nodes from the kubernetes (k3s) cluster.

    For each worker, runs "kubectl drain" followed by "kubectl delete
    node". The kubectl commands execute on the current host (which is
    assumed to have cluster access), not on the worker being removed.

    :param master: currently unused; see TODO below
    :param hosts: list of worker host names to delete
    """
    # Delete master node
    # TODO - k3s does not allow you to delete it's parent node
    #if master is not None:
    #    banner(f"Deleting Master Node: {master}")
    #
    #    command = Installer.oneline(f"""
    #    sudo kubectl delete {master}
    #    """)
    #    jobSet = JobSet("kubernetes_master_delete", executor=JobSet.ssh)
    #    jobSet.add({"name": self.hostname, "host": master, "command": command})
    #    jobSet.run()

    # Uninstall workers
    if hosts is not None:
        workers = ', '.join(hosts)
        banner(f"Deleting Worker Node(s): {workers}")
        jobSet = JobSet("kubernetes_worker_delete", executor=JobSet.ssh)
        for host in hosts:
            # Drain first so pods are evicted, then remove the node object.
            command = Installer.oneline(f"""
            sudo kubectl drain {host} --ignore-daemonsets --delete-local-data;
            sudo kubectl delete node {host}
            """)
            # NOTE(review): every job is labeled with the local hostname
            # because kubectl runs here, so job names are not unique per
            # worker -- TODO confirm this is intentional.
            jobSet.add({
                "name": self.hostname,
                "host": self.hostname,
                "command": command
            })
        jobSet.run(parallel=len(hosts))
        print(
            "Workers:",
            Printer.write(jobSet.array(),
                          order=[
                              "name", "command", "status", "stdout",
                              "returncode"
                          ]))
def uninstall(self, master, hosts):
    """Remove Spark from the master node and from every worker.

    The master teardown is delegated to a named uninstall script; each
    worker runs its local uninstall shell script in parallel over ssh.

    :param master: host name of the master node
    :param hosts: list of worker host names
    :return: None
    """
    #
    # Uninstall MASTER
    #
    if self.master:
        banner(f"Uninstall Master: {master}")
        self.run_script(name="spark.uninstall.master", hosts=master)
    #
    # Uninstall WORKER(S)
    #
    if self.workers:
        banner(f"Uninstall Workers: {hosts}")
        worker_command = "sh ~/spark-uninstall-worker.sh"
        jobs = JobSet("spark_worker_uninstall", executor=JobSet.ssh)
        for worker in hosts:
            jobs.add({"name": worker, "host": worker, "command": worker_command})
        jobs.run(parallel=len(hosts))
        # jobs.Print()
        banner("Successfully uninstalled workers")
    return
def install(self, hosts):
    """Install MongoDB and pymongo on every host, then push the config file.

    Package installation runs on all hosts in parallel over ssh; the
    configuration file is then copied to each host serially with scp.

    :param hosts: list of host names to install on
    """
    command = """
    sudo apt update
    sudo apt -y upgrade
    sudo apt -y install mongodb
    sudo apt-get -y install python3-pip
    python3 -m pip install pymongo
    mkdir -p /home/pi/data/db
    """
    # Install mongodb on all Pis
    installer = JobSet("mongo_install", executor=JobSet.ssh)
    for host in hosts:
        installer.add({"name": host, "host": host, "command": command})
    installer.run(parallel=len(hosts))
    installer.Print()

    # Copy config files to all hosts.
    # Have not used JobSet here. Copying serially...
    for host in hosts:
        copy_command = f"scp /home/pi/cm/cloudmesh-pi-cluster/cloudmesh/pi/cluster/mongo/bin/local_setup.cfg pi@{host}:/home/pi/mongodb.conf"
        os.system(copy_command)
    banner("MongoDB Setup and Configuration Complete")
def enable_containers(self, filename="/boot/cmdline.txt", hosts=None):
    """Enable kernel cgroup support for containers on master and workers.

    Appends the cgroup flags to the kernel command line file on the master
    (via add_to_file) and, if workers are given, stages the flag line in a
    temp file, copies it to each worker, and appends it remotely unless it
    is already present.

    :param filename: kernel command line file to modify
        (default: /boot/cmdline.txt)
    :param hosts: optional list of worker host names
    """
    line = "cgroup_enable=cpuset cgroup_memory=1 cgroup_enable=memory"
    warning = "Your master already has containers enabled"
    # Master: append the flags locally (add_to_file warns if present).
    self.add_to_file(filename, line, warning)
    if hosts is not None:
        # Create tmp file on master with enable_containers line
        source = "~/container_tmp.txt"
        command = f'echo "{line}" >> {source}'
        os.system(command)
        # Copy over temporary file with container line
        for host in hosts:
            command = "scp {0} pi@{1}:~/".format(source, host)
            os.system(command)
        # Check if workers already have line and if not, append it.
        # BUG FIX: the copy destination placeholder was missing; the
        # target is now consistently the "filename" parameter instead of
        # a hard-coded /boot/cmdline.txt (same default behavior).
        tmp_cmdline = "~/cmdline.txt"
        command = f"""
        if grep -q "{line}" '{filename}'
        then
        rm {source};
        else
        cp {filename} {tmp_cmdline};
        cat {source} >> {tmp_cmdline};
        sudo cp {tmp_cmdline} {filename};
        rm {tmp_cmdline} {source};
        fi"""
        jobSet = JobSet("kubernetes_worker_enable_containers",
                        executor=JobSet.ssh)
        for host in hosts:
            jobSet.add({"name": host, "host": host, "command": command})
        jobSet.run()
        # jobSet.Print()
        # Delete tmp file on master
        command = f"rm {source}"
        os.system(command)
def uninstall(self, master=None, hosts=None):
    """Run the k3s uninstall script on master and workers, then delete nodes.

    :param master: optional master host name to uninstall from
    :param hosts: optional list of worker host names to uninstall from
    """
    # Uninstall master
    if master is not None:
        banner(f"Uninstalling Master {master}")
        master_jobs = JobSet("kubernetes_master_uninstall",
                             executor=JobSet.ssh)
        master_jobs.add({"name": master,
                         "host": master,
                         "command": "/usr/local/bin/k3s-uninstall.sh"})
        master_jobs.run()
        master_jobs.Print()
    # Uninstall workers
    if hosts is not None:
        banner(f"Uninstalling Workers: {', '.join(hosts)}")
        # previously /usr/local/bin/k3s-agent-uninstall.sh; the generic
        # uninstall script is used instead
        worker_jobs = JobSet("kubernetes_worker_uninstall",
                             executor=JobSet.ssh)
        for worker in hosts:
            worker_jobs.add({"name": worker,
                             "host": worker,
                             "command": "/usr/local/bin/k3s-uninstall.sh"})
        worker_jobs.run(parallel=len(hosts))
        worker_jobs.Print()
        # Remove the node objects from the cluster as well.
        self.delete(hosts=hosts)
def join(self, master=None, hosts=None):
    """Join worker hosts to the k3s cluster served by the master.

    Retrieves the cluster join token from the master, then starts
    "k3s agent" on every worker pointed at the master's eth0 IP address,
    and finally shows the cluster view.

    :param master: host name of the k3s server node
    :param hosts: list of worker host names to join
    """
    if hosts is not None and master is not None:
        banner(f"Get Join Token From {master}")
        # The server join token lives in this well-known file on the master.
        command = "sudo cat /var/lib/rancher/k3s/server/node-token"
        jobSet = JobSet("kubernetes_token_retrieval", executor=JobSet.ssh)
        jobSet.add({"name": master, "host": master, "command": command})
        jobSet.run()
        # NOTE(review): the decoded stdout likely carries a trailing
        # newline; it sits at the end of the agent command so it appears
        # harmless -- TODO confirm.
        token = jobSet.array()[0]['stdout'].decode('UTF-8')

        # TODO - Currently get ip address from eth0 instead of using hostname
        # because worker does not know master's host name
        ip = self.get_master_ip_address('eth0')
        command = f"""
        sudo k3s agent --server https://{ip}:{self.port} --token {token}"""
        jobSet = JobSet("kubernetes_worker_join", executor=JobSet.ssh)
        for host in hosts:
            jobSet.add({"name": host, "host": host, "command": command})
        jobSet.run(parallel=len(hosts))
        jobSet.Print()
        self.view()
def install(self, master=None, hosts=None, step=None):
    """Install k3s on the master and workers and join workers to the cluster.

    Optionally runs the enable_containers preparation step first, installs
    the k3s server on the master, retrieves the join token, installs k3s
    on the workers, and starts "k3s agent" on each worker.

    :param master: master host name or parameter expression
    :param hosts: list of worker host names
    :param step: optional preparation step; "enable_containers" supported
    :raises ValueError: when workers are given without a master
    """
    # Setup containers on master
    if master is None and hosts:
        Console.error("You must specify a master to set up nodes")
        raise ValueError
    if step is not None:
        # BUG FIX: exact comparison; the previous "step in '...'" matched
        # any substring of the step name (e.g. "able").
        if step == 'enable_containers':
            self.enable_containers(hosts=hosts)

    # Install K3S on the master
    if master is not None:
        if not isinstance(master, list):
            master = Parameter.expand(master)
        #
        # TODO - bug I should be able to run this even if I am not on master
        #
        banner(f"Setup Master: {master[0]}")
        command = Installer.oneline(f"""
        curl -sfL https://get.k3s.io | sh -
        """)
        jobSet = JobSet("kubernetes_master_install", executor=JobSet.ssh)
        jobSet.add({
            "name": self.hostname,
            "host": master[0],
            "command": command
        })
        jobSet.run()
        result_stdout = jobSet.array()[0]['stdout'].decode('UTF-8')
        if "No change detected" in result_stdout:
            print()
            Console.info("Service already running")

    # Setup workers and join to cluster
    #
    # TODO - bug I should be able to run this even if I am not on master
    #
    if hosts is not None:
        if master is not None:
            banner(f"Get Join Token From {master[0]}")
            command = "sudo cat /var/lib/rancher/k3s/server/node-token"
            jobSet = JobSet("kubernetes_token_retrieval",
                            executor=JobSet.ssh)
            jobSet.add({
                "name": master[0],
                "host": master[0],
                "command": command
            })
            jobSet.run()
            token = jobSet.array()[0]['stdout'].decode('UTF-8')

            # Install kubernetes to workers
            workers = ', '.join(hosts)
            banner(f"Setup Workers: {workers}")
            # BUG FIX: fetch the install script over https (was http),
            # consistent with the master install above.
            command = "curl -sfL https://get.k3s.io | sh -"
            jobSet = JobSet("kubernetes_worker_install",
                            executor=JobSet.ssh)
            for host in hosts:
                jobSet.add({
                    "name": host,
                    "host": host,
                    "command": command
                })
            jobSet.run(parallel=len(hosts))
            jobSet.Print()

            # Join workers to master's cluster
            # TODO - Currently get ip address from eth0 instead of using
            # hostname because worker does not know master's host name
            ip = self.get_master_ip_address('eth0')
            command = f"""sudo k3s agent --server https://{ip}:{self.port} --token {token}"""
            # TODO - This currently does not work, command runs fine but
            # "k3s agent" having trouble creating node.
            jobSet = JobSet("kubernetes_worker_join", executor=JobSet.ssh)
            for host in hosts:
                jobSet.add({
                    "name": host,
                    "host": host,
                    "command": command
                })
            jobSet.run(parallel=len(hosts))
            jobSet.Print()
        else:
            Console.warning(
                "You must have the master parameter set to burn workers")

    # Print created cluster
    self.view()
def do_host(self, args, arguments):
    """
    ::

      Usage:
          host scp NAMES SOURCE DESTINATION [--dryrun]
          host ssh NAMES COMMAND [--dryrun] [--output=FORMAT]
          host config NAMES [IPS] [--user=USER] [--key=PUBLIC]
          host check NAMES [--user=USER] [--key=PUBLIC]
          host key create NAMES [--user=USER] [--dryrun] [--output=FORMAT]
          host key list NAMES [--output=FORMAT]
          host key gather NAMES [--authorized_keys] [FILE]
          host key scatter USERNAME NAMES FILE

      This command does some useful things.

      Arguments:
          FILE   a file name

      Options:
          --dryrun         shows what would be done but does not execute
          --output=FORMAT  the format of the output

      Description:

          host scp NAMES SOURCE DESTINATION

            TBD

          host ssh NAMES COMMAND

            runs the command on all specified hosts
            Example:
                ssh red[01-10] "uname -a"

          host key create NAMES

            create a ~/.ssh/id_rsa and id_rsa.pub on all hosts specified
            Example:
                ssh key create "red[01-10]"

          host key list NAMES

            list all id_rsa.pub keys from all hosts specified
            Example:
                ssh key list red[01-10]

          host key gather HOSTS FILE

            gathers all keys from file FILE including the one from
            localhost.

                ssh key gather "red[01-10]" keys.txt

          host key scatter USERNAME HOSTS FILE

            copies all keys from file FILE to authorized_keys on all
            hosts, but also makes sure that the users ~/.ssh/id_rsa.pub
            key is in the file.

            1) adds ~/.id_rsa.pub to the FILE only if its not already in it
            2) removes all duplicated keys

            Example:
                ssh key scatter pi "red[01-10]" ~/keys.txt

          host key scp NAMES FILE

            copies all keys from file FILE to authorized_keys on all
            hosts but also makes sure that the users ~/.ssh/id_rsa.pub
            key is in the file and removes duplicates, e.g. it calls fix
            before upload

            Example:
                ssh key list red[01-10] > pubkeys.txt
                ssh key scp red[01-10] pubkeys.txt

          host config NAMES IPS [--user=USER] [--key=PUBLIC]

            generates an ssh config file template that can be added to
            your .ssh/config file

            Example:
                cms host config "red,red[01-03]" "198.168.1.[1-4]" --user=pi

          host check NAMES [--user=USER] [--key=PUBLIC]

            This command is used to test if you can login to the
            specified hosts. It executes the hostname command and
            compares it. It provides a table with a success column

            cms host check "red,red[01-03]"

                +-------+---------+--------+
                | host  | success | stdout |
                +-------+---------+--------+
                | red   | True    | red    |
                | red01 | True    | red01  |
                | red02 | True    | red02  |
                | red03 | True    | red03  |
                +-------+---------+--------+
    """

    # Render results either as a table (default) or as a raw pprint dump.
    def _print(results):
        arguments.output = arguments.output or 'table'
        if arguments.output == 'table':
            print(
                Printer.write(results,
                              order=['host', 'success', 'stdout']))
        else:
            pprint(results)

    map_parameters(arguments, 'dryrun', 'output', 'user')
    dryrun = arguments.dryrun
    if dryrun:
        VERBOSE(arguments)

    # Dispatch on the parsed subcommand flags.
    if arguments.scp and not arguments.key:
        # host scp NAMES SOURCE DESTINATION
        destinations = Parameter.expand(arguments.DESTINATION)
        source = arguments.SOURCE
        results_key = Host.scp(source, destinations, output="lines")
    elif arguments.ssh:
        # host ssh NAMES COMMAND
        names = Parameter.expand(arguments.NAMES)
        # print (names)
        results = Host.ssh(hosts=names, command=arguments.COMMAND)
        _print(results)
    elif arguments.key and arguments.create:
        # host key create NAMES
        Host.ssh_keygen(hosts=arguments.NAMES,
                        username=arguments.user,
                        dryrun=dryrun)
        #_print(results)
    elif arguments.key and arguments.list:
        # host key list NAMES: cat each host's public key over ssh
        names = Parameter.expand(arguments.NAMES)
        jobSet = JobSet("key_list", executor=JobSet.ssh)
        command = "cat .ssh/id_rsa.pub"
        for host in names:
            jobSet.add({"name": host, "host": host, "command": command})
        jobSet.run(parallel=len(names))
        #results = Host.ssh(hosts=names,
        #                   command='cat .ssh/id_rsa.pub',
        #                   username=arguments.user)
        jobSet.Print()
    elif arguments.key and arguments.gather:
        # host key gather NAMES [FILE]: collect keys, write to FILE or stdout
        output = Host.gather_keys(username=arguments.user,
                                  hosts=arguments.NAMES,
                                  filename="~/.ssh/id_rsa.pub",
                                  key="~/.ssh/id_rsa",
                                  processors=3,
                                  dryrun=False)
        if arguments.FILE:
            filename = path_expand(arguments.FILE)
            directory = os.path.dirname(filename)
            if directory:
                Shell.mkdir(directory)
            with open(filename, "w") as f:
                f.write(output)
        else:
            print(output)
    elif arguments.key and arguments.scatter:
        # host key scatter USERNAME NAMES FILE: push keys to authorized_keys
        names = arguments.NAMES
        file = arguments.get("FILE")
        if not os.path.isfile(file):
            Console.error("The file does not exist")
            return ""
        Host.put(hosts=names,
                 source=file,
                 username=arguments.USERNAME,
                 destination=".ssh/authorized_keys")
        #_print(result)
    elif arguments.config:
        # host config NAMES [IPS]: emit an ssh config template
        key = arguments.key or "~/.ssh/id_rsa.pub"
        result = Host.config(hosts=arguments.NAMES,
                             ips=arguments.IPS,
                             username=arguments.user,
                             key=key)
        print(result)
    elif arguments.check:
        # host check NAMES: login test -- hostname output must echo the name
        key = arguments.key or "~/.ssh/id_rsa.pub"
        result = Host.check(hosts=arguments.NAMES,
                            username=arguments.user,
                            key=key)
        for entry in result:
            entry['success'] = entry['stdout'] == entry['host']
        _print(result)
    return ""
def gather_keys(username=None,
                hosts=None,
                filename="~/.ssh/id_rsa.pub",
                key="~/.ssh/id_rsa",
                processors=3,
                dryrun=False):
    """Collect the public keys of the specified hosts plus localhost.

    Reads .ssh/id_rsa.pub from every host in parallel over ssh, drops
    hosts whose command failed, and prepends the local machine's public
    key read from filename.

    :param username: currently unused
    :param hosts: parameter expression for the host names
    :param filename: local public-key file prepended to the result
    :param key: currently unused
    :param processors: currently unused
    :param dryrun: currently unused
    :return: the keys joined by newlines, terminated with a newline
    """
    names = Parameter.expand(hosts)
    jobSet = JobSet("ssh_keygen", executor=JobSet.ssh)
    command = "cat .ssh/id_rsa.pub"
    for host in names:
        jobSet.add({"name": host, "host": host, "command": command})
    # BUG FIX: parallelism must match the expanded host list; "hosts" may
    # be an unexpanded parameter string whose len() is its character count.
    jobSet.run(parallel=len(names))

    results_key = []
    # BUG FIX: the loop variable used to shadow the "key" parameter.
    for entry in jobSet.array():
        stdout = entry['stdout'].decode('UTF-8')
        # Skip hosts where the ssh command could not be executed.
        if "Command could not run" not in stdout:
            results_key.append(stdout)

    filename = path_expand(filename)
    # Synthetic result record representing the local machine's key.
    localkey = {
        'host': "localhost",
        'command': [''],
        'execute': "",
        'stdout': readfile(filename).strip(),
        'stderr': None,
        'returncode': True,
        'success': True,
        'date': DateTime.now()
    }

    # local key first, then the gathered remote keys
    output = [localkey['stdout']] + results_key
    output = '\n'.join(output) + "\n"
    return output
def put(hosts=None,
        source=None,
        destination=None,
        username=None,
        key="~/.ssh/id_rsa.pub",
        shell=False,
        processors=3,
        dryrun=False,
        verbose=False):
    """Copy a local file to a (possibly privileged) destination on many hosts.

    Two phases: first scp the file to a randomly named temporary location
    under ~/.cloudmesh on every host, then, over ssh, "sudo cp" it to the
    final destination and remove the temporary copy.

    :param hosts: parameter expression for the target hosts
    :param source: local path of the file to copy
    :param destination: final path on each host (written with sudo)
    :param username: remote user name used for ssh/scp
    :param key: path of the public key file (expanded, currently unused
        afterwards)
    :param shell: currently unused
    :param processors: currently unused
    :param dryrun: currently unused
    :param verbose: currently unused
    """
    hosts = Parameter.expand(hosts)
    key = path_expand(key)

    # Copy over your file to a temporary destination on each host
    jobSet = JobSet("put_tmp_file", executor=JobSet.execute)
    master = os.uname()[1]
    # NOTE(review): a random suffix in 1000-2000 gives limited collision
    # protection for concurrent puts -- TODO confirm acceptable.
    randomInt = random.randint(1000, 2000)
    tmp_destination = "~/.cloudmesh/tmp_file-" + str(randomInt)
    for host in hosts:
        # mkdir ensures ~/.cloudmesh exists before scp writes into it.
        command = f"""ssh {username}@{host} "mkdir -p ~/.cloudmesh/"; scp {source} {username}@{host}:{tmp_destination}"""
        jobSet.add({"name": host, "host": master, "command": command})
    jobSet.run(parallel=len(hosts))
    jobSet.Print()

    # Overwrite the destination file with your temporary file
    jobSet = JobSet("put_overwrite", executor=JobSet.ssh)
    command = f"sudo cp {tmp_destination} {destination}; rm -f {tmp_destination}"
    for host in hosts:
        jobSet.add({"name": host, "host": host, "command": command})
    jobSet.run(parallel=len(hosts))
    jobSet.Print()